ffmpeg-2.8.5

git-svn-id: svn://kolibrios.org@6147 a494cfbc-eb01-0410-851d-a64ba20cac60
Author: Sergey Semyonov (Serge)
Date:   2016-02-05 22:08:02 +00:00
parent a08f61ddb9
commit a4b787f4b8
5429 changed files with 1356786 additions and 0 deletions


@@ -0,0 +1,164 @@
OBJS += x86/constants.o
# subsystems
OBJS-$(CONFIG_AC3DSP) += x86/ac3dsp_init.o
OBJS-$(CONFIG_AUDIODSP) += x86/audiodsp_init.o
OBJS-$(CONFIG_BLOCKDSP) += x86/blockdsp_init.o
OBJS-$(CONFIG_BSWAPDSP) += x86/bswapdsp_init.o
OBJS-$(CONFIG_DCT) += x86/dct_init.o
OBJS-$(CONFIG_FDCTDSP) += x86/fdctdsp_init.o
OBJS-$(CONFIG_FFT) += x86/fft_init.o
OBJS-$(CONFIG_FLAC_DECODER) += x86/flacdsp_init.o
OBJS-$(CONFIG_FLAC_ENCODER) += x86/flacdsp_init.o
OBJS-$(CONFIG_FMTCONVERT) += x86/fmtconvert_init.o
OBJS-$(CONFIG_H263DSP) += x86/h263dsp_init.o
OBJS-$(CONFIG_H264CHROMA) += x86/h264chroma_init.o
OBJS-$(CONFIG_H264DSP) += x86/h264dsp_init.o
OBJS-$(CONFIG_H264PRED) += x86/h264_intrapred_init.o
OBJS-$(CONFIG_H264QPEL) += x86/h264_qpel.o
OBJS-$(CONFIG_HPELDSP) += x86/hpeldsp_init.o
OBJS-$(CONFIG_LLAUDDSP) += x86/lossless_audiodsp_init.o
OBJS-$(CONFIG_LLVIDDSP) += x86/lossless_videodsp_init.o
OBJS-$(CONFIG_HUFFYUVDSP) += x86/huffyuvdsp_init.o
OBJS-$(CONFIG_HUFFYUVENCDSP) += x86/huffyuvencdsp_mmx.o
OBJS-$(CONFIG_IDCTDSP) += x86/idctdsp_init.o
OBJS-$(CONFIG_LPC) += x86/lpc.o
OBJS-$(CONFIG_ME_CMP) += x86/me_cmp_init.o
OBJS-$(CONFIG_MPEGAUDIODSP) += x86/mpegaudiodsp.o
OBJS-$(CONFIG_MPEGVIDEO) += x86/mpegvideo.o \
x86/mpegvideodsp.o
OBJS-$(CONFIG_MPEGVIDEOENC) += x86/mpegvideoenc.o \
x86/mpegvideoencdsp_init.o
OBJS-$(CONFIG_PIXBLOCKDSP) += x86/pixblockdsp_init.o
OBJS-$(CONFIG_QPELDSP) += x86/qpeldsp_init.o
OBJS-$(CONFIG_RV34DSP) += x86/rv34dsp_init.o
OBJS-$(CONFIG_VIDEODSP) += x86/videodsp_init.o
OBJS-$(CONFIG_VP3DSP) += x86/vp3dsp_init.o
OBJS-$(CONFIG_VP8DSP) += x86/vp8dsp_init.o
OBJS-$(CONFIG_XMM_CLOBBER_TEST) += x86/w64xmmtest.o
# decoders/encoders
OBJS-$(CONFIG_AAC_DECODER) += x86/aacpsdsp_init.o \
x86/sbrdsp_init.o
OBJS-$(CONFIG_ADPCM_G722_DECODER) += x86/g722dsp_init.o
OBJS-$(CONFIG_ADPCM_G722_ENCODER) += x86/g722dsp_init.o
OBJS-$(CONFIG_APNG_DECODER) += x86/pngdsp_init.o
OBJS-$(CONFIG_CAVS_DECODER) += x86/cavsdsp.o
OBJS-$(CONFIG_DCA_DECODER) += x86/dcadsp_init.o
OBJS-$(CONFIG_DNXHD_ENCODER) += x86/dnxhdenc_init.o
OBJS-$(CONFIG_HEVC_DECODER) += x86/hevcdsp_init.o
OBJS-$(CONFIG_JPEG2000_DECODER) += x86/jpeg2000dsp_init.o
OBJS-$(CONFIG_MLP_DECODER) += x86/mlpdsp_init.o
OBJS-$(CONFIG_MPEG4_DECODER) += x86/xvididct_init.o
OBJS-$(CONFIG_PNG_DECODER) += x86/pngdsp_init.o
OBJS-$(CONFIG_PRORES_DECODER) += x86/proresdsp_init.o
OBJS-$(CONFIG_PRORES_LGPL_DECODER) += x86/proresdsp_init.o
OBJS-$(CONFIG_RV40_DECODER) += x86/rv40dsp_init.o
OBJS-$(CONFIG_SVQ1_ENCODER) += x86/svq1enc_init.o
OBJS-$(CONFIG_TRUEHD_DECODER) += x86/mlpdsp_init.o
OBJS-$(CONFIG_TTA_DECODER) += x86/ttadsp_init.o
OBJS-$(CONFIG_V210_DECODER) += x86/v210-init.o
OBJS-$(CONFIG_V210_ENCODER) += x86/v210enc_init.o
OBJS-$(CONFIG_VC1_DECODER) += x86/vc1dsp_init.o
OBJS-$(CONFIG_VORBIS_DECODER) += x86/vorbisdsp_init.o
OBJS-$(CONFIG_VP6_DECODER) += x86/vp6dsp_init.o
OBJS-$(CONFIG_VP9_DECODER) += x86/vp9dsp_init.o
OBJS-$(CONFIG_WEBP_DECODER) += x86/vp8dsp_init.o
# GCC inline assembly optimizations
# subsystems
MMX-OBJS-$(CONFIG_DIRAC_DECODER) += x86/dirac_dwt.o
MMX-OBJS-$(CONFIG_FDCTDSP) += x86/fdct.o
MMX-OBJS-$(CONFIG_IDCTDSP) += x86/simple_idct.o
# decoders/encoders
MMX-OBJS-$(CONFIG_SNOW_DECODER) += x86/snowdsp.o
MMX-OBJS-$(CONFIG_SNOW_ENCODER) += x86/snowdsp.o
MMX-OBJS-$(CONFIG_VC1_DECODER) += x86/vc1dsp_mmx.o
# YASM optimizations
YASM-OBJS += x86/deinterlace.o
# subsystems
YASM-OBJS-$(CONFIG_AC3DSP) += x86/ac3dsp.o
YASM-OBJS-$(CONFIG_AUDIODSP) += x86/audiodsp.o
YASM-OBJS-$(CONFIG_BLOCKDSP) += x86/blockdsp.o
YASM-OBJS-$(CONFIG_BSWAPDSP) += x86/bswapdsp.o
YASM-OBJS-$(CONFIG_DCT) += x86/dct32.o
YASM-OBJS-$(CONFIG_DIRAC_DECODER) += x86/diracdsp_mmx.o x86/diracdsp_yasm.o\
x86/dwt_yasm.o
YASM-OBJS-$(CONFIG_DNXHD_ENCODER) += x86/dnxhdenc.o
YASM-OBJS-$(CONFIG_FFT) += x86/fft.o
YASM-OBJS-$(CONFIG_FLAC_DECODER) += x86/flacdsp.o
ifdef CONFIG_GPL
YASM-OBJS-$(CONFIG_FLAC_ENCODER) += x86/flac_dsp_gpl.o
endif
YASM-OBJS-$(CONFIG_FMTCONVERT) += x86/fmtconvert.o
YASM-OBJS-$(CONFIG_H263DSP) += x86/h263_loopfilter.o
YASM-OBJS-$(CONFIG_H264CHROMA) += x86/h264_chromamc.o \
x86/h264_chromamc_10bit.o
YASM-OBJS-$(CONFIG_H264DSP) += x86/h264_deblock.o \
x86/h264_deblock_10bit.o \
x86/h264_idct.o \
x86/h264_idct_10bit.o \
x86/h264_weight.o \
x86/h264_weight_10bit.o
YASM-OBJS-$(CONFIG_H264PRED) += x86/h264_intrapred.o \
x86/h264_intrapred_10bit.o
YASM-OBJS-$(CONFIG_H264QPEL) += x86/h264_qpel_8bit.o \
x86/h264_qpel_10bit.o \
x86/fpel.o \
x86/qpel.o
YASM-OBJS-$(CONFIG_HPELDSP) += x86/fpel.o \
x86/hpeldsp.o
YASM-OBJS-$(CONFIG_HUFFYUVDSP) += x86/huffyuvdsp.o
YASM-OBJS-$(CONFIG_IDCTDSP) += x86/idctdsp.o
YASM-OBJS-$(CONFIG_LLAUDDSP) += x86/lossless_audiodsp.o
YASM-OBJS-$(CONFIG_LLVIDDSP) += x86/lossless_videodsp.o
YASM-OBJS-$(CONFIG_ME_CMP) += x86/me_cmp.o
YASM-OBJS-$(CONFIG_MPEGAUDIODSP) += x86/imdct36.o
YASM-OBJS-$(CONFIG_MPEGVIDEOENC) += x86/mpegvideoencdsp.o
YASM-OBJS-$(CONFIG_PIXBLOCKDSP) += x86/pixblockdsp.o
YASM-OBJS-$(CONFIG_QPELDSP) += x86/qpeldsp.o \
x86/fpel.o \
x86/qpel.o
YASM-OBJS-$(CONFIG_RV34DSP) += x86/rv34dsp.o
YASM-OBJS-$(CONFIG_VIDEODSP) += x86/videodsp.o
YASM-OBJS-$(CONFIG_VP3DSP) += x86/vp3dsp.o
YASM-OBJS-$(CONFIG_VP8DSP) += x86/vp8dsp.o \
x86/vp8dsp_loopfilter.o
# decoders/encoders
YASM-OBJS-$(CONFIG_AAC_DECODER) += x86/aacpsdsp.o \
x86/sbrdsp.o
YASM-OBJS-$(CONFIG_ADPCM_G722_DECODER) += x86/g722dsp.o
YASM-OBJS-$(CONFIG_ADPCM_G722_ENCODER) += x86/g722dsp.o
YASM-OBJS-$(CONFIG_APNG_DECODER) += x86/pngdsp.o
YASM-OBJS-$(CONFIG_DCA_DECODER) += x86/dcadsp.o
YASM-OBJS-$(CONFIG_HEVC_DECODER) += x86/hevc_mc.o \
x86/hevc_deblock.o \
x86/hevc_idct.o \
x86/hevc_res_add.o \
x86/hevc_sao.o
YASM-OBJS-$(CONFIG_JPEG2000_DECODER) += x86/jpeg2000dsp.o
YASM-OBJS-$(CONFIG_MLP_DECODER) += x86/mlpdsp.o
YASM-OBJS-$(CONFIG_MPEG4_DECODER) += x86/xvididct.o
YASM-OBJS-$(CONFIG_PNG_DECODER) += x86/pngdsp.o
YASM-OBJS-$(CONFIG_PRORES_DECODER) += x86/proresdsp.o
YASM-OBJS-$(CONFIG_PRORES_LGPL_DECODER) += x86/proresdsp.o
YASM-OBJS-$(CONFIG_RV40_DECODER) += x86/rv40dsp.o
YASM-OBJS-$(CONFIG_SVQ1_ENCODER) += x86/svq1enc.o
YASM-OBJS-$(CONFIG_TRUEHD_DECODER) += x86/mlpdsp.o
YASM-OBJS-$(CONFIG_TTA_DECODER) += x86/ttadsp.o
YASM-OBJS-$(CONFIG_V210_ENCODER) += x86/v210enc.o
YASM-OBJS-$(CONFIG_V210_DECODER) += x86/v210.o
YASM-OBJS-$(CONFIG_VC1_DECODER) += x86/vc1dsp.o
YASM-OBJS-$(CONFIG_VORBIS_DECODER) += x86/vorbisdsp.o
YASM-OBJS-$(CONFIG_VP6_DECODER) += x86/vp6dsp.o
YASM-OBJS-$(CONFIG_VP9_DECODER) += x86/vp9intrapred.o \
x86/vp9itxfm.o \
x86/vp9lpf.o \
x86/vp9mc.o
YASM-OBJS-$(CONFIG_WEBP_DECODER) += x86/vp8dsp.o


@@ -0,0 +1,215 @@
;******************************************************************************
;* SIMD optimized MPEG-4 Parametric Stereo decoding functions
;*
;* Copyright (C) 2015 James Almer
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION_RODATA
ps_p1m1p1m1: dd 0, 0x80000000, 0, 0x80000000
SECTION .text
;*************************************************************************
;void ff_ps_add_squares_<opt>(float *dst, const float (*src)[2], int n);
;*************************************************************************
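; Rough scalar C equivalent of the SIMD loop below (accumulates the power of
; each complex sample; names follow the prototype above):
;     for (i = 0; i < n; i++)
;         dst[i] += src[i][0] * src[i][0] + src[i][1] * src[i][1];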
%macro PS_ADD_SQUARES 1
cglobal ps_add_squares, 3, 3, %1, dst, src, n
.loop:
movaps m0, [srcq]
movaps m1, [srcq+mmsize]
mulps m0, m0
mulps m1, m1
%if cpuflag(sse3)
haddps m0, m1
%else
movaps m3, m0
movaps m4, m1
shufps m3, m3, q0301
shufps m4, m4, q0301
addps m0, m3
addps m1, m4
shufps m0, m1, q2020
%endif
addps m0, [dstq]
movaps [dstq], m0
add dstq, mmsize
add srcq, mmsize*2
sub nd, mmsize/4
jg .loop
REP_RET
%endmacro
INIT_XMM sse
PS_ADD_SQUARES 3
INIT_XMM sse3
PS_ADD_SQUARES 5
;*******************************************************************
;void ff_ps_mul_pair_single_sse(float (*dst)[2], float (*src0)[2],
; float *src1, int n);
;*******************************************************************
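; Roughly equivalent scalar C (each complex src0[i] is scaled by the real
; factor src1[i]; names follow the prototype above):
;     for (i = 0; i < n; i++) {
;         dst[i][0] = src0[i][0] * src1[i];
;         dst[i][1] = src0[i][1] * src1[i];
;     }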
INIT_XMM sse
cglobal ps_mul_pair_single, 4, 5, 4, dst, src1, src2, n
xor r4q, r4q
.loop:
movu m0, [src1q+r4q]
movu m1, [src1q+r4q+mmsize]
mova m2, [src2q]
mova m3, m2
unpcklps m2, m2
unpckhps m3, m3
mulps m0, m2
mulps m1, m3
mova [dstq+r4q], m0
mova [dstq+r4q+mmsize], m1
add src2q, mmsize
add r4q, mmsize*2
sub nd, mmsize/4
jg .loop
REP_RET
;***********************************************************************
;void ff_ps_stereo_interpolate_sse3(float (*l)[2], float (*r)[2],
; float h[2][4], float h_step[2][4],
; int len);
;***********************************************************************
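; Sketch of the per-sample math implemented below; h0..h3 stand for h[0][0..3]
; and hs0..hs3 for h_step[0][0..3]:
;     for (n = 0; n < len; n++) {
;         h0 += hs0; h1 += hs1; h2 += hs2; h3 += hs3;
;         l_re = l[n][0]; l_im = l[n][1];
;         r_re = r[n][0]; r_im = r[n][1];
;         l[n][0] = h0 * l_re + h2 * r_re;   l[n][1] = h0 * l_im + h2 * r_im;
;         r[n][0] = h1 * l_re + h3 * r_re;   r[n][1] = h1 * l_im + h3 * r_im;
;     }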
INIT_XMM sse3
cglobal ps_stereo_interpolate, 5, 5, 6, l, r, h, h_step, n
movaps m0, [hq]
movaps m1, [h_stepq]
cmp nd, 0
jle .ret
shl nd, 3
add lq, nq
add rq, nq
neg nq
align 16
.loop:
addps m0, m1
movddup m2, [lq+nq]
movddup m3, [rq+nq]
movaps m4, m0
movaps m5, m0
unpcklps m4, m4
unpckhps m5, m5
mulps m2, m4
mulps m3, m5
addps m2, m3
movsd [lq+nq], m2
movhps [rq+nq], m2
add nq, 8
jl .loop
.ret:
REP_RET
;*******************************************************************
;void ff_ps_hybrid_analysis_<opt>(float (*out)[2], float (*in)[2],
; const float (*filter)[8][2],
; int stride, int n);
;*******************************************************************
%macro PS_HYBRID_ANALYSIS_LOOP 3
movu %1, [inq+mmsize*%3]
movu m1, [inq+mmsize*(5-%3)+8]
%if cpuflag(sse3)
pshufd %2, %1, q2301
pshufd m4, m1, q0123
pshufd m1, m1, q1032
pshufd m2, [filterq+nq+mmsize*%3], q2301
addsubps %2, m4
addsubps %1, m1
%else
mova m2, [filterq+nq+mmsize*%3]
mova %2, %1
mova m4, m1
shufps %2, %2, q2301
shufps m4, m4, q0123
shufps m1, m1, q1032
shufps m2, m2, q2301
xorps m4, m7
xorps m1, m7
subps %2, m4
subps %1, m1
%endif
mulps %2, m2
mulps %1, m2
%if %3
addps m3, %2
addps m0, %1
%endif
%endmacro
%macro PS_HYBRID_ANALYSIS 0
cglobal ps_hybrid_analysis, 5, 5, 8, out, in, filter, stride, n
%if cpuflag(sse3)
%define MOVH movsd
%else
%define MOVH movlps
%endif
shl strided, 3
shl nd, 6
add filterq, nq
neg nq
mova m7, [ps_p1m1p1m1]
align 16
.loop:
PS_HYBRID_ANALYSIS_LOOP m0, m3, 0
PS_HYBRID_ANALYSIS_LOOP m5, m6, 1
PS_HYBRID_ANALYSIS_LOOP m5, m6, 2
%if cpuflag(sse3)
pshufd m3, m3, q2301
xorps m0, m7
hsubps m3, m0
pshufd m1, m3, q0020
pshufd m3, m3, q0031
addps m1, m3
movsd m2, [inq+6*8]
%else
mova m1, m3
mova m2, m0
shufps m1, m1, q2301
shufps m2, m2, q2301
subps m1, m3
addps m2, m0
unpcklps m3, m1, m2
unpckhps m1, m2
addps m1, m3
movu m2, [inq+6*8] ; faster than movlps and no risk of overread
%endif
movss m3, [filterq+nq+8*6]
SPLATD m3
mulps m2, m3
addps m1, m2
MOVH [outq], m1
add outq, strideq
add nq, 64
jl .loop
REP_RET
%endmacro
INIT_XMM sse
PS_HYBRID_ANALYSIS
INIT_XMM sse3
PS_HYBRID_ANALYSIS


@@ -0,0 +1,55 @@
/*
* SIMD optimized MPEG-4 Parametric Stereo decoding functions
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "libavutil/x86/cpu.h"
#include "libavutil/attributes.h"
#include "libavcodec/aacpsdsp.h"
void ff_ps_add_squares_sse (float *dst, const float (*src)[2], int n);
void ff_ps_add_squares_sse3 (float *dst, const float (*src)[2], int n);
void ff_ps_mul_pair_single_sse (float (*dst)[2], float (*src0)[2],
float *src1, int n);
void ff_ps_hybrid_analysis_sse (float (*out)[2], float (*in)[2],
const float (*filter)[8][2],
int stride, int n);
void ff_ps_hybrid_analysis_sse3(float (*out)[2], float (*in)[2],
const float (*filter)[8][2],
int stride, int n);
void ff_ps_stereo_interpolate_sse3(float (*l)[2], float (*r)[2],
float h[2][4], float h_step[2][4],
int len);
av_cold void ff_psdsp_init_x86(PSDSPContext *s)
{
int cpu_flags = av_get_cpu_flags();
if (EXTERNAL_SSE(cpu_flags)) {
s->add_squares = ff_ps_add_squares_sse;
s->mul_pair_single = ff_ps_mul_pair_single_sse;
s->hybrid_analysis = ff_ps_hybrid_analysis_sse;
}
if (EXTERNAL_SSE3(cpu_flags)) {
s->add_squares = ff_ps_add_squares_sse3;
s->stereo_interpolate[0] = ff_ps_stereo_interpolate_sse3;
s->hybrid_analysis = ff_ps_hybrid_analysis_sse3;
}
}


@@ -0,0 +1,552 @@
;*****************************************************************************
;* x86-optimized AC-3 DSP functions
;* Copyright (c) 2011 Justin Ruggles
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION_RODATA
; 16777216.0f - used in ff_float_to_fixed24()
pf_1_24: times 4 dd 0x4B800000
; used in ff_ac3_compute_mantissa_size()
cextern ac3_bap_bits
pw_bap_mul1: dw 21846, 21846, 0, 32768, 21846, 21846, 0, 32768
pw_bap_mul2: dw 5, 7, 0, 7, 5, 7, 0, 7
; used in ff_ac3_extract_exponents()
cextern pd_1
pd_151: times 4 dd 151
; used in ff_apply_window_int16()
pb_revwords: SHUFFLE_MASK_W 7, 6, 5, 4, 3, 2, 1, 0
pd_16384: times 4 dd 16384
SECTION .text
;-----------------------------------------------------------------------------
; void ff_ac3_exponent_min(uint8_t *exp, int num_reuse_blocks, int nb_coefs)
;-----------------------------------------------------------------------------
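; Scalar sketch of what the SIMD code below computes: for each coefficient,
; take the minimum exponent over the current block and the following
; num_reuse_blocks blocks, which are spaced 256 bytes (AC3_MAX_COEFS) apart:
;     for (i = 0; i < nb_coefs; i++)
;         for (blk = 1; blk <= num_reuse_blocks; blk++)
;             exp[i] = FFMIN(exp[i], exp[i + 256 * blk]);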
%macro AC3_EXPONENT_MIN 0
cglobal ac3_exponent_min, 3, 4, 2, exp, reuse_blks, expn, offset
shl reuse_blksq, 8
jz .end
LOOP_ALIGN
.nextexp:
mov offsetq, reuse_blksq
mova m0, [expq+offsetq]
sub offsetq, 256
LOOP_ALIGN
.nextblk:
PMINUB m0, [expq+offsetq], m1
sub offsetq, 256
jae .nextblk
mova [expq], m0
add expq, mmsize
sub expnq, mmsize
jg .nextexp
.end:
REP_RET
%endmacro
%define LOOP_ALIGN
INIT_MMX mmx
AC3_EXPONENT_MIN
%if HAVE_MMXEXT_EXTERNAL
%define LOOP_ALIGN ALIGN 16
INIT_MMX mmxext
AC3_EXPONENT_MIN
%endif
%if HAVE_SSE2_EXTERNAL
INIT_XMM sse2
AC3_EXPONENT_MIN
%endif
%undef LOOP_ALIGN
;-----------------------------------------------------------------------------
; int ff_ac3_max_msb_abs_int16(const int16_t *src, int len)
;
; This function uses 2 different methods to calculate a valid result.
; 1) logical 'or' of abs of each element
; This is used for ssse3 because of the pabsw instruction.
; It is also used for mmx because of the lack of min/max instructions.
; 2) calculate min/max for the array, then or(abs(min),abs(max))
; This is used for mmxext and sse2 because they have pminsw/pmaxsw.
;-----------------------------------------------------------------------------
; logical 'or' of 4 or 8 words in an mmx or xmm register into the low word
%macro OR_WORDS_HORIZ 2 ; src, tmp
%if cpuflag(sse2)
movhlps %2, %1
por %1, %2
pshuflw %2, %1, q0032
por %1, %2
pshuflw %2, %1, q0001
por %1, %2
%elif cpuflag(mmxext)
pshufw %2, %1, q0032
por %1, %2
pshufw %2, %1, q0001
por %1, %2
%else ; mmx
movq %2, %1
psrlq %2, 32
por %1, %2
movq %2, %1
psrlq %2, 16
por %1, %2
%endif
%endmacro
%macro AC3_MAX_MSB_ABS_INT16 1
cglobal ac3_max_msb_abs_int16, 2,2,5, src, len
pxor m2, m2
pxor m3, m3
.loop:
%ifidn %1, min_max
mova m0, [srcq]
mova m1, [srcq+mmsize]
pminsw m2, m0
pminsw m2, m1
pmaxsw m3, m0
pmaxsw m3, m1
%else ; or_abs
%if notcpuflag(ssse3)
mova m0, [srcq]
mova m1, [srcq+mmsize]
ABS2 m0, m1, m3, m4
%else ; ssse3
; using memory args is faster for ssse3
pabsw m0, [srcq]
pabsw m1, [srcq+mmsize]
%endif
por m2, m0
por m2, m1
%endif
add srcq, mmsize*2
sub lend, mmsize
ja .loop
%ifidn %1, min_max
ABS2 m2, m3, m0, m1
por m2, m3
%endif
OR_WORDS_HORIZ m2, m0
movd eax, m2
and eax, 0xFFFF
RET
%endmacro
INIT_MMX mmx
AC3_MAX_MSB_ABS_INT16 or_abs
INIT_MMX mmxext
AC3_MAX_MSB_ABS_INT16 min_max
INIT_XMM sse2
AC3_MAX_MSB_ABS_INT16 min_max
INIT_XMM ssse3
AC3_MAX_MSB_ABS_INT16 or_abs
;-----------------------------------------------------------------------------
; macro used for ff_ac3_lshift_int16() and ff_ac3_rshift_int32()
;-----------------------------------------------------------------------------
%macro AC3_SHIFT 3 ; l/r, 16/32, shift instruction
cglobal ac3_%1shift_int%2, 3, 3, 5, src, len, shift
movd m0, shiftd
.loop:
mova m1, [srcq ]
mova m2, [srcq+mmsize ]
mova m3, [srcq+mmsize*2]
mova m4, [srcq+mmsize*3]
%3 m1, m0
%3 m2, m0
%3 m3, m0
%3 m4, m0
mova [srcq ], m1
mova [srcq+mmsize ], m2
mova [srcq+mmsize*2], m3
mova [srcq+mmsize*3], m4
add srcq, mmsize*4
sub lend, mmsize*32/%2
ja .loop
.end:
REP_RET
%endmacro
;-----------------------------------------------------------------------------
; void ff_ac3_lshift_int16(int16_t *src, unsigned int len, unsigned int shift)
;-----------------------------------------------------------------------------
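; Scalar equivalent (in-place shift of len 16-bit samples):
;     for (i = 0; i < len; i++)
;         src[i] <<= shift;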
INIT_MMX mmx
AC3_SHIFT l, 16, psllw
INIT_XMM sse2
AC3_SHIFT l, 16, psllw
;-----------------------------------------------------------------------------
; void ff_ac3_rshift_int32(int32_t *src, unsigned int len, unsigned int shift)
;-----------------------------------------------------------------------------
INIT_MMX mmx
AC3_SHIFT r, 32, psrad
INIT_XMM sse2
AC3_SHIFT r, 32, psrad
;-----------------------------------------------------------------------------
; void ff_float_to_fixed24(int32_t *dst, const float *src, unsigned int len)
;-----------------------------------------------------------------------------
; The 3DNow! version is not bit-identical because pf2id uses truncation rather
; than round-to-nearest.
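; Rough scalar reference (rounds to nearest):
;     for (i = 0; i < len; i++)
;         dst[i] = lrintf(src[i] * 16777216.0f);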
INIT_MMX 3dnow
cglobal float_to_fixed24, 3, 3, 0, dst, src, len
movq m0, [pf_1_24]
.loop:
movq m1, [srcq ]
movq m2, [srcq+8 ]
movq m3, [srcq+16]
movq m4, [srcq+24]
pfmul m1, m0
pfmul m2, m0
pfmul m3, m0
pfmul m4, m0
pf2id m1, m1
pf2id m2, m2
pf2id m3, m3
pf2id m4, m4
movq [dstq ], m1
movq [dstq+8 ], m2
movq [dstq+16], m3
movq [dstq+24], m4
add srcq, 32
add dstq, 32
sub lend, 8
ja .loop
femms
RET
INIT_XMM sse
cglobal float_to_fixed24, 3, 3, 3, dst, src, len
movaps m0, [pf_1_24]
.loop:
movaps m1, [srcq ]
movaps m2, [srcq+16]
mulps m1, m0
mulps m2, m0
cvtps2pi mm0, m1
movhlps m1, m1
cvtps2pi mm1, m1
cvtps2pi mm2, m2
movhlps m2, m2
cvtps2pi mm3, m2
movq [dstq ], mm0
movq [dstq+ 8], mm1
movq [dstq+16], mm2
movq [dstq+24], mm3
add srcq, 32
add dstq, 32
sub lend, 8
ja .loop
emms
RET
INIT_XMM sse2
cglobal float_to_fixed24, 3, 3, 9, dst, src, len
movaps m0, [pf_1_24]
.loop:
movaps m1, [srcq ]
movaps m2, [srcq+16 ]
movaps m3, [srcq+32 ]
movaps m4, [srcq+48 ]
%ifdef m8
movaps m5, [srcq+64 ]
movaps m6, [srcq+80 ]
movaps m7, [srcq+96 ]
movaps m8, [srcq+112]
%endif
mulps m1, m0
mulps m2, m0
mulps m3, m0
mulps m4, m0
%ifdef m8
mulps m5, m0
mulps m6, m0
mulps m7, m0
mulps m8, m0
%endif
cvtps2dq m1, m1
cvtps2dq m2, m2
cvtps2dq m3, m3
cvtps2dq m4, m4
%ifdef m8
cvtps2dq m5, m5
cvtps2dq m6, m6
cvtps2dq m7, m7
cvtps2dq m8, m8
%endif
movdqa [dstq ], m1
movdqa [dstq+16 ], m2
movdqa [dstq+32 ], m3
movdqa [dstq+48 ], m4
%ifdef m8
movdqa [dstq+64 ], m5
movdqa [dstq+80 ], m6
movdqa [dstq+96 ], m7
movdqa [dstq+112], m8
add srcq, 128
add dstq, 128
sub lenq, 32
%else
add srcq, 64
add dstq, 64
sub lenq, 16
%endif
ja .loop
REP_RET
;------------------------------------------------------------------------------
; int ff_ac3_compute_mantissa_size(uint16_t mant_cnt[6][16])
;------------------------------------------------------------------------------
%macro PHADDD4 2 ; xmm src, xmm tmp
movhlps %2, %1
paddd %1, %2
pshufd %2, %1, 0x1
paddd %1, %2
%endmacro
INIT_XMM sse2
cglobal ac3_compute_mantissa_size, 1, 2, 4, mant_cnt, sum
movdqa m0, [mant_cntq ]
movdqa m1, [mant_cntq+ 1*16]
paddw m0, [mant_cntq+ 2*16]
paddw m1, [mant_cntq+ 3*16]
paddw m0, [mant_cntq+ 4*16]
paddw m1, [mant_cntq+ 5*16]
paddw m0, [mant_cntq+ 6*16]
paddw m1, [mant_cntq+ 7*16]
paddw m0, [mant_cntq+ 8*16]
paddw m1, [mant_cntq+ 9*16]
paddw m0, [mant_cntq+10*16]
paddw m1, [mant_cntq+11*16]
pmaddwd m0, [ac3_bap_bits ]
pmaddwd m1, [ac3_bap_bits+16]
paddd m0, m1
PHADDD4 m0, m1
movd sumd, m0
movdqa m3, [pw_bap_mul1]
movhpd m0, [mant_cntq +2]
movlpd m0, [mant_cntq+1*32+2]
movhpd m1, [mant_cntq+2*32+2]
movlpd m1, [mant_cntq+3*32+2]
movhpd m2, [mant_cntq+4*32+2]
movlpd m2, [mant_cntq+5*32+2]
pmulhuw m0, m3
pmulhuw m1, m3
pmulhuw m2, m3
paddusw m0, m1
paddusw m0, m2
pmaddwd m0, [pw_bap_mul2]
PHADDD4 m0, m1
movd eax, m0
add eax, sumd
RET
;------------------------------------------------------------------------------
; void ff_ac3_extract_exponents(uint8_t *exp, int32_t *coef, int nb_coefs)
;------------------------------------------------------------------------------
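; Scalar sketch of the computation below:
;     for (i = 0; i < nb_coefs; i++) {
;         int v = abs(coef[i]);
;         exp[i] = v ? 23 - av_log2(v) : 24;
;     }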
%macro PABSD 1-2 ; src/dst, unused
%if cpuflag(ssse3)
pabsd %1, %1
%else ; src/dst, tmp
pxor %2, %2
pcmpgtd %2, %1
pxor %1, %2
psubd %1, %2
%endif
%endmacro
%macro AC3_EXTRACT_EXPONENTS 0
cglobal ac3_extract_exponents, 3, 3, 4, exp, coef, len
add expq, lenq
lea coefq, [coefq+4*lenq]
neg lenq
mova m2, [pd_1]
mova m3, [pd_151]
.loop:
; move 4 32-bit coefs to xmm0
mova m0, [coefq+4*lenq]
; absolute value
PABSD m0, m1
; convert to float and extract exponents
pslld m0, 1
por m0, m2
cvtdq2ps m1, m0
psrld m1, 23
mova m0, m3
psubd m0, m1
; move the lowest byte in each of 4 dwords to the low dword
; NOTE: We cannot just extract the low bytes with pshufb because the dword
; result for 16777215 is -1 due to float inaccuracy. Using packuswb
; clips this to 0, which is the correct exponent.
packssdw m0, m0
packuswb m0, m0
movd [expq+lenq], m0
add lenq, 4
jl .loop
REP_RET
%endmacro
%if HAVE_SSE2_EXTERNAL
INIT_XMM sse2
AC3_EXTRACT_EXPONENTS
%endif
%if HAVE_SSSE3_EXTERNAL
INIT_XMM ssse3
AC3_EXTRACT_EXPONENTS
%endif
;-----------------------------------------------------------------------------
; void ff_apply_window_int16(int16_t *output, const int16_t *input,
; const int16_t *window, unsigned int len)
;-----------------------------------------------------------------------------
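; Rough scalar reference (window holds len/2 coefficients and is applied
; symmetrically to both halves of the input):
;     for (i = 0; i < len / 2; i++) {
;         output[i]           = (input[i]           * window[i] + 16384) >> 15;
;         output[len - i - 1] = (input[len - i - 1] * window[i] + 16384) >> 15;
;     }
; The comments inside the loop below explain which variants match this
; bit-exactly and which only approximate the rounding.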
%macro REVERSE_WORDS 1-2
%if cpuflag(ssse3) && notcpuflag(atom)
pshufb %1, %2
%elif cpuflag(sse2)
pshuflw %1, %1, 0x1B
pshufhw %1, %1, 0x1B
pshufd %1, %1, 0x4E
%elif cpuflag(mmxext)
pshufw %1, %1, 0x1B
%endif
%endmacro
%macro MUL16FIXED 3
%if cpuflag(ssse3) ; dst, src, unused
; dst = ((dst * src) + (1<<14)) >> 15
pmulhrsw %1, %2
%elif cpuflag(mmxext) ; dst, src, temp
; dst = (dst * src) >> 15
; pmulhw cuts off the bottom bit, so we have to lshift by 1 and add it back
; in from the pmullw result.
mova %3, %1
pmulhw %1, %2
pmullw %3, %2
psrlw %3, 15
psllw %1, 1
por %1, %3
%endif
%endmacro
%macro APPLY_WINDOW_INT16 1 ; %1 bitexact version
%if %1
cglobal apply_window_int16, 4,5,6, output, input, window, offset, offset2
%else
cglobal apply_window_int16_round, 4,5,6, output, input, window, offset, offset2
%endif
lea offset2q, [offsetq-mmsize]
%if cpuflag(ssse3) && notcpuflag(atom)
mova m5, [pb_revwords]
ALIGN 16
%elif %1
mova m5, [pd_16384]
%endif
.loop:
%if cpuflag(ssse3)
; This version does the 16x16->16 multiplication in-place without expanding
; to 32-bit. The ssse3 version is bit-identical.
mova m0, [windowq+offset2q]
mova m1, [ inputq+offset2q]
pmulhrsw m1, m0
REVERSE_WORDS m0, m5
pmulhrsw m0, [ inputq+offsetq ]
mova [outputq+offset2q], m1
mova [outputq+offsetq ], m0
%elif %1
; This version expands 16-bit to 32-bit, multiplies by the window,
; adds 16384 for rounding, right shifts 15, then repacks back to words to
; save to the output. The window is reversed for the second half.
mova m3, [windowq+offset2q]
mova m4, [ inputq+offset2q]
pxor m0, m0
punpcklwd m0, m3
punpcklwd m1, m4
pmaddwd m0, m1
paddd m0, m5
psrad m0, 15
pxor m2, m2
punpckhwd m2, m3
punpckhwd m1, m4
pmaddwd m2, m1
paddd m2, m5
psrad m2, 15
packssdw m0, m2
mova [outputq+offset2q], m0
REVERSE_WORDS m3
mova m4, [ inputq+offsetq]
pxor m0, m0
punpcklwd m0, m3
punpcklwd m1, m4
pmaddwd m0, m1
paddd m0, m5
psrad m0, 15
pxor m2, m2
punpckhwd m2, m3
punpckhwd m1, m4
pmaddwd m2, m1
paddd m2, m5
psrad m2, 15
packssdw m0, m2
mova [outputq+offsetq], m0
%else
; This version does the 16x16->16 multiplication in-place without expanding
; to 32-bit. The mmxext and sse2 versions do not use rounding, and
; therefore are not bit-identical to the C version.
mova m0, [windowq+offset2q]
mova m1, [ inputq+offset2q]
mova m2, [ inputq+offsetq ]
MUL16FIXED m1, m0, m3
REVERSE_WORDS m0
MUL16FIXED m2, m0, m3
mova [outputq+offset2q], m1
mova [outputq+offsetq ], m2
%endif
add offsetd, mmsize
sub offset2d, mmsize
jae .loop
REP_RET
%endmacro
INIT_MMX mmxext
APPLY_WINDOW_INT16 0
INIT_XMM sse2
APPLY_WINDOW_INT16 0
INIT_MMX mmxext
APPLY_WINDOW_INT16 1
INIT_XMM sse2
APPLY_WINDOW_INT16 1
INIT_XMM ssse3
APPLY_WINDOW_INT16 1
INIT_XMM ssse3, atom
APPLY_WINDOW_INT16 1


@@ -0,0 +1,257 @@
/*
* x86-optimized AC-3 DSP functions
* Copyright (c) 2011 Justin Ruggles
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/attributes.h"
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/ac3.h"
#include "libavcodec/ac3dsp.h"
void ff_ac3_exponent_min_mmx (uint8_t *exp, int num_reuse_blocks, int nb_coefs);
void ff_ac3_exponent_min_mmxext(uint8_t *exp, int num_reuse_blocks, int nb_coefs);
void ff_ac3_exponent_min_sse2 (uint8_t *exp, int num_reuse_blocks, int nb_coefs);
int ff_ac3_max_msb_abs_int16_mmx (const int16_t *src, int len);
int ff_ac3_max_msb_abs_int16_mmxext(const int16_t *src, int len);
int ff_ac3_max_msb_abs_int16_sse2 (const int16_t *src, int len);
int ff_ac3_max_msb_abs_int16_ssse3(const int16_t *src, int len);
void ff_ac3_lshift_int16_mmx (int16_t *src, unsigned int len, unsigned int shift);
void ff_ac3_lshift_int16_sse2(int16_t *src, unsigned int len, unsigned int shift);
void ff_ac3_rshift_int32_mmx (int32_t *src, unsigned int len, unsigned int shift);
void ff_ac3_rshift_int32_sse2(int32_t *src, unsigned int len, unsigned int shift);
void ff_float_to_fixed24_3dnow(int32_t *dst, const float *src, unsigned int len);
void ff_float_to_fixed24_sse (int32_t *dst, const float *src, unsigned int len);
void ff_float_to_fixed24_sse2 (int32_t *dst, const float *src, unsigned int len);
int ff_ac3_compute_mantissa_size_sse2(uint16_t mant_cnt[6][16]);
void ff_ac3_extract_exponents_3dnow(uint8_t *exp, int32_t *coef, int nb_coefs);
void ff_ac3_extract_exponents_sse2 (uint8_t *exp, int32_t *coef, int nb_coefs);
void ff_ac3_extract_exponents_ssse3(uint8_t *exp, int32_t *coef, int nb_coefs);
void ff_apply_window_int16_round_mmxext(int16_t *output, const int16_t *input,
const int16_t *window, unsigned int len);
void ff_apply_window_int16_round_sse2(int16_t *output, const int16_t *input,
const int16_t *window, unsigned int len);
void ff_apply_window_int16_mmxext(int16_t *output, const int16_t *input,
const int16_t *window, unsigned int len);
void ff_apply_window_int16_sse2(int16_t *output, const int16_t *input,
const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3(int16_t *output, const int16_t *input,
const int16_t *window, unsigned int len);
void ff_apply_window_int16_ssse3_atom(int16_t *output, const int16_t *input,
const int16_t *window, unsigned int len);
#if ARCH_X86_32 && defined(__INTEL_COMPILER)
# undef HAVE_7REGS
# define HAVE_7REGS 0
#endif
#if HAVE_SSE_INLINE && HAVE_7REGS
#define IF1(x) x
#define IF0(x)
#define MIX5(mono, stereo) \
__asm__ volatile ( \
"movss 0(%1), %%xmm5 \n" \
"movss 8(%1), %%xmm6 \n" \
"movss 24(%1), %%xmm7 \n" \
"shufps $0, %%xmm5, %%xmm5 \n" \
"shufps $0, %%xmm6, %%xmm6 \n" \
"shufps $0, %%xmm7, %%xmm7 \n" \
"1: \n" \
"movaps (%0, %2), %%xmm0 \n" \
"movaps (%0, %3), %%xmm1 \n" \
"movaps (%0, %4), %%xmm2 \n" \
"movaps (%0, %5), %%xmm3 \n" \
"movaps (%0, %6), %%xmm4 \n" \
"mulps %%xmm5, %%xmm0 \n" \
"mulps %%xmm6, %%xmm1 \n" \
"mulps %%xmm5, %%xmm2 \n" \
"mulps %%xmm7, %%xmm3 \n" \
"mulps %%xmm7, %%xmm4 \n" \
stereo("addps %%xmm1, %%xmm0 \n") \
"addps %%xmm1, %%xmm2 \n" \
"addps %%xmm3, %%xmm0 \n" \
"addps %%xmm4, %%xmm2 \n" \
mono("addps %%xmm2, %%xmm0 \n") \
"movaps %%xmm0, (%0, %2) \n" \
stereo("movaps %%xmm2, (%0, %3) \n") \
"add $16, %0 \n" \
"jl 1b \n" \
: "+&r"(i) \
: "r"(matrix), \
"r"(samples[0] + len), \
"r"(samples[1] + len), \
"r"(samples[2] + len), \
"r"(samples[3] + len), \
"r"(samples[4] + len) \
: XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", \
"%xmm4", "%xmm5", "%xmm6", "%xmm7",) \
"memory" \
);
#define MIX_MISC(stereo) \
__asm__ volatile ( \
"mov %5, %2 \n" \
"1: \n" \
"mov -%c7(%6, %2, %c8), %3 \n" \
"movaps (%3, %0), %%xmm0 \n" \
stereo("movaps %%xmm0, %%xmm1 \n") \
"mulps %%xmm4, %%xmm0 \n" \
stereo("mulps %%xmm5, %%xmm1 \n") \
"2: \n" \
"mov (%6, %2, %c8), %1 \n" \
"movaps (%1, %0), %%xmm2 \n" \
stereo("movaps %%xmm2, %%xmm3 \n") \
"mulps (%4, %2, 8), %%xmm2 \n" \
stereo("mulps 16(%4, %2, 8), %%xmm3 \n") \
"addps %%xmm2, %%xmm0 \n" \
stereo("addps %%xmm3, %%xmm1 \n") \
"add $4, %2 \n" \
"jl 2b \n" \
"mov %5, %2 \n" \
stereo("mov (%6, %2, %c8), %1 \n") \
"movaps %%xmm0, (%3, %0) \n" \
stereo("movaps %%xmm1, (%1, %0) \n") \
"add $16, %0 \n" \
"jl 1b \n" \
: "+&r"(i), "=&r"(j), "=&r"(k), "=&r"(m) \
: "r"(matrix_simd + in_ch), \
"g"((intptr_t) - 4 * (in_ch - 1)), \
"r"(samp + in_ch), \
"i"(sizeof(float *)), "i"(sizeof(float *)/4) \
: "memory" \
);
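/*
 * Rough scalar equivalent of the downmix below: for each sample i and each
 * output channel o (o < out_ch), accumulate the matrixed input channels in
 * place:
 *     samples[o][i] = sum over c < in_ch of samples[c][i] * matrix[c][o];
 * MIX5 is a fast path for the common 5-channel to mono/stereo matrices,
 * MIX_MISC handles arbitrary matrices.
 */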
static void ac3_downmix_sse(float **samples, float (*matrix)[2],
int out_ch, int in_ch, int len)
{
int (*matrix_cmp)[2] = (int(*)[2])matrix;
intptr_t i, j, k, m;
i = -len * sizeof(float);
if (in_ch == 5 && out_ch == 2 &&
!(matrix_cmp[0][1] | matrix_cmp[2][0] |
matrix_cmp[3][1] | matrix_cmp[4][0] |
(matrix_cmp[1][0] ^ matrix_cmp[1][1]) |
(matrix_cmp[0][0] ^ matrix_cmp[2][1]))) {
MIX5(IF0, IF1);
} else if (in_ch == 5 && out_ch == 1 &&
matrix_cmp[0][0] == matrix_cmp[2][0] &&
matrix_cmp[3][0] == matrix_cmp[4][0]) {
MIX5(IF1, IF0);
} else {
LOCAL_ALIGNED(16, float, matrix_simd, [AC3_MAX_CHANNELS], [2][4]);
float *samp[AC3_MAX_CHANNELS];
for (j = 0; j < in_ch; j++)
samp[j] = samples[j] + len;
j = 2 * in_ch * sizeof(float);
__asm__ volatile (
"1: \n"
"sub $8, %0 \n"
"movss (%2, %0), %%xmm4 \n"
"movss 4(%2, %0), %%xmm5 \n"
"shufps $0, %%xmm4, %%xmm4 \n"
"shufps $0, %%xmm5, %%xmm5 \n"
"movaps %%xmm4, (%1, %0, 4) \n"
"movaps %%xmm5, 16(%1, %0, 4) \n"
"jg 1b \n"
: "+&r"(j)
: "r"(matrix_simd), "r"(matrix)
: "memory"
);
if (out_ch == 2) {
MIX_MISC(IF1);
} else {
MIX_MISC(IF0);
}
}
}
#endif /* HAVE_SSE_INLINE && HAVE_7REGS */
av_cold void ff_ac3dsp_init_x86(AC3DSPContext *c, int bit_exact)
{
int cpu_flags = av_get_cpu_flags();
if (EXTERNAL_MMX(cpu_flags)) {
c->ac3_exponent_min = ff_ac3_exponent_min_mmx;
c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_mmx;
c->ac3_lshift_int16 = ff_ac3_lshift_int16_mmx;
c->ac3_rshift_int32 = ff_ac3_rshift_int32_mmx;
}
if (EXTERNAL_AMD3DNOW(cpu_flags)) {
if (!bit_exact) {
c->float_to_fixed24 = ff_float_to_fixed24_3dnow;
}
}
if (EXTERNAL_MMXEXT(cpu_flags)) {
c->ac3_exponent_min = ff_ac3_exponent_min_mmxext;
c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_mmxext;
if (bit_exact) {
c->apply_window_int16 = ff_apply_window_int16_mmxext;
} else {
c->apply_window_int16 = ff_apply_window_int16_round_mmxext;
}
}
if (EXTERNAL_SSE(cpu_flags)) {
c->float_to_fixed24 = ff_float_to_fixed24_sse;
}
if (EXTERNAL_SSE2(cpu_flags)) {
c->ac3_exponent_min = ff_ac3_exponent_min_sse2;
c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_sse2;
c->float_to_fixed24 = ff_float_to_fixed24_sse2;
c->compute_mantissa_size = ff_ac3_compute_mantissa_size_sse2;
c->extract_exponents = ff_ac3_extract_exponents_sse2;
if (!(cpu_flags & AV_CPU_FLAG_SSE2SLOW)) {
c->ac3_lshift_int16 = ff_ac3_lshift_int16_sse2;
c->ac3_rshift_int32 = ff_ac3_rshift_int32_sse2;
}
if (bit_exact) {
c->apply_window_int16 = ff_apply_window_int16_sse2;
} else if (!(cpu_flags & AV_CPU_FLAG_SSE2SLOW)) {
c->apply_window_int16 = ff_apply_window_int16_round_sse2;
}
}
if (EXTERNAL_SSSE3(cpu_flags)) {
c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_ssse3;
if (cpu_flags & AV_CPU_FLAG_ATOM) {
c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
} else {
c->extract_exponents = ff_ac3_extract_exponents_ssse3;
c->apply_window_int16 = ff_apply_window_int16_ssse3;
}
}
#if HAVE_SSE_INLINE && HAVE_7REGS
if (INLINE_SSE(cpu_flags)) {
c->downmix = ac3_downmix_sse;
}
#endif
}


@@ -0,0 +1,177 @@
;******************************************************************************
;* optimized audio functions
;* Copyright (c) 2008 Loren Merritt
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION .text
%macro SCALARPRODUCT 0
; int ff_scalarproduct_int16(int16_t *v1, int16_t *v2, int order)
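; Scalar equivalent:
;     int32_t sum = 0;
;     for (i = 0; i < order; i++)
;         sum += v1[i] * v2[i];
;     return sum;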
cglobal scalarproduct_int16, 3,3,3, v1, v2, order
shl orderq, 1
add v1q, orderq
add v2q, orderq
neg orderq
pxor m2, m2
.loop:
movu m0, [v1q + orderq]
movu m1, [v1q + orderq + mmsize]
pmaddwd m0, [v2q + orderq]
pmaddwd m1, [v2q + orderq + mmsize]
paddd m2, m0
paddd m2, m1
add orderq, mmsize*2
jl .loop
HADDD m2, m0
movd eax, m2
%if mmsize == 8
emms
%endif
RET
%endmacro
INIT_MMX mmxext
SCALARPRODUCT
INIT_XMM sse2
SCALARPRODUCT
;-----------------------------------------------------------------------------
; void ff_vector_clip_int32(int32_t *dst, const int32_t *src, int32_t min,
; int32_t max, unsigned int len)
;-----------------------------------------------------------------------------
; %1 = number of xmm registers used
; %2 = number of inline load/process/store loops per asm loop
; %3 = process 4*mmsize (%3=0) or 8*mmsize (%3=1) bytes per loop
; %4 = CLIPD function takes min/max as float instead of int (CLIPD_SSE2)
; %5 = suffix
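; Scalar equivalent of every variant below:
;     for (i = 0; i < len; i++)
;         dst[i] = av_clip(src[i], min, max);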
%macro VECTOR_CLIP_INT32 4-5
cglobal vector_clip_int32%5, 5,5,%1, dst, src, min, max, len
%if %4
cvtsi2ss m4, minm
cvtsi2ss m5, maxm
%else
movd m4, minm
movd m5, maxm
%endif
SPLATD m4
SPLATD m5
.loop:
%assign %%i 0
%rep %2
mova m0, [srcq+mmsize*(0+%%i)]
mova m1, [srcq+mmsize*(1+%%i)]
mova m2, [srcq+mmsize*(2+%%i)]
mova m3, [srcq+mmsize*(3+%%i)]
%if %3
mova m7, [srcq+mmsize*(4+%%i)]
mova m8, [srcq+mmsize*(5+%%i)]
mova m9, [srcq+mmsize*(6+%%i)]
mova m10, [srcq+mmsize*(7+%%i)]
%endif
CLIPD m0, m4, m5, m6
CLIPD m1, m4, m5, m6
CLIPD m2, m4, m5, m6
CLIPD m3, m4, m5, m6
%if %3
CLIPD m7, m4, m5, m6
CLIPD m8, m4, m5, m6
CLIPD m9, m4, m5, m6
CLIPD m10, m4, m5, m6
%endif
mova [dstq+mmsize*(0+%%i)], m0
mova [dstq+mmsize*(1+%%i)], m1
mova [dstq+mmsize*(2+%%i)], m2
mova [dstq+mmsize*(3+%%i)], m3
%if %3
mova [dstq+mmsize*(4+%%i)], m7
mova [dstq+mmsize*(5+%%i)], m8
mova [dstq+mmsize*(6+%%i)], m9
mova [dstq+mmsize*(7+%%i)], m10
%endif
%assign %%i %%i+4*(%3+1)
%endrep
add srcq, mmsize*4*(%2+%3)
add dstq, mmsize*4*(%2+%3)
sub lend, mmsize*(%2+%3)
jg .loop
REP_RET
%endmacro
INIT_MMX mmx
%define CLIPD CLIPD_MMX
VECTOR_CLIP_INT32 0, 1, 0, 0
INIT_XMM sse2
VECTOR_CLIP_INT32 6, 1, 0, 0, _int
%define CLIPD CLIPD_SSE2
VECTOR_CLIP_INT32 6, 2, 0, 1
INIT_XMM sse4
%define CLIPD CLIPD_SSE41
%ifdef m8
VECTOR_CLIP_INT32 11, 1, 1, 0
%else
VECTOR_CLIP_INT32 6, 1, 0, 0
%endif
;-----------------------------------------------------
;void ff_vector_clipf(float *dst, const float *src,
; float min, float max, int len)
;-----------------------------------------------------
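; Scalar equivalent:
;     for (i = 0; i < len; i++)
;         dst[i] = av_clipf(src[i], min, max);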
INIT_XMM sse
%if UNIX64
cglobal vector_clipf, 3,3,6, dst, src, len
%else
cglobal vector_clipf, 5,5,6, dst, src, min, max, len
%endif
%if WIN64
SWAP 0, 2
SWAP 1, 3
%elif ARCH_X86_32
movss m0, minm
movss m1, maxm
%endif
SPLATD m0
SPLATD m1
shl lend, 2
add srcq, lenq
add dstq, lenq
neg lenq
.loop:
mova m2, [srcq+lenq+mmsize*0]
mova m3, [srcq+lenq+mmsize*1]
mova m4, [srcq+lenq+mmsize*2]
mova m5, [srcq+lenq+mmsize*3]
maxps m2, m0
maxps m3, m0
maxps m4, m0
maxps m5, m0
minps m2, m1
minps m3, m1
minps m4, m1
minps m5, m1
mova [dstq+lenq+mmsize*0], m2
mova [dstq+lenq+mmsize*1], m3
mova [dstq+lenq+mmsize*2], m4
mova [dstq+lenq+mmsize*3], m5
add lenq, mmsize*4
jl .loop
REP_RET


@@ -0,0 +1,67 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/audiodsp.h"
int32_t ff_scalarproduct_int16_mmxext(const int16_t *v1, const int16_t *v2,
int order);
int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2,
int order);
void ff_vector_clip_int32_mmx(int32_t *dst, const int32_t *src,
int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_sse2(int32_t *dst, const int32_t *src,
int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_int_sse2(int32_t *dst, const int32_t *src,
int32_t min, int32_t max, unsigned int len);
void ff_vector_clip_int32_sse4(int32_t *dst, const int32_t *src,
int32_t min, int32_t max, unsigned int len);
void ff_vector_clipf_sse(float *dst, const float *src,
float min, float max, int len);
av_cold void ff_audiodsp_init_x86(AudioDSPContext *c)
{
int cpu_flags = av_get_cpu_flags();
if (EXTERNAL_MMX(cpu_flags))
c->vector_clip_int32 = ff_vector_clip_int32_mmx;
if (EXTERNAL_MMXEXT(cpu_flags))
c->scalarproduct_int16 = ff_scalarproduct_int16_mmxext;
if (EXTERNAL_SSE(cpu_flags))
c->vector_clipf = ff_vector_clipf_sse;
if (EXTERNAL_SSE2(cpu_flags)) {
c->scalarproduct_int16 = ff_scalarproduct_int16_sse2;
if (cpu_flags & AV_CPU_FLAG_ATOM)
c->vector_clip_int32 = ff_vector_clip_int32_int_sse2;
else
c->vector_clip_int32 = ff_vector_clip_int32_sse2;
}
if (EXTERNAL_SSE4(cpu_flags))
c->vector_clip_int32 = ff_vector_clip_int32_sse4;
}


@@ -0,0 +1,86 @@
;******************************************************************************
;* SIMD-optimized clear block functions
;* Copyright (c) 2002 Michael Niedermayer
;* Copyright (c) 2008 Loren Merritt
;* Copyright (c) 2009 Fiona Glaser
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION .text
;----------------------------------------
; void ff_clear_block(int16_t *blocks);
;----------------------------------------
; %1 = number of xmm registers used
; %2 = number of inline store loops
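; Equivalent to clearing one 8x8 block of 16-bit coefficients:
;     memset(blocks, 0, 64 * sizeof(int16_t));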
%macro CLEAR_BLOCK 2
cglobal clear_block, 1, 1, %1, blocks
ZERO m0, m0
%assign %%i 0
%rep %2
mova [blocksq+mmsize*(0+%%i)], m0
mova [blocksq+mmsize*(1+%%i)], m0
mova [blocksq+mmsize*(2+%%i)], m0
mova [blocksq+mmsize*(3+%%i)], m0
mova [blocksq+mmsize*(4+%%i)], m0
mova [blocksq+mmsize*(5+%%i)], m0
mova [blocksq+mmsize*(6+%%i)], m0
mova [blocksq+mmsize*(7+%%i)], m0
%assign %%i %%i+8
%endrep
RET
%endmacro
INIT_MMX mmx
%define ZERO pxor
CLEAR_BLOCK 0, 2
INIT_XMM sse
%define ZERO xorps
CLEAR_BLOCK 1, 1
;-----------------------------------------
; void ff_clear_blocks(int16_t *blocks);
;-----------------------------------------
; %1 = number of xmm registers used
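; Equivalent to clearing six 8x8 blocks of 16-bit coefficients:
;     memset(blocks, 0, 6 * 64 * sizeof(int16_t));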
%macro CLEAR_BLOCKS 1
cglobal clear_blocks, 1, 2, %1, blocks, len
add blocksq, 768
mov lenq, -768
ZERO m0, m0
.loop:
mova [blocksq+lenq+mmsize*0], m0
mova [blocksq+lenq+mmsize*1], m0
mova [blocksq+lenq+mmsize*2], m0
mova [blocksq+lenq+mmsize*3], m0
mova [blocksq+lenq+mmsize*4], m0
mova [blocksq+lenq+mmsize*5], m0
mova [blocksq+lenq+mmsize*6], m0
mova [blocksq+lenq+mmsize*7], m0
add lenq, mmsize*8
js .loop
RET
%endmacro
INIT_MMX mmx
%define ZERO pxor
CLEAR_BLOCKS 0
INIT_XMM sse
%define ZERO xorps
CLEAR_BLOCKS 1


@@ -0,0 +1,56 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/internal.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/blockdsp.h"
#include "libavcodec/version.h"
void ff_clear_block_mmx(int16_t *block);
void ff_clear_block_sse(int16_t *block);
void ff_clear_blocks_mmx(int16_t *blocks);
void ff_clear_blocks_sse(int16_t *blocks);
av_cold void ff_blockdsp_init_x86(BlockDSPContext *c, unsigned high_bit_depth,
AVCodecContext *avctx)
{
#if HAVE_YASM
int cpu_flags = av_get_cpu_flags();
if (!high_bit_depth) {
if (EXTERNAL_MMX(cpu_flags)) {
c->clear_block = ff_clear_block_mmx;
c->clear_blocks = ff_clear_blocks_mmx;
}
/* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
if (CONFIG_XVMC && avctx->hwaccel && avctx->hwaccel->decode_mb)
return;
if (EXTERNAL_SSE(cpu_flags)) {
c->clear_block = ff_clear_block_sse;
c->clear_blocks = ff_clear_blocks_sse;
}
}
#endif /* HAVE_YASM */
}


@@ -0,0 +1,139 @@
;******************************************************************************
;* optimized bswap buffer functions
;* Copyright (c) 2008 Loren Merritt
;* Copyright (c) 2003-2013 Michael Niedermayer
;* Copyright (c) 2013 Daniel Kang
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION_RODATA
pb_bswap32: db 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
cextern pb_80
SECTION .text
; %1 = aligned/unaligned
%macro BSWAP_LOOPS 1
mov r3d, r2d
sar r2d, 3
jz .left4_%1
.loop8_%1:
mov%1 m0, [r1 + 0]
mov%1 m1, [r1 + 16]
%if cpuflag(ssse3)
pshufb m0, m2
pshufb m1, m2
mov%1 [r0 + 0], m0
mov%1 [r0 + 16], m1
%else
pshuflw m0, m0, 10110001b
pshuflw m1, m1, 10110001b
pshufhw m0, m0, 10110001b
pshufhw m1, m1, 10110001b
mova m2, m0
mova m3, m1
psllw m0, 8
psllw m1, 8
psrlw m2, 8
psrlw m3, 8
por m2, m0
por m3, m1
mov%1 [r0 + 0], m2
mov%1 [r0 + 16], m3
%endif
add r0, 32
add r1, 32
dec r2d
jnz .loop8_%1
.left4_%1:
mov r2d, r3d
test r3d, 4
jz .left
mov%1 m0, [r1]
%if cpuflag(ssse3)
pshufb m0, m2
mov%1 [r0], m0
%else
pshuflw m0, m0, 10110001b
pshufhw m0, m0, 10110001b
mova m2, m0
psllw m0, 8
psrlw m2, 8
por m2, m0
mov%1 [r0], m2
%endif
add r1, 16
add r0, 16
%endmacro
; void ff_bswap_buf(uint32_t *dst, const uint32_t *src, int w);
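; Scalar equivalent:
;     for (i = 0; i < w; i++)
;         dst[i] = av_bswap32(src[i]);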
%macro BSWAP32_BUF 0
%if cpuflag(ssse3)
cglobal bswap32_buf, 3,4,3
mov r3, r1
mova m2, [pb_bswap32]
%else
cglobal bswap32_buf, 3,4,5
mov r3, r1
%endif
or r3, r0
test r3, 15
jz .start_align
BSWAP_LOOPS u
jmp .left
.start_align:
BSWAP_LOOPS a
.left:
%if cpuflag(ssse3)
test r2d, 2
jz .left1
movq m0, [r1]
pshufb m0, m2
movq [r0], m0
add r1, 8
add r0, 8
.left1:
test r2d, 1
jz .end
mov r2d, [r1]
bswap r2d
mov [r0], r2d
%else
and r2d, 3
jz .end
.loop2:
mov r3d, [r1]
bswap r3d
mov [r0], r3d
add r1, 4
add r0, 4
dec r2d
jnz .loop2
%endif
.end:
RET
%endmacro
INIT_XMM sse2
BSWAP32_BUF
INIT_XMM ssse3
BSWAP32_BUF


@@ -0,0 +1,37 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/bswapdsp.h"
void ff_bswap32_buf_sse2(uint32_t *dst, const uint32_t *src, int w);
void ff_bswap32_buf_ssse3(uint32_t *dst, const uint32_t *src, int w);
av_cold void ff_bswapdsp_init_x86(BswapDSPContext *c)
{
int cpu_flags = av_get_cpu_flags();
if (EXTERNAL_SSE2(cpu_flags))
c->bswap_buf = ff_bswap32_buf_sse2;
if (EXTERNAL_SSSE3(cpu_flags))
c->bswap_buf = ff_bswap32_buf_ssse3;
}


@@ -0,0 +1,301 @@
/*
* Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_X86_CABAC_H
#define AVCODEC_X86_CABAC_H
#include "libavcodec/cabac.h"
#include "libavutil/attributes.h"
#include "libavutil/macros.h"
#include "libavutil/x86/asm.h"
#include "config.h"
#if (defined(__i386) && defined(__clang__) && (__clang_major__<2 || (__clang_major__==2 && __clang_minor__<10)))\
|| ( !defined(__clang__) && defined(__llvm__) && __GNUC__==4 && __GNUC_MINOR__==2 && __GNUC_PATCHLEVEL__<=1)\
|| (defined(__INTEL_COMPILER) && defined(_MSC_VER))
# define BROKEN_COMPILER 1
#else
# define BROKEN_COMPILER 0
#endif
#if HAVE_INLINE_ASM
#ifndef UNCHECKED_BITSTREAM_READER
#define UNCHECKED_BITSTREAM_READER !CONFIG_SAFE_BITSTREAM_READER
#endif
#if UNCHECKED_BITSTREAM_READER
#define END_CHECK(end) ""
#else
#define END_CHECK(end) \
"cmp "end" , %%"REG_c" \n\t"\
"jge 1f \n\t"
#endif
#ifdef BROKEN_RELOCATIONS
#define TABLES_ARG , "r"(tables)
#if HAVE_FAST_CMOV
#define BRANCHLESS_GET_CABAC_UPDATE(ret, retq, low, range, tmp) \
"cmp "low" , "tmp" \n\t"\
"cmova %%ecx , "range" \n\t"\
"sbb %%rcx , %%rcx \n\t"\
"and %%ecx , "tmp" \n\t"\
"xor %%rcx , "retq" \n\t"\
"sub "tmp" , "low" \n\t"
#else /* HAVE_FAST_CMOV */
#define BRANCHLESS_GET_CABAC_UPDATE(ret, retq, low, range, tmp) \
/* P4 Prescott has crappy cmov,sbb,64bit shift so avoid them */ \
"sub "low" , "tmp" \n\t"\
"sar $31 , "tmp" \n\t"\
"sub %%ecx , "range" \n\t"\
"and "tmp" , "range" \n\t"\
"add %%ecx , "range" \n\t"\
"shl $17 , %%ecx \n\t"\
"and "tmp" , %%ecx \n\t"\
"sub %%ecx , "low" \n\t"\
"xor "tmp" , "ret" \n\t"\
"movslq "ret" , "retq" \n\t"
#endif /* HAVE_FAST_CMOV */
#define BRANCHLESS_GET_CABAC(ret, retq, statep, low, lowword, range, rangeq, tmp, tmpbyte, byte, end, norm_off, lps_off, mlps_off, tables) \
"movzbl "statep" , "ret" \n\t"\
"mov "range" , "tmp" \n\t"\
"and $0xC0 , "range" \n\t"\
"lea ("ret", "range", 2), %%ecx \n\t"\
"movzbl "lps_off"("tables", %%rcx), "range" \n\t"\
"sub "range" , "tmp" \n\t"\
"mov "tmp" , %%ecx \n\t"\
"shl $17 , "tmp" \n\t"\
BRANCHLESS_GET_CABAC_UPDATE(ret, retq, low, range, tmp) \
"movzbl "norm_off"("tables", "rangeq"), %%ecx \n\t"\
"shl %%cl , "range" \n\t"\
"movzbl "mlps_off"+128("tables", "retq"), "tmp" \n\t"\
"shl %%cl , "low" \n\t"\
"mov "tmpbyte" , "statep" \n\t"\
"test "lowword" , "lowword" \n\t"\
"jnz 2f \n\t"\
"mov "byte" , %%"REG_c" \n\t"\
END_CHECK(end)\
"add"OPSIZE" $2 , "byte" \n\t"\
"1: \n\t"\
"movzwl (%%"REG_c") , "tmp" \n\t"\
"lea -1("low") , %%ecx \n\t"\
"xor "low" , %%ecx \n\t"\
"shr $15 , %%ecx \n\t"\
"bswap "tmp" \n\t"\
"shr $15 , "tmp" \n\t"\
"movzbl "norm_off"("tables", %%rcx), %%ecx \n\t"\
"sub $0xFFFF , "tmp" \n\t"\
"neg %%ecx \n\t"\
"add $7 , %%ecx \n\t"\
"shl %%cl , "tmp" \n\t"\
"add "tmp" , "low" \n\t"\
"2: \n\t"
#else /* BROKEN_RELOCATIONS */
#define TABLES_ARG NAMED_CONSTRAINTS_ARRAY_ADD(ff_h264_cabac_tables)
#define RIP_ARG
#if HAVE_FAST_CMOV
#define BRANCHLESS_GET_CABAC_UPDATE(ret, low, range, tmp)\
"mov "tmp" , %%ecx \n\t"\
"shl $17 , "tmp" \n\t"\
"cmp "low" , "tmp" \n\t"\
"cmova %%ecx , "range" \n\t"\
"sbb %%ecx , %%ecx \n\t"\
"and %%ecx , "tmp" \n\t"\
"xor %%ecx , "ret" \n\t"\
"sub "tmp" , "low" \n\t"
#else /* HAVE_FAST_CMOV */
#define BRANCHLESS_GET_CABAC_UPDATE(ret, low, range, tmp)\
"mov "tmp" , %%ecx \n\t"\
"shl $17 , "tmp" \n\t"\
"sub "low" , "tmp" \n\t"\
"sar $31 , "tmp" \n\t" /*lps_mask*/\
"sub %%ecx , "range" \n\t" /*RangeLPS - range*/\
"and "tmp" , "range" \n\t" /*(RangeLPS - range)&lps_mask*/\
"add %%ecx , "range" \n\t" /*new range*/\
"shl $17 , %%ecx \n\t"\
"and "tmp" , %%ecx \n\t"\
"sub %%ecx , "low" \n\t"\
"xor "tmp" , "ret" \n\t"
#endif /* HAVE_FAST_CMOV */
#define BRANCHLESS_GET_CABAC(ret, retq, statep, low, lowword, range, rangeq, tmp, tmpbyte, byte, end, norm_off, lps_off, mlps_off, tables) \
"movzbl "statep" , "ret" \n\t"\
"mov "range" , "tmp" \n\t"\
"and $0xC0 , "range" \n\t"\
"movzbl "MANGLE(ff_h264_cabac_tables)"+"lps_off"("ret", "range", 2), "range" \n\t"\
"sub "range" , "tmp" \n\t"\
BRANCHLESS_GET_CABAC_UPDATE(ret, low, range, tmp) \
"movzbl "MANGLE(ff_h264_cabac_tables)"+"norm_off"("range"), %%ecx \n\t"\
"shl %%cl , "range" \n\t"\
"movzbl "MANGLE(ff_h264_cabac_tables)"+"mlps_off"+128("ret"), "tmp" \n\t"\
"shl %%cl , "low" \n\t"\
"mov "tmpbyte" , "statep" \n\t"\
"test "lowword" , "lowword" \n\t"\
" jnz 2f \n\t"\
"mov "byte" , %%"REG_c" \n\t"\
END_CHECK(end)\
"add"OPSIZE" $2 , "byte" \n\t"\
"1: \n\t"\
"movzwl (%%"REG_c") , "tmp" \n\t"\
"lea -1("low") , %%ecx \n\t"\
"xor "low" , %%ecx \n\t"\
"shr $15 , %%ecx \n\t"\
"bswap "tmp" \n\t"\
"shr $15 , "tmp" \n\t"\
"movzbl "MANGLE(ff_h264_cabac_tables)"+"norm_off"(%%ecx), %%ecx \n\t"\
"sub $0xFFFF , "tmp" \n\t"\
"neg %%ecx \n\t"\
"add $7 , %%ecx \n\t"\
"shl %%cl , "tmp" \n\t"\
"add "tmp" , "low" \n\t"\
"2: \n\t"
#endif /* BROKEN_RELOCATIONS */
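/*
 * Rough scalar sketch of what BRANCHLESS_GET_CABAC implements above; the
 * lps_range/mlps_state/norm_shift names stand for the corresponding slices of
 * ff_h264_cabac_tables, and CABAC_BITS is 16, hence the <<17 shifts:
 *
 *     s         = *state;
 *     RangeLPS  = lps_range[2 * (c->range & 0xC0) + s];
 *     c->range -= RangeLPS;                              // assume MPS
 *     lps_mask  = ((c->range << 17) - c->low) >> 31;     // all-ones if LPS
 *     c->low   -= (c->range << 17) & lps_mask;
 *     c->range += (RangeLPS - c->range) & lps_mask;
 *     s        ^= lps_mask;
 *     *state    = mlps_state[128 + s];
 *     bit       = s & 1;
 *     shift     = norm_shift[c->range];                  // renormalize
 *     c->range <<= shift;
 *     c->low   <<= shift;
 *     if (!(c->low & 0xFFFF))
 *         ... refill c->low from c->bytestream ...
 *     return bit;
 */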
#if HAVE_7REGS && !BROKEN_COMPILER
#define get_cabac_inline get_cabac_inline_x86
static av_always_inline int get_cabac_inline_x86(CABACContext *c,
uint8_t *const state)
{
int bit, tmp;
#ifdef BROKEN_RELOCATIONS
void *tables;
__asm__ volatile(
"lea "MANGLE(ff_h264_cabac_tables)", %0 \n\t"
: "=&r"(tables)
: NAMED_CONSTRAINTS_ARRAY(ff_h264_cabac_tables)
);
#endif
__asm__ volatile(
BRANCHLESS_GET_CABAC("%0", "%q0", "(%4)", "%1", "%w1",
"%2", "%q2", "%3", "%b3",
"%c6(%5)", "%c7(%5)",
AV_STRINGIFY(H264_NORM_SHIFT_OFFSET),
AV_STRINGIFY(H264_LPS_RANGE_OFFSET),
AV_STRINGIFY(H264_MLPS_STATE_OFFSET),
"%8")
: "=&r"(bit), "=&r"(c->low), "=&r"(c->range), "=&q"(tmp)
: "r"(state), "r"(c),
"i"(offsetof(CABACContext, bytestream)),
"i"(offsetof(CABACContext, bytestream_end))
TABLES_ARG
,"1"(c->low), "2"(c->range)
: "%"REG_c, "memory"
);
return bit & 1;
}
#endif /* HAVE_7REGS && !BROKEN_COMPILER */
#if !BROKEN_COMPILER
#define get_cabac_bypass_sign get_cabac_bypass_sign_x86
static av_always_inline int get_cabac_bypass_sign_x86(CABACContext *c, int val)
{
x86_reg tmp;
__asm__ volatile(
"movl %c6(%2), %k1 \n\t"
"movl %c3(%2), %%eax \n\t"
"shl $17, %k1 \n\t"
"add %%eax, %%eax \n\t"
"sub %k1, %%eax \n\t"
"cdq \n\t"
"and %%edx, %k1 \n\t"
"add %k1, %%eax \n\t"
"xor %%edx, %%ecx \n\t"
"sub %%edx, %%ecx \n\t"
"test %%ax, %%ax \n\t"
"jnz 1f \n\t"
"mov %c4(%2), %1 \n\t"
"subl $0xFFFF, %%eax \n\t"
"movzwl (%1), %%edx \n\t"
"bswap %%edx \n\t"
"shrl $15, %%edx \n\t"
#if UNCHECKED_BITSTREAM_READER
"add $2, %1 \n\t"
"addl %%edx, %%eax \n\t"
"mov %1, %c4(%2) \n\t"
#else
"addl %%edx, %%eax \n\t"
"cmp %c5(%2), %1 \n\t"
"jge 1f \n\t"
"add"OPSIZE" $2, %c4(%2) \n\t"
#endif
"1: \n\t"
"movl %%eax, %c3(%2) \n\t"
: "+c"(val), "=&r"(tmp)
: "r"(c),
"i"(offsetof(CABACContext, low)),
"i"(offsetof(CABACContext, bytestream)),
"i"(offsetof(CABACContext, bytestream_end)),
"i"(offsetof(CABACContext, range))
: "%eax", "%edx", "memory"
);
return val;
}
#define get_cabac_bypass get_cabac_bypass_x86
static av_always_inline int get_cabac_bypass_x86(CABACContext *c)
{
x86_reg tmp;
int res;
__asm__ volatile(
"movl %c6(%2), %k1 \n\t"
"movl %c3(%2), %%eax \n\t"
"shl $17, %k1 \n\t"
"add %%eax, %%eax \n\t"
"sub %k1, %%eax \n\t"
"cdq \n\t"
"and %%edx, %k1 \n\t"
"add %k1, %%eax \n\t"
"inc %%edx \n\t"
"test %%ax, %%ax \n\t"
"jnz 1f \n\t"
"mov %c4(%2), %1 \n\t"
"subl $0xFFFF, %%eax \n\t"
"movzwl (%1), %%ecx \n\t"
"bswap %%ecx \n\t"
"shrl $15, %%ecx \n\t"
"addl %%ecx, %%eax \n\t"
"cmp %c5(%2), %1 \n\t"
"jge 1f \n\t"
"add"OPSIZE" $2, %c4(%2) \n\t"
"1: \n\t"
"movl %%eax, %c3(%2) \n\t"
: "=&d"(res), "=&r"(tmp)
: "r"(c),
"i"(offsetof(CABACContext, low)),
"i"(offsetof(CABACContext, bytestream)),
"i"(offsetof(CABACContext, bytestream_end)),
"i"(offsetof(CABACContext, range))
: "%eax", "%ecx", "memory"
);
return res;
}
#endif /* !BROKEN_COMPILER */
#endif /* HAVE_INLINE_ASM */
#endif /* AVCODEC_X86_CABAC_H */


@@ -0,0 +1,593 @@
/*
* Chinese AVS video (AVS1-P2, JiZhun profile) decoder.
* Copyright (c) 2006 Stefan Gehrer <stefan.gehrer@gmx.de>
*
* MMX-optimized DSP functions, based on H.264 optimizations by
* Michael Niedermayer and Loren Merritt
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/cavsdsp.h"
#include "libavcodec/idctdsp.h"
#include "constants.h"
#include "fpel.h"
#include "idctdsp.h"
#include "config.h"
#if HAVE_MMX_INLINE
/* in/out: mma=mma+mmb, mmb=mmb-mma */
#define SUMSUB_BA( a, b ) \
"paddw "#b", "#a" \n\t"\
"paddw "#b", "#b" \n\t"\
"psubw "#a", "#b" \n\t"
/*****************************************************************************
*
* inverse transform
*
****************************************************************************/
static inline void cavs_idct8_1d(int16_t *block, uint64_t bias)
{
__asm__ volatile(
"movq 112(%0), %%mm4 \n\t" /* mm4 = src7 */
"movq 16(%0), %%mm5 \n\t" /* mm5 = src1 */
"movq 80(%0), %%mm2 \n\t" /* mm2 = src5 */
"movq 48(%0), %%mm7 \n\t" /* mm7 = src3 */
"movq %%mm4, %%mm0 \n\t"
"movq %%mm5, %%mm3 \n\t"
"movq %%mm2, %%mm6 \n\t"
"movq %%mm7, %%mm1 \n\t"
"paddw %%mm4, %%mm4 \n\t" /* mm4 = 2*src7 */
"paddw %%mm3, %%mm3 \n\t" /* mm3 = 2*src1 */
"paddw %%mm6, %%mm6 \n\t" /* mm6 = 2*src5 */
"paddw %%mm1, %%mm1 \n\t" /* mm1 = 2*src3 */
"paddw %%mm4, %%mm0 \n\t" /* mm0 = 3*src7 */
"paddw %%mm3, %%mm5 \n\t" /* mm5 = 3*src1 */
"paddw %%mm6, %%mm2 \n\t" /* mm2 = 3*src5 */
"paddw %%mm1, %%mm7 \n\t" /* mm7 = 3*src3 */
"psubw %%mm4, %%mm5 \n\t" /* mm5 = 3*src1 - 2*src7 = a0 */
"paddw %%mm6, %%mm7 \n\t" /* mm7 = 3*src3 + 2*src5 = a1 */
"psubw %%mm2, %%mm1 \n\t" /* mm1 = 2*src3 - 3*src5 = a2 */
"paddw %%mm0, %%mm3 \n\t" /* mm3 = 2*src1 + 3*src7 = a3 */
"movq %%mm5, %%mm4 \n\t"
"movq %%mm7, %%mm6 \n\t"
"movq %%mm3, %%mm0 \n\t"
"movq %%mm1, %%mm2 \n\t"
SUMSUB_BA( %%mm7, %%mm5 ) /* mm7 = a0 + a1 mm5 = a0 - a1 */
"paddw %%mm3, %%mm7 \n\t" /* mm7 = a0 + a1 + a3 */
"paddw %%mm1, %%mm5 \n\t" /* mm5 = a0 - a1 + a2 */
"paddw %%mm7, %%mm7 \n\t"
"paddw %%mm5, %%mm5 \n\t"
"paddw %%mm6, %%mm7 \n\t" /* mm7 = b4 */
"paddw %%mm4, %%mm5 \n\t" /* mm5 = b5 */
SUMSUB_BA( %%mm1, %%mm3 ) /* mm1 = a3 + a2 mm3 = a3 - a2 */
"psubw %%mm1, %%mm4 \n\t" /* mm4 = a0 - a2 - a3 */
"movq %%mm4, %%mm1 \n\t" /* mm1 = a0 - a2 - a3 */
"psubw %%mm6, %%mm3 \n\t" /* mm3 = a3 - a2 - a1 */
"paddw %%mm1, %%mm1 \n\t"
"paddw %%mm3, %%mm3 \n\t"
"psubw %%mm2, %%mm1 \n\t" /* mm1 = b7 */
"paddw %%mm0, %%mm3 \n\t" /* mm3 = b6 */
"movq 32(%0), %%mm2 \n\t" /* mm2 = src2 */
"movq 96(%0), %%mm6 \n\t" /* mm6 = src6 */
"movq %%mm2, %%mm4 \n\t"
"movq %%mm6, %%mm0 \n\t"
"psllw $2, %%mm4 \n\t" /* mm4 = 4*src2 */
"psllw $2, %%mm6 \n\t" /* mm6 = 4*src6 */
"paddw %%mm4, %%mm2 \n\t" /* mm2 = 5*src2 */
"paddw %%mm6, %%mm0 \n\t" /* mm0 = 5*src6 */
"paddw %%mm2, %%mm2 \n\t"
"paddw %%mm0, %%mm0 \n\t"
"psubw %%mm0, %%mm4 \n\t" /* mm4 = 4*src2 - 10*src6 = a7 */
"paddw %%mm2, %%mm6 \n\t" /* mm6 = 4*src6 + 10*src2 = a6 */
"movq (%0), %%mm2 \n\t" /* mm2 = src0 */
"movq 64(%0), %%mm0 \n\t" /* mm0 = src4 */
SUMSUB_BA( %%mm0, %%mm2 ) /* mm0 = src0+src4 mm2 = src0-src4 */
"psllw $3, %%mm0 \n\t"
"psllw $3, %%mm2 \n\t"
"paddw %1, %%mm0 \n\t" /* add rounding bias */
"paddw %1, %%mm2 \n\t" /* add rounding bias */
SUMSUB_BA( %%mm6, %%mm0 ) /* mm6 = a4 + a6 mm0 = a4 - a6 */
SUMSUB_BA( %%mm4, %%mm2 ) /* mm4 = a5 + a7 mm2 = a5 - a7 */
SUMSUB_BA( %%mm7, %%mm6 ) /* mm7 = dst0 mm6 = dst7 */
SUMSUB_BA( %%mm5, %%mm4 ) /* mm5 = dst1 mm4 = dst6 */
SUMSUB_BA( %%mm3, %%mm2 ) /* mm3 = dst2 mm2 = dst5 */
SUMSUB_BA( %%mm1, %%mm0 ) /* mm1 = dst3 mm0 = dst4 */
:: "r"(block), "m"(bias)
);
}
#define SBUTTERFLY(a,b,t,n,m)\
"mov" #m " " #a ", " #t " \n\t" /* abcd */\
"punpckl" #n " " #b ", " #a " \n\t" /* aebf */\
"punpckh" #n " " #b ", " #t " \n\t" /* cgdh */\
#define TRANSPOSE4(a,b,c,d,t)\
SBUTTERFLY(a,b,t,wd,q) /* a=aebf t=cgdh */\
SBUTTERFLY(c,d,b,wd,q) /* c=imjn b=kolp */\
SBUTTERFLY(a,c,d,dq,q) /* a=aeim d=bfjn */\
SBUTTERFLY(t,b,c,dq,q) /* t=cgko c=dhlp */
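/* TRANSPOSE4(a,b,c,d,t) treats a,b,c,d as rows of a 4x4 matrix of 16-bit
 * words; after it runs, the transposed rows sit in a, d, t, c (in that order),
 * which is why the stores in cavs_idct8_add_mmx below pick registers in that
 * seemingly odd sequence. */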
static void cavs_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
int i;
LOCAL_ALIGNED(16, int16_t, b2, [64]);
for(i=0; i<2; i++){
cavs_idct8_1d(block + 4 * i, ff_pw_4.a);
__asm__ volatile(
"psraw $3, %%mm7 \n\t"
"psraw $3, %%mm6 \n\t"
"psraw $3, %%mm5 \n\t"
"psraw $3, %%mm4 \n\t"
"psraw $3, %%mm3 \n\t"
"psraw $3, %%mm2 \n\t"
"psraw $3, %%mm1 \n\t"
"psraw $3, %%mm0 \n\t"
"movq %%mm7, (%0) \n\t"
TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 )
"movq %%mm0, 8(%0) \n\t"
"movq %%mm6, 24(%0) \n\t"
"movq %%mm7, 40(%0) \n\t"
"movq %%mm4, 56(%0) \n\t"
"movq (%0), %%mm7 \n\t"
TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 )
"movq %%mm7, (%0) \n\t"
"movq %%mm1, 16(%0) \n\t"
"movq %%mm0, 32(%0) \n\t"
"movq %%mm3, 48(%0) \n\t"
:
: "r"(b2 + 32 * i)
: "memory"
);
}
for(i=0; i<2; i++){
cavs_idct8_1d(b2+4*i, ff_pw_64.a);
__asm__ volatile(
"psraw $7, %%mm7 \n\t"
"psraw $7, %%mm6 \n\t"
"psraw $7, %%mm5 \n\t"
"psraw $7, %%mm4 \n\t"
"psraw $7, %%mm3 \n\t"
"psraw $7, %%mm2 \n\t"
"psraw $7, %%mm1 \n\t"
"psraw $7, %%mm0 \n\t"
"movq %%mm7, (%0) \n\t"
"movq %%mm5, 16(%0) \n\t"
"movq %%mm3, 32(%0) \n\t"
"movq %%mm1, 48(%0) \n\t"
"movq %%mm0, 64(%0) \n\t"
"movq %%mm2, 80(%0) \n\t"
"movq %%mm4, 96(%0) \n\t"
"movq %%mm6, 112(%0) \n\t"
:: "r"(b2+4*i)
: "memory"
);
}
ff_add_pixels_clamped(b2, dst, stride);
}
#endif /* HAVE_MMX_INLINE */
#if (HAVE_MMXEXT_INLINE || HAVE_AMD3DNOW_INLINE)
/*****************************************************************************
*
* motion compensation
*
****************************************************************************/
/* vertical filter [-1 -2 96 42 -7 0] */
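/* Scalar sketch of one output sample (read off the macro below, not taken
 * from a reference implementation); A..F are six vertically adjacent input
 * pixels and ADD is ff_pw_64 where this macro is used:
 *     out = av_clip_uint8((-A - 2*B + 96*C + 42*D - 7*E + 64) >> 7);
 */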
#define QPEL_CAVSV1(A,B,C,D,E,F,OP,ADD, MUL1, MUL2) \
"movd (%0), "#F" \n\t"\
"movq "#C", %%mm6 \n\t"\
"pmullw "MANGLE(MUL1)", %%mm6\n\t"\
"movq "#D", %%mm7 \n\t"\
"pmullw "MANGLE(MUL2)", %%mm7\n\t"\
"psllw $3, "#E" \n\t"\
"psubw "#E", %%mm6 \n\t"\
"psraw $3, "#E" \n\t"\
"paddw %%mm7, %%mm6 \n\t"\
"paddw "#E", %%mm6 \n\t"\
"paddw "#B", "#B" \n\t"\
"pxor %%mm7, %%mm7 \n\t"\
"add %2, %0 \n\t"\
"punpcklbw %%mm7, "#F" \n\t"\
"psubw "#B", %%mm6 \n\t"\
"psraw $1, "#B" \n\t"\
"psubw "#A", %%mm6 \n\t"\
"paddw "MANGLE(ADD)", %%mm6 \n\t"\
"psraw $7, %%mm6 \n\t"\
"packuswb %%mm6, %%mm6 \n\t"\
OP(%%mm6, (%1), A, d) \
"add %3, %1 \n\t"
/* vertical filter [ 0 -1 5 5 -1 0] */
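/* Scalar sketch (read off the macro below); with ADD = ff_pw_4 as used here:
 *     out = av_clip_uint8((-B + 5*C + 5*D - E + 4) >> 3);
 */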
#define QPEL_CAVSV2(A,B,C,D,E,F,OP,ADD, MUL1, MUL2) \
"movd (%0), "#F" \n\t"\
"movq "#C", %%mm6 \n\t"\
"paddw "#D", %%mm6 \n\t"\
"pmullw "MANGLE(MUL1)", %%mm6\n\t"\
"add %2, %0 \n\t"\
"punpcklbw %%mm7, "#F" \n\t"\
"psubw "#B", %%mm6 \n\t"\
"psubw "#E", %%mm6 \n\t"\
"paddw "MANGLE(ADD)", %%mm6 \n\t"\
"psraw $3, %%mm6 \n\t"\
"packuswb %%mm6, %%mm6 \n\t"\
OP(%%mm6, (%1), A, d) \
"add %3, %1 \n\t"
/* vertical filter [ 0 -7 42 96 -2 -1] */
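/* Scalar sketch, the mirror image of QPEL_CAVSV1 (read off the macro below):
 *     out = av_clip_uint8((-7*B + 42*C + 96*D - 2*E - F + 64) >> 7);
 */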
#define QPEL_CAVSV3(A,B,C,D,E,F,OP,ADD, MUL1, MUL2) \
"movd (%0), "#F" \n\t"\
"movq "#C", %%mm6 \n\t"\
"pmullw "MANGLE(MUL2)", %%mm6\n\t"\
"movq "#D", %%mm7 \n\t"\
"pmullw "MANGLE(MUL1)", %%mm7\n\t"\
"psllw $3, "#B" \n\t"\
"psubw "#B", %%mm6 \n\t"\
"psraw $3, "#B" \n\t"\
"paddw %%mm7, %%mm6 \n\t"\
"paddw "#B", %%mm6 \n\t"\
"paddw "#E", "#E" \n\t"\
"pxor %%mm7, %%mm7 \n\t"\
"add %2, %0 \n\t"\
"punpcklbw %%mm7, "#F" \n\t"\
"psubw "#E", %%mm6 \n\t"\
"psraw $1, "#E" \n\t"\
"psubw "#F", %%mm6 \n\t"\
"paddw "MANGLE(ADD)", %%mm6 \n\t"\
"psraw $7, %%mm6 \n\t"\
"packuswb %%mm6, %%mm6 \n\t"\
OP(%%mm6, (%1), A, d) \
"add %3, %1 \n\t"
#define QPEL_CAVSVNUM(VOP,OP,ADD,MUL1,MUL2)\
int w= 2;\
src -= 2*srcStride;\
\
while(w--){\
__asm__ volatile(\
"pxor %%mm7, %%mm7 \n\t"\
"movd (%0), %%mm0 \n\t"\
"add %2, %0 \n\t"\
"movd (%0), %%mm1 \n\t"\
"add %2, %0 \n\t"\
"movd (%0), %%mm2 \n\t"\
"add %2, %0 \n\t"\
"movd (%0), %%mm3 \n\t"\
"add %2, %0 \n\t"\
"movd (%0), %%mm4 \n\t"\
"add %2, %0 \n\t"\
"punpcklbw %%mm7, %%mm0 \n\t"\
"punpcklbw %%mm7, %%mm1 \n\t"\
"punpcklbw %%mm7, %%mm2 \n\t"\
"punpcklbw %%mm7, %%mm3 \n\t"\
"punpcklbw %%mm7, %%mm4 \n\t"\
VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, ADD, MUL1, MUL2)\
VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, ADD, MUL1, MUL2)\
VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, ADD, MUL1, MUL2)\
VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, ADD, MUL1, MUL2)\
VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP, ADD, MUL1, MUL2)\
VOP(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP, ADD, MUL1, MUL2)\
VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, ADD, MUL1, MUL2)\
VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, ADD, MUL1, MUL2)\
\
: "+a"(src), "+c"(dst)\
: "S"((x86_reg)srcStride), "r"((x86_reg)dstStride)\
NAMED_CONSTRAINTS_ADD(ADD,MUL1,MUL2)\
: "memory"\
);\
if(h==16){\
__asm__ volatile(\
VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, ADD, MUL1, MUL2)\
VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, ADD, MUL1, MUL2)\
VOP(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP, ADD, MUL1, MUL2)\
VOP(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP, ADD, MUL1, MUL2)\
VOP(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP, ADD, MUL1, MUL2)\
VOP(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP, ADD, MUL1, MUL2)\
VOP(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP, ADD, MUL1, MUL2)\
VOP(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP, ADD, MUL1, MUL2)\
\
: "+a"(src), "+c"(dst)\
: "S"((x86_reg)srcStride), "r"((x86_reg)dstStride)\
NAMED_CONSTRAINTS_ADD(ADD,MUL1,MUL2)\
: "memory"\
);\
}\
src += 4-(h+5)*srcStride;\
dst += 4-h*dstStride;\
}
#define QPEL_CAVS(OPNAME, OP, MMX)\
static void OPNAME ## cavs_qpel8_h_ ## MMX(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride){\
int h=8;\
__asm__ volatile(\
"pxor %%mm7, %%mm7 \n\t"\
"movq "MANGLE(ff_pw_5)", %%mm6\n\t"\
"1: \n\t"\
"movq (%0), %%mm0 \n\t"\
"movq 1(%0), %%mm2 \n\t"\
"movq %%mm0, %%mm1 \n\t"\
"movq %%mm2, %%mm3 \n\t"\
"punpcklbw %%mm7, %%mm0 \n\t"\
"punpckhbw %%mm7, %%mm1 \n\t"\
"punpcklbw %%mm7, %%mm2 \n\t"\
"punpckhbw %%mm7, %%mm3 \n\t"\
"paddw %%mm2, %%mm0 \n\t"\
"paddw %%mm3, %%mm1 \n\t"\
"pmullw %%mm6, %%mm0 \n\t"\
"pmullw %%mm6, %%mm1 \n\t"\
"movq -1(%0), %%mm2 \n\t"\
"movq 2(%0), %%mm4 \n\t"\
"movq %%mm2, %%mm3 \n\t"\
"movq %%mm4, %%mm5 \n\t"\
"punpcklbw %%mm7, %%mm2 \n\t"\
"punpckhbw %%mm7, %%mm3 \n\t"\
"punpcklbw %%mm7, %%mm4 \n\t"\
"punpckhbw %%mm7, %%mm5 \n\t"\
"paddw %%mm4, %%mm2 \n\t"\
"paddw %%mm3, %%mm5 \n\t"\
"psubw %%mm2, %%mm0 \n\t"\
"psubw %%mm5, %%mm1 \n\t"\
"movq "MANGLE(ff_pw_4)", %%mm5\n\t"\
"paddw %%mm5, %%mm0 \n\t"\
"paddw %%mm5, %%mm1 \n\t"\
"psraw $3, %%mm0 \n\t"\
"psraw $3, %%mm1 \n\t"\
"packuswb %%mm1, %%mm0 \n\t"\
OP(%%mm0, (%1),%%mm5, q) \
"add %3, %0 \n\t"\
"add %4, %1 \n\t"\
"decl %2 \n\t"\
" jnz 1b \n\t"\
: "+a"(src), "+c"(dst), "+m"(h)\
: "d"((x86_reg)srcStride), "S"((x86_reg)dstStride)\
NAMED_CONSTRAINTS_ADD(ff_pw_4,ff_pw_5)\
: "memory"\
);\
}\
\
static inline void OPNAME ## cavs_qpel8or16_v1_ ## MMX(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h){\
QPEL_CAVSVNUM(QPEL_CAVSV1,OP,ff_pw_64,ff_pw_96,ff_pw_42) \
}\
\
static inline void OPNAME ## cavs_qpel8or16_v2_ ## MMX(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h){\
QPEL_CAVSVNUM(QPEL_CAVSV2,OP,ff_pw_4,ff_pw_5,ff_pw_42) \
}\
\
static inline void OPNAME ## cavs_qpel8or16_v3_ ## MMX(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h){\
QPEL_CAVSVNUM(QPEL_CAVSV3,OP,ff_pw_64,ff_pw_96,ff_pw_42) \
}\
\
static void OPNAME ## cavs_qpel8_v1_ ## MMX(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride){\
OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst , src , dstStride, srcStride, 8);\
}\
static void OPNAME ## cavs_qpel16_v1_ ## MMX(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride){\
OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst , src , dstStride, srcStride, 16);\
OPNAME ## cavs_qpel8or16_v1_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## cavs_qpel8_v2_ ## MMX(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride){\
OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst , src , dstStride, srcStride, 8);\
}\
static void OPNAME ## cavs_qpel16_v2_ ## MMX(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride){\
OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst , src , dstStride, srcStride, 16);\
OPNAME ## cavs_qpel8or16_v2_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## cavs_qpel8_v3_ ## MMX(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride){\
OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst , src , dstStride, srcStride, 8);\
}\
static void OPNAME ## cavs_qpel16_v3_ ## MMX(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride){\
OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst , src , dstStride, srcStride, 16);\
OPNAME ## cavs_qpel8or16_v3_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static void OPNAME ## cavs_qpel16_h_ ## MMX(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride){\
OPNAME ## cavs_qpel8_h_ ## MMX(dst , src , dstStride, srcStride);\
OPNAME ## cavs_qpel8_h_ ## MMX(dst+8, src+8, dstStride, srcStride);\
src += 8*srcStride;\
dst += 8*dstStride;\
OPNAME ## cavs_qpel8_h_ ## MMX(dst , src , dstStride, srcStride);\
OPNAME ## cavs_qpel8_h_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}
#define CAVS_MC(OPNAME, SIZE, MMX) \
static void OPNAME ## cavs_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
OPNAME ## cavs_qpel ## SIZE ## _h_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
OPNAME ## cavs_qpel ## SIZE ## _v1_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
OPNAME ## cavs_qpel ## SIZE ## _v2_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## cavs_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
OPNAME ## cavs_qpel ## SIZE ## _v3_ ## MMX(dst, src, stride, stride);\
}
#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgusb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"
#define AVG_MMXEXT_OP(a, b, temp, size) \
"mov" #size " " #b ", " #temp " \n\t"\
"pavgb " #temp ", " #a " \n\t"\
"mov" #size " " #a ", " #b " \n\t"
#endif /* (HAVE_MMXEXT_INLINE || HAVE_AMD3DNOW_INLINE) */
#if HAVE_MMX_EXTERNAL
static void put_cavs_qpel8_mc00_mmx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
ff_put_pixels8_mmx(dst, src, stride, 8);
}
static void avg_cavs_qpel8_mc00_mmx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
ff_avg_pixels8_mmx(dst, src, stride, 8);
}
static void avg_cavs_qpel8_mc00_mmxext(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
ff_avg_pixels8_mmxext(dst, src, stride, 8);
}
static void put_cavs_qpel16_mc00_mmx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
ff_put_pixels16_mmx(dst, src, stride, 16);
}
static void avg_cavs_qpel16_mc00_mmx(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
ff_avg_pixels16_mmx(dst, src, stride, 16);
}
static void avg_cavs_qpel16_mc00_mmxext(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
ff_avg_pixels16_mmxext(dst, src, stride, 16);
}
static void put_cavs_qpel16_mc00_sse2(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
ff_put_pixels16_sse2(dst, src, stride, 16);
}
static void avg_cavs_qpel16_mc00_sse2(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
ff_avg_pixels16_sse2(dst, src, stride, 16);
}
#endif
static av_cold void cavsdsp_init_mmx(CAVSDSPContext *c,
AVCodecContext *avctx)
{
#if HAVE_MMX_EXTERNAL
c->put_cavs_qpel_pixels_tab[0][0] = put_cavs_qpel16_mc00_mmx;
c->put_cavs_qpel_pixels_tab[1][0] = put_cavs_qpel8_mc00_mmx;
c->avg_cavs_qpel_pixels_tab[0][0] = avg_cavs_qpel16_mc00_mmx;
c->avg_cavs_qpel_pixels_tab[1][0] = avg_cavs_qpel8_mc00_mmx;
#endif
#if HAVE_MMX_INLINE
c->cavs_idct8_add = cavs_idct8_add_mmx;
c->idct_perm = FF_IDCT_PERM_TRANSPOSE;
#endif /* HAVE_MMX_INLINE */
}
#define DSPFUNC(PFX, IDX, NUM, EXT) \
c->PFX ## _cavs_qpel_pixels_tab[IDX][ 2] = PFX ## _cavs_qpel ## NUM ## _mc20_ ## EXT; \
c->PFX ## _cavs_qpel_pixels_tab[IDX][ 4] = PFX ## _cavs_qpel ## NUM ## _mc01_ ## EXT; \
c->PFX ## _cavs_qpel_pixels_tab[IDX][ 8] = PFX ## _cavs_qpel ## NUM ## _mc02_ ## EXT; \
c->PFX ## _cavs_qpel_pixels_tab[IDX][12] = PFX ## _cavs_qpel ## NUM ## _mc03_ ## EXT;
#if HAVE_MMXEXT_INLINE
QPEL_CAVS(put_, PUT_OP, mmxext)
QPEL_CAVS(avg_, AVG_MMXEXT_OP, mmxext)
CAVS_MC(put_, 8, mmxext)
CAVS_MC(put_, 16, mmxext)
CAVS_MC(avg_, 8, mmxext)
CAVS_MC(avg_, 16, mmxext)
#endif /* HAVE_MMXEXT_INLINE */
#if HAVE_AMD3DNOW_INLINE
QPEL_CAVS(put_, PUT_OP, 3dnow)
QPEL_CAVS(avg_, AVG_3DNOW_OP, 3dnow)
CAVS_MC(put_, 8, 3dnow)
CAVS_MC(put_, 16,3dnow)
CAVS_MC(avg_, 8, 3dnow)
CAVS_MC(avg_, 16,3dnow)
static av_cold void cavsdsp_init_3dnow(CAVSDSPContext *c,
AVCodecContext *avctx)
{
DSPFUNC(put, 0, 16, 3dnow);
DSPFUNC(put, 1, 8, 3dnow);
DSPFUNC(avg, 0, 16, 3dnow);
DSPFUNC(avg, 1, 8, 3dnow);
}
#endif /* HAVE_AMD3DNOW_INLINE */
av_cold void ff_cavsdsp_init_x86(CAVSDSPContext *c, AVCodecContext *avctx)
{
int cpu_flags = av_get_cpu_flags();
cavsdsp_init_mmx(c, avctx);
#if HAVE_AMD3DNOW_INLINE
if (INLINE_AMD3DNOW(cpu_flags))
cavsdsp_init_3dnow(c, avctx);
#endif /* HAVE_AMD3DNOW_INLINE */
#if HAVE_MMXEXT_INLINE
if (INLINE_MMXEXT(cpu_flags)) {
DSPFUNC(put, 0, 16, mmxext);
DSPFUNC(put, 1, 8, mmxext);
DSPFUNC(avg, 0, 16, mmxext);
DSPFUNC(avg, 1, 8, mmxext);
}
#endif
#if HAVE_MMX_EXTERNAL
if (EXTERNAL_MMXEXT(cpu_flags)) {
c->avg_cavs_qpel_pixels_tab[0][0] = avg_cavs_qpel16_mc00_mmxext;
c->avg_cavs_qpel_pixels_tab[1][0] = avg_cavs_qpel8_mc00_mmxext;
}
#endif
#if HAVE_SSE2_EXTERNAL
if (EXTERNAL_SSE2(cpu_flags)) {
c->put_cavs_qpel_pixels_tab[0][0] = put_cavs_qpel16_mc00_sse2;
c->avg_cavs_qpel_pixels_tab[0][0] = avg_cavs_qpel16_mc00_sse2;
}
#endif
}


@@ -0,0 +1,81 @@
/*
* MMX/SSE/AVX constants used across x86 dsp optimizations.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h" // for xmm_reg
#include "constants.h"
DECLARE_ALIGNED(32, const ymm_reg, ff_pw_1) = { 0x0001000100010001ULL, 0x0001000100010001ULL,
0x0001000100010001ULL, 0x0001000100010001ULL };
DECLARE_ALIGNED(32, const ymm_reg, ff_pw_2) = { 0x0002000200020002ULL, 0x0002000200020002ULL,
0x0002000200020002ULL, 0x0002000200020002ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_3) = { 0x0003000300030003ULL, 0x0003000300030003ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_4) = { 0x0004000400040004ULL, 0x0004000400040004ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_5) = { 0x0005000500050005ULL, 0x0005000500050005ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_8) = { 0x0008000800080008ULL, 0x0008000800080008ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_9) = { 0x0009000900090009ULL, 0x0009000900090009ULL };
DECLARE_ALIGNED(8, const uint64_t, ff_pw_15) = 0x000F000F000F000FULL;
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_16) = { 0x0010001000100010ULL, 0x0010001000100010ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_17) = { 0x0011001100110011ULL, 0x0011001100110011ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_18) = { 0x0012001200120012ULL, 0x0012001200120012ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_20) = { 0x0014001400140014ULL, 0x0014001400140014ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_32) = { 0x0020002000200020ULL, 0x0020002000200020ULL };
DECLARE_ALIGNED(8, const uint64_t, ff_pw_42) = 0x002A002A002A002AULL;
DECLARE_ALIGNED(8, const uint64_t, ff_pw_53) = 0x0035003500350035ULL;
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_64) = { 0x0040004000400040ULL, 0x0040004000400040ULL };
DECLARE_ALIGNED(8, const uint64_t, ff_pw_96) = 0x0060006000600060ULL;
DECLARE_ALIGNED(8, const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
DECLARE_ALIGNED(32, const ymm_reg, ff_pw_255) = { 0x00ff00ff00ff00ffULL, 0x00ff00ff00ff00ffULL,
0x00ff00ff00ff00ffULL, 0x00ff00ff00ff00ffULL };
DECLARE_ALIGNED(32, const ymm_reg, ff_pw_256) = { 0x0100010001000100ULL, 0x0100010001000100ULL,
0x0100010001000100ULL, 0x0100010001000100ULL };
DECLARE_ALIGNED(32, const ymm_reg, ff_pw_512) = { 0x0200020002000200ULL, 0x0200020002000200ULL,
0x0200020002000200ULL, 0x0200020002000200ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pw_1019) = { 0x03FB03FB03FB03FBULL, 0x03FB03FB03FB03FBULL };
DECLARE_ALIGNED(32, const ymm_reg, ff_pw_1023) = { 0x03ff03ff03ff03ffULL, 0x03ff03ff03ff03ffULL,
0x03ff03ff03ff03ffULL, 0x03ff03ff03ff03ffULL};
DECLARE_ALIGNED(32, const ymm_reg, ff_pw_1024) = { 0x0400040004000400ULL, 0x0400040004000400ULL,
0x0400040004000400ULL, 0x0400040004000400ULL};
DECLARE_ALIGNED(32, const ymm_reg, ff_pw_2048) = { 0x0800080008000800ULL, 0x0800080008000800ULL,
0x0800080008000800ULL, 0x0800080008000800ULL };
DECLARE_ALIGNED(32, const ymm_reg, ff_pw_4096) = { 0x1000100010001000ULL, 0x1000100010001000ULL,
0x1000100010001000ULL, 0x1000100010001000ULL };
DECLARE_ALIGNED(32, const ymm_reg, ff_pw_8192) = { 0x2000200020002000ULL, 0x2000200020002000ULL,
0x2000200020002000ULL, 0x2000200020002000ULL };
DECLARE_ALIGNED(32, const ymm_reg, ff_pw_m1) = { 0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFFFFFFFFFULL,
0xFFFFFFFFFFFFFFFFULL, 0xFFFFFFFFFFFFFFFFULL };
DECLARE_ALIGNED(32, const ymm_reg, ff_pb_0) = { 0x0000000000000000ULL, 0x0000000000000000ULL,
0x0000000000000000ULL, 0x0000000000000000ULL };
DECLARE_ALIGNED(32, const ymm_reg, ff_pb_1) = { 0x0101010101010101ULL, 0x0101010101010101ULL,
0x0101010101010101ULL, 0x0101010101010101ULL };
DECLARE_ALIGNED(32, const ymm_reg, ff_pb_2) = { 0x0202020202020202ULL, 0x0202020202020202ULL,
0x0202020202020202ULL, 0x0202020202020202ULL };
DECLARE_ALIGNED(32, const ymm_reg, ff_pb_3) = { 0x0303030303030303ULL, 0x0303030303030303ULL,
0x0303030303030303ULL, 0x0303030303030303ULL };
DECLARE_ALIGNED(32, const xmm_reg, ff_pb_15) = { 0x0F0F0F0F0F0F0F0FULL, 0x0F0F0F0F0F0F0F0FULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pb_80) = { 0x8080808080808080ULL, 0x8080808080808080ULL };
DECLARE_ALIGNED(16, const xmm_reg, ff_pb_FE) = { 0xFEFEFEFEFEFEFEFEULL, 0xFEFEFEFEFEFEFEFEULL };
DECLARE_ALIGNED(8, const uint64_t, ff_pb_FC) = 0xFCFCFCFCFCFCFCFCULL;
DECLARE_ALIGNED(16, const xmm_reg, ff_ps_neg) = { 0x8000000080000000ULL, 0x8000000080000000ULL };
DECLARE_ALIGNED(32, const ymm_reg, ff_pd_1) = { 0x0000000100000001ULL, 0x0000000100000001ULL,
0x0000000100000001ULL, 0x0000000100000001ULL };


@@ -0,0 +1,66 @@
/*
* MMX/SSE constants used across x86 dsp optimizations.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_X86_CONSTANTS_H
#define AVCODEC_X86_CONSTANTS_H
#include <stdint.h>
#include "libavutil/x86/asm.h"
extern const ymm_reg ff_pw_1;
extern const ymm_reg ff_pw_2;
extern const xmm_reg ff_pw_3;
extern const xmm_reg ff_pw_4;
extern const xmm_reg ff_pw_5;
extern const xmm_reg ff_pw_8;
extern const xmm_reg ff_pw_9;
extern const uint64_t ff_pw_15;
extern const xmm_reg ff_pw_16;
extern const xmm_reg ff_pw_18;
extern const xmm_reg ff_pw_20;
extern const xmm_reg ff_pw_32;
extern const uint64_t ff_pw_42;
extern const uint64_t ff_pw_53;
extern const xmm_reg ff_pw_64;
extern const uint64_t ff_pw_96;
extern const uint64_t ff_pw_128;
extern const ymm_reg ff_pw_255;
extern const ymm_reg ff_pw_512;
extern const ymm_reg ff_pw_1023;
extern const ymm_reg ff_pw_1024;
extern const ymm_reg ff_pw_2048;
extern const ymm_reg ff_pw_4096;
extern const ymm_reg ff_pw_8192;
extern const ymm_reg ff_pw_m1;
extern const ymm_reg ff_pb_0;
extern const ymm_reg ff_pb_1;
extern const ymm_reg ff_pb_2;
extern const ymm_reg ff_pb_3;
extern const xmm_reg ff_pb_80;
extern const xmm_reg ff_pb_FE;
extern const uint64_t ff_pb_FC;
extern const xmm_reg ff_ps_neg;
extern const ymm_reg ff_pd_1;
#endif /* AVCODEC_X86_CONSTANTS_H */


@@ -0,0 +1,431 @@
;******************************************************************************
;* SSE-optimized functions for the DCA decoder
;* Copyright (C) 2012-2014 Christophe Gisquet <christophe.gisquet@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION_RODATA
pf_inv16: times 4 dd 0x3D800000 ; 1/16
SECTION .text
; void decode_hf(float dst[DCA_SUBBANDS][8], const int32_t vq_num[DCA_SUBBANDS],
; const int8_t hf_vq[1024][32], intptr_t vq_offset,
; int32_t scale[DCA_SUBBANDS][2], intptr_t start, intptr_t end)
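; Scalar sketch of one loop iteration below (one subband i per trip; variable
; names are illustrative, matching the prototype above):
;     const int8_t *vec = (const int8_t *)hf_vq + vq_offset + 32 * vq_num[i];
;     float f = scale[i][0] * (1.0f / 16);
;     for (j = 0; j < 8; j++)
;         dst[i][j] = vec[j] * f;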
%macro DECODE_HF 0
cglobal decode_hf, 6,6,5, dst, num, src, offset, scale, start, end
lea srcq, [srcq + offsetq]
shl startq, 2
mov offsetd, endm
%define DICT offsetq
shl offsetq, 2
mov endm, offsetq
.loop:
%if ARCH_X86_64
mov offsetd, [scaleq + 2 * startq]
cvtsi2ss m0, offsetd
%else
cvtsi2ss m0, [scaleq + 2 * startq]
%endif
mov offsetd, [numq + startq]
mulss m0, [pf_inv16]
shl DICT, 5
shufps m0, m0, 0
%if cpuflag(sse2)
%if cpuflag(sse4)
pmovsxbd m1, [srcq + DICT + 0]
pmovsxbd m2, [srcq + DICT + 4]
%else
movq m1, [srcq + DICT]
punpcklbw m1, m1
mova m2, m1
punpcklwd m1, m1
punpckhwd m2, m2
psrad m1, 24
psrad m2, 24
%endif
cvtdq2ps m1, m1
cvtdq2ps m2, m2
%else
movd mm0, [srcq + DICT + 0]
movd mm1, [srcq + DICT + 4]
punpcklbw mm0, mm0
punpcklbw mm1, mm1
movq mm2, mm0
movq mm3, mm1
punpcklwd mm0, mm0
punpcklwd mm1, mm1
punpckhwd mm2, mm2
punpckhwd mm3, mm3
psrad mm0, 24
psrad mm1, 24
psrad mm2, 24
psrad mm3, 24
cvtpi2ps m1, mm0
cvtpi2ps m2, mm1
cvtpi2ps m3, mm2
cvtpi2ps m4, mm3
shufps m0, m0, 0
shufps m1, m3, q1010
shufps m2, m4, q1010
%endif
mulps m1, m0
mulps m2, m0
mova [dstq + 8 * startq + 0], m1
mova [dstq + 8 * startq + 16], m2
add startq, 4
cmp startq, endm
jl .loop
.end:
%if notcpuflag(sse2)
emms
%endif
REP_RET
%endmacro
%if ARCH_X86_32
INIT_XMM sse
DECODE_HF
%endif
INIT_XMM sse2
DECODE_HF
INIT_XMM sse4
DECODE_HF
; %1=v0/v1 %2=in1 %3=in2
%macro FIR_LOOP 2-3
.loop%1:
%define va m1
%define vb m2
%if %1
%define OFFSET 0
%else
%define OFFSET NUM_COEF*count
%endif
; for v0, incrementing and for v1, decrementing
mova va, [cf0q + OFFSET]
mova vb, [cf0q + OFFSET + 4*NUM_COEF]
%if %0 == 3
mova m4, [cf0q + OFFSET + mmsize]
mova m0, [cf0q + OFFSET + 4*NUM_COEF + mmsize]
%endif
mulps va, %2
mulps vb, %2
%if %0 == 3
%if cpuflag(fma3)
fmaddps va, m4, %3, va
fmaddps vb, m0, %3, vb
%else
mulps m4, %3
mulps m0, %3
addps va, m4
addps vb, m0
%endif
%endif
; va = va1 va2 va3 va4
; vb = vb1 vb2 vb3 vb4
%if %1
SWAP va, vb
%endif
mova m4, va
unpcklps va, vb ; va3 vb3 va4 vb4
unpckhps m4, vb ; va1 vb1 va2 vb2
addps m4, va ; va1+3 vb1+3 va2+4 vb2+4
movhlps vb, m4 ; va1+3 vb1+3
addps vb, m4 ; va0..4 vb0..4
movlps [outq + count], vb
%if %1
sub cf0q, 8*NUM_COEF
%endif
add count, 8
jl .loop%1
%endmacro
; void dca_lfe_fir(float *out, float *in, float *coefs)
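; Loose sketch of the work per call (derived from the code, not the reference
; C): the "v0" half of the outputs uses the coefficient table read forwards
; and the "v1" half uses it read backwards (see the comment in FIR_LOOP),
; each output being a NUM_COEF-tap dot product against the reversed input
; block held in IN1 (and IN2 for the 8-tap variant).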
%macro DCA_LFE_FIR 1
cglobal dca_lfe_fir%1, 3,3,6-%1, out, in, cf0
%define IN1 m3
%define IN2 m5
%define count inq
%define NUM_COEF 4*(2-%1)
%define NUM_OUT 32*(%1+1)
movu IN1, [inq + 4 - 1*mmsize]
shufps IN1, IN1, q0123
%if %1 == 0
movu IN2, [inq + 4 - 2*mmsize]
shufps IN2, IN2, q0123
%endif
mov count, -4*NUM_OUT
add cf0q, 4*NUM_COEF*NUM_OUT
add outq, 4*NUM_OUT
; compute v0 first
%if %1 == 0
FIR_LOOP 0, IN1, IN2
%else
FIR_LOOP 0, IN1
%endif
shufps IN1, IN1, q0123
mov count, -4*NUM_OUT
; cf1 already correctly positioned
add outq, 4*NUM_OUT ; outq now at out2
sub cf0q, 8*NUM_COEF
%if %1 == 0
shufps IN2, IN2, q0123
FIR_LOOP 1, IN2, IN1
%else
FIR_LOOP 1, IN1
%endif
RET
%endmacro
INIT_XMM sse
DCA_LFE_FIR 0
DCA_LFE_FIR 1
%if HAVE_FMA3_EXTERNAL
INIT_XMM fma3
DCA_LFE_FIR 0
%endif
%macro SETZERO 1
%if cpuflag(sse2) && notcpuflag(avx)
pxor %1, %1
%else
xorps %1, %1, %1
%endif
%endmacro
%macro SHUF 3
%if cpuflag(avx)
mova %3, [%2 - 16]
vperm2f128 %1, %3, %3, 1
vshufps %1, %1, %1, q0123
%elif cpuflag(sse2)
pshufd %1, [%2], q0123
%else
mova %1, [%2]
shufps %1, %1, q0123
%endif
%endmacro
%macro INNER_LOOP 1
; reading backwards: ptr1 = synth_buf + j + i; ptr2 = synth_buf + j - i
;~ a += window[i + j] * (-synth_buf[15 - i + j])
;~ b += window[i + j + 16] * (synth_buf[i + j])
SHUF m5, ptr2 + j + (15 - 3) * 4, m6
mova m6, [ptr1 + j]
%if ARCH_X86_64
SHUF m11, ptr2 + j + (15 - 3) * 4 - mmsize, m12
mova m12, [ptr1 + j + mmsize]
%endif
%if cpuflag(fma3)
fmaddps m2, m6, [win + %1 + j + 16 * 4], m2
fnmaddps m1, m5, [win + %1 + j], m1
%if ARCH_X86_64
fmaddps m8, m12, [win + %1 + j + mmsize + 16 * 4], m8
fnmaddps m7, m11, [win + %1 + j + mmsize], m7
%endif
%else ; non-FMA
mulps m6, m6, [win + %1 + j + 16 * 4]
mulps m5, m5, [win + %1 + j]
%if ARCH_X86_64
mulps m12, m12, [win + %1 + j + mmsize + 16 * 4]
mulps m11, m11, [win + %1 + j + mmsize]
%endif
addps m2, m2, m6
subps m1, m1, m5
%if ARCH_X86_64
addps m8, m8, m12
subps m7, m7, m11
%endif
%endif ; cpuflag(fma3)
;~ c += window[i + j + 32] * (synth_buf[16 + i + j])
;~ d += window[i + j + 48] * (synth_buf[31 - i + j])
SHUF m6, ptr2 + j + (31 - 3) * 4, m5
mova m5, [ptr1 + j + 16 * 4]
%if ARCH_X86_64
SHUF m12, ptr2 + j + (31 - 3) * 4 - mmsize, m11
mova m11, [ptr1 + j + mmsize + 16 * 4]
%endif
%if cpuflag(fma3)
fmaddps m3, m5, [win + %1 + j + 32 * 4], m3
fmaddps m4, m6, [win + %1 + j + 48 * 4], m4
%if ARCH_X86_64
fmaddps m9, m11, [win + %1 + j + mmsize + 32 * 4], m9
fmaddps m10, m12, [win + %1 + j + mmsize + 48 * 4], m10
%endif
%else ; non-FMA
mulps m5, m5, [win + %1 + j + 32 * 4]
mulps m6, m6, [win + %1 + j + 48 * 4]
%if ARCH_X86_64
mulps m11, m11, [win + %1 + j + mmsize + 32 * 4]
mulps m12, m12, [win + %1 + j + mmsize + 48 * 4]
%endif
addps m3, m3, m5
addps m4, m4, m6
%if ARCH_X86_64
addps m9, m9, m11
addps m10, m10, m12
%endif
%endif ; cpuflag(fma3)
sub j, 64 * 4
%endmacro
; void ff_synth_filter_inner_<opt>(float *synth_buf, float synth_buf2[32],
; const float window[512], float out[32],
; intptr_t offset, float scale)
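; Summary of the ;~ comments inside INNER_LOOP, for i = 0..15 (a and b start
; from the previous synth_buf2 contents, c and d from zero, all summed over j
; in steps of 64 floats):
;~     a -= window[i + j]      * synth_buf[15 - i + j]
;~     b += window[i + j + 16] * synth_buf[i + j]
;~     c += window[i + j + 32] * synth_buf[16 + i + j]
;~     d += window[i + j + 48] * synth_buf[31 - i + j]
; and at the end out[i] = a * scale, out[i + 16] = b * scale,
; synth_buf2[i] = c, synth_buf2[i + 16] = d.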
%macro SYNTH_FILTER 0
cglobal synth_filter_inner, 0, 6 + 4 * ARCH_X86_64, 7 + 6 * ARCH_X86_64, \
synth_buf, synth_buf2, window, out, off, scale
%define scale m0
%if ARCH_X86_32 || WIN64
%if cpuflag(sse2) && notcpuflag(avx)
movd scale, scalem
SPLATD m0
%else
VBROADCASTSS m0, scalem
%endif
; Make sure offset is in a register and not on the stack
%define OFFQ r4q
%else
SPLATD xmm0
%if cpuflag(avx)
vinsertf128 m0, m0, xmm0, 1
%endif
%define OFFQ offq
%endif
; prepare inner counter limit 1
mov r5q, 480
sub r5q, offmp
and r5q, -64
shl r5q, 2
%if ARCH_X86_32 || notcpuflag(avx)
mov OFFQ, r5q
%define i r5q
mov i, 16 * 4 - (ARCH_X86_64 + 1) * mmsize ; main loop counter
%else
%define i 0
%define OFFQ r5q
%endif
%define buf2 synth_buf2q
%if ARCH_X86_32
mov buf2, synth_buf2mp
%endif
.mainloop:
; m1 = a m2 = b m3 = c m4 = d
SETZERO m3
SETZERO m4
mova m1, [buf2 + i]
mova m2, [buf2 + i + 16 * 4]
%if ARCH_X86_32
%define ptr1 r0q
%define ptr2 r1q
%define win r2q
%define j r3q
mov win, windowm
mov ptr1, synth_bufm
%if ARCH_X86_32 || notcpuflag(avx)
add win, i
add ptr1, i
%endif
%else ; ARCH_X86_64
%define ptr1 r6q
%define ptr2 r7q ; must be loaded
%define win r8q
%define j r9q
SETZERO m9
SETZERO m10
mova m7, [buf2 + i + mmsize]
mova m8, [buf2 + i + mmsize + 16 * 4]
lea win, [windowq + i]
lea ptr1, [synth_bufq + i]
%endif
mov ptr2, synth_bufmp
; prepare the inner loop counter
mov j, OFFQ
%if ARCH_X86_32 || notcpuflag(avx)
sub ptr2, i
%endif
.loop1:
INNER_LOOP 0
jge .loop1
mov j, 448 * 4
sub j, OFFQ
jz .end
sub ptr1, j
sub ptr2, j
add win, OFFQ ; now at j-64, so define OFFSET
sub j, 64 * 4
.loop2:
INNER_LOOP 64 * 4
jge .loop2
.end:
%if ARCH_X86_32
mov buf2, synth_buf2m ; needed for next iteration anyway
mov outq, outmp ; j, which will be set again during it
%endif
;~ out[i] = a * scale;
;~ out[i + 16] = b * scale;
mulps m1, m1, scale
mulps m2, m2, scale
%if ARCH_X86_64
mulps m7, m7, scale
mulps m8, m8, scale
%endif
;~ synth_buf2[i] = c;
;~ synth_buf2[i + 16] = d;
mova [buf2 + i + 0 * 4], m3
mova [buf2 + i + 16 * 4], m4
%if ARCH_X86_64
mova [buf2 + i + 0 * 4 + mmsize], m9
mova [buf2 + i + 16 * 4 + mmsize], m10
%endif
;~ out[i] = a;
;~ out[i + 16] = b;
mova [outq + i + 0 * 4], m1
mova [outq + i + 16 * 4], m2
%if ARCH_X86_64
mova [outq + i + 0 * 4 + mmsize], m7
mova [outq + i + 16 * 4 + mmsize], m8
%endif
%if ARCH_X86_32 || notcpuflag(avx)
sub i, (ARCH_X86_64 + 1) * mmsize
jge .mainloop
%endif
RET
%endmacro
%if ARCH_X86_32
INIT_XMM sse
SYNTH_FILTER
%endif
INIT_XMM sse2
SYNTH_FILTER
INIT_YMM avx
SYNTH_FILTER
INIT_YMM fma3
SYNTH_FILTER


@@ -0,0 +1,113 @@
/*
* Copyright (c) 2012-2014 Christophe Gisquet <christophe.gisquet@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/dcadsp.h"
void ff_decode_hf_sse(float dst[DCA_SUBBANDS][8], const int vq_num[DCA_SUBBANDS],
const int8_t hf_vq[1024][32], intptr_t vq_offset,
int scale[DCA_SUBBANDS][2], intptr_t start, intptr_t end);
void ff_decode_hf_sse2(float dst[DCA_SUBBANDS][8], const int vq_num[DCA_SUBBANDS],
const int8_t hf_vq[1024][32], intptr_t vq_offset,
int scale[DCA_SUBBANDS][2], intptr_t start, intptr_t end);
void ff_decode_hf_sse4(float dst[DCA_SUBBANDS][8], const int vq_num[DCA_SUBBANDS],
const int8_t hf_vq[1024][32], intptr_t vq_offset,
int scale[DCA_SUBBANDS][2], intptr_t start, intptr_t end);
void ff_dca_lfe_fir0_sse(float *out, const float *in, const float *coefs);
void ff_dca_lfe_fir1_sse(float *out, const float *in, const float *coefs);
void ff_dca_lfe_fir0_fma3(float *out, const float *in, const float *coefs);
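/* The init functions below follow the usual FFmpeg dispatch pattern: each
 * successive EXTERNAL_*() check overwrites the pointer installed by the
 * previous one, so the best variant the running CPU supports wins. */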
av_cold void ff_dcadsp_init_x86(DCADSPContext *s)
{
int cpu_flags = av_get_cpu_flags();
if (EXTERNAL_SSE(cpu_flags)) {
#if ARCH_X86_32
s->decode_hf = ff_decode_hf_sse;
#endif
s->lfe_fir[0] = ff_dca_lfe_fir0_sse;
s->lfe_fir[1] = ff_dca_lfe_fir1_sse;
}
if (EXTERNAL_SSE2(cpu_flags)) {
s->decode_hf = ff_decode_hf_sse2;
}
if (EXTERNAL_SSE4(cpu_flags)) {
s->decode_hf = ff_decode_hf_sse4;
}
if (EXTERNAL_FMA3(cpu_flags)) {
s->lfe_fir[0] = ff_dca_lfe_fir0_fma3;
}
}
#define SYNTH_FILTER_FUNC(opt) \
void ff_synth_filter_inner_##opt(float *synth_buf_ptr, float synth_buf2[32], \
const float window[512], \
float out[32], intptr_t offset, float scale); \
static void synth_filter_##opt(FFTContext *imdct, \
float *synth_buf_ptr, int *synth_buf_offset, \
float synth_buf2[32], const float window[512], \
float out[32], const float in[32], float scale) \
{ \
float *synth_buf= synth_buf_ptr + *synth_buf_offset; \
\
imdct->imdct_half(imdct, synth_buf, in); \
\
ff_synth_filter_inner_##opt(synth_buf, synth_buf2, window, \
out, *synth_buf_offset, scale); \
\
*synth_buf_offset = (*synth_buf_offset - 32) & 511; \
}
#if HAVE_YASM
#if ARCH_X86_32
SYNTH_FILTER_FUNC(sse)
#endif
SYNTH_FILTER_FUNC(sse2)
SYNTH_FILTER_FUNC(avx)
SYNTH_FILTER_FUNC(fma3)
#endif /* HAVE_YASM */
av_cold void ff_synth_filter_init_x86(SynthFilterContext *s)
{
#if HAVE_YASM
int cpu_flags = av_get_cpu_flags();
#if ARCH_X86_32
if (EXTERNAL_SSE(cpu_flags)) {
s->synth_filter_float = synth_filter_sse;
}
#endif
if (EXTERNAL_SSE2(cpu_flags)) {
s->synth_filter_float = synth_filter_sse2;
}
if (EXTERNAL_AVX_FAST(cpu_flags)) {
s->synth_filter_float = synth_filter_avx;
}
if (EXTERNAL_FMA3(cpu_flags) && !(cpu_flags & AV_CPU_FLAG_AVXSLOW)) {
s->synth_filter_float = synth_filter_fma3;
}
#endif /* HAVE_YASM */
}


@@ -0,0 +1,121 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "fdct.h"
#include "xvididct.h"
#include "simple_idct.h"
#if (CONFIG_PRORES_DECODER || CONFIG_PRORES_LGPL_DECODER) && ARCH_X86_64 && HAVE_YASM
void ff_prores_idct_put_10_sse2(uint16_t *dst, int linesize,
int16_t *block, int16_t *qmat);
#define PR_WRAP(INSN) \
static void ff_prores_idct_put_10_##INSN##_wrap(int16_t *dst){ \
LOCAL_ALIGNED(16, int16_t, qmat, [64]); \
LOCAL_ALIGNED(16, int16_t, tmp, [64]); \
int i; \
\
for(i=0; i<64; i++){ \
qmat[i]=4; \
tmp[i]= dst[i]; \
} \
ff_prores_idct_put_10_##INSN (dst, 16, tmp, qmat); \
\
for(i=0; i<64; i++) { \
dst[i] -= 512; \
} \
}
PR_WRAP(sse2)
# if HAVE_AVX_EXTERNAL
void ff_prores_idct_put_10_avx(uint16_t *dst, int linesize,
int16_t *block, int16_t *qmat);
PR_WRAP(avx)
# endif
#endif
static const struct algo fdct_tab_arch[] = {
#if HAVE_MMX_INLINE
{ "MMX", ff_fdct_mmx, FF_IDCT_PERM_NONE, AV_CPU_FLAG_MMX },
#endif
#if HAVE_MMXEXT_INLINE
{ "MMXEXT", ff_fdct_mmxext, FF_IDCT_PERM_NONE, AV_CPU_FLAG_MMXEXT },
#endif
#if HAVE_SSE2_INLINE
{ "SSE2", ff_fdct_sse2, FF_IDCT_PERM_NONE, AV_CPU_FLAG_SSE2 },
#endif
{ 0 }
};
static const struct algo idct_tab_arch[] = {
#if HAVE_MMX_INLINE
{ "SIMPLE-MMX", ff_simple_idct_mmx, FF_IDCT_PERM_SIMPLE, AV_CPU_FLAG_MMX },
#endif
#if CONFIG_MPEG4_DECODER && HAVE_YASM
#if ARCH_X86_32
{ "XVID-MMX", ff_xvid_idct_mmx, FF_IDCT_PERM_NONE, AV_CPU_FLAG_MMX, 1 },
{ "XVID-MMXEXT", ff_xvid_idct_mmxext, FF_IDCT_PERM_NONE, AV_CPU_FLAG_MMXEXT, 1 },
#endif
#if HAVE_SSE2_EXTERNAL
{ "XVID-SSE2", ff_xvid_idct_sse2, FF_IDCT_PERM_SSE2, AV_CPU_FLAG_SSE2, 1 },
#endif
#endif /* CONFIG_MPEG4_DECODER && HAVE_YASM */
#if (CONFIG_PRORES_DECODER || CONFIG_PRORES_LGPL_DECODER) && ARCH_X86_64 && HAVE_YASM
{ "PR-SSE2", ff_prores_idct_put_10_sse2_wrap, FF_IDCT_PERM_TRANSPOSE, AV_CPU_FLAG_SSE2, 1 },
# if HAVE_AVX_EXTERNAL
{ "PR-AVX", ff_prores_idct_put_10_avx_wrap, FF_IDCT_PERM_TRANSPOSE, AV_CPU_FLAG_AVX, 1 },
# endif
#endif
{ 0 }
};
static const uint8_t idct_simple_mmx_perm[64] = {
0x00, 0x08, 0x04, 0x09, 0x01, 0x0C, 0x05, 0x0D,
0x10, 0x18, 0x14, 0x19, 0x11, 0x1C, 0x15, 0x1D,
0x20, 0x28, 0x24, 0x29, 0x21, 0x2C, 0x25, 0x2D,
0x12, 0x1A, 0x16, 0x1B, 0x13, 0x1E, 0x17, 0x1F,
0x02, 0x0A, 0x06, 0x0B, 0x03, 0x0E, 0x07, 0x0F,
0x30, 0x38, 0x34, 0x39, 0x31, 0x3C, 0x35, 0x3D,
0x22, 0x2A, 0x26, 0x2B, 0x23, 0x2E, 0x27, 0x2F,
0x32, 0x3A, 0x36, 0x3B, 0x33, 0x3E, 0x37, 0x3F,
};
static const uint8_t idct_sse2_row_perm[8] = { 0, 4, 1, 5, 2, 6, 3, 7 };
static int permute_x86(int16_t dst[64], const int16_t src[64],
enum idct_permutation_type perm_type)
{
int i;
switch (perm_type) {
case FF_IDCT_PERM_SIMPLE:
for (i = 0; i < 64; i++)
dst[idct_simple_mmx_perm[i]] = src[i];
return 1;
case FF_IDCT_PERM_SSE2:
for (i = 0; i < 64; i++)
dst[(i & 0x38) | idct_sse2_row_perm[i & 7]] = src[i];
return 1;
}
return 0;
}


@@ -0,0 +1,493 @@
;******************************************************************************
;* 32 point SSE-optimized DCT transform
;* Copyright (c) 2010 Vitor Sessak
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION_RODATA 32
align 32
ps_cos_vec: dd 0.500603, 0.505471, 0.515447, 0.531043
dd 0.553104, 0.582935, 0.622504, 0.674808
dd -10.190008, -3.407609, -2.057781, -1.484165
dd -1.169440, -0.972568, -0.839350, -0.744536
dd 0.502419, 0.522499, 0.566944, 0.646822
dd 0.788155, 1.060678, 1.722447, 5.101149
dd 0.509796, 0.601345, 0.899976, 2.562916
dd 0.509796, 0.601345, 0.899976, 2.562916
dd 1.000000, 1.000000, 1.306563, 0.541196
dd 1.000000, 1.000000, 1.306563, 0.541196
dd 1.000000, 0.707107, 1.000000, -0.707107
dd 1.000000, 0.707107, 1.000000, -0.707107
dd 0.707107, 0.707107, 0.707107, 0.707107
align 32
ps_p1p1m1m1: dd 0, 0, 0x80000000, 0x80000000, 0, 0, 0x80000000, 0x80000000
%macro BUTTERFLY 4
subps %4, %1, %2
addps %2, %2, %1
mulps %1, %4, %3
%endmacro
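; In scalar terms, with a = %1, b = %2 and a vector of cosine weights c = %3
; (%4 is scratch): %2 becomes a + b and %1 becomes (a - b) * c.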
%macro BUTTERFLY0 5
%if cpuflag(sse2) && notcpuflag(avx)
pshufd %4, %1, %5
xorps %1, %2
addps %1, %4
mulps %1, %3
%else
shufps %4, %1, %1, %5
xorps %1, %1, %2
addps %4, %4, %1
mulps %1, %4, %3
%endif
%endmacro
%macro BUTTERFLY2 4
BUTTERFLY0 %1, %2, %3, %4, 0x1b
%endmacro
%macro BUTTERFLY3 4
BUTTERFLY0 %1, %2, %3, %4, 0xb1
%endmacro
%macro BUTTERFLY3V 5
movaps m%5, m%1
addps m%1, m%2
subps m%5, m%2
SWAP %2, %5
mulps m%2, [ps_cos_vec+192]
movaps m%5, m%3
addps m%3, m%4
subps m%4, m%5
mulps m%4, [ps_cos_vec+192]
%endmacro
%macro PASS6_AND_PERMUTE 0
mov tmpd, [outq+4]
movss m7, [outq+72]
addss m7, [outq+76]
movss m3, [outq+56]
addss m3, [outq+60]
addss m4, m3
movss m2, [outq+52]
addss m2, m3
movss m3, [outq+104]
addss m3, [outq+108]
addss m1, m3
addss m5, m4
movss [outq+ 16], m1
movss m1, [outq+100]
addss m1, m3
movss m3, [outq+40]
movss [outq+ 48], m1
addss m3, [outq+44]
movss m1, [outq+100]
addss m4, m3
addss m3, m2
addss m1, [outq+108]
movss [outq+ 40], m3
addss m2, [outq+36]
movss m3, [outq+8]
movss [outq+ 56], m2
addss m3, [outq+12]
movss [outq+ 32], m3
movss m3, [outq+80]
movss [outq+ 8], m5
movss [outq+ 80], m1
movss m2, [outq+52]
movss m5, [outq+120]
addss m5, [outq+124]
movss m1, [outq+64]
addss m2, [outq+60]
addss m0, m5
addss m5, [outq+116]
mov [outq+64], tmpd
addss m6, m0
addss m1, m6
mov tmpd, [outq+12]
mov [outq+ 96], tmpd
movss [outq+ 4], m1
movss m1, [outq+24]
movss [outq+ 24], m4
movss m4, [outq+88]
addss m4, [outq+92]
addss m3, m4
addss m4, [outq+84]
mov tmpd, [outq+108]
addss m1, [outq+28]
addss m0, m1
addss m1, m5
addss m6, m3
addss m3, m0
addss m0, m7
addss m5, [outq+20]
addss m7, m1
movss [outq+ 12], m6
mov [outq+112], tmpd
movss m6, [outq+28]
movss [outq+ 28], m0
movss m0, [outq+36]
movss [outq+ 36], m7
addss m1, m4
movss m7, [outq+116]
addss m0, m2
addss m7, [outq+124]
movss [outq+ 72], m0
movss m0, [outq+44]
addss m2, m0
movss [outq+ 44], m1
movss [outq+ 88], m2
addss m0, [outq+60]
mov tmpd, [outq+60]
mov [outq+120], tmpd
movss [outq+104], m0
addss m4, m5
addss m5, [outq+68]
movss [outq+52], m4
movss [outq+60], m5
movss m4, [outq+68]
movss m5, [outq+20]
movss [outq+ 20], m3
addss m5, m7
addss m7, m6
addss m4, m5
movss m2, [outq+84]
addss m2, [outq+92]
addss m5, m2
movss [outq+ 68], m4
addss m2, m7
movss m4, [outq+76]
movss [outq+ 84], m2
movss [outq+ 76], m5
addss m7, m4
addss m6, [outq+124]
addss m4, m6
addss m6, [outq+92]
movss [outq+100], m4
movss [outq+108], m6
movss m6, [outq+92]
movss [outq+92], m7
addss m6, [outq+124]
movss [outq+116], m6
%endmacro
INIT_YMM avx
SECTION .text
%if HAVE_AVX_EXTERNAL
; void ff_dct32_float_avx(FFTSample *out, const FFTSample *in)
cglobal dct32_float, 2,3,8, out, in, tmp
; pass 1
vmovaps m4, [inq+0]
vinsertf128 m5, m5, [inq+96], 1
vinsertf128 m5, m5, [inq+112], 0
vshufps m5, m5, m5, 0x1b
BUTTERFLY m4, m5, [ps_cos_vec], m6
vmovaps m2, [inq+64]
vinsertf128 m6, m6, [inq+32], 1
vinsertf128 m6, m6, [inq+48], 0
vshufps m6, m6, m6, 0x1b
BUTTERFLY m2, m6, [ps_cos_vec+32], m0
; pass 2
BUTTERFLY m5, m6, [ps_cos_vec+64], m0
BUTTERFLY m4, m2, [ps_cos_vec+64], m7
; pass 3
vperm2f128 m3, m6, m4, 0x31
vperm2f128 m1, m6, m4, 0x20
vshufps m3, m3, m3, 0x1b
BUTTERFLY m1, m3, [ps_cos_vec+96], m6
vperm2f128 m4, m5, m2, 0x20
vperm2f128 m5, m5, m2, 0x31
vshufps m5, m5, m5, 0x1b
BUTTERFLY m4, m5, [ps_cos_vec+96], m6
; pass 4
vmovaps m6, [ps_p1p1m1m1+0]
vmovaps m2, [ps_cos_vec+128]
BUTTERFLY2 m5, m6, m2, m7
BUTTERFLY2 m4, m6, m2, m7
BUTTERFLY2 m1, m6, m2, m7
BUTTERFLY2 m3, m6, m2, m7
; pass 5
vshufps m6, m6, m6, 0xcc
vmovaps m2, [ps_cos_vec+160]
BUTTERFLY3 m5, m6, m2, m7
BUTTERFLY3 m4, m6, m2, m7
BUTTERFLY3 m1, m6, m2, m7
BUTTERFLY3 m3, m6, m2, m7
vperm2f128 m6, m3, m3, 0x31
vmovaps [outq], m3
vextractf128 [outq+64], m5, 1
vextractf128 [outq+32], m5, 0
vextractf128 [outq+80], m4, 1
vextractf128 [outq+48], m4, 0
vperm2f128 m0, m1, m1, 0x31
vmovaps [outq+96], m1
vzeroupper
; pass 6, no SIMD...
INIT_XMM
PASS6_AND_PERMUTE
RET
%endif
%if ARCH_X86_64
%define SPILL SWAP
%define UNSPILL SWAP
%macro PASS5 0
nop ; FIXME code alignment
SWAP 5, 8
SWAP 4, 12
SWAP 6, 14
SWAP 7, 13
SWAP 0, 15
PERMUTE 9,10, 10,12, 11,14, 12,9, 13,11, 14,13
TRANSPOSE4x4PS 8, 9, 10, 11, 0
BUTTERFLY3V 8, 9, 10, 11, 0
addps m10, m11
TRANSPOSE4x4PS 12, 13, 14, 15, 0
BUTTERFLY3V 12, 13, 14, 15, 0
addps m14, m15
addps m12, m14
addps m14, m13
addps m13, m15
%endmacro
%macro PASS6 0
SWAP 9, 12
SWAP 11, 14
movss [outq+0x00], m8
pshuflw m0, m8, 0xe
movss [outq+0x10], m9
pshuflw m1, m9, 0xe
movss [outq+0x20], m10
pshuflw m2, m10, 0xe
movss [outq+0x30], m11
pshuflw m3, m11, 0xe
movss [outq+0x40], m12
pshuflw m4, m12, 0xe
movss [outq+0x50], m13
pshuflw m5, m13, 0xe
movss [outq+0x60], m14
pshuflw m6, m14, 0xe
movaps [outq+0x70], m15
pshuflw m7, m15, 0xe
addss m0, m1
addss m1, m2
movss [outq+0x08], m0
addss m2, m3
movss [outq+0x18], m1
addss m3, m4
movss [outq+0x28], m2
addss m4, m5
movss [outq+0x38], m3
addss m5, m6
movss [outq+0x48], m4
addss m6, m7
movss [outq+0x58], m5
movss [outq+0x68], m6
movss [outq+0x78], m7
PERMUTE 1,8, 3,9, 5,10, 7,11, 9,12, 11,13, 13,14, 8,1, 10,3, 12,5, 14,7
movhlps m0, m1
pshufd m1, m1, 3
SWAP 0, 2, 4, 6, 8, 10, 12, 14
SWAP 1, 3, 5, 7, 9, 11, 13, 15
%rep 7
movhlps m0, m1
pshufd m1, m1, 3
addss m15, m1
SWAP 0, 2, 4, 6, 8, 10, 12, 14
SWAP 1, 3, 5, 7, 9, 11, 13, 15
%endrep
%assign i 4
%rep 15
addss m0, m1
movss [outq+i], m0
SWAP 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
%assign i i+8
%endrep
%endmacro
%else ; ARCH_X86_32
%macro SPILL 2 ; xmm#, mempos
movaps [outq+(%2-8)*16], m%1
%endmacro
%macro UNSPILL 2
movaps m%1, [outq+(%2-8)*16]
%endmacro
%define PASS6 PASS6_AND_PERMUTE
%macro PASS5 0
movaps m2, [ps_cos_vec+160]
shufps m3, m3, 0xcc
BUTTERFLY3 m5, m3, m2, m1
SPILL 5, 8
UNSPILL 1, 9
BUTTERFLY3 m1, m3, m2, m5
SPILL 1, 14
BUTTERFLY3 m4, m3, m2, m5
SPILL 4, 12
BUTTERFLY3 m7, m3, m2, m5
SPILL 7, 13
UNSPILL 5, 10
BUTTERFLY3 m5, m3, m2, m7
SPILL 5, 10
UNSPILL 4, 11
BUTTERFLY3 m4, m3, m2, m7
SPILL 4, 11
BUTTERFLY3 m6, m3, m2, m7
SPILL 6, 9
BUTTERFLY3 m0, m3, m2, m7
SPILL 0, 15
%endmacro
%endif
; void ff_dct32_float_sse(FFTSample *out, const FFTSample *in)
%macro DCT32_FUNC 0
cglobal dct32_float, 2, 3, 16, out, in, tmp
; pass 1
movaps m0, [inq+0]
LOAD_INV m1, [inq+112]
BUTTERFLY m0, m1, [ps_cos_vec], m3
movaps m7, [inq+64]
LOAD_INV m4, [inq+48]
BUTTERFLY m7, m4, [ps_cos_vec+32], m3
; pass 2
movaps m2, [ps_cos_vec+64]
BUTTERFLY m1, m4, m2, m3
SPILL 1, 11
SPILL 4, 8
; pass 1
movaps m1, [inq+16]
LOAD_INV m6, [inq+96]
BUTTERFLY m1, m6, [ps_cos_vec+16], m3
movaps m4, [inq+80]
LOAD_INV m5, [inq+32]
BUTTERFLY m4, m5, [ps_cos_vec+48], m3
; pass 2
BUTTERFLY m0, m7, m2, m3
movaps m2, [ps_cos_vec+80]
BUTTERFLY m6, m5, m2, m3
BUTTERFLY m1, m4, m2, m3
; pass 3
movaps m2, [ps_cos_vec+96]
shufps m1, m1, 0x1b
BUTTERFLY m0, m1, m2, m3
SPILL 0, 15
SPILL 1, 14
UNSPILL 0, 8
shufps m5, m5, 0x1b
BUTTERFLY m0, m5, m2, m3
UNSPILL 1, 11
shufps m6, m6, 0x1b
BUTTERFLY m1, m6, m2, m3
SPILL 1, 11
shufps m4, m4, 0x1b
BUTTERFLY m7, m4, m2, m3
; pass 4
movaps m3, [ps_p1p1m1m1+0]
movaps m2, [ps_cos_vec+128]
BUTTERFLY2 m5, m3, m2, m1
BUTTERFLY2 m0, m3, m2, m1
SPILL 0, 9
BUTTERFLY2 m6, m3, m2, m1
SPILL 6, 10
UNSPILL 0, 11
BUTTERFLY2 m0, m3, m2, m1
SPILL 0, 11
BUTTERFLY2 m4, m3, m2, m1
BUTTERFLY2 m7, m3, m2, m1
UNSPILL 6, 14
BUTTERFLY2 m6, m3, m2, m1
UNSPILL 0, 15
BUTTERFLY2 m0, m3, m2, m1
PASS5
PASS6
RET
%endmacro
%macro LOAD_INV 2
%if cpuflag(sse2)
pshufd %1, %2, 0x1b
%elif cpuflag(sse)
movaps %1, %2
shufps %1, %1, 0x1b
%endif
%endmacro
%if ARCH_X86_32
INIT_XMM sse
DCT32_FUNC
%endif
INIT_XMM sse2
DCT32_FUNC


@@ -0,0 +1,41 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/dct.h"
void ff_dct32_float_sse(FFTSample *out, const FFTSample *in);
void ff_dct32_float_sse2(FFTSample *out, const FFTSample *in);
void ff_dct32_float_avx(FFTSample *out, const FFTSample *in);
av_cold void ff_dct_init_x86(DCTContext *s)
{
int cpu_flags = av_get_cpu_flags();
#if ARCH_X86_32
if (EXTERNAL_SSE(cpu_flags))
s->dct32 = ff_dct32_float_sse;
#endif
if (EXTERNAL_SSE2(cpu_flags))
s->dct32 = ff_dct32_float_sse2;
if (EXTERNAL_AVX_FAST(cpu_flags))
s->dct32 = ff_dct32_float_avx;
}


@@ -0,0 +1,84 @@
;******************************************************************************
;* SIMD-optimized deinterlacing functions
;* Copyright (c) 2010 Vitor Sessak
;* Copyright (c) 2002 Michael Niedermayer
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION_RODATA
cextern pw_4
SECTION .text
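; Both variants implement the same (-1, 4, 2, 4, -1)/8 vertical filter,
; roughly (a sketch read off the code below, not a reference implementation):
;     out = (-lum_m4 + 4*lum_m3 + 2*lum_m2 + 4*lum_m1 - lum + 4) >> 3
; where psubusw saturates at zero instead of going negative and packuswb
; clamps the result to 0..255.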
%macro DEINTERLACE 1
%ifidn %1, inplace
;void ff_deinterlace_line_inplace_mmx(const uint8_t *lum_m4, const uint8_t *lum_m3, const uint8_t *lum_m2, const uint8_t *lum_m1, const uint8_t *lum, int size)
cglobal deinterlace_line_inplace, 6,6,7, lum_m4, lum_m3, lum_m2, lum_m1, lum, size
%else
;void ff_deinterlace_line_mmx(uint8_t *dst, const uint8_t *lum_m4, const uint8_t *lum_m3, const uint8_t *lum_m2, const uint8_t *lum_m1, const uint8_t *lum, int size)
cglobal deinterlace_line, 7,7,7, dst, lum_m4, lum_m3, lum_m2, lum_m1, lum, size
%endif
pxor mm7, mm7
movq mm6, [pw_4]
.nextrow:
movd mm0, [lum_m4q]
movd mm1, [lum_m3q]
movd mm2, [lum_m2q]
%ifidn %1, inplace
movd [lum_m4q], mm2
%endif
movd mm3, [lum_m1q]
movd mm4, [lumq]
punpcklbw mm0, mm7
punpcklbw mm1, mm7
punpcklbw mm2, mm7
punpcklbw mm3, mm7
punpcklbw mm4, mm7
paddw mm1, mm3
psllw mm2, 1
paddw mm0, mm4
psllw mm1, 2
paddw mm2, mm6
paddw mm1, mm2
psubusw mm1, mm0
psrlw mm1, 3
packuswb mm1, mm7
%ifidn %1, inplace
movd [lum_m2q], mm1
%else
movd [dstq], mm1
add dstq, 4
%endif
add lum_m4q, 4
add lum_m3q, 4
add lum_m2q, 4
add lum_m1q, 4
add lumq, 4
sub sized, 4
jg .nextrow
REP_RET
%endmacro
INIT_MMX mmx
DEINTERLACE ""
DEINTERLACE inplace


@@ -0,0 +1,202 @@
/*
* MMX optimized discrete wavelet transform
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
* Copyright (c) 2010 David Conrad
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "dirac_dwt.h"
#define COMPOSE_VERTICAL(ext, align) \
void ff_vertical_compose53iL0##ext(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, int width); \
void ff_vertical_compose_dirac53iH0##ext(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, int width); \
void ff_vertical_compose_dd137iL0##ext(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, int width); \
void ff_vertical_compose_dd97iH0##ext(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, int width); \
void ff_vertical_compose_haar##ext(IDWTELEM *b0, IDWTELEM *b1, int width); \
void ff_horizontal_compose_haar0i##ext(IDWTELEM *b, IDWTELEM *tmp, int w);\
void ff_horizontal_compose_haar1i##ext(IDWTELEM *b, IDWTELEM *tmp, int w);\
\
static void vertical_compose53iL0##ext(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, int width) \
{ \
int i, width_align = width&~(align-1); \
\
for(i=width_align; i<width; i++) \
b1[i] = COMPOSE_53iL0(b0[i], b1[i], b2[i]); \
\
ff_vertical_compose53iL0##ext(b0, b1, b2, width_align); \
} \
\
static void vertical_compose_dirac53iH0##ext(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, int width) \
{ \
int i, width_align = width&~(align-1); \
\
for(i=width_align; i<width; i++) \
b1[i] = COMPOSE_DIRAC53iH0(b0[i], b1[i], b2[i]); \
\
ff_vertical_compose_dirac53iH0##ext(b0, b1, b2, width_align); \
} \
\
static void vertical_compose_dd137iL0##ext(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, \
IDWTELEM *b3, IDWTELEM *b4, int width) \
{ \
int i, width_align = width&~(align-1); \
\
for(i=width_align; i<width; i++) \
b2[i] = COMPOSE_DD137iL0(b0[i], b1[i], b2[i], b3[i], b4[i]); \
\
ff_vertical_compose_dd137iL0##ext(b0, b1, b2, b3, b4, width_align); \
} \
\
static void vertical_compose_dd97iH0##ext(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, \
IDWTELEM *b3, IDWTELEM *b4, int width) \
{ \
int i, width_align = width&~(align-1); \
\
for(i=width_align; i<width; i++) \
b2[i] = COMPOSE_DD97iH0(b0[i], b1[i], b2[i], b3[i], b4[i]); \
\
ff_vertical_compose_dd97iH0##ext(b0, b1, b2, b3, b4, width_align); \
} \
static void vertical_compose_haar##ext(IDWTELEM *b0, IDWTELEM *b1, int width) \
{ \
int i, width_align = width&~(align-1); \
\
for(i=width_align; i<width; i++) { \
b0[i] = COMPOSE_HAARiL0(b0[i], b1[i]); \
b1[i] = COMPOSE_HAARiH0(b1[i], b0[i]); \
} \
\
ff_vertical_compose_haar##ext(b0, b1, width_align); \
} \
static void horizontal_compose_haar0i##ext(IDWTELEM *b, IDWTELEM *tmp, int w)\
{\
int w2= w>>1;\
int x= w2 - (w2&(align-1));\
ff_horizontal_compose_haar0i##ext(b, tmp, w);\
\
for (; x < w2; x++) {\
b[2*x ] = tmp[x];\
b[2*x+1] = COMPOSE_HAARiH0(b[x+w2], tmp[x]);\
}\
}\
static void horizontal_compose_haar1i##ext(IDWTELEM *b, IDWTELEM *tmp, int w)\
{\
int w2= w>>1;\
int x= w2 - (w2&(align-1));\
ff_horizontal_compose_haar1i##ext(b, tmp, w);\
\
for (; x < w2; x++) {\
b[2*x ] = (tmp[x] + 1)>>1;\
b[2*x+1] = (COMPOSE_HAARiH0(b[x+w2], tmp[x]) + 1)>>1;\
}\
}\
\
#if HAVE_YASM
#if !ARCH_X86_64
COMPOSE_VERTICAL(_mmx, 4)
#endif
COMPOSE_VERTICAL(_sse2, 8)
void ff_horizontal_compose_dd97i_ssse3(IDWTELEM *b, IDWTELEM *tmp, int w);
static void horizontal_compose_dd97i_ssse3(IDWTELEM *b, IDWTELEM *tmp, int w)
{
int w2= w>>1;
int x= w2 - (w2&7);
ff_horizontal_compose_dd97i_ssse3(b, tmp, w);
for (; x < w2; x++) {
b[2*x ] = (tmp[x] + 1)>>1;
b[2*x+1] = (COMPOSE_DD97iH0(tmp[x-1], tmp[x], b[x+w2], tmp[x+1], tmp[x+2]) + 1)>>1;
}
}
#endif
void ff_spatial_idwt_init_mmx(DWTContext *d, enum dwt_type type)
{
#if HAVE_YASM
int mm_flags = av_get_cpu_flags();
#if !ARCH_X86_64
if (!(mm_flags & AV_CPU_FLAG_MMX))
return;
switch (type) {
case DWT_DIRAC_DD9_7:
d->vertical_compose_l0 = (void*)vertical_compose53iL0_mmx;
d->vertical_compose_h0 = (void*)vertical_compose_dd97iH0_mmx;
break;
case DWT_DIRAC_LEGALL5_3:
d->vertical_compose_l0 = (void*)vertical_compose53iL0_mmx;
d->vertical_compose_h0 = (void*)vertical_compose_dirac53iH0_mmx;
break;
case DWT_DIRAC_DD13_7:
d->vertical_compose_l0 = (void*)vertical_compose_dd137iL0_mmx;
d->vertical_compose_h0 = (void*)vertical_compose_dd97iH0_mmx;
break;
case DWT_DIRAC_HAAR0:
d->vertical_compose = (void*)vertical_compose_haar_mmx;
d->horizontal_compose = horizontal_compose_haar0i_mmx;
break;
case DWT_DIRAC_HAAR1:
d->vertical_compose = (void*)vertical_compose_haar_mmx;
d->horizontal_compose = horizontal_compose_haar1i_mmx;
break;
}
#endif
if (!(mm_flags & AV_CPU_FLAG_SSE2))
return;
switch (type) {
case DWT_DIRAC_DD9_7:
d->vertical_compose_l0 = (void*)vertical_compose53iL0_sse2;
d->vertical_compose_h0 = (void*)vertical_compose_dd97iH0_sse2;
break;
case DWT_DIRAC_LEGALL5_3:
d->vertical_compose_l0 = (void*)vertical_compose53iL0_sse2;
d->vertical_compose_h0 = (void*)vertical_compose_dirac53iH0_sse2;
break;
case DWT_DIRAC_DD13_7:
d->vertical_compose_l0 = (void*)vertical_compose_dd137iL0_sse2;
d->vertical_compose_h0 = (void*)vertical_compose_dd97iH0_sse2;
break;
case DWT_DIRAC_HAAR0:
d->vertical_compose = (void*)vertical_compose_haar_sse2;
d->horizontal_compose = horizontal_compose_haar0i_sse2;
break;
case DWT_DIRAC_HAAR1:
d->vertical_compose = (void*)vertical_compose_haar_sse2;
d->horizontal_compose = horizontal_compose_haar1i_sse2;
break;
}
if (!(mm_flags & AV_CPU_FLAG_SSSE3))
return;
switch (type) {
case DWT_DIRAC_DD9_7:
d->horizontal_compose = horizontal_compose_dd97i_ssse3;
break;
}
#endif // HAVE_YASM
}
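The width_align wrappers earlier in this file let the assembly routine process the largest multiple of the vector width and finish the remaining columns in scalar C. A minimal self-contained sketch of that split, with hypothetical names (not the FFmpeg functions); align is assumed to be a power of two:
#include <stddef.h>

/* Stand-in for a vector kernel that only handles multiples of 'align' elements. */
static void add_arrays_simd(int *dst, const int *src, size_t n)
{
    for (size_t i = 0; i < n; i++)
        dst[i] += src[i];
}

static void add_arrays(int *dst, const int *src, size_t n, size_t align)
{
    size_t n_aligned = n & ~(align - 1);   /* largest multiple of the vector width */

    for (size_t i = n_aligned; i < n; i++) /* scalar tail, mirrors the wrappers above */
        dst[i] += src[i];

    add_arrays_simd(dst, src, n_aligned);  /* bulk of the work */
}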

View File

@@ -0,0 +1,30 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_X86_DIRAC_DWT_H
#define AVCODEC_X86_DIRAC_DWT_H
#include "libavcodec/dirac_dwt.h"
void ff_horizontal_compose_dd97i_end_c(IDWTELEM *b, IDWTELEM *tmp, int w2, int x);
void ff_horizontal_compose_haar1i_end_c(IDWTELEM *b, IDWTELEM *tmp, int w2, int x);
void ff_horizontal_compose_haar0i_end_c(IDWTELEM *b, IDWTELEM *tmp, int w2, int x);
void ff_spatial_idwt_init_mmx(DWTContext *d, enum dwt_type type);
#endif

View File

@@ -0,0 +1,156 @@
/*
* Copyright (C) 2010 David Conrad
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/x86/cpu.h"
#include "diracdsp_mmx.h"
#include "fpel.h"
void ff_put_rect_clamped_mmx(uint8_t *dst, int dst_stride, const int16_t *src, int src_stride, int width, int height);
void ff_put_rect_clamped_sse2(uint8_t *dst, int dst_stride, const int16_t *src, int src_stride, int width, int height);
void ff_put_signed_rect_clamped_mmx(uint8_t *dst, int dst_stride, const int16_t *src, int src_stride, int width, int height);
void ff_put_signed_rect_clamped_sse2(uint8_t *dst, int dst_stride, const int16_t *src, int src_stride, int width, int height);
#define HPEL_FILTER(MMSIZE, EXT) \
void ff_dirac_hpel_filter_v_ ## EXT(uint8_t *, const uint8_t *, int, int); \
void ff_dirac_hpel_filter_h_ ## EXT(uint8_t *, const uint8_t *, int); \
\
static void dirac_hpel_filter_ ## EXT(uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, \
const uint8_t *src, int stride, int width, int height) \
{ \
while( height-- ) \
{ \
ff_dirac_hpel_filter_v_ ## EXT(dstv-MMSIZE, src-MMSIZE, stride, width+MMSIZE+5); \
ff_dirac_hpel_filter_h_ ## EXT(dsth, src, width); \
ff_dirac_hpel_filter_h_ ## EXT(dstc, dstv, width); \
\
dsth += stride; \
dstv += stride; \
dstc += stride; \
src += stride; \
} \
}
#if !ARCH_X86_64
HPEL_FILTER(8, mmx)
#endif
HPEL_FILTER(16, sse2)
#define PIXFUNC(PFX, IDX, EXT) \
/*MMXDISABLEDc->PFX ## _dirac_pixels_tab[0][IDX] = ff_ ## PFX ## _dirac_pixels8_ ## EXT;*/ \
c->PFX ## _dirac_pixels_tab[1][IDX] = ff_ ## PFX ## _dirac_pixels16_ ## EXT; \
c->PFX ## _dirac_pixels_tab[2][IDX] = ff_ ## PFX ## _dirac_pixels32_ ## EXT
#define DIRAC_PIXOP(OPNAME2, OPNAME, EXT)\
void ff_ ## OPNAME2 ## _dirac_pixels8_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
if (h&3)\
ff_ ## OPNAME2 ## _dirac_pixels8_c(dst, src, stride, h);\
else\
OPNAME ## _pixels8_ ## EXT(dst, src[0], stride, h);\
}\
void ff_ ## OPNAME2 ## _dirac_pixels16_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
if (h&3)\
ff_ ## OPNAME2 ## _dirac_pixels16_c(dst, src, stride, h);\
else\
OPNAME ## _pixels16_ ## EXT(dst, src[0], stride, h);\
}\
void ff_ ## OPNAME2 ## _dirac_pixels32_ ## EXT(uint8_t *dst, const uint8_t *src[5], int stride, int h)\
{\
if (h&3) {\
ff_ ## OPNAME2 ## _dirac_pixels32_c(dst, src, stride, h);\
} else {\
OPNAME ## _pixels16_ ## EXT(dst , src[0] , stride, h);\
OPNAME ## _pixels16_ ## EXT(dst+16, src[0]+16, stride, h);\
}\
}
DIRAC_PIXOP(put, ff_put, mmx)
DIRAC_PIXOP(avg, ff_avg, mmx)
DIRAC_PIXOP(avg, ff_avg, mmxext)
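The h&3 test above is a per-call fallback: the SIMD pixel routines process four rows per iteration, so any height that is not a multiple of 4 is routed to the plain C version. A small hedged sketch of the same guard with hypothetical helper names:
#include <stdint.h>
#include <string.h>

static void copy_block_c(uint8_t *dst, const uint8_t *src, int stride, int h)
{
    for (int y = 0; y < h; y++)
        memcpy(dst + y * stride, src + y * stride, 16);
}

/* Stand-in for a SIMD routine that is only correct when h is a multiple of 4. */
static void copy_block_4rows(uint8_t *dst, const uint8_t *src, int stride, int h)
{
    for (int y = 0; y < h; y += 4)
        for (int r = 0; r < 4; r++)
            memcpy(dst + (y + r) * stride, src + (y + r) * stride, 16);
}

static void copy_block(uint8_t *dst, const uint8_t *src, int stride, int h)
{
    if (h & 3)                              /* odd heights fall back to plain C */
        copy_block_c(dst, src, stride, h);
    else
        copy_block_4rows(dst, src, stride, h);
}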
void ff_put_dirac_pixels16_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
{
if (h&3)
ff_put_dirac_pixels16_c(dst, src, stride, h);
else
ff_put_pixels16_sse2(dst, src[0], stride, h);
}
void ff_avg_dirac_pixels16_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
{
if (h&3)
ff_avg_dirac_pixels16_c(dst, src, stride, h);
else
ff_avg_pixels16_sse2(dst, src[0], stride, h);
}
void ff_put_dirac_pixels32_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
{
if (h&3) {
ff_put_dirac_pixels32_c(dst, src, stride, h);
} else {
ff_put_pixels16_sse2(dst , src[0] , stride, h);
ff_put_pixels16_sse2(dst+16, src[0]+16, stride, h);
}
}
void ff_avg_dirac_pixels32_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h)
{
if (h&3) {
ff_avg_dirac_pixels32_c(dst, src, stride, h);
} else {
ff_avg_pixels16_sse2(dst , src[0] , stride, h);
ff_avg_pixels16_sse2(dst+16, src[0]+16, stride, h);
}
}
void ff_diracdsp_init_mmx(DiracDSPContext* c)
{
int mm_flags = av_get_cpu_flags();
if (EXTERNAL_MMX(mm_flags)) {
c->add_dirac_obmc[0] = ff_add_dirac_obmc8_mmx;
#if !ARCH_X86_64
c->add_dirac_obmc[1] = ff_add_dirac_obmc16_mmx;
c->add_dirac_obmc[2] = ff_add_dirac_obmc32_mmx;
c->dirac_hpel_filter = dirac_hpel_filter_mmx;
c->add_rect_clamped = ff_add_rect_clamped_mmx;
c->put_signed_rect_clamped = ff_put_signed_rect_clamped_mmx;
#endif
PIXFUNC(put, 0, mmx);
PIXFUNC(avg, 0, mmx);
}
if (EXTERNAL_MMXEXT(mm_flags)) {
PIXFUNC(avg, 0, mmxext);
}
if (EXTERNAL_SSE2(mm_flags)) {
c->dirac_hpel_filter = dirac_hpel_filter_sse2;
c->add_rect_clamped = ff_add_rect_clamped_sse2;
c->put_signed_rect_clamped = ff_put_signed_rect_clamped_sse2;
c->add_dirac_obmc[1] = ff_add_dirac_obmc16_sse2;
c->add_dirac_obmc[2] = ff_add_dirac_obmc32_sse2;
c->put_dirac_pixels_tab[1][0] = ff_put_dirac_pixels16_sse2;
c->avg_dirac_pixels_tab[1][0] = ff_avg_dirac_pixels16_sse2;
c->put_dirac_pixels_tab[2][0] = ff_put_dirac_pixels32_sse2;
c->avg_dirac_pixels_tab[2][0] = ff_avg_dirac_pixels32_sse2;
}
}

View File

@@ -0,0 +1,47 @@
/*
* Copyright (c) 2010 David Conrad
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_X86_DIRACDSP_H
#define AVCODEC_X86_DIRACDSP_H
#include "libavcodec/diracdsp.h"
void ff_diracdsp_init_mmx(DiracDSPContext* c);
DECL_DIRAC_PIXOP(put, mmx);
DECL_DIRAC_PIXOP(avg, mmx);
DECL_DIRAC_PIXOP(avg, mmxext);
void ff_put_dirac_pixels16_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h);
void ff_avg_dirac_pixels16_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h);
void ff_put_dirac_pixels32_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h);
void ff_avg_dirac_pixels32_sse2(uint8_t *dst, const uint8_t *src[5], int stride, int h);
void ff_add_rect_clamped_mmx(uint8_t *, const uint16_t *, int, const int16_t *, int, int, int);
void ff_add_rect_clamped_sse2(uint8_t *, const uint16_t *, int, const int16_t *, int, int, int);
void ff_add_dirac_obmc8_mmx(uint16_t *dst, const uint8_t *src, int stride, const uint8_t *obmc_weight, int yblen);
void ff_add_dirac_obmc16_mmx(uint16_t *dst, const uint8_t *src, int stride, const uint8_t *obmc_weight, int yblen);
void ff_add_dirac_obmc32_mmx(uint16_t *dst, const uint8_t *src, int stride, const uint8_t *obmc_weight, int yblen);
void ff_add_dirac_obmc16_sse2(uint16_t *dst, const uint8_t *src, int stride, const uint8_t *obmc_weight, int yblen);
void ff_add_dirac_obmc32_sse2(uint16_t *dst, const uint8_t *src, int stride, const uint8_t *obmc_weight, int yblen);
#endif

View File

@@ -0,0 +1,265 @@
;******************************************************************************
;* Copyright (c) 2010 David Conrad
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION_RODATA
pw_7: times 8 dw 7
cextern pw_3
cextern pw_16
cextern pw_32
cextern pb_80
section .text
%macro UNPACK_ADD 6
mov%5 %1, %3
mov%6 m5, %4
mova m4, %1
mova %2, m5
punpcklbw %1, m7
punpcklbw m5, m7
punpckhbw m4, m7
punpckhbw %2, m7
paddw %1, m5
paddw %2, m4
%endmacro
%macro HPEL_FILTER 1
; dirac_hpel_filter_v_sse2(uint8_t *dst, uint8_t *src, int stride, int width);
cglobal dirac_hpel_filter_v_%1, 4,6,8, dst, src, stride, width, src0, stridex3
mov src0q, srcq
lea stridex3q, [3*strideq]
sub src0q, stridex3q
pxor m7, m7
.loop:
; 7*(src[0] + src[1])
UNPACK_ADD m0, m1, [srcq], [srcq + strideq], a,a
pmullw m0, [pw_7]
pmullw m1, [pw_7]
; 3*( ... + src[-2] + src[3])
UNPACK_ADD m2, m3, [src0q + strideq], [srcq + stridex3q], a,a
paddw m0, m2
paddw m1, m3
pmullw m0, [pw_3]
pmullw m1, [pw_3]
; ... - 7*(src[-1] + src[2])
UNPACK_ADD m2, m3, [src0q + strideq*2], [srcq + strideq*2], a,a
pmullw m2, [pw_7]
pmullw m3, [pw_7]
psubw m0, m2
psubw m1, m3
; ... - (src[-3] + src[4])
UNPACK_ADD m2, m3, [src0q], [srcq + strideq*4], a,a
psubw m0, m2
psubw m1, m3
paddw m0, [pw_16]
paddw m1, [pw_16]
psraw m0, 5
psraw m1, 5
packuswb m0, m1
mova [dstq], m0
add dstq, mmsize
add srcq, mmsize
add src0q, mmsize
sub widthd, mmsize
jg .loop
RET
; dirac_hpel_filter_h_sse2(uint8_t *dst, uint8_t *src, int width);
cglobal dirac_hpel_filter_h_%1, 3,3,8, dst, src, width
dec widthd
pxor m7, m7
and widthd, ~(mmsize-1)
.loop:
; 7*(src[0] + src[1])
UNPACK_ADD m0, m1, [srcq + widthq], [srcq + widthq + 1], u,u
pmullw m0, [pw_7]
pmullw m1, [pw_7]
; 3*( ... + src[-2] + src[3])
UNPACK_ADD m2, m3, [srcq + widthq - 2], [srcq + widthq + 3], u,u
paddw m0, m2
paddw m1, m3
pmullw m0, [pw_3]
pmullw m1, [pw_3]
; ... - 7*(src[-1] + src[2])
UNPACK_ADD m2, m3, [srcq + widthq - 1], [srcq + widthq + 2], u,u
pmullw m2, [pw_7]
pmullw m3, [pw_7]
psubw m0, m2
psubw m1, m3
; ... - (src[-3] + src[4])
UNPACK_ADD m2, m3, [srcq + widthq - 3], [srcq + widthq + 4], u,u
psubw m0, m2
psubw m1, m3
paddw m0, [pw_16]
paddw m1, [pw_16]
psraw m0, 5
psraw m1, 5
packuswb m0, m1
mova [dstq + widthq], m0
sub widthd, mmsize
jge .loop
RET
%endmacro
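The two loops above implement the Dirac half-pel interpolation with taps (-1, 3, -7, 21, 21, -7, 3, -1), rounding by 16 and an arithmetic shift by 5. A hedged scalar C reference of one output sample, reconstructed from the comments and arithmetic in the macro (not taken from FFmpeg's C code):
#include <stdint.h>

static uint8_t clip_u8(int v)
{
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

/* Horizontal case: src points at the left of the pair being interpolated. */
static uint8_t dirac_hpel_sample(const uint8_t *src)
{
    int sum = 21 * (src[0]  + src[1])
            +  3 * (src[-2] + src[3])
            -  7 * (src[-1] + src[2])
            -  1 * (src[-3] + src[4]);
    return clip_u8((sum + 16) >> 5);   /* +pw_16, psraw 5, packuswb */
}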
%macro PUT_RECT 1
; void put_rect_clamped(uint8_t *dst, int dst_stride, int16_t *src, int src_stride, int width, int height)
cglobal put_signed_rect_clamped_%1, 5,9,3, dst, dst_stride, src, src_stride, w, dst2, src2
mova m0, [pb_80]
add wd, (mmsize-1)
and wd, ~(mmsize-1)
%if ARCH_X86_64
movsxd dst_strideq, dst_strided
movsxd src_strideq, src_strided
mov r7d, r5m
mov r8d, wd
%define wspill r8d
%define hd r7d
%else
mov r4m, wd
%define wspill r4m
%define hd r5mp
%endif
.loopy:
lea src2q, [srcq+src_strideq*2]
lea dst2q, [dstq+dst_strideq]
.loopx:
sub wd, mmsize
mova m1, [srcq +2*wq]
mova m2, [src2q+2*wq]
packsswb m1, [srcq +2*wq+mmsize]
packsswb m2, [src2q+2*wq+mmsize]
paddb m1, m0
paddb m2, m0
mova [dstq +wq], m1
mova [dst2q+wq], m2
jg .loopx
lea srcq, [srcq+src_strideq*4]
lea dstq, [dstq+dst_strideq*2]
sub hd, 2
mov wd, wspill
jg .loopy
RET
%endm
%macro ADD_RECT 1
; void add_rect_clamped(uint8_t *dst, uint16_t *src, int stride, int16_t *idwt, int idwt_stride, int width, int height)
cglobal add_rect_clamped_%1, 7,9,3, dst, src, stride, idwt, idwt_stride, w, h
mova m0, [pw_32]
add wd, (mmsize-1)
and wd, ~(mmsize-1)
%if ARCH_X86_64
movsxd strideq, strided
movsxd idwt_strideq, idwt_strided
mov r8d, wd
%define wspill r8d
%else
mov r5m, wd
%define wspill r5m
%endif
.loop:
sub wd, mmsize
movu m1, [srcq +2*wq] ; FIXME: ensure alignment
paddw m1, m0
psraw m1, 6
movu m2, [srcq +2*wq+mmsize] ; FIXME: ensure alignment
paddw m2, m0
psraw m2, 6
paddw m1, [idwtq+2*wq]
paddw m2, [idwtq+2*wq+mmsize]
packuswb m1, m2
mova [dstq +wq], m1
jg .loop
lea srcq, [srcq + 2*strideq]
add dstq, strideq
lea idwtq, [idwtq+ 2*idwt_strideq]
sub hd, 1
mov wd, wspill
jg .loop
RET
%endm
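Per sample, the two rectangle routines above amount to a scale-and-clamp: put_signed_rect_clamped clips the 16-bit input to [-128, 127] and re-biases it by 128, while add_rect_clamped rounds the 16-bit source by 32, shifts right by 6 and adds the IDWT term before clamping to 0..255. A hedged scalar sketch derived from the instruction sequences, not from FFmpeg's C code:
#include <stdint.h>

static uint8_t clip_u8(int v)
{
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

/* packsswb + paddb 0x80: clip to the signed byte range, then re-bias to unsigned. */
static uint8_t put_signed_sample(int16_t src)
{
    int v = src < -128 ? -128 : src > 127 ? 127 : src;
    return (uint8_t)(v + 128);
}

static uint8_t add_rect_sample(uint16_t src, int16_t idwt)
{
    int v = (int16_t)(uint16_t)(src + 32);  /* 16-bit wrap-around, as paddw does */
    v = (v >> 6) + idwt;                    /* psraw 6, then add the IDWT term */
    return clip_u8(v);                      /* packuswb */
}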
%macro ADD_OBMC 2
; void add_obmc(uint16_t *dst, uint8_t *src, int stride, uint8_t *obmc_weight, int yblen)
cglobal add_dirac_obmc%1_%2, 6,6,5, dst, src, stride, obmc, yblen
pxor m4, m4
.loop:
%assign i 0
%rep %1 / mmsize
mova m0, [srcq+i]
mova m1, m0
punpcklbw m0, m4
punpckhbw m1, m4
mova m2, [obmcq+i]
mova m3, m2
punpcklbw m2, m4
punpckhbw m3, m4
pmullw m0, m2
pmullw m1, m3
movu m2, [dstq+2*i]
movu m3, [dstq+2*i+mmsize]
paddw m0, m2
paddw m1, m3
movu [dstq+2*i], m0
movu [dstq+2*i+mmsize], m1
%assign i i+mmsize
%endrep
lea srcq, [srcq+strideq]
lea dstq, [dstq+2*strideq]
add obmcq, 32
sub yblend, 1
jg .loop
RET
%endm
INIT_MMX
%if ARCH_X86_64 == 0
PUT_RECT mmx
ADD_RECT mmx
HPEL_FILTER mmx
ADD_OBMC 32, mmx
ADD_OBMC 16, mmx
%endif
ADD_OBMC 8, mmx
INIT_XMM
PUT_RECT sse2
ADD_RECT sse2
HPEL_FILTER sse2
ADD_OBMC 32, sse2
ADD_OBMC 16, sse2

View File

@@ -0,0 +1,49 @@
;******************************************************************************
;* VC3/DNxHD SIMD functions
;* Copyright (c) 2007 Baptiste Coudurier <baptiste dot coudurier at smartjog dot com>
;* Copyright (c) 2014 Tiancheng "Timothy" Gu <timothygu99@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
section .text
; void get_pixels_8x4_sym_sse2(int16_t *block, const uint8_t *pixels,
; ptrdiff_t line_size)
INIT_XMM sse2
cglobal get_pixels_8x4_sym, 3,3,5, block, pixels, linesize
pxor m4, m4
movq m0, [pixelsq]
add pixelsq, linesizeq
movq m1, [pixelsq]
movq m2, [pixelsq+linesizeq]
movq m3, [pixelsq+linesizeq*2]
punpcklbw m0, m4
punpcklbw m1, m4
punpcklbw m2, m4
punpcklbw m3, m4
mova [blockq ], m0
mova [blockq+16 ], m1
mova [blockq+32 ], m2
mova [blockq+48 ], m3
mova [blockq+64 ], m3
mova [blockq+80 ], m2
mova [blockq+96 ], m1
mova [blockq+112], m0
RET
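The routine above widens four 8-pixel rows to 16-bit and writes them into the 8x8 block twice, mirrored about the middle (row order 0 1 2 3 3 2 1 0). A hedged C reference reconstructed from the stores above:
#include <stddef.h>
#include <stdint.h>

static void get_pixels_8x4_sym_c(int16_t *block, const uint8_t *pixels,
                                 ptrdiff_t line_size)
{
    for (int y = 0; y < 4; y++) {
        const uint8_t *row = pixels + y * line_size;
        for (int x = 0; x < 8; x++) {
            int16_t v = row[x];
            block[y * 8 + x]       = v;  /* rows 0..3 */
            block[(7 - y) * 8 + x] = v;  /* rows 7..4 mirror rows 0..3 */
        }
    }
}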

View File

@@ -0,0 +1,37 @@
/*
* VC3/DNxHD SIMD functions
* Copyright (c) 2007 Baptiste Coudurier <baptiste dot coudurier at smartjog dot com>
*
* VC-3 encoder funded by the British Broadcasting Corporation
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/attributes.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/dnxhdenc.h"
void ff_get_pixels_8x4_sym_sse2(int16_t *block, const uint8_t *pixels,
ptrdiff_t line_size);
av_cold void ff_dnxhdenc_init_x86(DNXHDEncContext *ctx)
{
if (EXTERNAL_SSE2(av_get_cpu_flags())) {
if (ctx->cid_table->bit_depth == 8)
ctx->get_pixels_8x4_sym = ff_get_pixels_8x4_sym_sse2;
}
}

View File

@@ -0,0 +1,307 @@
;******************************************************************************
;* MMX optimized discrete wavelet transform
;* Copyright (c) 2010 David Conrad
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION_RODATA
pw_1991: times 4 dw 9,-1
cextern pw_1
cextern pw_2
cextern pw_8
cextern pw_16
section .text
; %1 -= (%2 + %3 + 2) >> 2    (%4 is pw_2)
%macro COMPOSE_53iL0 4
paddw %2, %3
paddw %2, %4
psraw %2, 2
psubw %1, %2
%endm
; m1 = %1 + (-m0 + 9*m1 + 9*%2 -%3 + 8)>>4
; if %4 is supplied, %1 is loaded unaligned from there
; m2: clobbered m3: pw_8 m4: pw_1991
%macro COMPOSE_DD97iH0 3-4
paddw m0, %3
paddw m1, %2
psubw m0, m3
mova m2, m1
punpcklwd m1, m0
punpckhwd m2, m0
pmaddwd m1, m4
pmaddwd m2, m4
%if %0 > 3
movu %1, %4
%endif
psrad m1, 4
psrad m2, 4
packssdw m1, m2
paddw m1, %1
%endm
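In scalar terms the two lifting steps above are a (1, 1)/4 update and a (-1, 9, 9, -1)/16 prediction. A hedged C sketch of one element each, reconstructed from the comments (the real IDWTELEM width depends on the build, so a stand-in typedef is used):
#include <stdint.h>

typedef int16_t IDWTELEM_SKETCH;   /* stand-in for the codec's IDWTELEM type */

/* COMPOSE_53iL0: b1 -= (b0 + b2 + 2) >> 2 */
static IDWTELEM_SKETCH compose_53iL0(int b0, int b1, int b2)
{
    return (IDWTELEM_SKETCH)(b1 - ((b0 + b2 + 2) >> 2));
}

/* COMPOSE_DD97iH0: b2 += (9*(b1 + b3) - (b0 + b4) + 8) >> 4 */
static IDWTELEM_SKETCH compose_dd97iH0(int b0, int b1, int b2, int b3, int b4)
{
    return (IDWTELEM_SKETCH)(b2 + ((9 * (b1 + b3) - (b0 + b4) + 8) >> 4));
}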
%macro COMPOSE_VERTICAL 1
; void vertical_compose53iL0(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2,
; int width)
cglobal vertical_compose53iL0_%1, 4,4,1, b0, b1, b2, width
mova m2, [pw_2]
%if ARCH_X86_64
mov widthd, widthd
%endif
.loop:
sub widthq, mmsize/2
mova m1, [b0q+2*widthq]
mova m0, [b1q+2*widthq]
COMPOSE_53iL0 m0, m1, [b2q+2*widthq], m2
mova [b1q+2*widthq], m0
jg .loop
REP_RET
; void vertical_compose_dirac53iH0(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2,
; int width)
cglobal vertical_compose_dirac53iH0_%1, 4,4,1, b0, b1, b2, width
mova m1, [pw_1]
%if ARCH_X86_64
mov widthd, widthd
%endif
.loop:
sub widthq, mmsize/2
mova m0, [b0q+2*widthq]
paddw m0, [b2q+2*widthq]
paddw m0, m1
psraw m0, 1
paddw m0, [b1q+2*widthq]
mova [b1q+2*widthq], m0
jg .loop
REP_RET
; void vertical_compose_dd97iH0(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2,
; IDWTELEM *b3, IDWTELEM *b4, int width)
cglobal vertical_compose_dd97iH0_%1, 6,6,5, b0, b1, b2, b3, b4, width
mova m3, [pw_8]
mova m4, [pw_1991]
%if ARCH_X86_64
mov widthd, widthd
%endif
.loop:
sub widthq, mmsize/2
mova m0, [b0q+2*widthq]
mova m1, [b1q+2*widthq]
COMPOSE_DD97iH0 [b2q+2*widthq], [b3q+2*widthq], [b4q+2*widthq]
mova [b2q+2*widthq], m1
jg .loop
REP_RET
; void vertical_compose_dd137iL0(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2,
; IDWTELEM *b3, IDWTELEM *b4, int width)
cglobal vertical_compose_dd137iL0_%1, 6,6,6, b0, b1, b2, b3, b4, width
mova m3, [pw_16]
mova m4, [pw_1991]
%if ARCH_X86_64
mov widthd, widthd
%endif
.loop:
sub widthq, mmsize/2
mova m0, [b0q+2*widthq]
mova m1, [b1q+2*widthq]
mova m5, [b2q+2*widthq]
paddw m0, [b4q+2*widthq]
paddw m1, [b3q+2*widthq]
psubw m0, m3
mova m2, m1
punpcklwd m1, m0
punpckhwd m2, m0
pmaddwd m1, m4
pmaddwd m2, m4
psrad m1, 5
psrad m2, 5
packssdw m1, m2
psubw m5, m1
mova [b2q+2*widthq], m5
jg .loop
REP_RET
; void vertical_compose_haar(IDWTELEM *b0, IDWTELEM *b1, int width)
cglobal vertical_compose_haar_%1, 3,4,3, b0, b1, width
mova m3, [pw_1]
%if ARCH_X86_64
mov widthd, widthd
%endif
.loop:
sub widthq, mmsize/2
mova m1, [b1q+2*widthq]
mova m0, [b0q+2*widthq]
mova m2, m1
paddw m1, m3
psraw m1, 1
psubw m0, m1
mova [b0q+2*widthq], m0
paddw m2, m0
mova [b1q+2*widthq], m2
jg .loop
REP_RET
%endmacro
; extend the left and right edges of the tmp array by %1 and %2 respectively
%macro EDGE_EXTENSION 3
mov %3, [tmpq]
%assign %%i 1
%rep %1
mov [tmpq-2*%%i], %3
%assign %%i %%i+1
%endrep
mov %3, [tmpq+2*w2q-2]
%assign %%i 0
%rep %2
mov [tmpq+2*w2q+2*%%i], %3
%assign %%i %%i+1
%endrep
%endmacro
%macro HAAR_HORIZONTAL 2
; void horizontal_compose_haari(IDWTELEM *b, IDWTELEM *tmp, int width)
cglobal horizontal_compose_haar%2i_%1, 3,6,4, b, tmp, w, x, w2, b_w2
mov w2d, wd
xor xq, xq
shr w2d, 1
lea b_w2q, [bq+wq]
mova m3, [pw_1]
.lowpass_loop:
movu m1, [b_w2q + 2*xq]
mova m0, [bq + 2*xq]
paddw m1, m3
psraw m1, 1
psubw m0, m1
mova [tmpq + 2*xq], m0
add xq, mmsize/2
cmp xq, w2q
jl .lowpass_loop
xor xq, xq
and w2q, ~(mmsize/2 - 1)
cmp w2q, mmsize/2
jl .end
.highpass_loop:
movu m1, [b_w2q + 2*xq]
mova m0, [tmpq + 2*xq]
paddw m1, m0
; shift and interleave
%if %2 == 1
paddw m0, m3
paddw m1, m3
psraw m0, 1
psraw m1, 1
%endif
mova m2, m0
punpcklwd m0, m1
punpckhwd m2, m1
mova [bq+4*xq], m0
mova [bq+4*xq+mmsize], m2
add xq, mmsize/2
cmp xq, w2q
jl .highpass_loop
.end:
REP_RET
%endmacro
INIT_XMM
; void horizontal_compose_dd97i(IDWTELEM *b, IDWTELEM *tmp, int width)
cglobal horizontal_compose_dd97i_ssse3, 3,6,8, b, tmp, w, x, w2, b_w2
mov w2d, wd
xor xd, xd
shr w2d, 1
lea b_w2q, [bq+wq]
movu m4, [bq+wq]
mova m7, [pw_2]
pslldq m4, 14
.lowpass_loop:
movu m1, [b_w2q + 2*xq]
mova m0, [bq + 2*xq]
mova m2, m1
palignr m1, m4, 14
mova m4, m2
COMPOSE_53iL0 m0, m1, m2, m7
mova [tmpq + 2*xq], m0
add xd, mmsize/2
cmp xd, w2d
jl .lowpass_loop
EDGE_EXTENSION 1, 2, xw
; leave the last up to 7 (sse) or 3 (mmx) values for C
xor xd, xd
and w2d, ~(mmsize/2 - 1)
cmp w2d, mmsize/2
jl .end
mova m7, [tmpq-mmsize]
mova m0, [tmpq]
mova m5, [pw_1]
mova m3, [pw_8]
mova m4, [pw_1991]
.highpass_loop:
mova m6, m0
palignr m0, m7, 14
mova m7, [tmpq + 2*xq + 16]
mova m1, m7
mova m2, m7
palignr m1, m6, 2
palignr m2, m6, 4
COMPOSE_DD97iH0 m0, m6, m2, [b_w2q + 2*xq]
mova m0, m7
mova m7, m6
; shift and interleave
paddw m6, m5
paddw m1, m5
psraw m6, 1
psraw m1, 1
mova m2, m6
punpcklwd m6, m1
punpckhwd m2, m1
mova [bq+4*xq], m6
mova [bq+4*xq+mmsize], m2
add xd, mmsize/2
cmp xd, w2d
jl .highpass_loop
.end:
REP_RET
%if ARCH_X86_64 == 0
INIT_MMX
COMPOSE_VERTICAL mmx
HAAR_HORIZONTAL mmx, 0
HAAR_HORIZONTAL mmx, 1
%endif
;;INIT_XMM
INIT_XMM
COMPOSE_VERTICAL sse2
HAAR_HORIZONTAL sse2, 0
HAAR_HORIZONTAL sse2, 1

View File

@@ -0,0 +1,594 @@
/*
* SIMD-optimized forward DCT
* The gcc porting is Copyright (c) 2001 Fabrice Bellard.
* cleanup/optimizations are Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
* SSE2 optimization is Copyright (c) 2004 Denes Balatoni.
*
* from fdctam32.c - AP922 MMX(3D-Now) forward-DCT
*
* Intel Application Note AP-922 - fast, precise implementation of DCT
* http://developer.intel.com/vtune/cbts/appnotes.htm
*
* Also of inspiration:
* a page about fdct at http://www.geocities.com/ssavekar/dct.htm
* Skal's fdct at http://skal.planet-d.net/coding/dct.html
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/common.h"
#include "libavutil/x86/asm.h"
#include "fdct.h"
#if HAVE_MMX_INLINE
//////////////////////////////////////////////////////////////////////
//
// constants for the forward DCT
// -----------------------------
//
// Be sure to check that your compiler is aligning all constants to QWORD
// (8-byte) memory boundaries! Otherwise the unaligned memory access will
// severely stall MMX execution.
//
//////////////////////////////////////////////////////////////////////
#define BITS_FRW_ACC 3 //; 2 or 3 for accuracy
#define SHIFT_FRW_COL BITS_FRW_ACC
#define SHIFT_FRW_ROW (BITS_FRW_ACC + 17 - 3)
#define RND_FRW_ROW (1 << (SHIFT_FRW_ROW-1))
//#define RND_FRW_COL (1 << (SHIFT_FRW_COL-1))
#define X8(x) x,x,x,x,x,x,x,x
//concatenated table, for forward DCT transformation
DECLARE_ALIGNED(16, static const int16_t, fdct_tg_all_16)[24] = {
X8(13036), // tg * (2<<16) + 0.5
X8(27146), // tg * (2<<16) + 0.5
X8(-21746) // tg * (2<<16) + 0.5
};
DECLARE_ALIGNED(16, static const int16_t, ocos_4_16)[8] = {
X8(23170) //cos * (2<<15) + 0.5
};
DECLARE_ALIGNED(16, static const int16_t, fdct_one_corr)[8] = { X8(1) };
DECLARE_ALIGNED(8, static const int32_t, fdct_r_row)[2] = {RND_FRW_ROW, RND_FRW_ROW };
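For orientation, the fixed-point constants above appear to correspond to tan(k*pi/16) scaled by 2^16 (stored as a wrapped int16_t, which is why the third value is negative) and cos(pi/4) scaled by 2^15. A small hedged C check of that reading; it relies on the usual two's-complement int16_t wrap and on M_PI being available (define it manually on strict ISO C compilers):
#include <math.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* tan(k*pi/16) * 2^16, wrapped into int16_t */
    for (int k = 1; k <= 3; k++)
        printf("tan(%d*pi/16)*65536 -> %d\n", k,
               (int)(int16_t)lrint(tan(k * M_PI / 16.0) * 65536.0));
    /* expected: 13036, 27146, -21746 (43790 wrapped to 16 bits) */

    printf("cos(pi/4)*32768 -> %d\n", (int)lrint(cos(M_PI / 4.0) * 32768.0));
    /* expected: 23170 */
    return 0;
}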
static const struct
{
DECLARE_ALIGNED(16, const int32_t, fdct_r_row_sse2)[4];
} fdct_r_row_sse2 =
{{
RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW
}};
//DECLARE_ALIGNED(16, static const long, fdct_r_row_sse2)[4] = {RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW, RND_FRW_ROW};
DECLARE_ALIGNED(8, static const int16_t, tab_frw_01234567)[] = { // forward_dct coeff table
16384, 16384, 22725, 19266,
16384, 16384, 12873, 4520,
21407, 8867, 19266, -4520,
-8867, -21407, -22725, -12873,
16384, -16384, 12873, -22725,
-16384, 16384, 4520, 19266,
8867, -21407, 4520, -12873,
21407, -8867, 19266, -22725,
22725, 22725, 31521, 26722,
22725, 22725, 17855, 6270,
29692, 12299, 26722, -6270,
-12299, -29692, -31521, -17855,
22725, -22725, 17855, -31521,
-22725, 22725, 6270, 26722,
12299, -29692, 6270, -17855,
29692, -12299, 26722, -31521,
21407, 21407, 29692, 25172,
21407, 21407, 16819, 5906,
27969, 11585, 25172, -5906,
-11585, -27969, -29692, -16819,
21407, -21407, 16819, -29692,
-21407, 21407, 5906, 25172,
11585, -27969, 5906, -16819,
27969, -11585, 25172, -29692,
19266, 19266, 26722, 22654,
19266, 19266, 15137, 5315,
25172, 10426, 22654, -5315,
-10426, -25172, -26722, -15137,
19266, -19266, 15137, -26722,
-19266, 19266, 5315, 22654,
10426, -25172, 5315, -15137,
25172, -10426, 22654, -26722,
16384, 16384, 22725, 19266,
16384, 16384, 12873, 4520,
21407, 8867, 19266, -4520,
-8867, -21407, -22725, -12873,
16384, -16384, 12873, -22725,
-16384, 16384, 4520, 19266,
8867, -21407, 4520, -12873,
21407, -8867, 19266, -22725,
19266, 19266, 26722, 22654,
19266, 19266, 15137, 5315,
25172, 10426, 22654, -5315,
-10426, -25172, -26722, -15137,
19266, -19266, 15137, -26722,
-19266, 19266, 5315, 22654,
10426, -25172, 5315, -15137,
25172, -10426, 22654, -26722,
21407, 21407, 29692, 25172,
21407, 21407, 16819, 5906,
27969, 11585, 25172, -5906,
-11585, -27969, -29692, -16819,
21407, -21407, 16819, -29692,
-21407, 21407, 5906, 25172,
11585, -27969, 5906, -16819,
27969, -11585, 25172, -29692,
22725, 22725, 31521, 26722,
22725, 22725, 17855, 6270,
29692, 12299, 26722, -6270,
-12299, -29692, -31521, -17855,
22725, -22725, 17855, -31521,
-22725, 22725, 6270, 26722,
12299, -29692, 6270, -17855,
29692, -12299, 26722, -31521,
};
static const struct
{
DECLARE_ALIGNED(16, const int16_t, tab_frw_01234567_sse2)[256];
} tab_frw_01234567_sse2 =
{{
//DECLARE_ALIGNED(16, static const int16_t, tab_frw_01234567_sse2)[] = { // forward_dct coeff table
#define TABLE_SSE2 C4, C4, C1, C3, -C6, -C2, -C1, -C5, \
C4, C4, C5, C7, C2, C6, C3, -C7, \
-C4, C4, C7, C3, C6, -C2, C7, -C5, \
C4, -C4, C5, -C1, C2, -C6, C3, -C1,
// c1..c7 * cos(pi/4) * 2^15
#define C1 22725
#define C2 21407
#define C3 19266
#define C4 16384
#define C5 12873
#define C6 8867
#define C7 4520
TABLE_SSE2
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#define C1 31521
#define C2 29692
#define C3 26722
#define C4 22725
#define C5 17855
#define C6 12299
#define C7 6270
TABLE_SSE2
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#define C1 29692
#define C2 27969
#define C3 25172
#define C4 21407
#define C5 16819
#define C6 11585
#define C7 5906
TABLE_SSE2
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#define C1 26722
#define C2 25172
#define C3 22654
#define C4 19266
#define C5 15137
#define C6 10426
#define C7 5315
TABLE_SSE2
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#define C1 22725
#define C2 21407
#define C3 19266
#define C4 16384
#define C5 12873
#define C6 8867
#define C7 4520
TABLE_SSE2
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#define C1 26722
#define C2 25172
#define C3 22654
#define C4 19266
#define C5 15137
#define C6 10426
#define C7 5315
TABLE_SSE2
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#define C1 29692
#define C2 27969
#define C3 25172
#define C4 21407
#define C5 16819
#define C6 11585
#define C7 5906
TABLE_SSE2
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#define C1 31521
#define C2 29692
#define C3 26722
#define C4 22725
#define C5 17855
#define C6 12299
#define C7 6270
TABLE_SSE2
}};
#define S(s) AV_TOSTRING(s) //AV_STRINGIFY is too long
#define FDCT_COL(cpu, mm, mov)\
static av_always_inline void fdct_col_##cpu(const int16_t *in, int16_t *out, int offset)\
{\
__asm__ volatile (\
#mov" 16(%0), %%"#mm"0 \n\t" \
#mov" 96(%0), %%"#mm"1 \n\t" \
#mov" %%"#mm"0, %%"#mm"2 \n\t" \
#mov" 32(%0), %%"#mm"3 \n\t" \
"paddsw %%"#mm"1, %%"#mm"0 \n\t" \
#mov" 80(%0), %%"#mm"4 \n\t" \
"psllw $"S(SHIFT_FRW_COL)", %%"#mm"0 \n\t" \
#mov" (%0), %%"#mm"5 \n\t" \
"paddsw %%"#mm"3, %%"#mm"4 \n\t" \
"paddsw 112(%0), %%"#mm"5 \n\t" \
"psllw $"S(SHIFT_FRW_COL)", %%"#mm"4 \n\t" \
#mov" %%"#mm"0, %%"#mm"6 \n\t" \
"psubsw %%"#mm"1, %%"#mm"2 \n\t" \
#mov" 16(%1), %%"#mm"1 \n\t" \
"psubsw %%"#mm"4, %%"#mm"0 \n\t" \
#mov" 48(%0), %%"#mm"7 \n\t" \
"pmulhw %%"#mm"0, %%"#mm"1 \n\t" \
"paddsw 64(%0), %%"#mm"7 \n\t" \
"psllw $"S(SHIFT_FRW_COL)", %%"#mm"5 \n\t" \
"paddsw %%"#mm"4, %%"#mm"6 \n\t" \
"psllw $"S(SHIFT_FRW_COL)", %%"#mm"7 \n\t" \
#mov" %%"#mm"5, %%"#mm"4 \n\t" \
"psubsw %%"#mm"7, %%"#mm"5 \n\t" \
"paddsw %%"#mm"5, %%"#mm"1 \n\t" \
"paddsw %%"#mm"7, %%"#mm"4 \n\t" \
"por (%2), %%"#mm"1 \n\t" \
"psllw $"S(SHIFT_FRW_COL)"+1, %%"#mm"2 \n\t" \
"pmulhw 16(%1), %%"#mm"5 \n\t" \
#mov" %%"#mm"4, %%"#mm"7 \n\t" \
"psubsw 80(%0), %%"#mm"3 \n\t" \
"psubsw %%"#mm"6, %%"#mm"4 \n\t" \
#mov" %%"#mm"1, 32(%3) \n\t" \
"paddsw %%"#mm"6, %%"#mm"7 \n\t" \
#mov" 48(%0), %%"#mm"1 \n\t" \
"psllw $"S(SHIFT_FRW_COL)"+1, %%"#mm"3 \n\t" \
"psubsw 64(%0), %%"#mm"1 \n\t" \
#mov" %%"#mm"2, %%"#mm"6 \n\t" \
#mov" %%"#mm"4, 64(%3) \n\t" \
"paddsw %%"#mm"3, %%"#mm"2 \n\t" \
"pmulhw (%4), %%"#mm"2 \n\t" \
"psubsw %%"#mm"3, %%"#mm"6 \n\t" \
"pmulhw (%4), %%"#mm"6 \n\t" \
"psubsw %%"#mm"0, %%"#mm"5 \n\t" \
"por (%2), %%"#mm"5 \n\t" \
"psllw $"S(SHIFT_FRW_COL)", %%"#mm"1 \n\t" \
"por (%2), %%"#mm"2 \n\t" \
#mov" %%"#mm"1, %%"#mm"4 \n\t" \
#mov" (%0), %%"#mm"3 \n\t" \
"paddsw %%"#mm"6, %%"#mm"1 \n\t" \
"psubsw 112(%0), %%"#mm"3 \n\t" \
"psubsw %%"#mm"6, %%"#mm"4 \n\t" \
#mov" (%1), %%"#mm"0 \n\t" \
"psllw $"S(SHIFT_FRW_COL)", %%"#mm"3 \n\t" \
#mov" 32(%1), %%"#mm"6 \n\t" \
"pmulhw %%"#mm"1, %%"#mm"0 \n\t" \
#mov" %%"#mm"7, (%3) \n\t" \
"pmulhw %%"#mm"4, %%"#mm"6 \n\t" \
#mov" %%"#mm"5, 96(%3) \n\t" \
#mov" %%"#mm"3, %%"#mm"7 \n\t" \
#mov" 32(%1), %%"#mm"5 \n\t" \
"psubsw %%"#mm"2, %%"#mm"7 \n\t" \
"paddsw %%"#mm"2, %%"#mm"3 \n\t" \
"pmulhw %%"#mm"7, %%"#mm"5 \n\t" \
"paddsw %%"#mm"3, %%"#mm"0 \n\t" \
"paddsw %%"#mm"4, %%"#mm"6 \n\t" \
"pmulhw (%1), %%"#mm"3 \n\t" \
"por (%2), %%"#mm"0 \n\t" \
"paddsw %%"#mm"7, %%"#mm"5 \n\t" \
"psubsw %%"#mm"6, %%"#mm"7 \n\t" \
#mov" %%"#mm"0, 16(%3) \n\t" \
"paddsw %%"#mm"4, %%"#mm"5 \n\t" \
#mov" %%"#mm"7, 48(%3) \n\t" \
"psubsw %%"#mm"1, %%"#mm"3 \n\t" \
#mov" %%"#mm"5, 80(%3) \n\t" \
#mov" %%"#mm"3, 112(%3) \n\t" \
: \
: "r" (in + offset), "r" (fdct_tg_all_16), "r" (fdct_one_corr), \
"r" (out + offset), "r" (ocos_4_16)); \
}
FDCT_COL(mmx, mm, movq)
FDCT_COL(sse2, xmm, movdqa)
static av_always_inline void fdct_row_sse2(const int16_t *in, int16_t *out)
{
__asm__ volatile(
#define FDCT_ROW_SSE2_H1(i,t) \
"movq " #i "(%0), %%xmm2 \n\t" \
"movq " #i "+8(%0), %%xmm0 \n\t" \
"movdqa " #t "+32(%1), %%xmm3 \n\t" \
"movdqa " #t "+48(%1), %%xmm7 \n\t" \
"movdqa " #t "(%1), %%xmm4 \n\t" \
"movdqa " #t "+16(%1), %%xmm5 \n\t"
#define FDCT_ROW_SSE2_H2(i,t) \
"movq " #i "(%0), %%xmm2 \n\t" \
"movq " #i "+8(%0), %%xmm0 \n\t" \
"movdqa " #t "+32(%1), %%xmm3 \n\t" \
"movdqa " #t "+48(%1), %%xmm7 \n\t"
#define FDCT_ROW_SSE2(i) \
"movq %%xmm2, %%xmm1 \n\t" \
"pshuflw $27, %%xmm0, %%xmm0 \n\t" \
"paddsw %%xmm0, %%xmm1 \n\t" \
"psubsw %%xmm0, %%xmm2 \n\t" \
"punpckldq %%xmm2, %%xmm1 \n\t" \
"pshufd $78, %%xmm1, %%xmm2 \n\t" \
"pmaddwd %%xmm2, %%xmm3 \n\t" \
"pmaddwd %%xmm1, %%xmm7 \n\t" \
"pmaddwd %%xmm5, %%xmm2 \n\t" \
"pmaddwd %%xmm4, %%xmm1 \n\t" \
"paddd %%xmm7, %%xmm3 \n\t" \
"paddd %%xmm2, %%xmm1 \n\t" \
"paddd %%xmm6, %%xmm3 \n\t" \
"paddd %%xmm6, %%xmm1 \n\t" \
"psrad %3, %%xmm3 \n\t" \
"psrad %3, %%xmm1 \n\t" \
"packssdw %%xmm3, %%xmm1 \n\t" \
"movdqa %%xmm1, " #i "(%4) \n\t"
"movdqa (%2), %%xmm6 \n\t"
FDCT_ROW_SSE2_H1(0,0)
FDCT_ROW_SSE2(0)
FDCT_ROW_SSE2_H2(64,0)
FDCT_ROW_SSE2(64)
FDCT_ROW_SSE2_H1(16,64)
FDCT_ROW_SSE2(16)
FDCT_ROW_SSE2_H2(112,64)
FDCT_ROW_SSE2(112)
FDCT_ROW_SSE2_H1(32,128)
FDCT_ROW_SSE2(32)
FDCT_ROW_SSE2_H2(96,128)
FDCT_ROW_SSE2(96)
FDCT_ROW_SSE2_H1(48,192)
FDCT_ROW_SSE2(48)
FDCT_ROW_SSE2_H2(80,192)
FDCT_ROW_SSE2(80)
:
: "r" (in), "r" (tab_frw_01234567_sse2.tab_frw_01234567_sse2),
"r" (fdct_r_row_sse2.fdct_r_row_sse2), "i" (SHIFT_FRW_ROW), "r" (out)
XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3",
"%xmm4", "%xmm5", "%xmm6", "%xmm7")
);
}
static av_always_inline void fdct_row_mmxext(const int16_t *in, int16_t *out,
const int16_t *table)
{
__asm__ volatile (
"pshufw $0x1B, 8(%0), %%mm5 \n\t"
"movq (%0), %%mm0 \n\t"
"movq %%mm0, %%mm1 \n\t"
"paddsw %%mm5, %%mm0 \n\t"
"psubsw %%mm5, %%mm1 \n\t"
"movq %%mm0, %%mm2 \n\t"
"punpckldq %%mm1, %%mm0 \n\t"
"punpckhdq %%mm1, %%mm2 \n\t"
"movq (%1), %%mm1 \n\t"
"movq 8(%1), %%mm3 \n\t"
"movq 16(%1), %%mm4 \n\t"
"movq 24(%1), %%mm5 \n\t"
"movq 32(%1), %%mm6 \n\t"
"movq 40(%1), %%mm7 \n\t"
"pmaddwd %%mm0, %%mm1 \n\t"
"pmaddwd %%mm2, %%mm3 \n\t"
"pmaddwd %%mm0, %%mm4 \n\t"
"pmaddwd %%mm2, %%mm5 \n\t"
"pmaddwd %%mm0, %%mm6 \n\t"
"pmaddwd %%mm2, %%mm7 \n\t"
"pmaddwd 48(%1), %%mm0 \n\t"
"pmaddwd 56(%1), %%mm2 \n\t"
"paddd %%mm1, %%mm3 \n\t"
"paddd %%mm4, %%mm5 \n\t"
"paddd %%mm6, %%mm7 \n\t"
"paddd %%mm0, %%mm2 \n\t"
"movq (%2), %%mm0 \n\t"
"paddd %%mm0, %%mm3 \n\t"
"paddd %%mm0, %%mm5 \n\t"
"paddd %%mm0, %%mm7 \n\t"
"paddd %%mm0, %%mm2 \n\t"
"psrad $"S(SHIFT_FRW_ROW)", %%mm3 \n\t"
"psrad $"S(SHIFT_FRW_ROW)", %%mm5 \n\t"
"psrad $"S(SHIFT_FRW_ROW)", %%mm7 \n\t"
"psrad $"S(SHIFT_FRW_ROW)", %%mm2 \n\t"
"packssdw %%mm5, %%mm3 \n\t"
"packssdw %%mm2, %%mm7 \n\t"
"movq %%mm3, (%3) \n\t"
"movq %%mm7, 8(%3) \n\t"
:
: "r" (in), "r" (table), "r" (fdct_r_row), "r" (out));
}
static av_always_inline void fdct_row_mmx(const int16_t *in, int16_t *out, const int16_t *table)
{
//FIXME reorder (I do not have an old MMX-only CPU here to benchmark ...)
__asm__ volatile(
"movd 12(%0), %%mm1 \n\t"
"punpcklwd 8(%0), %%mm1 \n\t"
"movq %%mm1, %%mm2 \n\t"
"psrlq $0x20, %%mm1 \n\t"
"movq 0(%0), %%mm0 \n\t"
"punpcklwd %%mm2, %%mm1 \n\t"
"movq %%mm0, %%mm5 \n\t"
"paddsw %%mm1, %%mm0 \n\t"
"psubsw %%mm1, %%mm5 \n\t"
"movq %%mm0, %%mm2 \n\t"
"punpckldq %%mm5, %%mm0 \n\t"
"punpckhdq %%mm5, %%mm2 \n\t"
"movq 0(%1), %%mm1 \n\t"
"movq 8(%1), %%mm3 \n\t"
"movq 16(%1), %%mm4 \n\t"
"movq 24(%1), %%mm5 \n\t"
"movq 32(%1), %%mm6 \n\t"
"movq 40(%1), %%mm7 \n\t"
"pmaddwd %%mm0, %%mm1 \n\t"
"pmaddwd %%mm2, %%mm3 \n\t"
"pmaddwd %%mm0, %%mm4 \n\t"
"pmaddwd %%mm2, %%mm5 \n\t"
"pmaddwd %%mm0, %%mm6 \n\t"
"pmaddwd %%mm2, %%mm7 \n\t"
"pmaddwd 48(%1), %%mm0 \n\t"
"pmaddwd 56(%1), %%mm2 \n\t"
"paddd %%mm1, %%mm3 \n\t"
"paddd %%mm4, %%mm5 \n\t"
"paddd %%mm6, %%mm7 \n\t"
"paddd %%mm0, %%mm2 \n\t"
"movq (%2), %%mm0 \n\t"
"paddd %%mm0, %%mm3 \n\t"
"paddd %%mm0, %%mm5 \n\t"
"paddd %%mm0, %%mm7 \n\t"
"paddd %%mm0, %%mm2 \n\t"
"psrad $"S(SHIFT_FRW_ROW)", %%mm3 \n\t"
"psrad $"S(SHIFT_FRW_ROW)", %%mm5 \n\t"
"psrad $"S(SHIFT_FRW_ROW)", %%mm7 \n\t"
"psrad $"S(SHIFT_FRW_ROW)", %%mm2 \n\t"
"packssdw %%mm5, %%mm3 \n\t"
"packssdw %%mm2, %%mm7 \n\t"
"movq %%mm3, 0(%3) \n\t"
"movq %%mm7, 8(%3) \n\t"
:
: "r" (in), "r" (table), "r" (fdct_r_row), "r" (out));
}
void ff_fdct_mmx(int16_t *block)
{
DECLARE_ALIGNED(8, int64_t, align_tmp)[16];
int16_t * block1= (int16_t*)align_tmp;
const int16_t *table= tab_frw_01234567;
int i;
fdct_col_mmx(block, block1, 0);
fdct_col_mmx(block, block1, 4);
for(i=8;i>0;i--) {
fdct_row_mmx(block1, block, table);
block1 += 8;
table += 32;
block += 8;
}
}
#endif /* HAVE_MMX_INLINE */
#if HAVE_MMXEXT_INLINE
void ff_fdct_mmxext(int16_t *block)
{
DECLARE_ALIGNED(8, int64_t, align_tmp)[16];
int16_t *block1= (int16_t*)align_tmp;
const int16_t *table= tab_frw_01234567;
int i;
fdct_col_mmx(block, block1, 0);
fdct_col_mmx(block, block1, 4);
for(i=8;i>0;i--) {
fdct_row_mmxext(block1, block, table);
block1 += 8;
table += 32;
block += 8;
}
}
#endif /* HAVE_MMXEXT_INLINE */
#if HAVE_SSE2_INLINE
void ff_fdct_sse2(int16_t *block)
{
DECLARE_ALIGNED(16, int64_t, align_tmp)[16];
int16_t * const block1= (int16_t*)align_tmp;
fdct_col_sse2(block, block1, 0);
fdct_row_sse2(block1, block);
}
#endif /* HAVE_SSE2_INLINE */

View File

@@ -0,0 +1,28 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_X86_FDCT_H
#define AVCODEC_X86_FDCT_H
#include <stdint.h>
void ff_fdct_mmx(int16_t *block);
void ff_fdct_mmxext(int16_t *block);
void ff_fdct_sse2(int16_t *block);
#endif /* AVCODEC_X86_FDCT_H */

View File

@@ -0,0 +1,44 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/fdctdsp.h"
#include "fdct.h"
av_cold void ff_fdctdsp_init_x86(FDCTDSPContext *c, AVCodecContext *avctx,
unsigned high_bit_depth)
{
int cpu_flags = av_get_cpu_flags();
const int dct_algo = avctx->dct_algo;
if (!high_bit_depth) {
if ((dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX)) {
if (INLINE_MMX(cpu_flags))
c->fdct = ff_fdct_mmx;
if (INLINE_MMXEXT(cpu_flags))
c->fdct = ff_fdct_mmxext;
if (INLINE_SSE2(cpu_flags))
c->fdct = ff_fdct_sse2;
}
}
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,38 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_X86_FFT_H
#define AVCODEC_X86_FFT_H
#include "libavcodec/fft.h"
void ff_fft_permute_sse(FFTContext *s, FFTComplex *z);
void ff_fft_calc_avx(FFTContext *s, FFTComplex *z);
void ff_fft_calc_sse(FFTContext *s, FFTComplex *z);
void ff_fft_calc_3dnow(FFTContext *s, FFTComplex *z);
void ff_fft_calc_3dnowext(FFTContext *s, FFTComplex *z);
void ff_imdct_calc_3dnow(FFTContext *s, FFTSample *output, const FFTSample *input);
void ff_imdct_half_3dnow(FFTContext *s, FFTSample *output, const FFTSample *input);
void ff_imdct_calc_3dnowext(FFTContext *s, FFTSample *output, const FFTSample *input);
void ff_imdct_half_3dnowext(FFTContext *s, FFTSample *output, const FFTSample *input);
void ff_imdct_calc_sse(FFTContext *s, FFTSample *output, const FFTSample *input);
void ff_imdct_half_sse(FFTContext *s, FFTSample *output, const FFTSample *input);
void ff_imdct_half_avx(FFTContext *s, FFTSample *output, const FFTSample *input);
#endif /* AVCODEC_X86_FFT_H */

View File

@@ -0,0 +1,57 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/cpu.h"
#include "fft.h"
av_cold void ff_fft_init_x86(FFTContext *s)
{
int cpu_flags = av_get_cpu_flags();
#if ARCH_X86_32
if (EXTERNAL_AMD3DNOW(cpu_flags)) {
/* 3DNow! for K6-2/3 */
s->imdct_calc = ff_imdct_calc_3dnow;
s->imdct_half = ff_imdct_half_3dnow;
s->fft_calc = ff_fft_calc_3dnow;
}
if (EXTERNAL_AMD3DNOWEXT(cpu_flags)) {
/* 3DNowEx for K7 */
s->imdct_calc = ff_imdct_calc_3dnowext;
s->imdct_half = ff_imdct_half_3dnowext;
s->fft_calc = ff_fft_calc_3dnowext;
}
#endif
if (EXTERNAL_SSE(cpu_flags)) {
/* SSE for P3/P4/K8 */
s->imdct_calc = ff_imdct_calc_sse;
s->imdct_half = ff_imdct_half_sse;
s->fft_permute = ff_fft_permute_sse;
s->fft_calc = ff_fft_calc_sse;
s->fft_permutation = FF_FFT_PERM_SWAP_LSBS;
}
if (EXTERNAL_AVX_FAST(cpu_flags) && s->nbits >= 5) {
/* AVX for SB */
s->imdct_half = ff_imdct_half_avx;
s->fft_calc = ff_fft_calc_avx;
s->fft_permutation = FF_FFT_PERM_AVX;
}
}

View File

@@ -0,0 +1,101 @@
;******************************************************************************
;* FLAC DSP functions
;*
;* Copyright (c) 2014 James Darnley <james.darnley@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or modify
;* it under the terms of the GNU General Public License as published by
;* the Free Software Foundation; either version 2 of the License, or
;* (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;* GNU General Public License for more details.
;*
;* You should have received a copy of the GNU General Public License along
;* with FFmpeg; if not, write to the Free Software Foundation, Inc.,
;* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION .text
INIT_XMM sse4
%if ARCH_X86_64
cglobal flac_enc_lpc_16, 5, 7, 8, 0, res, smp, len, order, coefs
DECLARE_REG_TMP 5, 6
%define length r2d
movsxd orderq, orderd
%else
cglobal flac_enc_lpc_16, 5, 6, 8, 0, res, smp, len, order, coefs
DECLARE_REG_TMP 2, 5
%define length r2mp
%endif
; Here we assume that the maximum order value is 32. This means that we only
; need to copy a maximum of 32 samples. Therefore we let the preprocessor
; unroll this loop and copy all 32.
%assign iter 0
%rep 32/(mmsize/4)
movu m0, [smpq+iter]
movu [resq+iter], m0
%assign iter iter+mmsize
%endrep
lea resq, [resq+orderq*4]
lea smpq, [smpq+orderq*4]
lea coefsq, [coefsq+orderq*4]
sub length, orderd
movd m3, r5m
neg orderq
%define posj t0q
%define negj t1q
.looplen:
pxor m0, m0
pxor m4, m4
pxor m6, m6
mov posj, orderq
xor negj, negj
.looporder:
movd m2, [coefsq+posj*4] ; c = coefs[j]
SPLATD m2
movu m1, [smpq+negj*4-4] ; s = smp[i-j-1]
movu m5, [smpq+negj*4-4+mmsize]
movu m7, [smpq+negj*4-4+mmsize*2]
pmulld m1, m2
pmulld m5, m2
pmulld m7, m2
paddd m0, m1 ; p += c * s
paddd m4, m5
paddd m6, m7
dec negj
inc posj
jnz .looporder
psrad m0, m3 ; p >>= shift
psrad m4, m3
psrad m6, m3
movu m1, [smpq]
movu m5, [smpq+mmsize]
movu m7, [smpq+mmsize*2]
psubd m1, m0 ; smp[i] - p
psubd m5, m4
psubd m7, m6
movu [resq], m1 ; res[i] = smp[i] - (p >> shift)
movu [resq+mmsize], m5
movu [resq+mmsize*2], m7
add resq, 3*mmsize
add smpq, 3*mmsize
sub length, (3*mmsize)/4
jg .looplen
RET
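In scalar form the kernel above computes the LPC residual: the first order samples are copied through, and every later sample subtracts the shifted prediction built from the previous order samples. A hedged C reference reconstructed from the comments in the loop, not taken verbatim from flacdsp.c:
#include <stdint.h>

static void flac_enc_lpc_16_c_sketch(int32_t *res, const int32_t *smp, int len,
                                     int order, const int32_t *coefs, int shift)
{
    for (int i = 0; i < order; i++)        /* warm-up samples are passed through */
        res[i] = smp[i];

    for (int i = order; i < len; i++) {
        int32_t p = 0;
        for (int j = 0; j < order; j++)    /* p += c * s, with s = smp[i-j-1] */
            p += coefs[j] * smp[i - j - 1];
        res[i] = smp[i] - (p >> shift);    /* res[i] = smp[i] - (p >> shift) */
    }
}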

View File

@@ -0,0 +1,313 @@
;******************************************************************************
;* FLAC DSP SIMD optimizations
;*
;* Copyright (C) 2014 Loren Merritt
;* Copyright (C) 2014 James Almer
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION .text
%macro PMACSDQL 5
%if cpuflag(xop)
pmacsdql %1, %2, %3, %1
%else
pmuldq %2, %3
paddq %1, %2
%endif
%endmacro
%macro LPC_32 1
INIT_XMM %1
cglobal flac_lpc_32, 5,6,5, decoded, coeffs, pred_order, qlevel, len, j
sub lend, pred_orderd
jle .ret
lea decodedq, [decodedq+pred_orderq*4-8]
lea coeffsq, [coeffsq+pred_orderq*4]
neg pred_orderq
movd m4, qlevelm
ALIGN 16
.loop_sample:
movd m0, [decodedq+pred_orderq*4+8]
add decodedq, 8
movd m1, [coeffsq+pred_orderq*4]
pxor m2, m2
pxor m3, m3
lea jq, [pred_orderq+1]
test jq, jq
jz .end_order
.loop_order:
PMACSDQL m2, m0, m1, m2, m0
movd m0, [decodedq+jq*4]
PMACSDQL m3, m1, m0, m3, m1
movd m1, [coeffsq+jq*4]
inc jq
jl .loop_order
.end_order:
PMACSDQL m2, m0, m1, m2, m0
psrlq m2, m4
movd m0, [decodedq]
paddd m0, m2
movd [decodedq], m0
sub lend, 2
jl .ret
PMACSDQL m3, m1, m0, m3, m1
psrlq m3, m4
movd m1, [decodedq+4]
paddd m1, m3
movd [decodedq+4], m1
jg .loop_sample
.ret:
REP_RET
%endmacro
%if HAVE_XOP_EXTERNAL
LPC_32 xop
%endif
LPC_32 sse4
;----------------------------------------------------------------------------------
;void ff_flac_decorrelate_[lrm]s_16_sse2(uint8_t **out, int32_t **in, int channels,
; int len, int shift);
;----------------------------------------------------------------------------------
%macro FLAC_DECORRELATE_16 3-4
cglobal flac_decorrelate_%1_16, 2, 4, 4, out, in0, in1, len
%if ARCH_X86_32
mov lend, lenm
%endif
movd m3, r4m
shl lend, 2
mov in1q, [in0q + gprsize]
mov in0q, [in0q]
mov outq, [outq]
add in1q, lenq
add in0q, lenq
add outq, lenq
neg lenq
align 16
.loop:
mova m0, [in0q + lenq]
mova m1, [in1q + lenq]
%ifidn %1, ms
psrad m2, m1, 1
psubd m0, m2
%endif
%ifnidn %1, indep2
p%4d m2, m0, m1
%endif
packssdw m%2, m%2
packssdw m%3, m%3
punpcklwd m%2, m%3
psllw m%2, m3
mova [outq + lenq], m%2
add lenq, 16
jl .loop
REP_RET
%endmacro
INIT_XMM sse2
FLAC_DECORRELATE_16 ls, 0, 2, sub
FLAC_DECORRELATE_16 rs, 2, 1, add
FLAC_DECORRELATE_16 ms, 2, 0, add
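Per sample, the three stereo modes above reconstruct left/right from the decoded channel pair before the interleaving and shifting store. A hedged scalar sketch of the arithmetic, derived from the add/sub/shift selections passed to the macro:
#include <stdint.h>

/* in0/in1 are the two decoded subframes; *left/*right receive the samples
 * before the final '<< shift' applied by the interleaving store. */
static void flac_decorrelate_sample(int mode, int32_t in0, int32_t in1,
                                    int32_t *left, int32_t *right)
{
    switch (mode) {
    case 0: /* left-side:  in0 = L, in1 = L - R */
        *left  = in0;
        *right = in0 - in1;
        break;
    case 1: /* right-side: in0 = L - R, in1 = R */
        *left  = in0 + in1;
        *right = in1;
        break;
    default: /* mid-side:   in0 = mid, in1 = side */
        in0   -= in1 >> 1;
        *left  = in0 + in1;
        *right = in0;
        break;
    }
}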
;----------------------------------------------------------------------------------
;void ff_flac_decorrelate_[lrm]s_32_sse2(uint8_t **out, int32_t **in, int channels,
; int len, int shift);
;----------------------------------------------------------------------------------
%macro FLAC_DECORRELATE_32 5
cglobal flac_decorrelate_%1_32, 2, 4, 4, out, in0, in1, len
%if ARCH_X86_32
mov lend, lenm
%endif
movd m3, r4m
mov in1q, [in0q + gprsize]
mov in0q, [in0q]
mov outq, [outq]
sub in1q, in0q
align 16
.loop:
mova m0, [in0q]
mova m1, [in0q + in1q]
%ifidn %1, ms
psrad m2, m1, 1
psubd m0, m2
%endif
p%5d m2, m0, m1
pslld m%2, m3
pslld m%3, m3
SBUTTERFLY dq, %2, %3, %4
mova [outq ], m%2
mova [outq + mmsize], m%3
add in0q, mmsize
add outq, mmsize*2
sub lend, mmsize/4
jg .loop
REP_RET
%endmacro
INIT_XMM sse2
FLAC_DECORRELATE_32 ls, 0, 2, 1, sub
FLAC_DECORRELATE_32 rs, 2, 1, 0, add
FLAC_DECORRELATE_32 ms, 2, 0, 1, add
;-----------------------------------------------------------------------------------------
;void ff_flac_decorrelate_indep<ch>_<bps>_<opt>(uint8_t **out, int32_t **in, int channels,
; int len, int shift);
;-----------------------------------------------------------------------------------------
;%1 = bps
;%2 = channels
;%3 = last xmm reg used
;%4 = word/dword (shift instruction)
%macro FLAC_DECORRELATE_INDEP 4
%define REPCOUNT %2/(32/%1) ; 16bits = channels / 2; 32bits = channels
cglobal flac_decorrelate_indep%2_%1, 2, %2+2, %3+1, out, in0, in1, len, in2, in3, in4, in5, in6, in7
%if ARCH_X86_32
%if %2 == 6
DEFINE_ARGS out, in0, in1, in2, in3, in4, in5
%define lend dword r3m
%else
mov lend, lenm
%endif
%endif
movd m%3, r4m
%assign %%i 1
%rep %2-1
mov in %+ %%i %+ q, [in0q+%%i*gprsize]
%assign %%i %%i+1
%endrep
mov in0q, [in0q]
mov outq, [outq]
%assign %%i 1
%rep %2-1
sub in %+ %%i %+ q, in0q
%assign %%i %%i+1
%endrep
align 16
.loop:
mova m0, [in0q]
%assign %%i 1
%rep REPCOUNT-1
mova m %+ %%i, [in0q + in %+ %%i %+ q]
%assign %%i %%i+1
%endrep
%if %1 == 32
%if %2 == 8
TRANSPOSE8x4D 0, 1, 2, 3, 4, 5, 6, 7, 8
%elif %2 == 6
SBUTTERFLY dq, 0, 1, 6
SBUTTERFLY dq, 2, 3, 6
SBUTTERFLY dq, 4, 5, 6
punpcklqdq m6, m0, m2
punpckhqdq m2, m4
shufps m4, m0, 0xe4
punpcklqdq m0, m1, m3
punpckhqdq m3, m5
shufps m5, m1, 0xe4
SWAP 0,6,1,4,5,3
%elif %2 == 4
TRANSPOSE4x4D 0, 1, 2, 3, 4
%else ; %2 == 2
SBUTTERFLY dq, 0, 1, 2
%endif
%else ; %1 == 16
%if %2 == 8
packssdw m0, [in0q + in4q]
packssdw m1, [in0q + in5q]
packssdw m2, [in0q + in6q]
packssdw m3, [in0q + in7q]
TRANSPOSE2x4x4W 0, 1, 2, 3, 4
%elif %2 == 6
packssdw m0, [in0q + in3q]
packssdw m1, [in0q + in4q]
packssdw m2, [in0q + in5q]
pshufd m3, m0, q1032
punpcklwd m0, m1
punpckhwd m1, m2
punpcklwd m2, m3
shufps m3, m0, m2, q2020
shufps m0, m1, q2031
shufps m2, m1, q3131
shufps m1, m2, m3, q3120
shufps m3, m0, q0220
shufps m0, m2, q3113
SWAP 2, 0, 3
%else ; %2 == 4
packssdw m0, [in0q + in2q]
packssdw m1, [in0q + in3q]
SBUTTERFLY wd, 0, 1, 2
SBUTTERFLY dq, 0, 1, 2
%endif
%endif
%assign %%i 0
%rep REPCOUNT
psll%4 m %+ %%i, m%3
%assign %%i %%i+1
%endrep
%assign %%i 0
%rep REPCOUNT
mova [outq + %%i*mmsize], m %+ %%i
%assign %%i %%i+1
%endrep
add in0q, mmsize
add outq, mmsize*REPCOUNT
sub lend, mmsize/4
jg .loop
REP_RET
%endmacro
INIT_XMM sse2
FLAC_DECORRELATE_16 indep2, 0, 1 ; Reuse stereo 16bits macro
FLAC_DECORRELATE_INDEP 32, 2, 3, d
FLAC_DECORRELATE_INDEP 16, 4, 3, w
FLAC_DECORRELATE_INDEP 32, 4, 5, d
FLAC_DECORRELATE_INDEP 16, 6, 4, w
FLAC_DECORRELATE_INDEP 32, 6, 7, d
%if ARCH_X86_64
FLAC_DECORRELATE_INDEP 16, 8, 5, w
FLAC_DECORRELATE_INDEP 32, 8, 9, d
%endif
INIT_XMM avx
FLAC_DECORRELATE_INDEP 32, 4, 5, d
FLAC_DECORRELATE_INDEP 32, 6, 7, d
%if ARCH_X86_64
FLAC_DECORRELATE_INDEP 16, 8, 5, w
FLAC_DECORRELATE_INDEP 32, 8, 9, d
%endif
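; The indep variants above only interleave: the SBUTTERFLY/TRANSPOSE ladders reorder N planar input channels into packed frames, applying the output shift on the way. A scalar sketch of the 16-bit case (the 32-bit case is identical with int32_t output):
static void flac_decorrelate_indep_16_ref(uint8_t **out, int32_t **in,
                                          int channels, int len, int shift)
{
    int16_t *o = (int16_t *)out[0];
    for (int i = 0; i < len; i++)
        for (int ch = 0; ch < channels; ch++)
            *o++ = in[ch][i] << shift;
}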

View File

@@ -0,0 +1,115 @@
/*
* Copyright (c) 2014 James Almer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavcodec/flacdsp.h"
#include "libavutil/x86/cpu.h"
#include "config.h"
void ff_flac_lpc_32_sse4(int32_t *samples, const int coeffs[32], int order,
int qlevel, int len);
void ff_flac_lpc_32_xop(int32_t *samples, const int coeffs[32], int order,
int qlevel, int len);
void ff_flac_enc_lpc_16_sse4(int32_t *, const int32_t *, int, int, const int32_t *,int);
#define DECORRELATE_FUNCS(fmt, opt) \
void ff_flac_decorrelate_ls_##fmt##_##opt(uint8_t **out, int32_t **in, int channels, \
int len, int shift); \
void ff_flac_decorrelate_rs_##fmt##_##opt(uint8_t **out, int32_t **in, int channels, \
int len, int shift); \
void ff_flac_decorrelate_ms_##fmt##_##opt(uint8_t **out, int32_t **in, int channels, \
int len, int shift); \
void ff_flac_decorrelate_indep2_##fmt##_##opt(uint8_t **out, int32_t **in, int channels, \
int len, int shift); \
void ff_flac_decorrelate_indep4_##fmt##_##opt(uint8_t **out, int32_t **in, int channels, \
int len, int shift); \
void ff_flac_decorrelate_indep6_##fmt##_##opt(uint8_t **out, int32_t **in, int channels, \
int len, int shift); \
void ff_flac_decorrelate_indep8_##fmt##_##opt(uint8_t **out, int32_t **in, int channels, \
int len, int shift)
DECORRELATE_FUNCS(16, sse2);
DECORRELATE_FUNCS(16, avx);
DECORRELATE_FUNCS(32, sse2);
DECORRELATE_FUNCS(32, avx);
av_cold void ff_flacdsp_init_x86(FLACDSPContext *c, enum AVSampleFormat fmt, int channels,
int bps)
{
#if HAVE_YASM
int cpu_flags = av_get_cpu_flags();
#if CONFIG_FLAC_DECODER
if (EXTERNAL_SSE2(cpu_flags)) {
if (fmt == AV_SAMPLE_FMT_S16) {
if (channels == 2)
c->decorrelate[0] = ff_flac_decorrelate_indep2_16_sse2;
else if (channels == 4)
c->decorrelate[0] = ff_flac_decorrelate_indep4_16_sse2;
else if (channels == 6)
c->decorrelate[0] = ff_flac_decorrelate_indep6_16_sse2;
else if (ARCH_X86_64 && channels == 8)
c->decorrelate[0] = ff_flac_decorrelate_indep8_16_sse2;
c->decorrelate[1] = ff_flac_decorrelate_ls_16_sse2;
c->decorrelate[2] = ff_flac_decorrelate_rs_16_sse2;
c->decorrelate[3] = ff_flac_decorrelate_ms_16_sse2;
} else if (fmt == AV_SAMPLE_FMT_S32) {
if (channels == 2)
c->decorrelate[0] = ff_flac_decorrelate_indep2_32_sse2;
else if (channels == 4)
c->decorrelate[0] = ff_flac_decorrelate_indep4_32_sse2;
else if (channels == 6)
c->decorrelate[0] = ff_flac_decorrelate_indep6_32_sse2;
else if (ARCH_X86_64 && channels == 8)
c->decorrelate[0] = ff_flac_decorrelate_indep8_32_sse2;
c->decorrelate[1] = ff_flac_decorrelate_ls_32_sse2;
c->decorrelate[2] = ff_flac_decorrelate_rs_32_sse2;
c->decorrelate[3] = ff_flac_decorrelate_ms_32_sse2;
}
}
if (EXTERNAL_SSE4(cpu_flags)) {
c->lpc32 = ff_flac_lpc_32_sse4;
}
if (EXTERNAL_AVX(cpu_flags)) {
if (fmt == AV_SAMPLE_FMT_S16) {
if (ARCH_X86_64 && channels == 8)
c->decorrelate[0] = ff_flac_decorrelate_indep8_16_avx;
} else if (fmt == AV_SAMPLE_FMT_S32) {
if (channels == 4)
c->decorrelate[0] = ff_flac_decorrelate_indep4_32_avx;
else if (channels == 6)
c->decorrelate[0] = ff_flac_decorrelate_indep6_32_avx;
else if (ARCH_X86_64 && channels == 8)
c->decorrelate[0] = ff_flac_decorrelate_indep8_32_avx;
}
}
if (EXTERNAL_XOP(cpu_flags)) {
c->lpc32 = ff_flac_lpc_32_xop;
}
#endif
#if CONFIG_FLAC_ENCODER
if (EXTERNAL_SSE4(cpu_flags)) {
if (CONFIG_GPL)
c->lpc16_encode = ff_flac_enc_lpc_16_sse4;
}
#endif
#endif /* HAVE_YASM */
}
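/* Illustrative only: decoders never call these symbols directly; they fill a
 * FLACDSPContext and dispatch through its function pointers, so the fastest
 * version the CPU supports is picked once at init time. A hedged sketch of
 * such a caller (ff_flacdsp_init() and the channel-mode index are assumptions
 * about the surrounding decoder, not part of this file): */
FLACDSPContext dsp;
ff_flacdsp_init(&dsp, AV_SAMPLE_FMT_S16, channels, bps); /* runs ff_flacdsp_init_x86() on x86 */
dsp.lpc32(decoded[ch], coeffs, pred_order, qlevel, blocksize);
dsp.decorrelate[ch_mode](out_planes, decoded, channels, blocksize, /* 0 = indep, 1 = ls, */
                         sample_shift);                            /* 2 = rs,    3 = ms  */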

View File

@@ -0,0 +1,110 @@
;******************************************************************************
;* x86 optimized Format Conversion Utils
;* Copyright (c) 2008 Loren Merritt
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION .text
;------------------------------------------------------------------------------
; void ff_int32_to_float_fmul_scalar(float *dst, const int32_t *src, float mul,
; int len);
;------------------------------------------------------------------------------
%macro INT32_TO_FLOAT_FMUL_SCALAR 1
%if UNIX64
cglobal int32_to_float_fmul_scalar, 3, 3, %1, dst, src, len
%else
cglobal int32_to_float_fmul_scalar, 4, 4, %1, dst, src, mul, len
%endif
%if WIN64
SWAP 0, 2
%elif ARCH_X86_32
movss m0, mulm
%endif
SPLATD m0
shl lenq, 2
add srcq, lenq
add dstq, lenq
neg lenq
.loop:
%if cpuflag(sse2)
cvtdq2ps m1, [srcq+lenq ]
cvtdq2ps m2, [srcq+lenq+16]
%else
cvtpi2ps m1, [srcq+lenq ]
cvtpi2ps m3, [srcq+lenq+ 8]
cvtpi2ps m2, [srcq+lenq+16]
cvtpi2ps m4, [srcq+lenq+24]
movlhps m1, m3
movlhps m2, m4
%endif
mulps m1, m0
mulps m2, m0
mova [dstq+lenq ], m1
mova [dstq+lenq+16], m2
add lenq, 32
jl .loop
REP_RET
%endmacro
INIT_XMM sse
INT32_TO_FLOAT_FMUL_SCALAR 5
INIT_XMM sse2
INT32_TO_FLOAT_FMUL_SCALAR 3
;------------------------------------------------------------------------------
; void ff_int32_to_float_fmul_array8(FmtConvertContext *c, float *dst, const int32_t *src,
; const float *mul, int len);
;------------------------------------------------------------------------------
%macro INT32_TO_FLOAT_FMUL_ARRAY8 0
cglobal int32_to_float_fmul_array8, 5, 5, 5, c, dst, src, mul, len
shl lend, 2
add srcq, lenq
add dstq, lenq
neg lenq
.loop:
movss m0, [mulq]
SPLATD m0
%if cpuflag(sse2)
cvtdq2ps m1, [srcq+lenq ]
cvtdq2ps m2, [srcq+lenq+16]
%else
cvtpi2ps m1, [srcq+lenq ]
cvtpi2ps m3, [srcq+lenq+ 8]
cvtpi2ps m2, [srcq+lenq+16]
cvtpi2ps m4, [srcq+lenq+24]
movlhps m1, m3
movlhps m2, m4
%endif
mulps m1, m0
mulps m2, m0
mova [dstq+lenq ], m1
mova [dstq+lenq+16], m2
add mulq, 4
add lenq, 32
jl .loop
REP_RET
%endmacro
INIT_XMM sse
INT32_TO_FLOAT_FMUL_ARRAY8
INIT_XMM sse2
INT32_TO_FLOAT_FMUL_ARRAY8
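; Both entry points convert int32 samples to float with a scale factor; the scalar variant applies one multiplier to the whole buffer, while array8 reloads a new multiplier every eight samples (the movss/SPLATD inside the loop above). A scalar sketch, assuming len is a multiple of 8 as the asm requires:
static void int32_to_float_fmul_scalar_ref(float *dst, const int32_t *src,
                                           float mul, int len)
{
    for (int i = 0; i < len; i++)
        dst[i] = src[i] * mul;
}
static void int32_to_float_fmul_array8_ref(float *dst, const int32_t *src,
                                           const float *mul, int len)
{
    for (int i = 0; i < len; i += 8)          /* one scale factor per 8 samples */
        int32_to_float_fmul_scalar_ref(dst + i, src + i, mul[i / 8], 8);
}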

View File

@@ -0,0 +1,56 @@
/*
* Format Conversion Utils
* Copyright (c) 2000, 2001 Fabrice Bellard
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
* MMX optimization by Nick Kurshev <nickols_k@mail.ru>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/fmtconvert.h"
#if HAVE_YASM
void ff_int32_to_float_fmul_scalar_sse (float *dst, const int32_t *src, float mul, int len);
void ff_int32_to_float_fmul_scalar_sse2(float *dst, const int32_t *src, float mul, int len);
void ff_int32_to_float_fmul_array8_sse (FmtConvertContext *c, float *dst, const int32_t *src,
const float *mul, int len);
void ff_int32_to_float_fmul_array8_sse2(FmtConvertContext *c, float *dst, const int32_t *src,
const float *mul, int len);
#endif /* HAVE_YASM */
av_cold void ff_fmt_convert_init_x86(FmtConvertContext *c, AVCodecContext *avctx)
{
#if HAVE_YASM
int cpu_flags = av_get_cpu_flags();
if (EXTERNAL_SSE(cpu_flags)) {
c->int32_to_float_fmul_scalar = ff_int32_to_float_fmul_scalar_sse;
c->int32_to_float_fmul_array8 = ff_int32_to_float_fmul_array8_sse;
}
if (EXTERNAL_SSE2(cpu_flags)) {
c->int32_to_float_fmul_scalar = ff_int32_to_float_fmul_scalar_sse2;
c->int32_to_float_fmul_array8 = ff_int32_to_float_fmul_array8_sse2;
}
#endif /* HAVE_YASM */
}

View File

@@ -0,0 +1,107 @@
;******************************************************************************
;* SIMD-optimized fullpel functions
;* Copyright (c) 2008 Loren Merritt
;* Copyright (c) 2003-2013 Michael Niedermayer
;* Copyright (c) 2013 Daniel Kang
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION .text
%macro PAVGB_MMX 4
LOAD %3, %1
por %3, %2
pxor %2, %1
pand %2, %4
psrlq %2, 1
psubb %3, %2
SWAP %2, %3
%endmacro
; void ff_put/avg_pixels(uint8_t *block, const uint8_t *pixels,
; ptrdiff_t line_size, int h)
%macro OP_PIXELS 2
%if %2 == mmsize/2
%define LOAD movh
%define SAVE movh
%define LEN mmsize
%else
%define LOAD movu
%define SAVE mova
%define LEN %2
%endif
cglobal %1_pixels%2, 4,5,4
movsxdifnidn r2, r2d
lea r4, [r2*3]
%ifidn %1, avg
%if notcpuflag(mmxext)
pcmpeqd m6, m6
paddb m6, m6
%endif
%endif
.loop:
%assign %%i 0
%rep LEN/mmsize
LOAD m0, [r1 + %%i]
LOAD m1, [r1+r2 + %%i]
LOAD m2, [r1+r2*2 + %%i]
LOAD m3, [r1+r4 + %%i]
%ifidn %1, avg
%if notcpuflag(mmxext)
PAVGB_MMX [r0 + %%i], m0, m4, m6
PAVGB_MMX [r0+r2 + %%i], m1, m5, m6
PAVGB_MMX [r0+r2*2 + %%i], m2, m4, m6
PAVGB_MMX [r0+r4 + %%i], m3, m5, m6
%else
pavgb m0, [r0 + %%i]
pavgb m1, [r0+r2 + %%i]
pavgb m2, [r0+r2*2 + %%i]
pavgb m3, [r0+r4 + %%i]
%endif
%endif
SAVE [r0 + %%i], m0
SAVE [r0+r2 + %%i], m1
SAVE [r0+r2*2 + %%i], m2
SAVE [r0+r4 + %%i], m3
%assign %%i %%i+mmsize
%endrep
sub r3d, 4
lea r1, [r1+r2*4]
lea r0, [r0+r2*4]
jne .loop
RET
%endmacro
INIT_MMX mmx
OP_PIXELS put, 4
OP_PIXELS avg, 4
OP_PIXELS put, 8
OP_PIXELS avg, 8
OP_PIXELS put, 16
OP_PIXELS avg, 16
INIT_MMX mmxext
OP_PIXELS avg, 4
OP_PIXELS avg, 8
OP_PIXELS avg, 16
INIT_XMM sse2
OP_PIXELS put, 16
OP_PIXELS avg, 16
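; put copies a block of width 4/8/16, avg blends it into the destination with pavgb rounding ((a + b + 1) >> 1); the PAVGB_MMX macro emulates that rounding on plain MMX. A scalar sketch for width 8 (stdint.h/stddef.h assumed):
static void put_pixels8_ref(uint8_t *block, const uint8_t *pixels,
                            ptrdiff_t line_size, int h)
{
    for (int y = 0; y < h; y++, block += line_size, pixels += line_size)
        for (int x = 0; x < 8; x++)
            block[x] = pixels[x];
}
static void avg_pixels8_ref(uint8_t *block, const uint8_t *pixels,
                            ptrdiff_t line_size, int h)
{
    for (int y = 0; y < h; y++, block += line_size, pixels += line_size)
        for (int x = 0; x < 8; x++)
            block[x] = (block[x] + pixels[x] + 1) >> 1;   /* rounding average */
}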

View File

@@ -0,0 +1,45 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_X86_FPEL_H
#define AVCODEC_X86_FPEL_H
#include <stddef.h>
#include <stdint.h>
void ff_avg_pixels8_mmx(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels8_mmxext(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels16_mmx(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels16_mmxext(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels8_mmx(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels8_mmxext(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels16_mmx(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels16_sse2(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
#endif /* AVCODEC_X86_FPEL_H */

View File

@@ -0,0 +1,54 @@
;******************************************************************************
;* SIMD optimized DSP functions for G722 coding
;*
;* Copyright (c) 2014 James Almer
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION_RODATA
pw_qmf_coeffs: dw 3, -210, -11, -805, -11, 951, 53, 3876
pw_qmf_coeffs2: dw 12, 3876, -156, 951, 32, -805, 362, -210
pw_qmf_coeffs3: dw 362, 0 , 32, 0, -156, 0, 12, 0
pw_qmf_coeffs4: dw 53, 0, -11, 0, -11, 0, 3, 0
SECTION .text
INIT_XMM sse2
cglobal g722_apply_qmf, 2, 2, 5, prev, out
movu m0, [prevq+mmsize*0]
movu m1, [prevq+mmsize*1]
movu m2, [prevq+mmsize*2]
punpcklwd m3, m0, m1
punpckhwd m0, m1
punpcklwd m4, m2, m2
punpckhwd m2, m2
pmaddwd m3, [pw_qmf_coeffs ]
pmaddwd m0, [pw_qmf_coeffs2]
pmaddwd m4, [pw_qmf_coeffs3]
pmaddwd m2, [pw_qmf_coeffs4]
paddd m0, m3
paddd m2, m4
paddd m0, m2
pshufd m2, m0, q0032
paddd m0, m2
pshufd m0, m0, q0001
movq [outq], m0
RET
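; The kernel evaluates the two G.722 QMF band outputs as 12-tap dot products over the 24 stored previous samples; the four RODATA tables above are the standard coefficients pre-interleaved to suit pmaddwd. A hedged scalar sketch (the exact even/odd tap ordering and the 12-entry qmf_coeffs[] table are assumptions about the reference filter kept elsewhere in this tree):
static void g722_apply_qmf_ref(const int16_t *prev_samples, int xout[2],
                               const int16_t qmf_coeffs[12])
{
    xout[0] = xout[1] = 0;
    for (int i = 0; i < 12; i++) {
        xout[1] += prev_samples[2 * i]     * qmf_coeffs[i];       /* one band      */
        xout[0] += prev_samples[2 * i + 1] * qmf_coeffs[11 - i];  /* the other one */
    }
}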

View File

@@ -0,0 +1,35 @@
/*
* Copyright (c) 2014 James Almer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include "libavutil/attributes.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/g722dsp.h"
void ff_g722_apply_qmf_sse2(const int16_t *prev_samples, int xout[2]);
av_cold void ff_g722dsp_init_x86(G722DSPContext *dsp)
{
int cpu_flags = av_get_cpu_flags();
if (EXTERNAL_SSE2(cpu_flags))
dsp->apply_qmf = ff_g722_apply_qmf_sse2;
}

View File

@@ -0,0 +1,189 @@
;******************************************************************************
;* MMX-optimized H.263 loop filter
;* Copyright (c) 2003-2013 Michael Niedermayer
;* Copyright (c) 2013 Daniel Kang
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION_RODATA
cextern pb_FC
cextern h263_loop_filter_strength
SECTION .text
%macro H263_LOOP_FILTER 5
pxor m7, m7
mova m0, [%1]
mova m1, [%1]
mova m2, [%4]
mova m3, [%4]
punpcklbw m0, m7
punpckhbw m1, m7
punpcklbw m2, m7
punpckhbw m3, m7
psubw m0, m2
psubw m1, m3
mova m2, [%2]
mova m3, [%2]
mova m4, [%3]
mova m5, [%3]
punpcklbw m2, m7
punpckhbw m3, m7
punpcklbw m4, m7
punpckhbw m5, m7
psubw m4, m2
psubw m5, m3
psllw m4, 2
psllw m5, 2
paddw m4, m0
paddw m5, m1
pxor m6, m6
pcmpgtw m6, m4
pcmpgtw m7, m5
pxor m4, m6
pxor m5, m7
psubw m4, m6
psubw m5, m7
psrlw m4, 3
psrlw m5, 3
packuswb m4, m5
packsswb m6, m7
pxor m7, m7
movd m2, %5
punpcklbw m2, m2
punpcklbw m2, m2
punpcklbw m2, m2
psubusb m2, m4
mova m3, m2
psubusb m3, m4
psubb m2, m3
mova m3, [%2]
mova m4, [%3]
pxor m3, m6
pxor m4, m6
paddusb m3, m2
psubusb m4, m2
pxor m3, m6
pxor m4, m6
paddusb m2, m2
packsswb m0, m1
pcmpgtb m7, m0
pxor m0, m7
psubb m0, m7
mova m1, m0
psubusb m0, m2
psubb m1, m0
pand m1, [pb_FC]
psrlw m1, 2
pxor m1, m7
psubb m1, m7
mova m5, [%1]
mova m6, [%4]
psubb m5, m1
paddb m6, m1
%endmacro
INIT_MMX mmx
; void ff_h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale)
cglobal h263_v_loop_filter, 3,5
movsxdifnidn r1, r1d
movsxdifnidn r2, r2d
lea r4, [h263_loop_filter_strength]
movzx r3d, BYTE [r4+r2]
movsx r2, r3b
shl r2, 1
mov r3, r0
sub r3, r1
mov r4, r3
sub r4, r1
H263_LOOP_FILTER r4, r3, r0, r0+r1, r2d
mova [r3], m3
mova [r0], m4
mova [r4], m5
mova [r0+r1], m6
RET
%macro TRANSPOSE4X4 2
movd m0, [%1]
movd m1, [%1+r1]
movd m2, [%1+r1*2]
movd m3, [%1+r3]
punpcklbw m0, m1
punpcklbw m2, m3
mova m1, m0
punpcklwd m0, m2
punpckhwd m1, m2
movd [%2+ 0], m0
punpckhdq m0, m0
movd [%2+ 8], m0
movd [%2+16], m1
punpckhdq m1, m1
movd [%2+24], m1
%endmacro
; void ff_h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale)
INIT_MMX mmx
cglobal h263_h_loop_filter, 3,5,0,32
movsxdifnidn r1, r1d
movsxdifnidn r2, r2d
lea r4, [h263_loop_filter_strength]
movzx r3d, BYTE [r4+r2]
movsx r2, r3b
shl r2, 1
sub r0, 2
lea r3, [r1*3]
TRANSPOSE4X4 r0, rsp
lea r4, [r0+r1*4]
TRANSPOSE4X4 r4, rsp+4
H263_LOOP_FILTER rsp, rsp+8, rsp+16, rsp+24, r2d
mova m1, m5
mova m0, m4
punpcklbw m5, m3
punpcklbw m4, m6
punpckhbw m1, m3
punpckhbw m0, m6
mova m3, m5
mova m6, m1
punpcklwd m5, m4
punpcklwd m1, m0
punpckhwd m3, m4
punpckhwd m6, m0
movd [r0], m5
punpckhdq m5, m5
movd [r0+r1*1], m5
movd [r0+r1*2], m3
punpckhdq m3, m3
movd [r0+r3], m3
movd [r4], m1
punpckhdq m1, m1
movd [r4+r1*1], m1
movd [r4+r1*2], m6
punpckhdq m6, m6
movd [r4+r3], m6
RET
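; Per column, H263_LOOP_FILTER takes the four pixels A, B, C, D straddling the edge, derives a correction from the gradient across it, clamps it with the qscale-dependent strength table, and applies most of it to the two inner pixels. A hedged scalar sketch of one column of the vertical filter (the horizontal filter is the same math after the transpose above):
static void h263_filter_column_ref(uint8_t *p, int stride, int strength)
{
    int A = p[-2 * stride], B = p[-stride], C = p[0], D = p[stride];
    int d  = (A - D + 4 * (C - B)) / 8;                        /* edge gradient */
    int ad = d < 0 ? -d : d, s2 = 2 * strength;
    int d1 = ad >= s2 ? 0 : (ad <= strength ? ad : s2 - ad);   /* up-down ramp  */
    if (d < 0)
        d1 = -d1;
    B += d1;                          /* inner pixels get most of the correction */
    C -= d1;
    p[-stride] = B < 0 ? 0 : B > 255 ? 255 : B;
    p[0]       = C < 0 ? 0 : C > 255 ? 255 : C;
    /* A and D then get a smaller correction, clip((A - D) / 4, -|d1|/2, |d1|/2),
     * subtracted from A and added to D */
}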

View File

@@ -0,0 +1,39 @@
/*
* Copyright (c) 2013 Diego Biurrun <diego@biurrun.de>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/h263dsp.h"
void ff_h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale);
void ff_h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale);
av_cold void ff_h263dsp_init_x86(H263DSPContext *c)
{
int cpu_flags = av_get_cpu_flags();
if (EXTERNAL_MMX(cpu_flags)) {
c->h263_h_loop_filter = ff_h263_h_loop_filter_mmx;
c->h263_v_loop_filter = ff_h263_v_loop_filter_mmx;
}
}

View File

@@ -0,0 +1,679 @@
;******************************************************************************
;* MMX/SSSE3-optimized functions for H264 chroma MC
;* Copyright (c) 2005 Zoltan Hidvegi <hzoli -a- hzoli -d- com>,
;* 2005-2008 Loren Merritt
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION_RODATA
rnd_rv40_2d_tbl: times 4 dw 0
times 4 dw 16
times 4 dw 32
times 4 dw 16
times 4 dw 32
times 4 dw 28
times 4 dw 32
times 4 dw 28
times 4 dw 0
times 4 dw 32
times 4 dw 16
times 4 dw 32
times 4 dw 32
times 4 dw 28
times 4 dw 32
times 4 dw 28
rnd_rv40_1d_tbl: times 4 dw 0
times 4 dw 2
times 4 dw 4
times 4 dw 2
times 4 dw 4
times 4 dw 3
times 4 dw 4
times 4 dw 3
times 4 dw 0
times 4 dw 4
times 4 dw 2
times 4 dw 4
times 4 dw 4
times 4 dw 3
times 4 dw 4
times 4 dw 3
cextern pw_3
cextern pw_4
cextern pw_8
pw_28: times 8 dw 28
cextern pw_32
cextern pw_64
SECTION .text
%macro mv0_pixels_mc8 0
lea r4, [r2*2 ]
.next4rows:
movq mm0, [r1 ]
movq mm1, [r1+r2]
add r1, r4
CHROMAMC_AVG mm0, [r0 ]
CHROMAMC_AVG mm1, [r0+r2]
movq [r0 ], mm0
movq [r0+r2], mm1
add r0, r4
movq mm0, [r1 ]
movq mm1, [r1+r2]
add r1, r4
CHROMAMC_AVG mm0, [r0 ]
CHROMAMC_AVG mm1, [r0+r2]
movq [r0 ], mm0
movq [r0+r2], mm1
add r0, r4
sub r3d, 4
jne .next4rows
%endmacro
%macro chroma_mc8_mmx_func 2-3
%ifidn %2, rv40
%ifdef PIC
%define rnd_1d_rv40 r8
%define rnd_2d_rv40 r8
%define extra_regs 2
%else ; no-PIC
%define rnd_1d_rv40 rnd_rv40_1d_tbl
%define rnd_2d_rv40 rnd_rv40_2d_tbl
%define extra_regs 1
%endif ; PIC
%else
%define extra_regs 0
%endif ; rv40
; void ff_put/avg_h264_chroma_mc8_*(uint8_t *dst /* align 8 */,
; uint8_t *src /* align 1 */,
; int stride, int h, int mx, int my)
cglobal %1_%2_chroma_mc8%3, 6, 7 + extra_regs, 0
%if ARCH_X86_64
movsxd r2, r2d
%endif
mov r6d, r5d
or r6d, r4d
jne .at_least_one_non_zero
; mx == 0 AND my == 0 - no filter needed
mv0_pixels_mc8
REP_RET
.at_least_one_non_zero:
%ifidn %2, rv40
%if ARCH_X86_64
mov r7, r5
and r7, 6 ; &~1 for mx/my=[0,7]
lea r7, [r7*4+r4]
sar r7d, 1
%define rnd_bias r7
%define dest_reg r0
%else ; x86-32
mov r0, r5
and r0, 6 ; &~1 for mx/my=[0,7]
lea r0, [r0*4+r4]
sar r0d, 1
%define rnd_bias r0
%define dest_reg r5
%endif
%else ; vc1, h264
%define rnd_bias 0
%define dest_reg r0
%endif
test r5d, r5d
mov r6, 1
je .my_is_zero
test r4d, r4d
mov r6, r2 ; dxy = x ? 1 : stride
jne .both_non_zero
.my_is_zero:
; mx == 0 XOR my == 0 - 1 dimensional filter only
or r4d, r5d ; x + y
%ifidn %2, rv40
%ifdef PIC
lea r8, [rnd_rv40_1d_tbl]
%endif
%if ARCH_X86_64 == 0
mov r5, r0m
%endif
%endif
movd m5, r4d
movq m4, [pw_8]
movq m6, [rnd_1d_%2+rnd_bias*8] ; mm6 = rnd >> 3
punpcklwd m5, m5
punpckldq m5, m5 ; mm5 = B = x
pxor m7, m7
psubw m4, m5 ; mm4 = A = 8-x
.next1drow:
movq m0, [r1 ] ; mm0 = src[0..7]
movq m2, [r1+r6] ; mm2 = src[1..8]
movq m1, m0
movq m3, m2
punpcklbw m0, m7
punpckhbw m1, m7
punpcklbw m2, m7
punpckhbw m3, m7
pmullw m0, m4 ; [mm0,mm1] = A * src[0..7]
pmullw m1, m4
pmullw m2, m5 ; [mm2,mm3] = B * src[1..8]
pmullw m3, m5
paddw m0, m6
paddw m1, m6
paddw m0, m2
paddw m1, m3
psrlw m0, 3
psrlw m1, 3
packuswb m0, m1
CHROMAMC_AVG m0, [dest_reg]
movq [dest_reg], m0 ; dst[0..7] = (A * src[0..7] + B * src[1..8] + (rnd >> 3)) >> 3
add dest_reg, r2
add r1, r2
dec r3d
jne .next1drow
REP_RET
.both_non_zero: ; general case, bilinear
movd m4, r4d ; x
movd m6, r5d ; y
%ifidn %2, rv40
%ifdef PIC
lea r8, [rnd_rv40_2d_tbl]
%endif
%if ARCH_X86_64 == 0
mov r5, r0m
%endif
%endif
mov r6, rsp ; backup stack pointer
and rsp, ~(mmsize-1) ; align stack
sub rsp, 16 ; AA and DD
punpcklwd m4, m4
punpcklwd m6, m6
punpckldq m4, m4 ; mm4 = x words
punpckldq m6, m6 ; mm6 = y words
movq m5, m4
pmullw m4, m6 ; mm4 = x * y
psllw m5, 3
psllw m6, 3
movq m7, m5
paddw m7, m6
movq [rsp+8], m4 ; DD = x * y
psubw m5, m4 ; mm5 = B = 8x - xy
psubw m6, m4 ; mm6 = C = 8y - xy
paddw m4, [pw_64]
psubw m4, m7 ; mm4 = A = xy - (8x+8y) + 64
pxor m7, m7
movq [rsp ], m4
movq m0, [r1 ] ; mm0 = src[0..7]
movq m1, [r1+1] ; mm1 = src[1..8]
.next2drow:
add r1, r2
movq m2, m0
movq m3, m1
punpckhbw m0, m7
punpcklbw m1, m7
punpcklbw m2, m7
punpckhbw m3, m7
pmullw m0, [rsp]
pmullw m2, [rsp]
pmullw m1, m5
pmullw m3, m5
paddw m2, m1 ; mm2 = A * src[0..3] + B * src[1..4]
paddw m3, m0 ; mm3 = A * src[4..7] + B * src[5..8]
movq m0, [r1]
movq m1, m0
punpcklbw m0, m7
punpckhbw m1, m7
pmullw m0, m6
pmullw m1, m6
paddw m2, m0
paddw m3, m1 ; [mm2,mm3] += C * src[0..7]
movq m1, [r1+1]
movq m0, m1
movq m4, m1
punpcklbw m0, m7
punpckhbw m4, m7
pmullw m0, [rsp+8]
pmullw m4, [rsp+8]
paddw m2, m0
paddw m3, m4 ; [mm2,mm3] += D * src[1..8]
movq m0, [r1]
paddw m2, [rnd_2d_%2+rnd_bias*8]
paddw m3, [rnd_2d_%2+rnd_bias*8]
psrlw m2, 6
psrlw m3, 6
packuswb m2, m3
CHROMAMC_AVG m2, [dest_reg]
movq [dest_reg], m2 ; dst[0..7] = ([mm2,mm3] + rnd) >> 6
add dest_reg, r2
dec r3d
jne .next2drow
mov rsp, r6 ; restore stack pointer
RET
%endmacro
%macro chroma_mc4_mmx_func 2
%define extra_regs 0
%ifidn %2, rv40
%ifdef PIC
%define extra_regs 1
%endif ; PIC
%endif ; rv40
cglobal %1_%2_chroma_mc4, 6, 6 + extra_regs, 0
%if ARCH_X86_64
movsxd r2, r2d
%endif
pxor m7, m7
movd m2, r4d ; x
movd m3, r5d ; y
movq m4, [pw_8]
movq m5, [pw_8]
punpcklwd m2, m2
punpcklwd m3, m3
punpcklwd m2, m2
punpcklwd m3, m3
psubw m4, m2
psubw m5, m3
%ifidn %2, rv40
%ifdef PIC
lea r6, [rnd_rv40_2d_tbl]
%define rnd_2d_rv40 r6
%else
%define rnd_2d_rv40 rnd_rv40_2d_tbl
%endif
and r5, 6 ; &~1 for mx/my=[0,7]
lea r5, [r5*4+r4]
sar r5d, 1
%define rnd_bias r5
%else ; vc1, h264
%define rnd_bias 0
%endif
movd m0, [r1 ]
movd m6, [r1+1]
add r1, r2
punpcklbw m0, m7
punpcklbw m6, m7
pmullw m0, m4
pmullw m6, m2
paddw m6, m0
.next2rows:
movd m0, [r1 ]
movd m1, [r1+1]
add r1, r2
punpcklbw m0, m7
punpcklbw m1, m7
pmullw m0, m4
pmullw m1, m2
paddw m1, m0
movq m0, m1
pmullw m6, m5
pmullw m1, m3
paddw m6, [rnd_2d_%2+rnd_bias*8]
paddw m1, m6
psrlw m1, 6
packuswb m1, m1
CHROMAMC_AVG4 m1, m6, [r0]
movd [r0], m1
add r0, r2
movd m6, [r1 ]
movd m1, [r1+1]
add r1, r2
punpcklbw m6, m7
punpcklbw m1, m7
pmullw m6, m4
pmullw m1, m2
paddw m1, m6
movq m6, m1
pmullw m0, m5
pmullw m1, m3
paddw m0, [rnd_2d_%2+rnd_bias*8]
paddw m1, m0
psrlw m1, 6
packuswb m1, m1
CHROMAMC_AVG4 m1, m0, [r0]
movd [r0], m1
add r0, r2
sub r3d, 2
jnz .next2rows
REP_RET
%endmacro
%macro chroma_mc2_mmx_func 2
cglobal %1_%2_chroma_mc2, 6, 7, 0
%if ARCH_X86_64
movsxd r2, r2d
%endif
mov r6d, r4d
shl r4d, 16
sub r4d, r6d
add r4d, 8
imul r5d, r4d ; x*y<<16 | y*(8-x)
shl r4d, 3
sub r4d, r5d ; x*(8-y)<<16 | (8-x)*(8-y)
movd m5, r4d
movd m6, r5d
punpckldq m5, m5 ; mm5 = {A,B,A,B}
punpckldq m6, m6 ; mm6 = {C,D,C,D}
pxor m7, m7
movd m2, [r1]
punpcklbw m2, m7
pshufw m2, m2, 0x94 ; mm2 = src[0,1,1,2]
.nextrow:
add r1, r2
movq m1, m2
pmaddwd m1, m5 ; mm1 = A * src[0,1] + B * src[1,2]
movd m0, [r1]
punpcklbw m0, m7
pshufw m0, m0, 0x94 ; mm0 = src[0,1,1,2]
movq m2, m0
pmaddwd m0, m6
paddw m1, [rnd_2d_%2]
paddw m1, m0 ; mm1 += C * src[0,1] + D * src[1,2]
psrlw m1, 6
packssdw m1, m7
packuswb m1, m7
CHROMAMC_AVG4 m1, m3, [r0]
movd r5d, m1
mov [r0], r5w
add r0, r2
sub r3d, 1
jnz .nextrow
REP_RET
%endmacro
%define rnd_1d_h264 pw_4
%define rnd_2d_h264 pw_32
%define rnd_1d_vc1 pw_3
%define rnd_2d_vc1 pw_28
%macro NOTHING 2-3
%endmacro
%macro DIRECT_AVG 2
PAVGB %1, %2
%endmacro
%macro COPY_AVG 3
movd %2, %3
PAVGB %1, %2
%endmacro
INIT_MMX mmx
%define CHROMAMC_AVG NOTHING
%define CHROMAMC_AVG4 NOTHING
chroma_mc8_mmx_func put, h264, _rnd
chroma_mc8_mmx_func put, vc1, _nornd
chroma_mc8_mmx_func put, rv40
chroma_mc4_mmx_func put, h264
chroma_mc4_mmx_func put, rv40
INIT_MMX mmxext
chroma_mc2_mmx_func put, h264
%define CHROMAMC_AVG DIRECT_AVG
%define CHROMAMC_AVG4 COPY_AVG
chroma_mc8_mmx_func avg, h264, _rnd
chroma_mc8_mmx_func avg, vc1, _nornd
chroma_mc8_mmx_func avg, rv40
chroma_mc4_mmx_func avg, h264
chroma_mc4_mmx_func avg, rv40
chroma_mc2_mmx_func avg, h264
INIT_MMX 3dnow
chroma_mc8_mmx_func avg, h264, _rnd
chroma_mc8_mmx_func avg, vc1, _nornd
chroma_mc8_mmx_func avg, rv40
chroma_mc4_mmx_func avg, h264
chroma_mc4_mmx_func avg, rv40
%macro chroma_mc8_ssse3_func 2-3
cglobal %1_%2_chroma_mc8%3, 6, 7, 8
%if ARCH_X86_64
movsxd r2, r2d
%endif
mov r6d, r5d
or r6d, r4d
jne .at_least_one_non_zero
; mx == 0 AND my == 0 - no filter needed
mv0_pixels_mc8
REP_RET
.at_least_one_non_zero:
test r5d, r5d
je .my_is_zero
test r4d, r4d
je .mx_is_zero
; general case, bilinear
mov r6d, r4d
shl r4d, 8
sub r4, r6
mov r6, 8
add r4, 8 ; x*255+8 = x<<8 | (8-x)
sub r6d, r5d
imul r6, r4 ; (8-y)*(x*255+8) = (8-y)*x<<8 | (8-y)*(8-x)
imul r4d, r5d ; y *(x*255+8) = y *x<<8 | y *(8-x)
movd m7, r6d
movd m6, r4d
movdqa m5, [rnd_2d_%2]
movq m0, [r1 ]
movq m1, [r1+1]
pshuflw m7, m7, 0
pshuflw m6, m6, 0
punpcklbw m0, m1
movlhps m7, m7
movlhps m6, m6
.next2rows:
movq m1, [r1+r2*1 ]
movq m2, [r1+r2*1+1]
movq m3, [r1+r2*2 ]
movq m4, [r1+r2*2+1]
lea r1, [r1+r2*2]
punpcklbw m1, m2
movdqa m2, m1
punpcklbw m3, m4
movdqa m4, m3
pmaddubsw m0, m7
pmaddubsw m1, m6
pmaddubsw m2, m7
pmaddubsw m3, m6
paddw m0, m5
paddw m2, m5
paddw m1, m0
paddw m3, m2
psrlw m1, 6
movdqa m0, m4
psrlw m3, 6
%ifidn %1, avg
movq m2, [r0 ]
movhps m2, [r0+r2]
%endif
packuswb m1, m3
CHROMAMC_AVG m1, m2
movq [r0 ], m1
movhps [r0+r2], m1
sub r3d, 2
lea r0, [r0+r2*2]
jg .next2rows
REP_RET
.my_is_zero:
mov r5d, r4d
shl r4d, 8
add r4, 8
sub r4, r5 ; 255*x+8 = x<<8 | (8-x)
movd m7, r4d
movdqa m6, [rnd_1d_%2]
pshuflw m7, m7, 0
movlhps m7, m7
.next2xrows:
movq m0, [r1 ]
movq m1, [r1 +1]
movq m2, [r1+r2 ]
movq m3, [r1+r2+1]
punpcklbw m0, m1
punpcklbw m2, m3
pmaddubsw m0, m7
pmaddubsw m2, m7
%ifidn %1, avg
movq m4, [r0 ]
movhps m4, [r0+r2]
%endif
paddw m0, m6
paddw m2, m6
psrlw m0, 3
psrlw m2, 3
packuswb m0, m2
CHROMAMC_AVG m0, m4
movq [r0 ], m0
movhps [r0+r2], m0
sub r3d, 2
lea r0, [r0+r2*2]
lea r1, [r1+r2*2]
jg .next2xrows
REP_RET
.mx_is_zero:
mov r4d, r5d
shl r5d, 8
add r5, 8
sub r5, r4 ; 255*y+8 = y<<8 | (8-y)
movd m7, r5d
movdqa m6, [rnd_1d_%2]
pshuflw m7, m7, 0
movlhps m7, m7
.next2yrows:
movq m0, [r1 ]
movq m1, [r1+r2 ]
movdqa m2, m1
movq m3, [r1+r2*2]
lea r1, [r1+r2*2]
punpcklbw m0, m1
punpcklbw m2, m3
pmaddubsw m0, m7
pmaddubsw m2, m7
%ifidn %1, avg
movq m4, [r0 ]
movhps m4, [r0+r2]
%endif
paddw m0, m6
paddw m2, m6
psrlw m0, 3
psrlw m2, 3
packuswb m0, m2
CHROMAMC_AVG m0, m4
movq [r0 ], m0
movhps [r0+r2], m0
sub r3d, 2
lea r0, [r0+r2*2]
jg .next2yrows
REP_RET
%endmacro
%macro chroma_mc4_ssse3_func 2
cglobal %1_%2_chroma_mc4, 6, 7, 0
%if ARCH_X86_64
movsxd r2, r2d
%endif
mov r6, r4
shl r4d, 8
sub r4d, r6d
mov r6, 8
add r4d, 8 ; x*255+8
sub r6d, r5d
imul r6d, r4d ; (8-y)*(x*255+8) = (8-y)*x<<8 | (8-y)*(8-x)
imul r4d, r5d ; y *(x*255+8) = y *x<<8 | y *(8-x)
movd m7, r6d
movd m6, r4d
movq m5, [pw_32]
movd m0, [r1 ]
pshufw m7, m7, 0
punpcklbw m0, [r1+1]
pshufw m6, m6, 0
.next2rows:
movd m1, [r1+r2*1 ]
movd m3, [r1+r2*2 ]
punpcklbw m1, [r1+r2*1+1]
punpcklbw m3, [r1+r2*2+1]
lea r1, [r1+r2*2]
movq m2, m1
movq m4, m3
pmaddubsw m0, m7
pmaddubsw m1, m6
pmaddubsw m2, m7
pmaddubsw m3, m6
paddw m0, m5
paddw m2, m5
paddw m1, m0
paddw m3, m2
psrlw m1, 6
movq m0, m4
psrlw m3, 6
packuswb m1, m1
packuswb m3, m3
CHROMAMC_AVG m1, [r0 ]
CHROMAMC_AVG m3, [r0+r2]
movd [r0 ], m1
movd [r0+r2], m3
sub r3d, 2
lea r0, [r0+r2*2]
jg .next2rows
REP_RET
%endmacro
%define CHROMAMC_AVG NOTHING
INIT_XMM ssse3
chroma_mc8_ssse3_func put, h264, _rnd
chroma_mc8_ssse3_func put, vc1, _nornd
INIT_MMX ssse3
chroma_mc4_ssse3_func put, h264
%define CHROMAMC_AVG DIRECT_AVG
INIT_XMM ssse3
chroma_mc8_ssse3_func avg, h264, _rnd
chroma_mc8_ssse3_func avg, vc1, _nornd
INIT_MMX ssse3
chroma_mc4_ssse3_func avg, h264
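; Every mc8/mc4/mc2 variant above is the same bilinear interpolation; the weight the comments call A = xy - (8x+8y) + 64 is just (8-mx)*(8-my). The avg flavours additionally average against the destination, and rv40 replaces the fixed +32/+4 rounding with its bias tables. A scalar sketch of the 8-wide put case:
static void put_chroma_mc8_ref(uint8_t *dst, const uint8_t *src,
                               int stride, int h, int mx, int my)
{
    const int A = (8 - mx) * (8 - my), B = mx * (8 - my);
    const int C = (8 - mx) * my,       D = mx * my;
    for (int y = 0; y < h; y++, dst += stride, src += stride)
        for (int x = 0; x < 8; x++)
            dst[x] = (A * src[x]          + B * src[x + 1] +
                      C * src[x + stride] + D * src[x + stride + 1] + 32) >> 6;
}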

View File

@@ -0,0 +1,272 @@
;*****************************************************************************
;* MMX/SSE2/AVX-optimized 10-bit H.264 chroma MC code
;*****************************************************************************
;* Copyright (C) 2005-2011 x264 project
;*
;* Authors: Daniel Kang <daniel.d.kang@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION_RODATA
cextern pw_4
cextern pw_8
cextern pw_32
cextern pw_64
SECTION .text
%macro MV0_PIXELS_MC8 0
lea r4, [r2*3 ]
lea r5, [r2*4 ]
.next4rows:
movu m0, [r1 ]
movu m1, [r1+r2 ]
CHROMAMC_AVG m0, [r0 ]
CHROMAMC_AVG m1, [r0+r2 ]
mova [r0 ], m0
mova [r0+r2 ], m1
movu m0, [r1+r2*2]
movu m1, [r1+r4 ]
CHROMAMC_AVG m0, [r0+r2*2]
CHROMAMC_AVG m1, [r0+r4 ]
mova [r0+r2*2], m0
mova [r0+r4 ], m1
add r1, r5
add r0, r5
sub r3d, 4
jne .next4rows
%endmacro
;-----------------------------------------------------------------------------
; void ff_put/avg_h264_chroma_mc8(pixel *dst, pixel *src, int stride, int h,
; int mx, int my)
;-----------------------------------------------------------------------------
%macro CHROMA_MC8 1
cglobal %1_h264_chroma_mc8_10, 6,7,8
movsxdifnidn r2, r2d
mov r6d, r5d
or r6d, r4d
jne .at_least_one_non_zero
; mx == 0 AND my == 0 - no filter needed
MV0_PIXELS_MC8
REP_RET
.at_least_one_non_zero:
mov r6d, 2
test r5d, r5d
je .x_interpolation
mov r6, r2 ; dxy = x ? 2 : stride
test r4d, r4d
jne .xy_interpolation
.x_interpolation:
; mx == 0 XOR my == 0 - 1 dimensional filter only
or r4d, r5d ; x + y
movd m5, r4d
mova m4, [pw_8]
mova m6, [pw_4] ; mm6 = rnd >> 3
SPLATW m5, m5 ; mm5 = B = x
psubw m4, m5 ; mm4 = A = 8-x
.next1drow:
movu m0, [r1 ] ; mm0 = src[0..7]
movu m2, [r1+r6] ; mm2 = src[1..8]
pmullw m0, m4 ; mm0 = A * src[0..7]
pmullw m2, m5 ; mm2 = B * src[1..8]
paddw m0, m6
paddw m0, m2
psrlw m0, 3
CHROMAMC_AVG m0, [r0]
mova [r0], m0 ; dst[0..7] = (A * src[0..7] + B * src[1..8] + (rnd >> 3)) >> 3
add r0, r2
add r1, r2
dec r3d
jne .next1drow
REP_RET
.xy_interpolation: ; general case, bilinear
movd m4, r4m ; x
movd m6, r5m ; y
SPLATW m4, m4 ; mm4 = x words
SPLATW m6, m6 ; mm6 = y words
psllw m5, m4, 3 ; mm5 = 8x
pmullw m4, m6 ; mm4 = x * y
psllw m6, 3 ; mm6 = 8y
paddw m1, m5, m6 ; mm1 = 8x+8y
mova m7, m4 ; DD = x * y
psubw m5, m4 ; mm5 = B = 8x - xy
psubw m6, m4 ; mm6 = C = 8y - xy
paddw m4, [pw_64]
psubw m4, m1 ; mm4 = A = xy - (8x+8y) + 64
movu m0, [r1 ] ; mm0 = src[0..7]
movu m1, [r1+2] ; mm1 = src[1..8]
.next2drow:
add r1, r2
pmullw m2, m0, m4
pmullw m1, m5
paddw m2, m1 ; mm2 = A * src[0..7] + B * src[1..8]
movu m0, [r1]
movu m1, [r1+2]
pmullw m3, m0, m6
paddw m2, m3 ; mm2 += C * src[0..7+stride]
pmullw m3, m1, m7
paddw m2, m3 ; mm2 += D * src[1..8+stride]
paddw m2, [pw_32]
psrlw m2, 6
CHROMAMC_AVG m2, [r0]
mova [r0], m2 ; dst[0..7] = (mm2 + 32) >> 6
add r0, r2
dec r3d
jne .next2drow
REP_RET
%endmacro
;-----------------------------------------------------------------------------
; void ff_put/avg_h264_chroma_mc4(pixel *dst, pixel *src, int stride, int h,
; int mx, int my)
;-----------------------------------------------------------------------------
;TODO: xmm mc4
%macro MC4_OP 2
movq %1, [r1 ]
movq m1, [r1+2]
add r1, r2
pmullw %1, m4
pmullw m1, m2
paddw m1, %1
mova %1, m1
pmullw %2, m5
pmullw m1, m3
paddw %2, [pw_32]
paddw m1, %2
psrlw m1, 6
CHROMAMC_AVG m1, %2, [r0]
movq [r0], m1
add r0, r2
%endmacro
%macro CHROMA_MC4 1
cglobal %1_h264_chroma_mc4_10, 6,6,7
movsxdifnidn r2, r2d
movd m2, r4m ; x
movd m3, r5m ; y
mova m4, [pw_8]
mova m5, m4
SPLATW m2, m2
SPLATW m3, m3
psubw m4, m2
psubw m5, m3
movq m0, [r1 ]
movq m6, [r1+2]
add r1, r2
pmullw m0, m4
pmullw m6, m2
paddw m6, m0
.next2rows:
MC4_OP m0, m6
MC4_OP m6, m0
sub r3d, 2
jnz .next2rows
REP_RET
%endmacro
;-----------------------------------------------------------------------------
; void ff_put/avg_h264_chroma_mc2(pixel *dst, pixel *src, int stride, int h,
; int mx, int my)
;-----------------------------------------------------------------------------
%macro CHROMA_MC2 1
cglobal %1_h264_chroma_mc2_10, 6,7
movsxdifnidn r2, r2d
mov r6d, r4d
shl r4d, 16
sub r4d, r6d
add r4d, 8
imul r5d, r4d ; x*y<<16 | y*(8-x)
shl r4d, 3
sub r4d, r5d ; x*(8-y)<<16 | (8-x)*(8-y)
movd m5, r4d
movd m6, r5d
punpckldq m5, m5 ; mm5 = {A,B,A,B}
punpckldq m6, m6 ; mm6 = {C,D,C,D}
pxor m7, m7
pshufw m2, [r1], 0x94 ; mm2 = src[0,1,1,2]
.nextrow:
add r1, r2
movq m1, m2
pmaddwd m1, m5 ; mm1 = A * src[0,1] + B * src[1,2]
pshufw m0, [r1], 0x94 ; mm0 = src[0,1,1,2]
movq m2, m0
pmaddwd m0, m6
paddw m1, [pw_32]
paddw m1, m0 ; mm1 += C * src[0,1] + D * src[1,2]
psrlw m1, 6
packssdw m1, m7
CHROMAMC_AVG m1, m3, [r0]
movd [r0], m1
add r0, r2
dec r3d
jnz .nextrow
REP_RET
%endmacro
%macro NOTHING 2-3
%endmacro
%macro AVG 2-3
%if %0==3
movq %2, %3
%endif
pavgw %1, %2
%endmacro
%define CHROMAMC_AVG NOTHING
INIT_XMM sse2
CHROMA_MC8 put
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CHROMA_MC8 put
%endif
INIT_MMX mmxext
CHROMA_MC4 put
CHROMA_MC2 put
%define CHROMAMC_AVG AVG
INIT_XMM sse2
CHROMA_MC8 avg
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
CHROMA_MC8 avg
%endif
INIT_MMX mmxext
CHROMA_MC4 avg
CHROMA_MC2 avg

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,928 @@
;*****************************************************************************
;* MMX/SSE2/AVX-optimized 10-bit H.264 deblocking code
;*****************************************************************************
;* Copyright (C) 2005-2011 x264 project
;*
;* Authors: Oskar Arvidsson <oskar@irock.se>
;* Loren Merritt <lorenm@u.washington.edu>
;* Fiona Glaser <fiona@x264.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION .text
cextern pw_2
cextern pw_3
cextern pw_4
cextern pw_1023
%define pw_pixel_max pw_1023
; out: %4 = |%1-%2|-%3
; clobbers: %5
%macro ABS_SUB 5
psubusw %5, %2, %1
psubusw %4, %1, %2
por %4, %5
psubw %4, %3
%endmacro
; out: %4 = |%1-%2|<%3
%macro DIFF_LT 5
psubusw %4, %2, %1
psubusw %5, %1, %2
por %5, %4 ; |%1-%2|
pxor %4, %4
psubw %5, %3 ; |%1-%2|-%3
pcmpgtw %4, %5 ; 0 > |%1-%2|-%3
%endmacro
%macro LOAD_AB 4
movd %1, %3
movd %2, %4
SPLATW %1, %1
SPLATW %2, %2
%endmacro
; in: %2=tc reg
; out: %1=splatted tc
%macro LOAD_TC 2
movd %1, [%2]
punpcklbw %1, %1
%if mmsize == 8
pshufw %1, %1, 0
%else
pshuflw %1, %1, 01010000b
pshufd %1, %1, 01010000b
%endif
psraw %1, 6
%endmacro
; in: %1=p1, %2=p0, %3=q0, %4=q1
; %5=alpha, %6=beta, %7-%9=tmp
; out: %7=mask
%macro LOAD_MASK 9
ABS_SUB %2, %3, %5, %8, %7 ; |p0-q0| - alpha
ABS_SUB %1, %2, %6, %9, %7 ; |p1-p0| - beta
pand %8, %9
ABS_SUB %3, %4, %6, %9, %7 ; |q1-q0| - beta
pxor %7, %7
pand %8, %9
pcmpgtw %7, %8
%endmacro
; in: %1=p0, %2=q0, %3=p1, %4=q1, %5=mask, %6=tmp, %7=tmp
; out: %1=p0', m2=q0'
%macro DEBLOCK_P0_Q0 7
psubw %3, %4
pxor %7, %7
paddw %3, [pw_4]
psubw %7, %5
psubw %6, %2, %1
psllw %6, 2
paddw %3, %6
psraw %3, 3
mova %6, [pw_pixel_max]
CLIPW %3, %7, %5
pxor %7, %7
paddw %1, %3
psubw %2, %3
CLIPW %1, %7, %6
CLIPW %2, %7, %6
%endmacro
; in: %1=x2, %2=x1, %3=p0, %4=q0 %5=mask&tc, %6=tmp
%macro LUMA_Q1 6
pavgw %6, %3, %4 ; (p0+q0+1)>>1
paddw %1, %6
pxor %6, %6
psraw %1, 1
psubw %6, %5
psubw %1, %2
CLIPW %1, %6, %5
paddw %1, %2
%endmacro
%macro LUMA_DEBLOCK_ONE 3
DIFF_LT m5, %1, bm, m4, m6
pxor m6, m6
mova %3, m4
pcmpgtw m6, tcm
pand m4, tcm
pandn m6, m7
pand m4, m6
LUMA_Q1 m5, %2, m1, m2, m4, m6
%endmacro
%macro LUMA_H_STORE 2
%if mmsize == 8
movq [r0-4], m0
movq [r0+r1-4], m1
movq [r0+r1*2-4], m2
movq [r0+%2-4], m3
%else
movq [r0-4], m0
movhps [r0+r1-4], m0
movq [r0+r1*2-4], m1
movhps [%1-4], m1
movq [%1+r1-4], m2
movhps [%1+r1*2-4], m2
movq [%1+%2-4], m3
movhps [%1+r1*4-4], m3
%endif
%endmacro
%macro DEBLOCK_LUMA 0
;-----------------------------------------------------------------------------
; void ff_deblock_v_luma_10(uint16_t *pix, int stride, int alpha, int beta,
; int8_t *tc0)
;-----------------------------------------------------------------------------
cglobal deblock_v_luma_10, 5,5,8*(mmsize/16)
%assign pad 5*mmsize+12-(stack_offset&15)
%define tcm [rsp]
%define ms1 [rsp+mmsize]
%define ms2 [rsp+mmsize*2]
%define am [rsp+mmsize*3]
%define bm [rsp+mmsize*4]
SUB rsp, pad
shl r2d, 2
shl r3d, 2
LOAD_AB m4, m5, r2d, r3d
mov r3, 32/mmsize
mov r2, r0
sub r0, r1
mova am, m4
sub r0, r1
mova bm, m5
sub r0, r1
.loop:
mova m0, [r0+r1]
mova m1, [r0+r1*2]
mova m2, [r2]
mova m3, [r2+r1]
LOAD_MASK m0, m1, m2, m3, am, bm, m7, m4, m6
LOAD_TC m6, r4
mova tcm, m6
mova m5, [r0]
LUMA_DEBLOCK_ONE m1, m0, ms1
mova [r0+r1], m5
mova m5, [r2+r1*2]
LUMA_DEBLOCK_ONE m2, m3, ms2
mova [r2+r1], m5
pxor m5, m5
mova m6, tcm
pcmpgtw m5, tcm
psubw m6, ms1
pandn m5, m7
psubw m6, ms2
pand m5, m6
DEBLOCK_P0_Q0 m1, m2, m0, m3, m5, m7, m6
mova [r0+r1*2], m1
mova [r2], m2
add r0, mmsize
add r2, mmsize
add r4, mmsize/8
dec r3
jg .loop
ADD rsp, pad
RET
cglobal deblock_h_luma_10, 5,6,8*(mmsize/16)
%assign pad 7*mmsize+12-(stack_offset&15)
%define tcm [rsp]
%define ms1 [rsp+mmsize]
%define ms2 [rsp+mmsize*2]
%define p1m [rsp+mmsize*3]
%define p2m [rsp+mmsize*4]
%define am [rsp+mmsize*5]
%define bm [rsp+mmsize*6]
SUB rsp, pad
shl r2d, 2
shl r3d, 2
LOAD_AB m4, m5, r2d, r3d
mov r3, r1
mova am, m4
add r3, r1
mov r5, 32/mmsize
mova bm, m5
add r3, r1
%if mmsize == 16
mov r2, r0
add r2, r3
%endif
.loop:
%if mmsize == 8
movq m2, [r0-8] ; y q2 q1 q0
movq m7, [r0+0]
movq m5, [r0+r1-8]
movq m3, [r0+r1+0]
movq m0, [r0+r1*2-8]
movq m6, [r0+r1*2+0]
movq m1, [r0+r3-8]
TRANSPOSE4x4W 2, 5, 0, 1, 4
SWAP 2, 7
movq m7, [r0+r3]
TRANSPOSE4x4W 2, 3, 6, 7, 4
%else
movu m5, [r0-8] ; y q2 q1 q0 p0 p1 p2 x
movu m0, [r0+r1-8]
movu m2, [r0+r1*2-8]
movu m3, [r2-8]
TRANSPOSE4x4W 5, 0, 2, 3, 6
mova tcm, m3
movu m4, [r2+r1-8]
movu m1, [r2+r1*2-8]
movu m3, [r2+r3-8]
movu m7, [r2+r1*4-8]
TRANSPOSE4x4W 4, 1, 3, 7, 6
mova m6, tcm
punpcklqdq m6, m7
punpckhqdq m5, m4
SBUTTERFLY qdq, 0, 1, 7
SBUTTERFLY qdq, 2, 3, 7
%endif
mova p2m, m6
LOAD_MASK m0, m1, m2, m3, am, bm, m7, m4, m6
LOAD_TC m6, r4
mova tcm, m6
LUMA_DEBLOCK_ONE m1, m0, ms1
mova p1m, m5
mova m5, p2m
LUMA_DEBLOCK_ONE m2, m3, ms2
mova p2m, m5
pxor m5, m5
mova m6, tcm
pcmpgtw m5, tcm
psubw m6, ms1
pandn m5, m7
psubw m6, ms2
pand m5, m6
DEBLOCK_P0_Q0 m1, m2, m0, m3, m5, m7, m6
mova m0, p1m
mova m3, p2m
TRANSPOSE4x4W 0, 1, 2, 3, 4
LUMA_H_STORE r2, r3
add r4, mmsize/8
lea r0, [r0+r1*(mmsize/2)]
lea r2, [r2+r1*(mmsize/2)]
dec r5
jg .loop
ADD rsp, pad
RET
%endmacro
%if ARCH_X86_64
; in: m0=p1, m1=p0, m2=q0, m3=q1, m8=p2, m9=q2
; m12=alpha, m13=beta
; out: m0=p1', m3=q1', m1=p0', m2=q0'
; clobbers: m4, m5, m6, m7, m10, m11, m14
%macro DEBLOCK_LUMA_INTER_SSE2 0
LOAD_MASK m0, m1, m2, m3, m12, m13, m7, m4, m6
LOAD_TC m6, r4
DIFF_LT m8, m1, m13, m10, m4
DIFF_LT m9, m2, m13, m11, m4
pand m6, m7
mova m14, m6
pxor m4, m4
pcmpgtw m6, m4
pand m6, m14
mova m5, m10
pand m5, m6
LUMA_Q1 m8, m0, m1, m2, m5, m4
mova m5, m11
pand m5, m6
LUMA_Q1 m9, m3, m1, m2, m5, m4
pxor m4, m4
psubw m6, m10
pcmpgtw m4, m14
pandn m4, m7
psubw m6, m11
pand m4, m6
DEBLOCK_P0_Q0 m1, m2, m0, m3, m4, m5, m6
SWAP 0, 8
SWAP 3, 9
%endmacro
%macro DEBLOCK_LUMA_64 0
cglobal deblock_v_luma_10, 5,5,15
%define p2 m8
%define p1 m0
%define p0 m1
%define q0 m2
%define q1 m3
%define q2 m9
%define mask0 m7
%define mask1 m10
%define mask2 m11
shl r2d, 2
shl r3d, 2
LOAD_AB m12, m13, r2d, r3d
mov r2, r0
sub r0, r1
sub r0, r1
sub r0, r1
mov r3, 2
.loop:
mova p2, [r0]
mova p1, [r0+r1]
mova p0, [r0+r1*2]
mova q0, [r2]
mova q1, [r2+r1]
mova q2, [r2+r1*2]
DEBLOCK_LUMA_INTER_SSE2
mova [r0+r1], p1
mova [r0+r1*2], p0
mova [r2], q0
mova [r2+r1], q1
add r0, mmsize
add r2, mmsize
add r4, 2
dec r3
jg .loop
REP_RET
cglobal deblock_h_luma_10, 5,7,15
shl r2d, 2
shl r3d, 2
LOAD_AB m12, m13, r2d, r3d
mov r2, r1
add r2, r1
add r2, r1
mov r5, r0
add r5, r2
mov r6, 2
.loop:
movu m8, [r0-8] ; y q2 q1 q0 p0 p1 p2 x
movu m0, [r0+r1-8]
movu m2, [r0+r1*2-8]
movu m9, [r5-8]
movu m5, [r5+r1-8]
movu m1, [r5+r1*2-8]
movu m3, [r5+r2-8]
movu m7, [r5+r1*4-8]
TRANSPOSE4x4W 8, 0, 2, 9, 10
TRANSPOSE4x4W 5, 1, 3, 7, 10
punpckhqdq m8, m5
SBUTTERFLY qdq, 0, 1, 10
SBUTTERFLY qdq, 2, 3, 10
punpcklqdq m9, m7
DEBLOCK_LUMA_INTER_SSE2
TRANSPOSE4x4W 0, 1, 2, 3, 4
LUMA_H_STORE r5, r2
add r4, 2
lea r0, [r0+r1*8]
lea r5, [r5+r1*8]
dec r6
jg .loop
REP_RET
%endmacro
INIT_XMM sse2
DEBLOCK_LUMA_64
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
DEBLOCK_LUMA_64
%endif
%endif
%macro SWAPMOVA 2
%ifid %1
SWAP %1, %2
%else
mova %1, %2
%endif
%endmacro
; in: t0-t2: tmp registers
; %1=p0 %2=p1 %3=p2 %4=p3 %5=q0 %6=q1 %7=mask0
; %8=mask1p %9=2 %10=p0' %11=p1' %12=p2'
%macro LUMA_INTRA_P012 12 ; p0..p3 in memory
%if ARCH_X86_64
paddw t0, %3, %2
mova t2, %4
paddw t2, %3
%else
mova t0, %3
mova t2, %4
paddw t0, %2
paddw t2, %3
%endif
paddw t0, %1
paddw t2, t2
paddw t0, %5
paddw t2, %9
paddw t0, %9 ; (p2 + p1 + p0 + q0 + 2)
paddw t2, t0 ; (2*p3 + 3*p2 + p1 + p0 + q0 + 4)
psrlw t2, 3
psrlw t1, t0, 2
psubw t2, %3
psubw t1, %2
pand t2, %8
pand t1, %8
paddw t2, %3
paddw t1, %2
SWAPMOVA %11, t1
psubw t1, t0, %3
paddw t0, t0
psubw t1, %5
psubw t0, %3
paddw t1, %6
paddw t1, %2
paddw t0, %6
psrlw t1, 2 ; (2*p1 + p0 + q1 + 2)/4
psrlw t0, 3 ; (p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4)>>3
pxor t0, t1
pxor t1, %1
pand t0, %8
pand t1, %7
pxor t0, t1
pxor t0, %1
SWAPMOVA %10, t0
SWAPMOVA %12, t2
%endmacro
%macro LUMA_INTRA_INIT 1
%xdefine pad %1*mmsize+((gprsize*3) % mmsize)-(stack_offset&15)
%define t0 m4
%define t1 m5
%define t2 m6
%define t3 m7
%assign i 4
%rep %1
CAT_XDEFINE t, i, [rsp+mmsize*(i-4)]
%assign i i+1
%endrep
SUB rsp, pad
%endmacro
; in: %1-%3=tmp, %4=p2, %5=q2
%macro LUMA_INTRA_INTER 5
LOAD_AB t0, t1, r2d, r3d
mova %1, t0
LOAD_MASK m0, m1, m2, m3, %1, t1, t0, t2, t3
%if ARCH_X86_64
mova %2, t0 ; mask0
psrlw t3, %1, 2
%else
mova t3, %1
mova %2, t0 ; mask0
psrlw t3, 2
%endif
paddw t3, [pw_2] ; alpha/4+2
DIFF_LT m1, m2, t3, t2, t0 ; t2 = |p0-q0| < alpha/4+2
pand t2, %2
mova t3, %5 ; q2
mova %1, t2 ; mask1
DIFF_LT t3, m2, t1, t2, t0 ; t2 = |q2-q0| < beta
pand t2, %1
mova t3, %4 ; p2
mova %3, t2 ; mask1q
DIFF_LT t3, m1, t1, t2, t0 ; t2 = |p2-p0| < beta
pand t2, %1
mova %1, t2 ; mask1p
%endmacro
%macro LUMA_H_INTRA_LOAD 0
%if mmsize == 8
movu t0, [r0-8]
movu t1, [r0+r1-8]
movu m0, [r0+r1*2-8]
movu m1, [r0+r4-8]
TRANSPOSE4x4W 4, 5, 0, 1, 2
mova t4, t0 ; p3
mova t5, t1 ; p2
movu m2, [r0]
movu m3, [r0+r1]
movu t0, [r0+r1*2]
movu t1, [r0+r4]
TRANSPOSE4x4W 2, 3, 4, 5, 6
mova t6, t0 ; q2
mova t7, t1 ; q3
%else
movu t0, [r0-8]
movu t1, [r0+r1-8]
movu m0, [r0+r1*2-8]
movu m1, [r0+r5-8]
movu m2, [r4-8]
movu m3, [r4+r1-8]
movu t2, [r4+r1*2-8]
movu t3, [r4+r5-8]
TRANSPOSE8x8W 4, 5, 0, 1, 2, 3, 6, 7, t4, t5
mova t4, t0 ; p3
mova t5, t1 ; p2
mova t6, t2 ; q2
mova t7, t3 ; q3
%endif
%endmacro
; in: %1=q3 %2=q2' %3=q1' %4=q0' %5=p0' %6=p1' %7=p2' %8=p3 %9=tmp
%macro LUMA_H_INTRA_STORE 9
%if mmsize == 8
TRANSPOSE4x4W %1, %2, %3, %4, %9
movq [r0-8], m%1
movq [r0+r1-8], m%2
movq [r0+r1*2-8], m%3
movq [r0+r4-8], m%4
movq m%1, %8
TRANSPOSE4x4W %5, %6, %7, %1, %9
movq [r0], m%5
movq [r0+r1], m%6
movq [r0+r1*2], m%7
movq [r0+r4], m%1
%else
TRANSPOSE2x4x4W %1, %2, %3, %4, %9
movq [r0-8], m%1
movq [r0+r1-8], m%2
movq [r0+r1*2-8], m%3
movq [r0+r5-8], m%4
movhps [r4-8], m%1
movhps [r4+r1-8], m%2
movhps [r4+r1*2-8], m%3
movhps [r4+r5-8], m%4
%ifnum %8
SWAP %1, %8
%else
mova m%1, %8
%endif
TRANSPOSE2x4x4W %5, %6, %7, %1, %9
movq [r0], m%5
movq [r0+r1], m%6
movq [r0+r1*2], m%7
movq [r0+r5], m%1
movhps [r4], m%5
movhps [r4+r1], m%6
movhps [r4+r1*2], m%7
movhps [r4+r5], m%1
%endif
%endmacro
%if ARCH_X86_64
;-----------------------------------------------------------------------------
; void ff_deblock_v_luma_intra_10(uint16_t *pix, int stride, int alpha,
; int beta)
;-----------------------------------------------------------------------------
%macro DEBLOCK_LUMA_INTRA_64 0
cglobal deblock_v_luma_intra_10, 4,7,16
%define t0 m1
%define t1 m2
%define t2 m4
%define p2 m8
%define p1 m9
%define p0 m10
%define q0 m11
%define q1 m12
%define q2 m13
%define aa m5
%define bb m14
lea r4, [r1*4]
lea r5, [r1*3] ; 3*stride
neg r4
add r4, r0 ; pix-4*stride
mov r6, 2
mova m0, [pw_2]
shl r2d, 2
shl r3d, 2
LOAD_AB aa, bb, r2d, r3d
.loop:
mova p2, [r4+r1]
mova p1, [r4+2*r1]
mova p0, [r4+r5]
mova q0, [r0]
mova q1, [r0+r1]
mova q2, [r0+2*r1]
LOAD_MASK p1, p0, q0, q1, aa, bb, m3, t0, t1
mova t2, aa
psrlw t2, 2
paddw t2, m0 ; alpha/4+2
DIFF_LT p0, q0, t2, m6, t0 ; m6 = |p0-q0| < alpha/4+2
DIFF_LT p2, p0, bb, t1, t0 ; t1 = |p2-p0| < beta
DIFF_LT q2, q0, bb, m7, t0 ; m7 = |q2-q0| < beta
pand m6, m3
pand m7, m6
pand m6, t1
LUMA_INTRA_P012 p0, p1, p2, [r4], q0, q1, m3, m6, m0, [r4+r5], [r4+2*r1], [r4+r1]
LUMA_INTRA_P012 q0, q1, q2, [r0+r5], p0, p1, m3, m7, m0, [r0], [r0+r1], [r0+2*r1]
add r0, mmsize
add r4, mmsize
dec r6
jg .loop
REP_RET
;-----------------------------------------------------------------------------
; void ff_deblock_h_luma_intra_10(uint16_t *pix, int stride, int alpha,
; int beta)
;-----------------------------------------------------------------------------
cglobal deblock_h_luma_intra_10, 4,7,16
%define t0 m15
%define t1 m14
%define t2 m2
%define q3 m5
%define q2 m8
%define q1 m9
%define q0 m10
%define p0 m11
%define p1 m12
%define p2 m13
%define p3 m4
%define spill [rsp]
%assign pad 24-(stack_offset&15)
SUB rsp, pad
lea r4, [r1*4]
lea r5, [r1*3] ; 3*stride
add r4, r0 ; pix+4*stride
mov r6, 2
mova m0, [pw_2]
shl r2d, 2
shl r3d, 2
.loop:
movu q3, [r0-8]
movu q2, [r0+r1-8]
movu q1, [r0+r1*2-8]
movu q0, [r0+r5-8]
movu p0, [r4-8]
movu p1, [r4+r1-8]
movu p2, [r4+r1*2-8]
movu p3, [r4+r5-8]
TRANSPOSE8x8W 5, 8, 9, 10, 11, 12, 13, 4, 1
LOAD_AB m1, m2, r2d, r3d
LOAD_MASK q1, q0, p0, p1, m1, m2, m3, t0, t1
psrlw m1, 2
paddw m1, m0 ; alpha/4+2
DIFF_LT p0, q0, m1, m6, t0 ; m6 = |p0-q0| < alpha/4+2
DIFF_LT q2, q0, m2, t1, t0 ; t1 = |q2-q0| < beta
DIFF_LT p0, p2, m2, m7, t0 ; m7 = |p2-p0| < beta
pand m6, m3
pand m7, m6
pand m6, t1
mova spill, q3
LUMA_INTRA_P012 q0, q1, q2, q3, p0, p1, m3, m6, m0, m5, m1, q2
LUMA_INTRA_P012 p0, p1, p2, p3, q0, q1, m3, m7, m0, p0, m6, p2
mova m7, spill
LUMA_H_INTRA_STORE 7, 8, 1, 5, 11, 6, 13, 4, 14
lea r0, [r0+r1*8]
lea r4, [r4+r1*8]
dec r6
jg .loop
ADD rsp, pad
RET
%endmacro
INIT_XMM sse2
DEBLOCK_LUMA_INTRA_64
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
DEBLOCK_LUMA_INTRA_64
%endif
%endif
%macro DEBLOCK_LUMA_INTRA 0
;-----------------------------------------------------------------------------
; void ff_deblock_v_luma_intra_10(uint16_t *pix, int stride, int alpha,
; int beta)
;-----------------------------------------------------------------------------
cglobal deblock_v_luma_intra_10, 4,7,8*(mmsize/16)
LUMA_INTRA_INIT 3
lea r4, [r1*4]
lea r5, [r1*3]
neg r4
add r4, r0
mov r6, 32/mmsize
shl r2d, 2
shl r3d, 2
.loop:
mova m0, [r4+r1*2] ; p1
mova m1, [r4+r5] ; p0
mova m2, [r0] ; q0
mova m3, [r0+r1] ; q1
LUMA_INTRA_INTER t4, t5, t6, [r4+r1], [r0+r1*2]
LUMA_INTRA_P012 m1, m0, t3, [r4], m2, m3, t5, t4, [pw_2], [r4+r5], [r4+2*r1], [r4+r1]
mova t3, [r0+r1*2] ; q2
LUMA_INTRA_P012 m2, m3, t3, [r0+r5], m1, m0, t5, t6, [pw_2], [r0], [r0+r1], [r0+2*r1]
add r0, mmsize
add r4, mmsize
dec r6
jg .loop
ADD rsp, pad
RET
;-----------------------------------------------------------------------------
; void ff_deblock_h_luma_intra_10(uint16_t *pix, int stride, int alpha,
; int beta)
;-----------------------------------------------------------------------------
cglobal deblock_h_luma_intra_10, 4,7,8*(mmsize/16)
LUMA_INTRA_INIT 8
%if mmsize == 8
lea r4, [r1*3]
mov r5, 32/mmsize
%else
lea r4, [r1*4]
lea r5, [r1*3] ; 3*stride
add r4, r0 ; pix+4*stride
mov r6, 32/mmsize
%endif
shl r2d, 2
shl r3d, 2
.loop:
LUMA_H_INTRA_LOAD
LUMA_INTRA_INTER t8, t9, t10, t5, t6
LUMA_INTRA_P012 m1, m0, t3, t4, m2, m3, t9, t8, [pw_2], t8, t5, t11
mova t3, t6 ; q2
LUMA_INTRA_P012 m2, m3, t3, t7, m1, m0, t9, t10, [pw_2], m4, t6, m5
mova m2, t4
mova m0, t11
mova m1, t5
mova m3, t8
mova m6, t6
LUMA_H_INTRA_STORE 2, 0, 1, 3, 4, 6, 5, t7, 7
lea r0, [r0+r1*(mmsize/2)]
%if mmsize == 8
dec r5
%else
lea r4, [r4+r1*(mmsize/2)]
dec r6
%endif
jg .loop
ADD rsp, pad
RET
%endmacro
%if ARCH_X86_64 == 0
INIT_MMX mmxext
DEBLOCK_LUMA
DEBLOCK_LUMA_INTRA
INIT_XMM sse2
DEBLOCK_LUMA
DEBLOCK_LUMA_INTRA
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
DEBLOCK_LUMA
DEBLOCK_LUMA_INTRA
%endif
%endif
; in: %1=p0, %2=q0, %3=p1, %4=q1, %5=mask, %6=tmp, %7=tmp
; out: %1=p0', %2=q0'
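; Implements the H.264 chroma intra deblocking filter:
;   p0' = (2*p1 + p0 + q1 + 2) >> 2
;   q0' = (2*q1 + q0 + p1 + 2) >> 2
; with the update applied only where the mask (%5) is set.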
%macro CHROMA_DEBLOCK_P0_Q0_INTRA 7
mova %6, [pw_2]
paddw %6, %3
paddw %6, %4
paddw %7, %6, %2
paddw %6, %1
paddw %6, %3
paddw %7, %4
psraw %6, 2
psraw %7, 2
psubw %6, %1
psubw %7, %2
pand %6, %5
pand %7, %5
paddw %1, %6
paddw %2, %7
%endmacro
%macro CHROMA_V_LOAD 1
mova m0, [r0] ; p1
mova m1, [r0+r1] ; p0
mova m2, [%1] ; q0
mova m3, [%1+r1] ; q1
%endmacro
%macro CHROMA_V_STORE 0
mova [r0+1*r1], m1
mova [r0+2*r1], m2
%endmacro
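; CHROMA_V_LOAD_TC dst, tc0_ptr: expands four packed int8_t tc0 values into
; words and scales them to the 10-bit range (roughly tc0*4, matching the
; shl-by-2 applied to alpha/beta); the arithmetic shift keeps tc0 = -1 intact.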
%macro CHROMA_V_LOAD_TC 2
movd %1, [%2]
punpcklbw %1, %1
punpcklwd %1, %1
psraw %1, 6
%endmacro
%macro DEBLOCK_CHROMA 0
;-----------------------------------------------------------------------------
; void ff_deblock_v_chroma_10(uint16_t *pix, int stride, int alpha, int beta,
; int8_t *tc0)
;-----------------------------------------------------------------------------
cglobal deblock_v_chroma_10, 5,7-(mmsize/16),8*(mmsize/16)
mov r5, r0
sub r0, r1
sub r0, r1
shl r2d, 2
shl r3d, 2
%if mmsize < 16
mov r6, 16/mmsize
.loop:
%endif
CHROMA_V_LOAD r5
LOAD_AB m4, m5, r2d, r3d
LOAD_MASK m0, m1, m2, m3, m4, m5, m7, m6, m4
pxor m4, m4
CHROMA_V_LOAD_TC m6, r4
psubw m6, [pw_3]
pmaxsw m6, m4
pand m7, m6
DEBLOCK_P0_Q0 m1, m2, m0, m3, m7, m5, m6
CHROMA_V_STORE
%if mmsize < 16
add r0, mmsize
add r5, mmsize
add r4, mmsize/4
dec r6
jg .loop
REP_RET
%else
RET
%endif
;-----------------------------------------------------------------------------
; void ff_deblock_v_chroma_intra_10(uint16_t *pix, int stride, int alpha,
; int beta)
;-----------------------------------------------------------------------------
cglobal deblock_v_chroma_intra_10, 4,6-(mmsize/16),8*(mmsize/16)
mov r4, r0
sub r0, r1
sub r0, r1
shl r2d, 2
shl r3d, 2
%if mmsize < 16
mov r5, 16/mmsize
.loop:
%endif
CHROMA_V_LOAD r4
LOAD_AB m4, m5, r2d, r3d
LOAD_MASK m0, m1, m2, m3, m4, m5, m7, m6, m4
CHROMA_DEBLOCK_P0_Q0_INTRA m1, m2, m0, m3, m7, m5, m6
CHROMA_V_STORE
%if mmsize < 16
add r0, mmsize
add r4, mmsize
dec r5
jg .loop
REP_RET
%else
RET
%endif
%endmacro
%if ARCH_X86_64 == 0
INIT_MMX mmxext
DEBLOCK_CHROMA
%endif
INIT_XMM sse2
DEBLOCK_CHROMA
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
DEBLOCK_CHROMA
%endif


@@ -0,0 +1,212 @@
/*
* H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
* Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* H.264 / AVC / MPEG4 part10 codec.
* non-MMX i386-specific optimizations for H.264
* @author Michael Niedermayer <michaelni@gmx.at>
*/
#ifndef AVCODEC_X86_H264_I386_H
#define AVCODEC_X86_H264_I386_H
#include <stddef.h>
#include "libavcodec/cabac.h"
#include "cabac.h"
#if HAVE_INLINE_ASM
#if ARCH_X86_64
#define REG64 "r"
#else
#define REG64 "m"
#endif
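/* On x86_64 there are enough general-purpose registers to keep these operands
 * in registers ("r"); on 32-bit x86 they have to live in memory ("m"). */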
//FIXME use some macros to avoid duplicating get_cabac (cannot be done yet,
//as that would make the optimizer's work harder)
#if HAVE_7REGS && !BROKEN_COMPILER
#define decode_significance decode_significance_x86
static int decode_significance_x86(CABACContext *c, int max_coeff,
uint8_t *significant_coeff_ctx_base,
int *index, x86_reg last_off){
void *end= significant_coeff_ctx_base + max_coeff - 1;
int minusstart= -(intptr_t)significant_coeff_ctx_base;
int minusindex= 4-(intptr_t)index;
int bit;
x86_reg coeff_count;
#ifdef BROKEN_RELOCATIONS
void *tables;
__asm__ volatile(
"lea "MANGLE(ff_h264_cabac_tables)", %0 \n\t"
: "=&r"(tables)
: NAMED_CONSTRAINTS_ARRAY(ff_h264_cabac_tables)
);
#endif
__asm__ volatile(
"3: \n\t"
BRANCHLESS_GET_CABAC("%4", "%q4", "(%1)", "%3", "%w3",
"%5", "%q5", "%k0", "%b0",
"%c11(%6)", "%c12(%6)",
AV_STRINGIFY(H264_NORM_SHIFT_OFFSET),
AV_STRINGIFY(H264_LPS_RANGE_OFFSET),
AV_STRINGIFY(H264_MLPS_STATE_OFFSET),
"%13")
"test $1, %4 \n\t"
" jz 4f \n\t"
"add %10, %1 \n\t"
BRANCHLESS_GET_CABAC("%4", "%q4", "(%1)", "%3", "%w3",
"%5", "%q5", "%k0", "%b0",
"%c11(%6)", "%c12(%6)",
AV_STRINGIFY(H264_NORM_SHIFT_OFFSET),
AV_STRINGIFY(H264_LPS_RANGE_OFFSET),
AV_STRINGIFY(H264_MLPS_STATE_OFFSET),
"%13")
"sub %10, %1 \n\t"
"mov %2, %0 \n\t"
"movl %7, %%ecx \n\t"
"add %1, %%"REG_c" \n\t"
"movl %%ecx, (%0) \n\t"
"test $1, %4 \n\t"
" jnz 5f \n\t"
"add"OPSIZE" $4, %2 \n\t"
"4: \n\t"
"add $1, %1 \n\t"
"cmp %8, %1 \n\t"
" jb 3b \n\t"
"mov %2, %0 \n\t"
"movl %7, %%ecx \n\t"
"add %1, %%"REG_c" \n\t"
"movl %%ecx, (%0) \n\t"
"5: \n\t"
"add %9, %k0 \n\t"
"shr $2, %k0 \n\t"
: "=&q"(coeff_count), "+r"(significant_coeff_ctx_base), "+m"(index),
"+&r"(c->low), "=&r"(bit), "+&r"(c->range)
: "r"(c), "m"(minusstart), "m"(end), "m"(minusindex), "m"(last_off),
"i"(offsetof(CABACContext, bytestream)),
"i"(offsetof(CABACContext, bytestream_end))
TABLES_ARG
: "%"REG_c, "memory"
);
return coeff_count;
}
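/* The loop above mirrors the C decode_significance() it replaces: it decodes
 * one significance bit per context, appends the scan index of each significant
 * coefficient to index[], honours the last_significant flag via last_off, and
 * returns the number of indices written. */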
#define decode_significance_8x8 decode_significance_8x8_x86
static int decode_significance_8x8_x86(CABACContext *c,
uint8_t *significant_coeff_ctx_base,
int *index, uint8_t *last_coeff_ctx_base, const uint8_t *sig_off){
int minusindex= 4-(intptr_t)index;
int bit;
x86_reg coeff_count;
x86_reg last=0;
x86_reg state;
#ifdef BROKEN_RELOCATIONS
void *tables;
__asm__ volatile(
"lea "MANGLE(ff_h264_cabac_tables)", %0 \n\t"
: "=&r"(tables)
: NAMED_CONSTRAINTS_ARRAY(ff_h264_cabac_tables)
);
#endif
__asm__ volatile(
"mov %1, %6 \n\t"
"3: \n\t"
"mov %10, %0 \n\t"
"movzb (%0, %6), %6 \n\t"
"add %9, %6 \n\t"
BRANCHLESS_GET_CABAC("%4", "%q4", "(%6)", "%3", "%w3",
"%5", "%q5", "%k0", "%b0",
"%c12(%7)", "%c13(%7)",
AV_STRINGIFY(H264_NORM_SHIFT_OFFSET),
AV_STRINGIFY(H264_LPS_RANGE_OFFSET),
AV_STRINGIFY(H264_MLPS_STATE_OFFSET),
"%15")
"mov %1, %6 \n\t"
"test $1, %4 \n\t"
" jz 4f \n\t"
#ifdef BROKEN_RELOCATIONS
"movzb %c14(%15, %q6), %6\n\t"
#else
"movzb "MANGLE(ff_h264_cabac_tables)"+%c14(%6), %6\n\t"
#endif
"add %11, %6 \n\t"
BRANCHLESS_GET_CABAC("%4", "%q4", "(%6)", "%3", "%w3",
"%5", "%q5", "%k0", "%b0",
"%c12(%7)", "%c13(%7)",
AV_STRINGIFY(H264_NORM_SHIFT_OFFSET),
AV_STRINGIFY(H264_LPS_RANGE_OFFSET),
AV_STRINGIFY(H264_MLPS_STATE_OFFSET),
"%15")
"mov %2, %0 \n\t"
"mov %1, %6 \n\t"
"mov %k6, (%0) \n\t"
"test $1, %4 \n\t"
" jnz 5f \n\t"
"add"OPSIZE" $4, %2 \n\t"
"4: \n\t"
"add $1, %6 \n\t"
"mov %6, %1 \n\t"
"cmp $63, %6 \n\t"
" jb 3b \n\t"
"mov %2, %0 \n\t"
"mov %k6, (%0) \n\t"
"5: \n\t"
"addl %8, %k0 \n\t"
"shr $2, %k0 \n\t"
: "=&q"(coeff_count), "+"REG64(last), "+"REG64(index), "+&r"(c->low),
"=&r"(bit), "+&r"(c->range), "=&r"(state)
: "r"(c), "m"(minusindex), "m"(significant_coeff_ctx_base),
REG64(sig_off), REG64(last_coeff_ctx_base),
"i"(offsetof(CABACContext, bytestream)),
"i"(offsetof(CABACContext, bytestream_end)),
"i"(H264_LAST_COEFF_FLAG_OFFSET_8x8_OFFSET) TABLES_ARG
: "%"REG_c, "memory"
);
return coeff_count;
}
#endif /* HAVE_7REGS && !BROKEN_COMPILER */
#endif /* HAVE_INLINE_ASM */
#endif /* AVCODEC_X86_H264_I386_H */

File diff suppressed because it is too large


@@ -0,0 +1,599 @@
;*****************************************************************************
;* MMX/SSE2/AVX-optimized 10-bit H.264 iDCT code
;*****************************************************************************
;* Copyright (C) 2005-2011 x264 project
;*
;* Authors: Daniel Kang <daniel.d.kang@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION_RODATA
pd_32: times 4 dd 32
SECTION .text
cextern pw_1023
%define pw_pixel_max pw_1023
;-----------------------------------------------------------------------------
; void ff_h264_idct_add_10(pixel *dst, int16_t *block, int stride)
;-----------------------------------------------------------------------------
%macro STORE_DIFFx2 6
psrad %1, 6
psrad %2, 6
packssdw %1, %2
movq %3, [%5]
movhps %3, [%5+%6]
paddsw %1, %3
CLIPW %1, %4, [pw_pixel_max]
movq [%5], %1
movhps [%5+%6], %1
%endmacro
%macro STORE_DIFF16 5
psrad %1, 6
psrad %2, 6
packssdw %1, %2
paddsw %1, [%5]
CLIPW %1, %3, %4
mova [%5], %1
%endmacro
;dst, in, stride
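; IDCT4_ADD_10 dst, coeffs, stride: 1-D IDCT on rows, 4x4 transpose, +32
; rounding bias folded into the DC row, 1-D IDCT on columns; the coefficient
; block is then zeroed and the residual added to dst with a >>6 shift and a
; clip to [0, pw_pixel_max] (see STORE_DIFFx2).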
%macro IDCT4_ADD_10 3
mova m0, [%2+ 0]
mova m1, [%2+16]
mova m2, [%2+32]
mova m3, [%2+48]
IDCT4_1D d,0,1,2,3,4,5
TRANSPOSE4x4D 0,1,2,3,4
paddd m0, [pd_32]
IDCT4_1D d,0,1,2,3,4,5
pxor m5, m5
mova [%2+ 0], m5
mova [%2+16], m5
mova [%2+32], m5
mova [%2+48], m5
STORE_DIFFx2 m0, m1, m4, m5, %1, %3
lea %1, [%1+%3*2]
STORE_DIFFx2 m2, m3, m4, m5, %1, %3
%endmacro
%macro IDCT_ADD_10 0
cglobal h264_idct_add_10, 3,3
IDCT4_ADD_10 r0, r1, r2
RET
%endmacro
INIT_XMM sse2
IDCT_ADD_10
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT_ADD_10
%endif
;-----------------------------------------------------------------------------
; void ff_h264_idct_add16_10(pixel *dst, const int *block_offset,
; int16_t *block, int stride,
; const uint8_t nnzc[6*8])
;-----------------------------------------------------------------------------
;;;;;;; NO FATE SAMPLES TRIGGER THIS
%macro ADD4x4IDCT 0
add4x4_idct %+ SUFFIX:
add r5, r0
mova m0, [r2+ 0]
mova m1, [r2+16]
mova m2, [r2+32]
mova m3, [r2+48]
IDCT4_1D d,0,1,2,3,4,5
TRANSPOSE4x4D 0,1,2,3,4
paddd m0, [pd_32]
IDCT4_1D d,0,1,2,3,4,5
pxor m5, m5
mova [r2+ 0], m5
mova [r2+16], m5
mova [r2+32], m5
mova [r2+48], m5
STORE_DIFFx2 m0, m1, m4, m5, r5, r3
lea r5, [r5+r3*2]
STORE_DIFFx2 m2, m3, m4, m5, r5, r3
ret
%endmacro
INIT_XMM sse2
ALIGN 16
ADD4x4IDCT
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
ALIGN 16
ADD4x4IDCT
%endif
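; ADD16_OP block, nnzc_index: skip the 4x4 block when its entry in the
; non-zero-count table (r4) is zero, otherwise load the block's pixel offset
; from block_offset (r1) and call the shared add4x4_idct helper; r2 is then
; advanced 64 bytes to the next coefficient block (except after block 15).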
%macro ADD16_OP 2
cmp byte [r4+%2], 0
jz .skipblock%1
mov r5d, [r1+%1*4]
call add4x4_idct %+ SUFFIX
.skipblock%1:
%if %1<15
add r2, 64
%endif
%endmacro
%macro IDCT_ADD16_10 0
cglobal h264_idct_add16_10, 5,6
ADD16_OP 0, 4+1*8
ADD16_OP 1, 5+1*8
ADD16_OP 2, 4+2*8
ADD16_OP 3, 5+2*8
ADD16_OP 4, 6+1*8
ADD16_OP 5, 7+1*8
ADD16_OP 6, 6+2*8
ADD16_OP 7, 7+2*8
ADD16_OP 8, 4+3*8
ADD16_OP 9, 5+3*8
ADD16_OP 10, 4+4*8
ADD16_OP 11, 5+4*8
ADD16_OP 12, 6+3*8
ADD16_OP 13, 7+3*8
ADD16_OP 14, 6+4*8
ADD16_OP 15, 7+4*8
REP_RET
%endmacro
INIT_XMM sse2
IDCT_ADD16_10
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT_ADD16_10
%endif
;-----------------------------------------------------------------------------
; void ff_h264_idct_dc_add_10(pixel *dst, int16_t *block, int stride)
;-----------------------------------------------------------------------------
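; IDCT_DC_ADD_OP_10 dst, stride, 3*stride: adds the splatted DC value in m0
; to four rows of pixels, clipping each sample to [0, pw_pixel_max] (m6).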
%macro IDCT_DC_ADD_OP_10 3
pxor m5, m5
%if avx_enabled
paddw m1, m0, [%1+0 ]
paddw m2, m0, [%1+%2 ]
paddw m3, m0, [%1+%2*2]
paddw m4, m0, [%1+%3 ]
%else
mova m1, [%1+0 ]
mova m2, [%1+%2 ]
mova m3, [%1+%2*2]
mova m4, [%1+%3 ]
paddw m1, m0
paddw m2, m0
paddw m3, m0
paddw m4, m0
%endif
CLIPW m1, m5, m6
CLIPW m2, m5, m6
CLIPW m3, m5, m6
CLIPW m4, m5, m6
mova [%1+0 ], m1
mova [%1+%2 ], m2
mova [%1+%2*2], m3
mova [%1+%3 ], m4
%endmacro
INIT_MMX mmxext
cglobal h264_idct_dc_add_10,3,3
movd m0, [r1]
mov dword [r1], 0
paddd m0, [pd_32]
psrad m0, 6
lea r1, [r2*3]
pshufw m0, m0, 0
mova m6, [pw_pixel_max]
IDCT_DC_ADD_OP_10 r0, r2, r1
RET
;-----------------------------------------------------------------------------
; void ff_h264_idct8_dc_add_10(pixel *dst, int16_t *block, int stride)
;-----------------------------------------------------------------------------
%macro IDCT8_DC_ADD 0
cglobal h264_idct8_dc_add_10,3,4,7
movd m0, [r1]
mov dword[r1], 0
paddd m0, [pd_32]
psrad m0, 6
lea r1, [r2*3]
SPLATW m0, m0, 0
mova m6, [pw_pixel_max]
IDCT_DC_ADD_OP_10 r0, r2, r1
lea r0, [r0+r2*4]
IDCT_DC_ADD_OP_10 r0, r2, r1
RET
%endmacro
INIT_XMM sse2
IDCT8_DC_ADD
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT8_DC_ADD
%endif
;-----------------------------------------------------------------------------
; void ff_h264_idct_add16intra_10(pixel *dst, const int *block_offset,
; int16_t *block, int stride,
; const uint8_t nnzc[6*8])
;-----------------------------------------------------------------------------
%macro AC 1
.ac%1:
mov r5d, [r1+(%1+0)*4]
call add4x4_idct %+ SUFFIX
mov r5d, [r1+(%1+1)*4]
add r2, 64
call add4x4_idct %+ SUFFIX
add r2, 64
jmp .skipadd%1
%endmacro
%assign last_block 16
%macro ADD16_OP_INTRA 2
cmp word [r4+%2], 0
jnz .ac%1
mov r5d, [r2+ 0]
or r5d, [r2+64]
jz .skipblock%1
mov r5d, [r1+(%1+0)*4]
call idct_dc_add %+ SUFFIX
.skipblock%1:
%if %1<last_block-2
add r2, 128
%endif
.skipadd%1:
%endmacro
%macro IDCT_ADD16INTRA_10 0
idct_dc_add %+ SUFFIX:
add r5, r0
movq m0, [r2+ 0]
movhps m0, [r2+64]
mov dword [r2+ 0], 0
mov dword [r2+64], 0
paddd m0, [pd_32]
psrad m0, 6
pshufhw m0, m0, 0
pshuflw m0, m0, 0
lea r6, [r3*3]
mova m6, [pw_pixel_max]
IDCT_DC_ADD_OP_10 r5, r3, r6
ret
cglobal h264_idct_add16intra_10,5,7,8
ADD16_OP_INTRA 0, 4+1*8
ADD16_OP_INTRA 2, 4+2*8
ADD16_OP_INTRA 4, 6+1*8
ADD16_OP_INTRA 6, 6+2*8
ADD16_OP_INTRA 8, 4+3*8
ADD16_OP_INTRA 10, 4+4*8
ADD16_OP_INTRA 12, 6+3*8
ADD16_OP_INTRA 14, 6+4*8
REP_RET
AC 8
AC 10
AC 12
AC 14
AC 0
AC 2
AC 4
AC 6
%endmacro
INIT_XMM sse2
IDCT_ADD16INTRA_10
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT_ADD16INTRA_10
%endif
%assign last_block 36
;-----------------------------------------------------------------------------
; void ff_h264_idct_add8_10(pixel **dst, const int *block_offset,
; int16_t *block, int stride,
; const uint8_t nnzc[6*8])
;-----------------------------------------------------------------------------
%macro IDCT_ADD8 0
cglobal h264_idct_add8_10,5,8,7
%if ARCH_X86_64
mov r7, r0
%endif
add r2, 1024
mov r0, [r0]
ADD16_OP_INTRA 16, 4+ 6*8
ADD16_OP_INTRA 18, 4+ 7*8
add r2, 1024-128*2
%if ARCH_X86_64
mov r0, [r7+gprsize]
%else
mov r0, r0m
mov r0, [r0+gprsize]
%endif
ADD16_OP_INTRA 32, 4+11*8
ADD16_OP_INTRA 34, 4+12*8
REP_RET
AC 16
AC 18
AC 32
AC 34
%endmacro ; IDCT_ADD8
INIT_XMM sse2
IDCT_ADD8
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT_ADD8
%endif
;-----------------------------------------------------------------------------
; void ff_h264_idct8_add_10(pixel *dst, int16_t *block, int stride)
;-----------------------------------------------------------------------------
%macro IDCT8_1D 2
SWAP 0, 1
psrad m4, m5, 1
psrad m1, m0, 1
paddd m4, m5
paddd m1, m0
paddd m4, m7
paddd m1, m5
psubd m4, m0
paddd m1, m3
psubd m0, m3
psubd m5, m3
paddd m0, m7
psubd m5, m7
psrad m3, 1
psrad m7, 1
psubd m0, m3
psubd m5, m7
SWAP 1, 7
psrad m1, m7, 2
psrad m3, m4, 2
paddd m3, m0
psrad m0, 2
paddd m1, m5
psrad m5, 2
psubd m0, m4
psubd m7, m5
SWAP 5, 6
psrad m4, m2, 1
psrad m6, m5, 1
psubd m4, m5
paddd m6, m2
mova m2, %1
mova m5, %2
SUMSUB_BA d, 5, 2
SUMSUB_BA d, 6, 5
SUMSUB_BA d, 4, 2
SUMSUB_BA d, 7, 6
SUMSUB_BA d, 0, 4
SUMSUB_BA d, 3, 2
SUMSUB_BA d, 1, 5
SWAP 7, 6, 4, 5, 2, 3, 1, 0 ; 70315246 -> 01234567
%endmacro
%macro IDCT8_1D_FULL 1
mova m7, [%1+112*2]
mova m6, [%1+ 96*2]
mova m5, [%1+ 80*2]
mova m3, [%1+ 48*2]
mova m2, [%1+ 32*2]
mova m1, [%1+ 16*2]
IDCT8_1D [%1], [%1+ 64*2]
%endmacro
; %1=int16_t *block, %2=int16_t *dstblock
%macro IDCT8_ADD_SSE_START 2
IDCT8_1D_FULL %1
%if ARCH_X86_64
TRANSPOSE4x4D 0,1,2,3,8
mova [%2 ], m0
TRANSPOSE4x4D 4,5,6,7,8
mova [%2+8*2], m4
%else
mova [%1], m7
TRANSPOSE4x4D 0,1,2,3,7
mova m7, [%1]
mova [%2 ], m0
mova [%2+16*2], m1
mova [%2+32*2], m2
mova [%2+48*2], m3
TRANSPOSE4x4D 4,5,6,7,3
mova [%2+ 8*2], m4
mova [%2+24*2], m5
mova [%2+40*2], m6
mova [%2+56*2], m7
%endif
%endmacro
; %1=uint8_t *dst, %2=int16_t *block, %3=int stride
%macro IDCT8_ADD_SSE_END 3
IDCT8_1D_FULL %2
mova [%2 ], m6
mova [%2+16*2], m7
pxor m7, m7
STORE_DIFFx2 m0, m1, m6, m7, %1, %3
lea %1, [%1+%3*2]
STORE_DIFFx2 m2, m3, m6, m7, %1, %3
mova m0, [%2 ]
mova m1, [%2+16*2]
lea %1, [%1+%3*2]
STORE_DIFFx2 m4, m5, m6, m7, %1, %3
lea %1, [%1+%3*2]
STORE_DIFFx2 m0, m1, m6, m7, %1, %3
%endmacro
%macro IDCT8_ADD 0
cglobal h264_idct8_add_10, 3,4,16
%if UNIX64 == 0
%assign pad 16-gprsize-(stack_offset&15)
sub rsp, pad
call h264_idct8_add1_10 %+ SUFFIX
add rsp, pad
RET
%endif
ALIGN 16
; TODO: does not need to use stack
h264_idct8_add1_10 %+ SUFFIX:
%assign pad 256+16-gprsize
sub rsp, pad
add dword [r1], 32
%if ARCH_X86_64
IDCT8_ADD_SSE_START r1, rsp
SWAP 1, 9
SWAP 2, 10
SWAP 3, 11
SWAP 5, 13
SWAP 6, 14
SWAP 7, 15
IDCT8_ADD_SSE_START r1+16, rsp+128
PERMUTE 1,9, 2,10, 3,11, 5,1, 6,2, 7,3, 9,13, 10,14, 11,15, 13,5, 14,6, 15,7
IDCT8_1D [rsp], [rsp+128]
SWAP 0, 8
SWAP 1, 9
SWAP 2, 10
SWAP 3, 11
SWAP 4, 12
SWAP 5, 13
SWAP 6, 14
SWAP 7, 15
IDCT8_1D [rsp+16], [rsp+144]
psrad m8, 6
psrad m0, 6
packssdw m8, m0
paddsw m8, [r0]
pxor m0, m0
mova [r1+ 0], m0
mova [r1+ 16], m0
mova [r1+ 32], m0
mova [r1+ 48], m0
mova [r1+ 64], m0
mova [r1+ 80], m0
mova [r1+ 96], m0
mova [r1+112], m0
mova [r1+128], m0
mova [r1+144], m0
mova [r1+160], m0
mova [r1+176], m0
mova [r1+192], m0
mova [r1+208], m0
mova [r1+224], m0
mova [r1+240], m0
CLIPW m8, m0, [pw_pixel_max]
mova [r0], m8
mova m8, [pw_pixel_max]
STORE_DIFF16 m9, m1, m0, m8, r0+r2
lea r0, [r0+r2*2]
STORE_DIFF16 m10, m2, m0, m8, r0
STORE_DIFF16 m11, m3, m0, m8, r0+r2
lea r0, [r0+r2*2]
STORE_DIFF16 m12, m4, m0, m8, r0
STORE_DIFF16 m13, m5, m0, m8, r0+r2
lea r0, [r0+r2*2]
STORE_DIFF16 m14, m6, m0, m8, r0
STORE_DIFF16 m15, m7, m0, m8, r0+r2
%else
IDCT8_ADD_SSE_START r1, rsp
IDCT8_ADD_SSE_START r1+16, rsp+128
lea r3, [r0+8]
IDCT8_ADD_SSE_END r0, rsp, r2
IDCT8_ADD_SSE_END r3, rsp+16, r2
mova [r1+ 0], m7
mova [r1+ 16], m7
mova [r1+ 32], m7
mova [r1+ 48], m7
mova [r1+ 64], m7
mova [r1+ 80], m7
mova [r1+ 96], m7
mova [r1+112], m7
mova [r1+128], m7
mova [r1+144], m7
mova [r1+160], m7
mova [r1+176], m7
mova [r1+192], m7
mova [r1+208], m7
mova [r1+224], m7
mova [r1+240], m7
%endif ; ARCH_X86_64
add rsp, pad
ret
%endmacro
INIT_XMM sse2
IDCT8_ADD
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT8_ADD
%endif
;-----------------------------------------------------------------------------
; void ff_h264_idct8_add4_10(pixel **dst, const int *block_offset,
; int16_t *block, int stride,
; const uint8_t nnzc[6*8])
;-----------------------------------------------------------------------------
;;;;;;; NO FATE SAMPLES TRIGGER THIS
%macro IDCT8_ADD4_OP 2
cmp byte [r4+%2], 0
jz .skipblock%1
mov r0d, [r6+%1*4]
add r0, r5
call h264_idct8_add1_10 %+ SUFFIX
.skipblock%1:
%if %1<12
add r1, 256
%endif
%endmacro
%macro IDCT8_ADD4 0
cglobal h264_idct8_add4_10, 0,7,16
%assign pad 16-gprsize-(stack_offset&15)
SUB rsp, pad
mov r5, r0mp
mov r6, r1mp
mov r1, r2mp
mov r2d, r3m
movifnidn r4, r4mp
IDCT8_ADD4_OP 0, 4+1*8
IDCT8_ADD4_OP 4, 6+1*8
IDCT8_ADD4_OP 8, 4+3*8
IDCT8_ADD4_OP 12, 6+3*8
ADD rsp, pad
RET
%endmacro ; IDCT8_ADD4
INIT_XMM sse2
IDCT8_ADD4
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
IDCT8_ADD4
%endif

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,403 @@
/*
* Copyright (c) 2010 Fiona Glaser <fiona@x264.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/h264pred.h"
#define PRED4x4(TYPE, DEPTH, OPT) \
void ff_pred4x4_ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *src, \
const uint8_t *topright, \
ptrdiff_t stride);
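/* Example expansion: PRED4x4(dc, 10, mmxext) declares
 *   void ff_pred4x4_dc_10_mmxext(uint8_t *src, const uint8_t *topright,
 *                                ptrdiff_t stride);
 * i.e. the names that get registered in ff_h264_pred_init_x86() below. */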
PRED4x4(dc, 10, mmxext)
PRED4x4(down_left, 10, sse2)
PRED4x4(down_left, 10, avx)
PRED4x4(down_right, 10, sse2)
PRED4x4(down_right, 10, ssse3)
PRED4x4(down_right, 10, avx)
PRED4x4(vertical_left, 10, sse2)
PRED4x4(vertical_left, 10, avx)
PRED4x4(vertical_right, 10, sse2)
PRED4x4(vertical_right, 10, ssse3)
PRED4x4(vertical_right, 10, avx)
PRED4x4(horizontal_up, 10, mmxext)
PRED4x4(horizontal_down, 10, sse2)
PRED4x4(horizontal_down, 10, ssse3)
PRED4x4(horizontal_down, 10, avx)
#define PRED8x8(TYPE, DEPTH, OPT) \
void ff_pred8x8_ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *src, \
ptrdiff_t stride);
PRED8x8(dc, 10, mmxext)
PRED8x8(dc, 10, sse2)
PRED8x8(top_dc, 10, sse2)
PRED8x8(plane, 10, sse2)
PRED8x8(vertical, 10, sse2)
PRED8x8(horizontal, 10, sse2)
#define PRED8x8L(TYPE, DEPTH, OPT)\
void ff_pred8x8l_ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *src, \
int has_topleft, \
int has_topright, \
ptrdiff_t stride);
PRED8x8L(dc, 10, sse2)
PRED8x8L(dc, 10, avx)
PRED8x8L(128_dc, 10, mmxext)
PRED8x8L(128_dc, 10, sse2)
PRED8x8L(top_dc, 10, sse2)
PRED8x8L(top_dc, 10, avx)
PRED8x8L(vertical, 10, sse2)
PRED8x8L(vertical, 10, avx)
PRED8x8L(horizontal, 10, sse2)
PRED8x8L(horizontal, 10, ssse3)
PRED8x8L(horizontal, 10, avx)
PRED8x8L(down_left, 10, sse2)
PRED8x8L(down_left, 10, ssse3)
PRED8x8L(down_left, 10, avx)
PRED8x8L(down_right, 10, sse2)
PRED8x8L(down_right, 10, ssse3)
PRED8x8L(down_right, 10, avx)
PRED8x8L(vertical_right, 10, sse2)
PRED8x8L(vertical_right, 10, ssse3)
PRED8x8L(vertical_right, 10, avx)
PRED8x8L(horizontal_up, 10, sse2)
PRED8x8L(horizontal_up, 10, ssse3)
PRED8x8L(horizontal_up, 10, avx)
#define PRED16x16(TYPE, DEPTH, OPT)\
void ff_pred16x16_ ## TYPE ## _ ## DEPTH ## _ ## OPT (uint8_t *src, \
ptrdiff_t stride);
PRED16x16(dc, 10, mmxext)
PRED16x16(dc, 10, sse2)
PRED16x16(top_dc, 10, mmxext)
PRED16x16(top_dc, 10, sse2)
PRED16x16(128_dc, 10, mmxext)
PRED16x16(128_dc, 10, sse2)
PRED16x16(left_dc, 10, mmxext)
PRED16x16(left_dc, 10, sse2)
PRED16x16(vertical, 10, mmxext)
PRED16x16(vertical, 10, sse2)
PRED16x16(horizontal, 10, mmxext)
PRED16x16(horizontal, 10, sse2)
/* 8-bit versions */
PRED16x16(vertical, 8, mmx)
PRED16x16(vertical, 8, sse)
PRED16x16(horizontal, 8, mmx)
PRED16x16(horizontal, 8, mmxext)
PRED16x16(horizontal, 8, ssse3)
PRED16x16(dc, 8, mmxext)
PRED16x16(dc, 8, sse2)
PRED16x16(dc, 8, ssse3)
PRED16x16(plane_h264, 8, mmx)
PRED16x16(plane_h264, 8, mmxext)
PRED16x16(plane_h264, 8, sse2)
PRED16x16(plane_h264, 8, ssse3)
PRED16x16(plane_rv40, 8, mmx)
PRED16x16(plane_rv40, 8, mmxext)
PRED16x16(plane_rv40, 8, sse2)
PRED16x16(plane_rv40, 8, ssse3)
PRED16x16(plane_svq3, 8, mmx)
PRED16x16(plane_svq3, 8, mmxext)
PRED16x16(plane_svq3, 8, sse2)
PRED16x16(plane_svq3, 8, ssse3)
PRED16x16(tm_vp8, 8, mmx)
PRED16x16(tm_vp8, 8, mmxext)
PRED16x16(tm_vp8, 8, sse2)
PRED8x8(top_dc, 8, mmxext)
PRED8x8(dc_rv40, 8, mmxext)
PRED8x8(dc, 8, mmxext)
PRED8x8(vertical, 8, mmx)
PRED8x8(horizontal, 8, mmx)
PRED8x8(horizontal, 8, mmxext)
PRED8x8(horizontal, 8, ssse3)
PRED8x8(plane, 8, mmx)
PRED8x8(plane, 8, mmxext)
PRED8x8(plane, 8, sse2)
PRED8x8(plane, 8, ssse3)
PRED8x8(tm_vp8, 8, mmx)
PRED8x8(tm_vp8, 8, mmxext)
PRED8x8(tm_vp8, 8, sse2)
PRED8x8(tm_vp8, 8, ssse3)
PRED8x8L(top_dc, 8, mmxext)
PRED8x8L(top_dc, 8, ssse3)
PRED8x8L(dc, 8, mmxext)
PRED8x8L(dc, 8, ssse3)
PRED8x8L(horizontal, 8, mmxext)
PRED8x8L(horizontal, 8, ssse3)
PRED8x8L(vertical, 8, mmxext)
PRED8x8L(vertical, 8, ssse3)
PRED8x8L(down_left, 8, mmxext)
PRED8x8L(down_left, 8, sse2)
PRED8x8L(down_left, 8, ssse3)
PRED8x8L(down_right, 8, mmxext)
PRED8x8L(down_right, 8, sse2)
PRED8x8L(down_right, 8, ssse3)
PRED8x8L(vertical_right, 8, mmxext)
PRED8x8L(vertical_right, 8, sse2)
PRED8x8L(vertical_right, 8, ssse3)
PRED8x8L(vertical_left, 8, sse2)
PRED8x8L(vertical_left, 8, ssse3)
PRED8x8L(horizontal_up, 8, mmxext)
PRED8x8L(horizontal_up, 8, ssse3)
PRED8x8L(horizontal_down, 8, mmxext)
PRED8x8L(horizontal_down, 8, sse2)
PRED8x8L(horizontal_down, 8, ssse3)
PRED4x4(dc, 8, mmxext)
PRED4x4(down_left, 8, mmxext)
PRED4x4(down_right, 8, mmxext)
PRED4x4(vertical_left, 8, mmxext)
PRED4x4(vertical_right, 8, mmxext)
PRED4x4(horizontal_up, 8, mmxext)
PRED4x4(horizontal_down, 8, mmxext)
PRED4x4(tm_vp8, 8, mmx)
PRED4x4(tm_vp8, 8, mmxext)
PRED4x4(tm_vp8, 8, ssse3)
PRED4x4(vertical_vp8, 8, mmxext)
av_cold void ff_h264_pred_init_x86(H264PredContext *h, int codec_id,
const int bit_depth,
const int chroma_format_idc)
{
int cpu_flags = av_get_cpu_flags();
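    /* The EXTERNAL_*() blocks below are ordered from least to most capable
     * instruction set, so each later block overwrites the pointers set by an
     * earlier one and the fastest supported implementation wins. */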
if (bit_depth == 8) {
if (EXTERNAL_MMX(cpu_flags)) {
h->pred16x16[VERT_PRED8x8 ] = ff_pred16x16_vertical_8_mmx;
h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_8_mmx;
if (chroma_format_idc <= 1) {
h->pred8x8 [VERT_PRED8x8 ] = ff_pred8x8_vertical_8_mmx;
h->pred8x8 [HOR_PRED8x8 ] = ff_pred8x8_horizontal_8_mmx;
}
if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) {
h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_tm_vp8_8_mmx;
h->pred8x8 [PLANE_PRED8x8 ] = ff_pred8x8_tm_vp8_8_mmx;
h->pred4x4 [TM_VP8_PRED ] = ff_pred4x4_tm_vp8_8_mmx;
} else {
if (chroma_format_idc <= 1)
h->pred8x8 [PLANE_PRED8x8] = ff_pred8x8_plane_8_mmx;
if (codec_id == AV_CODEC_ID_SVQ3) {
if (cpu_flags & AV_CPU_FLAG_CMOV)
h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_plane_svq3_8_mmx;
} else if (codec_id == AV_CODEC_ID_RV40) {
h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_plane_rv40_8_mmx;
} else {
h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_plane_h264_8_mmx;
}
}
}
if (EXTERNAL_MMXEXT(cpu_flags)) {
h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_8_mmxext;
h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_8_mmxext;
if (chroma_format_idc <= 1)
h->pred8x8[HOR_PRED8x8 ] = ff_pred8x8_horizontal_8_mmxext;
h->pred8x8l [TOP_DC_PRED ] = ff_pred8x8l_top_dc_8_mmxext;
h->pred8x8l [DC_PRED ] = ff_pred8x8l_dc_8_mmxext;
h->pred8x8l [HOR_PRED ] = ff_pred8x8l_horizontal_8_mmxext;
h->pred8x8l [VERT_PRED ] = ff_pred8x8l_vertical_8_mmxext;
h->pred8x8l [DIAG_DOWN_RIGHT_PRED ] = ff_pred8x8l_down_right_8_mmxext;
h->pred8x8l [VERT_RIGHT_PRED ] = ff_pred8x8l_vertical_right_8_mmxext;
h->pred8x8l [HOR_UP_PRED ] = ff_pred8x8l_horizontal_up_8_mmxext;
h->pred8x8l [DIAG_DOWN_LEFT_PRED ] = ff_pred8x8l_down_left_8_mmxext;
h->pred8x8l [HOR_DOWN_PRED ] = ff_pred8x8l_horizontal_down_8_mmxext;
h->pred4x4 [DIAG_DOWN_RIGHT_PRED ] = ff_pred4x4_down_right_8_mmxext;
h->pred4x4 [VERT_RIGHT_PRED ] = ff_pred4x4_vertical_right_8_mmxext;
h->pred4x4 [HOR_DOWN_PRED ] = ff_pred4x4_horizontal_down_8_mmxext;
h->pred4x4 [DC_PRED ] = ff_pred4x4_dc_8_mmxext;
if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8 ||
codec_id == AV_CODEC_ID_H264) {
h->pred4x4 [DIAG_DOWN_LEFT_PRED] = ff_pred4x4_down_left_8_mmxext;
}
if (codec_id == AV_CODEC_ID_SVQ3 || codec_id == AV_CODEC_ID_H264) {
h->pred4x4 [VERT_LEFT_PRED ] = ff_pred4x4_vertical_left_8_mmxext;
}
if (codec_id != AV_CODEC_ID_RV40) {
h->pred4x4 [HOR_UP_PRED ] = ff_pred4x4_horizontal_up_8_mmxext;
}
if (codec_id == AV_CODEC_ID_SVQ3 || codec_id == AV_CODEC_ID_H264) {
if (chroma_format_idc <= 1) {
h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_8_mmxext;
h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_8_mmxext;
}
}
if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) {
h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_tm_vp8_8_mmxext;
h->pred8x8 [DC_PRED8x8 ] = ff_pred8x8_dc_rv40_8_mmxext;
h->pred8x8 [PLANE_PRED8x8 ] = ff_pred8x8_tm_vp8_8_mmxext;
h->pred4x4 [TM_VP8_PRED ] = ff_pred4x4_tm_vp8_8_mmxext;
h->pred4x4 [VERT_PRED ] = ff_pred4x4_vertical_vp8_8_mmxext;
} else {
if (chroma_format_idc <= 1)
h->pred8x8 [PLANE_PRED8x8] = ff_pred8x8_plane_8_mmxext;
if (codec_id == AV_CODEC_ID_SVQ3) {
h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_plane_svq3_8_mmxext;
} else if (codec_id == AV_CODEC_ID_RV40) {
h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_plane_rv40_8_mmxext;
} else {
h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_plane_h264_8_mmxext;
}
}
}
if (EXTERNAL_SSE(cpu_flags)) {
h->pred16x16[VERT_PRED8x8] = ff_pred16x16_vertical_8_sse;
}
if (EXTERNAL_SSE2(cpu_flags)) {
h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_8_sse2;
h->pred8x8l [DIAG_DOWN_LEFT_PRED ] = ff_pred8x8l_down_left_8_sse2;
h->pred8x8l [DIAG_DOWN_RIGHT_PRED ] = ff_pred8x8l_down_right_8_sse2;
h->pred8x8l [VERT_RIGHT_PRED ] = ff_pred8x8l_vertical_right_8_sse2;
h->pred8x8l [VERT_LEFT_PRED ] = ff_pred8x8l_vertical_left_8_sse2;
h->pred8x8l [HOR_DOWN_PRED ] = ff_pred8x8l_horizontal_down_8_sse2;
if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) {
h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_tm_vp8_8_sse2;
h->pred8x8 [PLANE_PRED8x8 ] = ff_pred8x8_tm_vp8_8_sse2;
} else {
if (chroma_format_idc <= 1)
h->pred8x8 [PLANE_PRED8x8] = ff_pred8x8_plane_8_sse2;
if (codec_id == AV_CODEC_ID_SVQ3) {
h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_plane_svq3_8_sse2;
} else if (codec_id == AV_CODEC_ID_RV40) {
h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_plane_rv40_8_sse2;
} else {
h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_plane_h264_8_sse2;
}
}
}
if (EXTERNAL_SSSE3(cpu_flags)) {
h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_8_ssse3;
h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_8_ssse3;
if (chroma_format_idc <= 1)
h->pred8x8 [HOR_PRED8x8 ] = ff_pred8x8_horizontal_8_ssse3;
h->pred8x8l [TOP_DC_PRED ] = ff_pred8x8l_top_dc_8_ssse3;
h->pred8x8l [DC_PRED ] = ff_pred8x8l_dc_8_ssse3;
h->pred8x8l [HOR_PRED ] = ff_pred8x8l_horizontal_8_ssse3;
h->pred8x8l [VERT_PRED ] = ff_pred8x8l_vertical_8_ssse3;
h->pred8x8l [DIAG_DOWN_LEFT_PRED ] = ff_pred8x8l_down_left_8_ssse3;
h->pred8x8l [DIAG_DOWN_RIGHT_PRED ] = ff_pred8x8l_down_right_8_ssse3;
h->pred8x8l [VERT_RIGHT_PRED ] = ff_pred8x8l_vertical_right_8_ssse3;
h->pred8x8l [VERT_LEFT_PRED ] = ff_pred8x8l_vertical_left_8_ssse3;
h->pred8x8l [HOR_UP_PRED ] = ff_pred8x8l_horizontal_up_8_ssse3;
h->pred8x8l [HOR_DOWN_PRED ] = ff_pred8x8l_horizontal_down_8_ssse3;
if (codec_id == AV_CODEC_ID_VP7 || codec_id == AV_CODEC_ID_VP8) {
h->pred8x8 [PLANE_PRED8x8 ] = ff_pred8x8_tm_vp8_8_ssse3;
h->pred4x4 [TM_VP8_PRED ] = ff_pred4x4_tm_vp8_8_ssse3;
} else {
if (chroma_format_idc <= 1)
h->pred8x8 [PLANE_PRED8x8] = ff_pred8x8_plane_8_ssse3;
if (codec_id == AV_CODEC_ID_SVQ3) {
h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_plane_svq3_8_ssse3;
} else if (codec_id == AV_CODEC_ID_RV40) {
h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_plane_rv40_8_ssse3;
} else {
h->pred16x16[PLANE_PRED8x8] = ff_pred16x16_plane_h264_8_ssse3;
}
}
}
} else if (bit_depth == 10) {
if (EXTERNAL_MMXEXT(cpu_flags)) {
h->pred4x4[DC_PRED ] = ff_pred4x4_dc_10_mmxext;
h->pred4x4[HOR_UP_PRED ] = ff_pred4x4_horizontal_up_10_mmxext;
if (chroma_format_idc <= 1)
h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_10_mmxext;
h->pred8x8l[DC_128_PRED ] = ff_pred8x8l_128_dc_10_mmxext;
h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_10_mmxext;
h->pred16x16[TOP_DC_PRED8x8 ] = ff_pred16x16_top_dc_10_mmxext;
h->pred16x16[DC_128_PRED8x8 ] = ff_pred16x16_128_dc_10_mmxext;
h->pred16x16[LEFT_DC_PRED8x8 ] = ff_pred16x16_left_dc_10_mmxext;
h->pred16x16[VERT_PRED8x8 ] = ff_pred16x16_vertical_10_mmxext;
h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_10_mmxext;
}
if (EXTERNAL_SSE2(cpu_flags)) {
h->pred4x4[DIAG_DOWN_LEFT_PRED ] = ff_pred4x4_down_left_10_sse2;
h->pred4x4[DIAG_DOWN_RIGHT_PRED] = ff_pred4x4_down_right_10_sse2;
h->pred4x4[VERT_LEFT_PRED ] = ff_pred4x4_vertical_left_10_sse2;
h->pred4x4[VERT_RIGHT_PRED ] = ff_pred4x4_vertical_right_10_sse2;
h->pred4x4[HOR_DOWN_PRED ] = ff_pred4x4_horizontal_down_10_sse2;
if (chroma_format_idc <= 1) {
h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_10_sse2;
h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_10_sse2;
h->pred8x8[PLANE_PRED8x8 ] = ff_pred8x8_plane_10_sse2;
h->pred8x8[VERT_PRED8x8 ] = ff_pred8x8_vertical_10_sse2;
h->pred8x8[HOR_PRED8x8 ] = ff_pred8x8_horizontal_10_sse2;
}
h->pred8x8l[VERT_PRED ] = ff_pred8x8l_vertical_10_sse2;
h->pred8x8l[HOR_PRED ] = ff_pred8x8l_horizontal_10_sse2;
h->pred8x8l[DC_PRED ] = ff_pred8x8l_dc_10_sse2;
h->pred8x8l[DC_128_PRED ] = ff_pred8x8l_128_dc_10_sse2;
h->pred8x8l[TOP_DC_PRED ] = ff_pred8x8l_top_dc_10_sse2;
h->pred8x8l[DIAG_DOWN_LEFT_PRED ] = ff_pred8x8l_down_left_10_sse2;
h->pred8x8l[DIAG_DOWN_RIGHT_PRED] = ff_pred8x8l_down_right_10_sse2;
h->pred8x8l[VERT_RIGHT_PRED ] = ff_pred8x8l_vertical_right_10_sse2;
h->pred8x8l[HOR_UP_PRED ] = ff_pred8x8l_horizontal_up_10_sse2;
h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_10_sse2;
h->pred16x16[TOP_DC_PRED8x8 ] = ff_pred16x16_top_dc_10_sse2;
h->pred16x16[DC_128_PRED8x8 ] = ff_pred16x16_128_dc_10_sse2;
h->pred16x16[LEFT_DC_PRED8x8 ] = ff_pred16x16_left_dc_10_sse2;
h->pred16x16[VERT_PRED8x8 ] = ff_pred16x16_vertical_10_sse2;
h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_horizontal_10_sse2;
}
if (EXTERNAL_SSSE3(cpu_flags)) {
h->pred4x4[DIAG_DOWN_RIGHT_PRED] = ff_pred4x4_down_right_10_ssse3;
h->pred4x4[VERT_RIGHT_PRED ] = ff_pred4x4_vertical_right_10_ssse3;
h->pred4x4[HOR_DOWN_PRED ] = ff_pred4x4_horizontal_down_10_ssse3;
h->pred8x8l[HOR_PRED ] = ff_pred8x8l_horizontal_10_ssse3;
h->pred8x8l[DIAG_DOWN_LEFT_PRED ] = ff_pred8x8l_down_left_10_ssse3;
h->pred8x8l[DIAG_DOWN_RIGHT_PRED] = ff_pred8x8l_down_right_10_ssse3;
h->pred8x8l[VERT_RIGHT_PRED ] = ff_pred8x8l_vertical_right_10_ssse3;
h->pred8x8l[HOR_UP_PRED ] = ff_pred8x8l_horizontal_up_10_ssse3;
}
if (EXTERNAL_AVX(cpu_flags)) {
h->pred4x4[DIAG_DOWN_LEFT_PRED ] = ff_pred4x4_down_left_10_avx;
h->pred4x4[DIAG_DOWN_RIGHT_PRED] = ff_pred4x4_down_right_10_avx;
h->pred4x4[VERT_LEFT_PRED ] = ff_pred4x4_vertical_left_10_avx;
h->pred4x4[VERT_RIGHT_PRED ] = ff_pred4x4_vertical_right_10_avx;
h->pred4x4[HOR_DOWN_PRED ] = ff_pred4x4_horizontal_down_10_avx;
h->pred8x8l[VERT_PRED ] = ff_pred8x8l_vertical_10_avx;
h->pred8x8l[HOR_PRED ] = ff_pred8x8l_horizontal_10_avx;
h->pred8x8l[DC_PRED ] = ff_pred8x8l_dc_10_avx;
h->pred8x8l[TOP_DC_PRED ] = ff_pred8x8l_top_dc_10_avx;
h->pred8x8l[DIAG_DOWN_RIGHT_PRED] = ff_pred8x8l_down_right_10_avx;
h->pred8x8l[DIAG_DOWN_LEFT_PRED ] = ff_pred8x8l_down_left_10_avx;
h->pred8x8l[VERT_RIGHT_PRED ] = ff_pred8x8l_vertical_right_10_avx;
h->pred8x8l[HOR_UP_PRED ] = ff_pred8x8l_horizontal_up_10_avx;
}
}
}


@@ -0,0 +1,635 @@
/*
* Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
* Copyright (c) 2011 Daniel Kang
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/h264.h"
#include "libavcodec/h264qpel.h"
#include "libavcodec/pixels.h"
#include "fpel.h"
#if HAVE_YASM
void ff_put_pixels4_mmx(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels4_mmxext(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels4_l2_mmxext(uint8_t *dst, const uint8_t *src1, const uint8_t *src2,
int dstStride, int src1Stride, int h);
void ff_avg_pixels4_l2_mmxext(uint8_t *dst, const uint8_t *src1, const uint8_t *src2,
int dstStride, int src1Stride, int h);
void ff_put_pixels8_l2_mmxext(uint8_t *dst, const uint8_t *src1, const uint8_t *src2,
int dstStride, int src1Stride, int h);
void ff_avg_pixels8_l2_mmxext(uint8_t *dst, const uint8_t *src1, const uint8_t *src2,
int dstStride, int src1Stride, int h);
void ff_put_pixels16_l2_mmxext(uint8_t *dst, const uint8_t *src1, const uint8_t *src2,
int dstStride, int src1Stride, int h);
void ff_avg_pixels16_l2_mmxext(uint8_t *dst, const uint8_t *src1, const uint8_t *src2,
int dstStride, int src1Stride, int h);
#define ff_put_pixels8_l2_sse2 ff_put_pixels8_l2_mmxext
#define ff_avg_pixels8_l2_sse2 ff_avg_pixels8_l2_mmxext
#define ff_put_pixels16_l2_sse2 ff_put_pixels16_l2_mmxext
#define ff_avg_pixels16_l2_sse2 ff_avg_pixels16_l2_mmxext
#define ff_put_pixels16_mmxext ff_put_pixels16_mmx
#define ff_put_pixels8_mmxext ff_put_pixels8_mmx
#define ff_put_pixels4_mmxext ff_put_pixels4_mmx
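/* The aliases above map names that have no dedicated implementation onto an
 * existing one: the *_l2_sse2 helpers reuse the mmxext routines and the plain
 * block copies need nothing newer than MMX. */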
#define DEF_QPEL(OPNAME)\
void ff_ ## OPNAME ## _h264_qpel4_h_lowpass_mmxext(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride);\
void ff_ ## OPNAME ## _h264_qpel8_h_lowpass_mmxext(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride);\
void ff_ ## OPNAME ## _h264_qpel8_h_lowpass_ssse3(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride);\
void ff_ ## OPNAME ## _h264_qpel4_h_lowpass_l2_mmxext(uint8_t *dst, const uint8_t *src, const uint8_t *src2, int dstStride, int src2Stride);\
void ff_ ## OPNAME ## _h264_qpel8_h_lowpass_l2_mmxext(uint8_t *dst, const uint8_t *src, const uint8_t *src2, int dstStride, int src2Stride);\
void ff_ ## OPNAME ## _h264_qpel8_h_lowpass_l2_ssse3(uint8_t *dst, const uint8_t *src, const uint8_t *src2, int dstStride, int src2Stride);\
void ff_ ## OPNAME ## _h264_qpel4_v_lowpass_mmxext(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride);\
void ff_ ## OPNAME ## _h264_qpel8or16_v_lowpass_op_mmxext(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h);\
void ff_ ## OPNAME ## _h264_qpel8or16_v_lowpass_sse2(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h);\
void ff_ ## OPNAME ## _h264_qpel4_hv_lowpass_v_mmxext(const uint8_t *src, int16_t *tmp, int srcStride);\
void ff_ ## OPNAME ## _h264_qpel4_hv_lowpass_h_mmxext(int16_t *tmp, uint8_t *dst, int dstStride);\
void ff_ ## OPNAME ## _h264_qpel8or16_hv1_lowpass_op_mmxext(const uint8_t *src, int16_t *tmp, int srcStride, int size);\
void ff_ ## OPNAME ## _h264_qpel8or16_hv1_lowpass_op_sse2(const uint8_t *src, int16_t *tmp, int srcStride, int size);\
void ff_ ## OPNAME ## _h264_qpel8or16_hv2_lowpass_op_mmxext(uint8_t *dst, int16_t *tmp, int dstStride, int unused, int h);\
void ff_ ## OPNAME ## _h264_qpel8or16_hv2_lowpass_ssse3(uint8_t *dst, int16_t *tmp, int dstStride, int tmpStride, int size);\
void ff_ ## OPNAME ## _pixels4_l2_shift5_mmxext(uint8_t *dst, const int16_t *src16, const uint8_t *src8, int dstStride, int src8Stride, int h);\
void ff_ ## OPNAME ## _pixels8_l2_shift5_mmxext(uint8_t *dst, const int16_t *src16, const uint8_t *src8, int dstStride, int src8Stride, int h);
DEF_QPEL(avg)
DEF_QPEL(put)
#define QPEL_H264(OPNAME, OP, MMX)\
static av_always_inline void ff_ ## OPNAME ## h264_qpel4_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, const uint8_t *src, int dstStride, int tmpStride, int srcStride){\
int w=3;\
src -= 2*srcStride+2;\
while(w--){\
ff_ ## OPNAME ## h264_qpel4_hv_lowpass_v_mmxext(src, tmp, srcStride);\
tmp += 4;\
src += 4;\
}\
tmp -= 3*4;\
ff_ ## OPNAME ## h264_qpel4_hv_lowpass_h_mmxext(tmp, dst, dstStride);\
}\
\
static av_always_inline void ff_ ## OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h){\
src -= 2*srcStride;\
ff_ ## OPNAME ## h264_qpel8or16_v_lowpass_op_mmxext(dst, src, dstStride, srcStride, h);\
src += 4;\
dst += 4;\
ff_ ## OPNAME ## h264_qpel8or16_v_lowpass_op_mmxext(dst, src, dstStride, srcStride, h);\
}\
static av_always_inline void ff_ ## OPNAME ## h264_qpel8or16_hv1_lowpass_ ## MMX(int16_t *tmp, const uint8_t *src, int tmpStride, int srcStride, int size){\
int w = (size+8)>>2;\
src -= 2*srcStride+2;\
while(w--){\
ff_ ## OPNAME ## h264_qpel8or16_hv1_lowpass_op_mmxext(src, tmp, srcStride, size);\
tmp += 4;\
src += 4;\
}\
}\
static av_always_inline void ff_ ## OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, int dstStride, int tmpStride, int size){\
int w = size>>4;\
do{\
ff_ ## OPNAME ## h264_qpel8or16_hv2_lowpass_op_mmxext(dst, tmp, dstStride, 0, size);\
tmp += 8;\
dst += 8;\
}while(w--);\
}\
\
static av_always_inline void ff_ ## OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride){\
ff_ ## OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 8);\
}\
static av_always_inline void ff_ ## OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride){\
ff_ ## OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 16);\
ff_ ## OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static av_always_inline void ff_ ## OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride){\
ff_ ## OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
ff_ ## OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
src += 8*srcStride;\
dst += 8*dstStride;\
ff_ ## OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
ff_ ## OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}\
\
static av_always_inline void ff_ ## OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, const uint8_t *src, const uint8_t *src2, int dstStride, int src2Stride){\
ff_ ## OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
ff_ ## OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
src += 8*dstStride;\
dst += 8*dstStride;\
src2 += 8*src2Stride;\
ff_ ## OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
ff_ ## OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
}\
\
static av_always_inline void ff_ ## OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, const uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
ff_put_h264_qpel8or16_hv1_lowpass_ ## MMX(tmp, src, tmpStride, srcStride, size);\
ff_ ## OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(dst, tmp, dstStride, tmpStride, size);\
}\
static av_always_inline void ff_ ## OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, const uint8_t *src, int dstStride, int tmpStride, int srcStride){\
ff_ ## OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 8);\
}\
\
static av_always_inline void ff_ ## OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, const uint8_t *src, int dstStride, int tmpStride, int srcStride){\
ff_ ## OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst , tmp , src , dstStride, tmpStride, srcStride, 16);\
}\
\
static av_always_inline void ff_ ## OPNAME ## pixels16_l2_shift5_ ## MMX(uint8_t *dst, const int16_t *src16, const uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
ff_ ## OPNAME ## pixels8_l2_shift5_ ## MMX(dst , src16 , src8 , dstStride, src8Stride, h);\
ff_ ## OPNAME ## pixels8_l2_shift5_ ## MMX(dst+8, src16+8, src8+8, dstStride, src8Stride, h);\
}\
#if ARCH_X86_64
#define QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
void ff_avg_h264_qpel16_h_lowpass_l2_ssse3(uint8_t *dst, const uint8_t *src, const uint8_t *src2, int dstStride, int src2Stride);
void ff_put_h264_qpel16_h_lowpass_l2_ssse3(uint8_t *dst, const uint8_t *src, const uint8_t *src2, int dstStride, int src2Stride);
#else // ARCH_X86_64
#define QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
static av_always_inline void ff_ ## OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, const uint8_t *src, const uint8_t *src2, int dstStride, int src2Stride){\
ff_ ## OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
ff_ ## OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
src += 8*dstStride;\
dst += 8*dstStride;\
src2 += 8*src2Stride;\
ff_ ## OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst , src , src2 , dstStride, src2Stride);\
ff_ ## OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
}
#endif // ARCH_X86_64
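/* On x86_64 the 16-pixel-wide ssse3 h_lowpass_l2 kernel is provided directly
 * in asm; on 32-bit x86 it is synthesized here from two 8-pixel-wide calls. */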
#define QPEL_H264_H_XMM(OPNAME, OP, MMX)\
QPEL_H264_H16_XMM(OPNAME, OP, MMX)\
static av_always_inline void ff_ ## OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride){\
ff_ ## OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
ff_ ## OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
src += 8*srcStride;\
dst += 8*dstStride;\
ff_ ## OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst , src , dstStride, srcStride);\
ff_ ## OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}\
#define QPEL_H264_V_XMM(OPNAME, OP, MMX)\
static av_always_inline void ff_ ## OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride){\
ff_ ## OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 8);\
}\
static av_always_inline void ff_ ## OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride){\
ff_ ## OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst , src , dstStride, srcStride, 16);\
ff_ ## OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}
static av_always_inline void put_h264_qpel8or16_hv1_lowpass_sse2(int16_t *tmp,
const uint8_t *src,
int tmpStride,
int srcStride,
int size)
{
int w = (size+8)>>3;
src -= 2*srcStride+2;
while(w--){
ff_put_h264_qpel8or16_hv1_lowpass_op_sse2(src, tmp, srcStride, size);
tmp += 8;
src += 8;
}
}
#define QPEL_H264_HV_XMM(OPNAME, OP, MMX)\
static av_always_inline void ff_ ## OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, const uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
put_h264_qpel8or16_hv1_lowpass_sse2(tmp, src, tmpStride, srcStride, size);\
ff_ ## OPNAME ## h264_qpel8or16_hv2_lowpass_ ## MMX(dst, tmp, dstStride, tmpStride, size);\
}\
static av_always_inline void ff_ ## OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, const uint8_t *src, int dstStride, int tmpStride, int srcStride){\
ff_ ## OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 8);\
}\
static av_always_inline void ff_ ## OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, const uint8_t *src, int dstStride, int tmpStride, int srcStride){\
ff_ ## OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst, tmp, src, dstStride, tmpStride, srcStride, 16);\
}\
#define ff_put_h264_qpel8_h_lowpass_l2_sse2 ff_put_h264_qpel8_h_lowpass_l2_mmxext
#define ff_avg_h264_qpel8_h_lowpass_l2_sse2 ff_avg_h264_qpel8_h_lowpass_l2_mmxext
#define ff_put_h264_qpel16_h_lowpass_l2_sse2 ff_put_h264_qpel16_h_lowpass_l2_mmxext
#define ff_avg_h264_qpel16_h_lowpass_l2_sse2 ff_avg_h264_qpel16_h_lowpass_l2_mmxext
#define ff_put_h264_qpel8_v_lowpass_ssse3 ff_put_h264_qpel8_v_lowpass_sse2
#define ff_avg_h264_qpel8_v_lowpass_ssse3 ff_avg_h264_qpel8_v_lowpass_sse2
#define ff_put_h264_qpel16_v_lowpass_ssse3 ff_put_h264_qpel16_v_lowpass_sse2
#define ff_avg_h264_qpel16_v_lowpass_ssse3 ff_avg_h264_qpel16_v_lowpass_sse2
#define ff_put_h264_qpel8or16_hv2_lowpass_sse2 ff_put_h264_qpel8or16_hv2_lowpass_mmxext
#define ff_avg_h264_qpel8or16_hv2_lowpass_sse2 ff_avg_h264_qpel8or16_hv2_lowpass_mmxext
#define H264_MC(OPNAME, SIZE, MMX, ALIGN) \
H264_MC_C(OPNAME, SIZE, MMX, ALIGN)\
H264_MC_V(OPNAME, SIZE, MMX, ALIGN)\
H264_MC_H(OPNAME, SIZE, MMX, ALIGN)\
H264_MC_HV(OPNAME, SIZE, MMX, ALIGN)\
static void put_h264_qpel16_mc00_sse2 (uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
ff_put_pixels16_sse2(dst, src, stride, 16);
}
static void avg_h264_qpel16_mc00_sse2 (uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
ff_avg_pixels16_sse2(dst, src, stride, 16);
}
#define put_h264_qpel8_mc00_sse2 put_h264_qpel8_mc00_mmxext
#define avg_h264_qpel8_mc00_sse2 avg_h264_qpel8_mc00_mmxext
#define H264_MC_C(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## MMX (uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
ff_ ## OPNAME ## pixels ## SIZE ## _ ## MMX(dst, src, stride, SIZE);\
}\
#define H264_MC_H(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src+1, stride, stride);\
}\
#define H264_MC_V(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*SIZE]);\
ff_put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
ff_ ## OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, temp, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
ff_ ## OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*SIZE]);\
ff_put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
ff_ ## OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, temp, stride, stride, SIZE);\
}\
#define H264_MC_HV(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*SIZE]);\
ff_put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*SIZE]);\
ff_put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*SIZE]);\
ff_put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*SIZE]);\
ff_put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
LOCAL_ALIGNED(ALIGN, uint16_t, temp, [SIZE*(SIZE<8?12:24)]);\
ff_ ## OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, temp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
uint8_t * const halfHV= temp;\
int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
av_assert2(((int)temp & 7) == 0);\
ff_put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfHV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
uint8_t * const halfHV= temp;\
int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
av_assert2(((int)temp & 7) == 0);\
ff_put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
ff_ ## OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfHV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
uint8_t * const halfHV= temp;\
int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
av_assert2(((int)temp & 7) == 0);\
ff_put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
ff_ ## OPNAME ## pixels ## SIZE ## _l2_shift5_mmxext(dst, halfV+2, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
LOCAL_ALIGNED(ALIGN, uint8_t, temp, [SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
uint8_t * const halfHV= temp;\
int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
av_assert2(((int)temp & 7) == 0);\
ff_put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
ff_ ## OPNAME ## pixels ## SIZE ## _l2_shift5_mmxext(dst, halfV+3, halfHV, stride, SIZE, SIZE);\
}\
#define H264_MC_4816(MMX)\
H264_MC(put_, 4, MMX, 8)\
H264_MC(put_, 8, MMX, 8)\
H264_MC(put_, 16,MMX, 8)\
H264_MC(avg_, 4, MMX, 8)\
H264_MC(avg_, 8, MMX, 8)\
H264_MC(avg_, 16,MMX, 8)\
#define H264_MC_816(QPEL, XMM)\
QPEL(put_, 8, XMM, 16)\
QPEL(put_, 16,XMM, 16)\
QPEL(avg_, 8, XMM, 16)\
QPEL(avg_, 16,XMM, 16)\
QPEL_H264(put_, PUT_OP, mmxext)
QPEL_H264(avg_, AVG_MMXEXT_OP, mmxext)
QPEL_H264_V_XMM(put_, PUT_OP, sse2)
QPEL_H264_V_XMM(avg_,AVG_MMXEXT_OP, sse2)
QPEL_H264_HV_XMM(put_, PUT_OP, sse2)
QPEL_H264_HV_XMM(avg_,AVG_MMXEXT_OP, sse2)
QPEL_H264_H_XMM(put_, PUT_OP, ssse3)
QPEL_H264_H_XMM(avg_,AVG_MMXEXT_OP, ssse3)
QPEL_H264_HV_XMM(put_, PUT_OP, ssse3)
QPEL_H264_HV_XMM(avg_,AVG_MMXEXT_OP, ssse3)
H264_MC_4816(mmxext)
H264_MC_816(H264_MC_V, sse2)
H264_MC_816(H264_MC_HV, sse2)
H264_MC_816(H264_MC_H, ssse3)
H264_MC_816(H264_MC_HV, ssse3)
//10bit
#define LUMA_MC_OP(OP, NUM, DEPTH, TYPE, OPT) \
void ff_ ## OP ## _h264_qpel ## NUM ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT \
(uint8_t *dst, const uint8_t *src, ptrdiff_t stride);
#define LUMA_MC_ALL(DEPTH, TYPE, OPT) \
LUMA_MC_OP(put, 4, DEPTH, TYPE, OPT) \
LUMA_MC_OP(avg, 4, DEPTH, TYPE, OPT) \
LUMA_MC_OP(put, 8, DEPTH, TYPE, OPT) \
LUMA_MC_OP(avg, 8, DEPTH, TYPE, OPT) \
LUMA_MC_OP(put, 16, DEPTH, TYPE, OPT) \
LUMA_MC_OP(avg, 16, DEPTH, TYPE, OPT)
#define LUMA_MC_816(DEPTH, TYPE, OPT) \
LUMA_MC_OP(put, 8, DEPTH, TYPE, OPT) \
LUMA_MC_OP(avg, 8, DEPTH, TYPE, OPT) \
LUMA_MC_OP(put, 16, DEPTH, TYPE, OPT) \
LUMA_MC_OP(avg, 16, DEPTH, TYPE, OPT)
LUMA_MC_ALL(10, mc00, mmxext)
LUMA_MC_ALL(10, mc10, mmxext)
LUMA_MC_ALL(10, mc20, mmxext)
LUMA_MC_ALL(10, mc30, mmxext)
LUMA_MC_ALL(10, mc01, mmxext)
LUMA_MC_ALL(10, mc11, mmxext)
LUMA_MC_ALL(10, mc21, mmxext)
LUMA_MC_ALL(10, mc31, mmxext)
LUMA_MC_ALL(10, mc02, mmxext)
LUMA_MC_ALL(10, mc12, mmxext)
LUMA_MC_ALL(10, mc22, mmxext)
LUMA_MC_ALL(10, mc32, mmxext)
LUMA_MC_ALL(10, mc03, mmxext)
LUMA_MC_ALL(10, mc13, mmxext)
LUMA_MC_ALL(10, mc23, mmxext)
LUMA_MC_ALL(10, mc33, mmxext)
LUMA_MC_816(10, mc00, sse2)
LUMA_MC_816(10, mc10, sse2)
LUMA_MC_816(10, mc10, sse2_cache64)
LUMA_MC_816(10, mc10, ssse3_cache64)
LUMA_MC_816(10, mc20, sse2)
LUMA_MC_816(10, mc20, sse2_cache64)
LUMA_MC_816(10, mc20, ssse3_cache64)
LUMA_MC_816(10, mc30, sse2)
LUMA_MC_816(10, mc30, sse2_cache64)
LUMA_MC_816(10, mc30, ssse3_cache64)
LUMA_MC_816(10, mc01, sse2)
LUMA_MC_816(10, mc11, sse2)
LUMA_MC_816(10, mc21, sse2)
LUMA_MC_816(10, mc31, sse2)
LUMA_MC_816(10, mc02, sse2)
LUMA_MC_816(10, mc12, sse2)
LUMA_MC_816(10, mc22, sse2)
LUMA_MC_816(10, mc32, sse2)
LUMA_MC_816(10, mc03, sse2)
LUMA_MC_816(10, mc13, sse2)
LUMA_MC_816(10, mc23, sse2)
LUMA_MC_816(10, mc33, sse2)
#define QPEL16_OPMC(OP, MC, MMX)\
void ff_ ## OP ## _h264_qpel16_ ## MC ## _10_ ## MMX(uint8_t *dst, const uint8_t *src, ptrdiff_t stride){\
ff_ ## OP ## _h264_qpel8_ ## MC ## _10_ ## MMX(dst , src , stride);\
ff_ ## OP ## _h264_qpel8_ ## MC ## _10_ ## MMX(dst+16, src+16, stride);\
src += 8*stride;\
dst += 8*stride;\
ff_ ## OP ## _h264_qpel8_ ## MC ## _10_ ## MMX(dst , src , stride);\
ff_ ## OP ## _h264_qpel8_ ## MC ## _10_ ## MMX(dst+16, src+16, stride);\
}
#define QPEL16_OP(MC, MMX)\
QPEL16_OPMC(put, MC, MMX)\
QPEL16_OPMC(avg, MC, MMX)
#define QPEL16(MMX)\
QPEL16_OP(mc00, MMX)\
QPEL16_OP(mc01, MMX)\
QPEL16_OP(mc02, MMX)\
QPEL16_OP(mc03, MMX)\
QPEL16_OP(mc10, MMX)\
QPEL16_OP(mc11, MMX)\
QPEL16_OP(mc12, MMX)\
QPEL16_OP(mc13, MMX)\
QPEL16_OP(mc20, MMX)\
QPEL16_OP(mc21, MMX)\
QPEL16_OP(mc22, MMX)\
QPEL16_OP(mc23, MMX)\
QPEL16_OP(mc30, MMX)\
QPEL16_OP(mc31, MMX)\
QPEL16_OP(mc32, MMX)\
QPEL16_OP(mc33, MMX)
#if ARCH_X86_32 // ARCH_X86_64 implies SSE2+
QPEL16(mmxext)
#endif
#endif /* HAVE_YASM */
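/* Fill all 16 entries (mc00 .. mc33) of one put/avg pixels_tab row for the
 * given block SIZE; PREFIX selects the ff_-prefixed external versions. */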
#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX) \
do { \
c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \
c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \
c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \
c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \
c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \
c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \
c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \
c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \
c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \
c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \
c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \
c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \
c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \
c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \
c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \
c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU; \
} while (0)
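/* Set the 16x16 ([0]) and 8x8 ([1]) put/avg entries for quarter-pel
 * position (x, y); the table index is x + y * 4. */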
#define H264_QPEL_FUNCS(x, y, CPU) \
do { \
c->put_h264_qpel_pixels_tab[0][x + y * 4] = put_h264_qpel16_mc ## x ## y ## _ ## CPU; \
c->put_h264_qpel_pixels_tab[1][x + y * 4] = put_h264_qpel8_mc ## x ## y ## _ ## CPU; \
c->avg_h264_qpel_pixels_tab[0][x + y * 4] = avg_h264_qpel16_mc ## x ## y ## _ ## CPU; \
c->avg_h264_qpel_pixels_tab[1][x + y * 4] = avg_h264_qpel8_mc ## x ## y ## _ ## CPU; \
} while (0)
#define H264_QPEL_FUNCS_10(x, y, CPU) \
do { \
c->put_h264_qpel_pixels_tab[0][x + y * 4] = ff_put_h264_qpel16_mc ## x ## y ## _10_ ## CPU; \
c->put_h264_qpel_pixels_tab[1][x + y * 4] = ff_put_h264_qpel8_mc ## x ## y ## _10_ ## CPU; \
c->avg_h264_qpel_pixels_tab[0][x + y * 4] = ff_avg_h264_qpel16_mc ## x ## y ## _10_ ## CPU; \
c->avg_h264_qpel_pixels_tab[1][x + y * 4] = ff_avg_h264_qpel8_mc ## x ## y ## _10_ ## CPU; \
} while (0)
av_cold void ff_h264qpel_init_x86(H264QpelContext *c, int bit_depth)
{
#if HAVE_YASM
int high_bit_depth = bit_depth > 8;
int cpu_flags = av_get_cpu_flags();
if (EXTERNAL_MMXEXT(cpu_flags)) {
if (!high_bit_depth) {
SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmxext, );
SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmxext, );
SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmxext, );
SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmxext, );
SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmxext, );
SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmxext, );
} else if (bit_depth == 10) {
#if ARCH_X86_32
SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_mmxext, ff_);
SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_mmxext, ff_);
SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 10_mmxext, ff_);
SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 10_mmxext, ff_);
#endif
SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 10_mmxext, ff_);
SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 10_mmxext, ff_);
}
}
if (EXTERNAL_SSE2(cpu_flags)) {
if (!(cpu_flags & AV_CPU_FLAG_SSE2SLOW) && !high_bit_depth) {
// these functions are slower than mmx on AMD, but faster on Intel
H264_QPEL_FUNCS(0, 0, sse2);
}
if (!high_bit_depth) {
H264_QPEL_FUNCS(0, 1, sse2);
H264_QPEL_FUNCS(0, 2, sse2);
H264_QPEL_FUNCS(0, 3, sse2);
H264_QPEL_FUNCS(1, 1, sse2);
H264_QPEL_FUNCS(1, 2, sse2);
H264_QPEL_FUNCS(1, 3, sse2);
H264_QPEL_FUNCS(2, 1, sse2);
H264_QPEL_FUNCS(2, 2, sse2);
H264_QPEL_FUNCS(2, 3, sse2);
H264_QPEL_FUNCS(3, 1, sse2);
H264_QPEL_FUNCS(3, 2, sse2);
H264_QPEL_FUNCS(3, 3, sse2);
}
if (bit_depth == 10) {
SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_sse2, ff_);
SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 10_sse2, ff_);
SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_sse2, ff_);
SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 10_sse2, ff_);
H264_QPEL_FUNCS_10(1, 0, sse2_cache64);
H264_QPEL_FUNCS_10(2, 0, sse2_cache64);
H264_QPEL_FUNCS_10(3, 0, sse2_cache64);
}
}
if (EXTERNAL_SSSE3(cpu_flags)) {
if (!high_bit_depth) {
H264_QPEL_FUNCS(1, 0, ssse3);
H264_QPEL_FUNCS(1, 1, ssse3);
H264_QPEL_FUNCS(1, 2, ssse3);
H264_QPEL_FUNCS(1, 3, ssse3);
H264_QPEL_FUNCS(2, 0, ssse3);
H264_QPEL_FUNCS(2, 1, ssse3);
H264_QPEL_FUNCS(2, 2, ssse3);
H264_QPEL_FUNCS(2, 3, ssse3);
H264_QPEL_FUNCS(3, 0, ssse3);
H264_QPEL_FUNCS(3, 1, ssse3);
H264_QPEL_FUNCS(3, 2, ssse3);
H264_QPEL_FUNCS(3, 3, ssse3);
}
if (bit_depth == 10) {
H264_QPEL_FUNCS_10(1, 0, ssse3_cache64);
H264_QPEL_FUNCS_10(2, 0, ssse3_cache64);
H264_QPEL_FUNCS_10(3, 0, ssse3_cache64);
}
}
if (EXTERNAL_AVX(cpu_flags)) {
/* AVX implies 64-byte cache lines, so there is no need to avoid unaligned
 * memory accesses that cross the boundary between two cache lines.
 * TODO: Port X264_CPU_CACHELINE_32/64 detection from x264 so that SSE2
 * functions with such properties no longer have to be treated as AVX. */
if (bit_depth == 10) {
H264_QPEL_FUNCS_10(1, 0, sse2);
H264_QPEL_FUNCS_10(2, 0, sse2);
H264_QPEL_FUNCS_10(3, 0, sse2);
}
}
#endif
}

View File

@@ -0,0 +1,884 @@
;*****************************************************************************
;* MMX/SSE2/AVX-optimized 10-bit H.264 qpel code
;*****************************************************************************
;* Copyright (C) 2011 x264 project
;*
;* Authors: Daniel Kang <daniel.d.kang@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION_RODATA 32
cextern pw_1023
%define pw_pixel_max pw_1023
cextern pw_16
cextern pw_1
cextern pb_0
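; rounding/bias constants for the 10-bit filter paths (pixel max = 1023):
; the pad* values re-center unclipped 6-tap intermediates into signed 16-bit
; range, and depad/depad2/unpad add the bias back (plus rounding) before the
; final shifts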
pad10: times 8 dw 10*1023
pad20: times 8 dw 20*1023
pad30: times 8 dw 30*1023
depad: times 4 dd 32*20*1023 + 512
depad2: times 8 dw 20*1023 + 16*1022 + 16
unpad: times 8 dw 16*1022/32 ; needs to be mod 16
tap1: times 4 dw 1, -5
tap2: times 4 dw 20, 20
tap3: times 4 dw -5, 1
pd_0f: times 4 dd 0xffff
SECTION .text
%macro AVG_MOV 2
pavgw %2, %1
mova %1, %2
%endmacro
%macro ADDW 3
%if mmsize == 8
paddw %1, %2
%else
movu %3, %2
paddw %1, %3
%endif
%endmacro
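; FILT_H evaluates (a - 5*b + 20*c) / 16 using only adds and shifts, where
; %1..%3 hold the symmetric tap-pair sums a, b, c of the 6-tap filter
; (1,-5,20,20,-5,1) and %4 is the rounding constant added up front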
%macro FILT_H 4
paddw %1, %4
psubw %1, %2 ; a-b
psraw %1, 2 ; (a-b)/4
psubw %1, %2 ; (a-b)/4-b
paddw %1, %3 ; (a-b)/4-b+c
psraw %1, 2 ; ((a-b)/4-b+c)/4
paddw %1, %3 ; ((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16
%endmacro
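; PRELOAD_V loads the five rows src-2*stride .. src+2*stride into m0-m4 and
; leaves r1 pointing at src+3*stride for the first FILT_V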
%macro PRELOAD_V 0
lea r3, [r2*3]
sub r1, r3
movu m0, [r1+r2]
movu m1, [r1+r2*2]
add r1, r3
movu m2, [r1]
movu m3, [r1+r2]
movu m4, [r1+r2*2]
add r1, r3
%endmacro
%macro FILT_V 8
movu %6, [r1]
paddw %1, %6
mova %7, %2
paddw %7, %5
mova %8, %3
paddw %8, %4
FILT_H %1, %7, %8, [pw_16]
psraw %1, 1
CLIPW %1, [pb_0], [pw_pixel_max]
%endmacro
%macro MC 1
%define OP_MOV mova
INIT_MMX mmxext
%1 put, 4
INIT_XMM sse2
%1 put, 8
%define OP_MOV AVG_MOV
INIT_MMX mmxext
%1 avg, 4
INIT_XMM sse2
%1 avg, 8
%endmacro
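; MCAxA_OP builds the (2N)x(2N) function from four calls to the NxN stub, one
; per quadrant; the x86_64 path keeps dst/src in spare registers instead of
; reloading them from the stack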
%macro MCAxA_OP 7
%if ARCH_X86_32
cglobal %1_h264_qpel%4_%2_10, %5,%6,%7
call stub_%1_h264_qpel%3_%2_10 %+ SUFFIX
mov r0, r0m
mov r1, r1m
add r0, %3*2
add r1, %3*2
call stub_%1_h264_qpel%3_%2_10 %+ SUFFIX
mov r0, r0m
mov r1, r1m
lea r0, [r0+r2*%3]
lea r1, [r1+r2*%3]
call stub_%1_h264_qpel%3_%2_10 %+ SUFFIX
mov r0, r0m
mov r1, r1m
lea r0, [r0+r2*%3+%3*2]
lea r1, [r1+r2*%3+%3*2]
call stub_%1_h264_qpel%3_%2_10 %+ SUFFIX
RET
%else ; ARCH_X86_64
cglobal %1_h264_qpel%4_%2_10, %5,%6 + 2,%7
mov r%6, r0
%assign p1 %6+1
mov r %+ p1, r1
call stub_%1_h264_qpel%3_%2_10 %+ SUFFIX
lea r0, [r%6+%3*2]
lea r1, [r %+ p1+%3*2]
call stub_%1_h264_qpel%3_%2_10 %+ SUFFIX
lea r0, [r%6+r2*%3]
lea r1, [r %+ p1+r2*%3]
call stub_%1_h264_qpel%3_%2_10 %+ SUFFIX
lea r0, [r%6+r2*%3+%3*2]
lea r1, [r %+ p1+r2*%3+%3*2]
%if UNIX64 == 0 ; fall through to function
call stub_%1_h264_qpel%3_%2_10 %+ SUFFIX
RET
%endif
%endif
%endmacro
;put/avg, mc, 4/8, nargs, ngprs, nxmm
%macro cglobal_mc 6
%assign i %3*2
%if ARCH_X86_32 || cpuflag(sse2)
MCAxA_OP %1, %2, %3, i, %4,%5,%6
%endif
cglobal %1_h264_qpel%3_%2_10, %4,%5,%6
%if UNIX64 == 0 ; no prologue or epilogue for UNIX64
call stub_%1_h264_qpel%3_%2_10 %+ SUFFIX
RET
%endif
stub_%1_h264_qpel%3_%2_10 %+ SUFFIX:
%endmacro
;-----------------------------------------------------------------------------
; void ff_h264_qpel_mc00(uint8_t *dst, uint8_t *src, int stride)
;-----------------------------------------------------------------------------
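; COPY4 copies (or, with OP_MOV = AVG_MOV, averages) four consecutive rows;
; r3 must already hold 3*stride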
%macro COPY4 0
movu m0, [r1 ]
OP_MOV [r0 ], m0
movu m0, [r1+r2 ]
OP_MOV [r0+r2 ], m0
movu m0, [r1+r2*2]
OP_MOV [r0+r2*2], m0
movu m0, [r1+r3 ]
OP_MOV [r0+r3 ], m0
%endmacro
%macro MC00 1
INIT_MMX mmxext
cglobal_mc %1, mc00, 4, 3,4,0
lea r3, [r2*3]
COPY4
ret
INIT_XMM sse2
cglobal %1_h264_qpel8_mc00_10, 3,4
lea r3, [r2*3]
COPY4
lea r0, [r0+r2*4]
lea r1, [r1+r2*4]
COPY4
RET
cglobal %1_h264_qpel16_mc00_10, 3,4
mov r3d, 8
.loop:
movu m0, [r1 ]
movu m1, [r1 +16]
OP_MOV [r0 ], m0
OP_MOV [r0 +16], m1
movu m0, [r1+r2 ]
movu m1, [r1+r2+16]
OP_MOV [r0+r2 ], m0
OP_MOV [r0+r2+16], m1
lea r0, [r0+r2*2]
lea r1, [r1+r2*2]
dec r3d
jg .loop
REP_RET
%endmacro
%define OP_MOV mova
MC00 put
%define OP_MOV AVG_MOV
MC00 avg
;-----------------------------------------------------------------------------
; void ff_h264_qpel_mc20(uint8_t *dst, uint8_t *src, int stride)
;-----------------------------------------------------------------------------
%macro MC_CACHE 1
%define OP_MOV mova
INIT_MMX mmxext
%1 put, 4
INIT_XMM sse2, cache64
%1 put, 8
INIT_XMM ssse3, cache64
%1 put, 8
INIT_XMM sse2
%1 put, 8
%define OP_MOV AVG_MOV
INIT_MMX mmxext
%1 avg, 4
INIT_XMM sse2, cache64
%1 avg, 8
INIT_XMM ssse3, cache64
%1 avg, 8
INIT_XMM sse2
%1 avg, 8
%endmacro
%macro MC20 2
cglobal_mc %1, mc20, %2, 3,4,9
mov r3d, %2
mova m1, [pw_pixel_max]
%if num_mmregs > 8
mova m8, [pw_16]
%define p16 m8
%else
%define p16 [pw_16]
%endif
.nextrow:
%if %0 == 4
movu m2, [r1-4]
movu m3, [r1-2]
movu m4, [r1+0]
ADDW m2, [r1+6], m5
ADDW m3, [r1+4], m5
ADDW m4, [r1+2], m5
%else ; movu is slow on these processors
%if mmsize==16
movu m2, [r1-4]
movu m0, [r1+6]
mova m6, m0
psrldq m0, 6
paddw m6, m2
PALIGNR m3, m0, m2, 2, m5
PALIGNR m7, m0, m2, 8, m5
paddw m3, m7
PALIGNR m4, m0, m2, 4, m5
PALIGNR m7, m0, m2, 6, m5
paddw m4, m7
SWAP 2, 6
%else
movu m2, [r1-4]
movu m6, [r1+4]
PALIGNR m3, m6, m2, 2, m5
paddw m3, m6
PALIGNR m4, m6, m2, 4, m5
PALIGNR m7, m6, m2, 6, m5
paddw m4, m7
paddw m2, [r1+6]
%endif
%endif
FILT_H m2, m3, m4, p16
psraw m2, 1
pxor m0, m0
CLIPW m2, m0, m1
OP_MOV [r0], m2
add r0, r2
add r1, r2
dec r3d
jg .nextrow
rep ret
%endmacro
MC_CACHE MC20
;-----------------------------------------------------------------------------
; void ff_h264_qpel_mc30(uint8_t *dst, uint8_t *src, int stride)
;-----------------------------------------------------------------------------
%macro MC30 2
cglobal_mc %1, mc30, %2, 3,5,9
lea r4, [r1+2]
jmp stub_%1_h264_qpel%2_mc10_10 %+ SUFFIX %+ .body
%endmacro
MC_CACHE MC30
;-----------------------------------------------------------------------------
; void ff_h264_qpel_mc10(uint8_t *dst, uint8_t *src, int stride)
;-----------------------------------------------------------------------------
%macro MC10 2
cglobal_mc %1, mc10, %2, 3,5,9
mov r4, r1
.body:
mov r3d, %2
mova m1, [pw_pixel_max]
%if num_mmregs > 8
mova m8, [pw_16]
%define p16 m8
%else
%define p16 [pw_16]
%endif
.nextrow:
%if %0 == 4
movu m2, [r1-4]
movu m3, [r1-2]
movu m4, [r1+0]
ADDW m2, [r1+6], m5
ADDW m3, [r1+4], m5
ADDW m4, [r1+2], m5
%else ; movu is slow on these processors
%if mmsize==16
movu m2, [r1-4]
movu m0, [r1+6]
mova m6, m0
psrldq m0, 6
paddw m6, m2
PALIGNR m3, m0, m2, 2, m5
PALIGNR m7, m0, m2, 8, m5
paddw m3, m7
PALIGNR m4, m0, m2, 4, m5
PALIGNR m7, m0, m2, 6, m5
paddw m4, m7
SWAP 2, 6
%else
movu m2, [r1-4]
movu m6, [r1+4]
PALIGNR m3, m6, m2, 2, m5
paddw m3, m6
PALIGNR m4, m6, m2, 4, m5
PALIGNR m7, m6, m2, 6, m5
paddw m4, m7
paddw m2, [r1+6]
%endif
%endif
FILT_H m2, m3, m4, p16
psraw m2, 1
pxor m0, m0
CLIPW m2, m0, m1
movu m3, [r4]
pavgw m2, m3
OP_MOV [r0], m2
add r0, r2
add r1, r2
add r4, r2
dec r3d
jg .nextrow
rep ret
%endmacro
MC_CACHE MC10
;-----------------------------------------------------------------------------
; void ff_h264_qpel_mc02(uint8_t *dst, uint8_t *src, int stride)
;-----------------------------------------------------------------------------
%macro V_FILT 10
v_filt%9_%10_10:
add r4, r2
.no_addr4:
FILT_V m0, m1, m2, m3, m4, m5, m6, m7
add r1, r2
add r0, r2
ret
%endmacro
INIT_MMX mmxext
RESET_MM_PERMUTATION
%assign i 0
%rep 4
V_FILT m0, m1, m2, m3, m4, m5, m6, m7, 4, i
SWAP 0,1,2,3,4,5
%assign i i+1
%endrep
INIT_XMM sse2
RESET_MM_PERMUTATION
%assign i 0
%rep 6
V_FILT m0, m1, m2, m3, m4, m5, m6, m7, 8, i
SWAP 0,1,2,3,4,5
%assign i i+1
%endrep
%macro MC02 2
cglobal_mc %1, mc02, %2, 3,4,8
PRELOAD_V
sub r0, r2
%assign j 0
%rep %2
%assign i (j % 6)
call v_filt%2_ %+ i %+ _10.no_addr4
OP_MOV [r0], m0
SWAP 0,1,2,3,4,5
%assign j j+1
%endrep
ret
%endmacro
MC MC02
;-----------------------------------------------------------------------------
; void ff_h264_qpel_mc01(uint8_t *dst, uint8_t *src, int stride)
;-----------------------------------------------------------------------------
%macro MC01 2
cglobal_mc %1, mc01, %2, 3,5,8
mov r4, r1
.body:
PRELOAD_V
sub r4, r2
sub r0, r2
%assign j 0
%rep %2
%assign i (j % 6)
call v_filt%2_ %+ i %+ _10
movu m7, [r4]
pavgw m0, m7
OP_MOV [r0], m0
SWAP 0,1,2,3,4,5
%assign j j+1
%endrep
ret
%endmacro
MC MC01
;-----------------------------------------------------------------------------
; void ff_h264_qpel_mc03(uint8_t *dst, uint8_t *src, int stride)
;-----------------------------------------------------------------------------
%macro MC03 2
cglobal_mc %1, mc03, %2, 3,5,8
lea r4, [r1+r2]
jmp stub_%1_h264_qpel%2_mc01_10 %+ SUFFIX %+ .body
%endmacro
MC MC03
;-----------------------------------------------------------------------------
; void ff_h264_qpel_mc11(uint8_t *dst, uint8_t *src, int stride)
;-----------------------------------------------------------------------------
%macro H_FILT_AVG 2-3
h_filt%1_%2_10:
;FILT_H with fewer registers and averaged with the FILT_V result
;m6,m7 are tmp registers, m0 is the FILT_V result, the rest are to be used in the next iteration
;unfortunately I need three registers, so m5 will have to be re-read from memory
movu m5, [r4-4]
ADDW m5, [r4+6], m7
movu m6, [r4-2]
ADDW m6, [r4+4], m7
paddw m5, [pw_16]
psubw m5, m6 ; a-b
psraw m5, 2 ; (a-b)/4
psubw m5, m6 ; (a-b)/4-b
movu m6, [r4+0]
ADDW m6, [r4+2], m7
paddw m5, m6 ; (a-b)/4-b+c
psraw m5, 2 ; ((a-b)/4-b+c)/4
paddw m5, m6 ; ((a-b)/4-b+c)/4+c = (a-5*b+20*c)/16
psraw m5, 1
CLIPW m5, [pb_0], [pw_pixel_max]
;avg FILT_V, FILT_H
pavgw m0, m5
%if %0!=4
movu m5, [r1+r5]
%endif
ret
%endmacro
INIT_MMX mmxext
RESET_MM_PERMUTATION
%assign i 0
%rep 3
H_FILT_AVG 4, i
SWAP 0,1,2,3,4,5
%assign i i+1
%endrep
H_FILT_AVG 4, i, 0
INIT_XMM sse2
RESET_MM_PERMUTATION
%assign i 0
%rep 6
%if i==1
H_FILT_AVG 8, i, 0
%else
H_FILT_AVG 8, i
%endif
SWAP 0,1,2,3,4,5
%assign i i+1
%endrep
%macro MC11 2
; this REALLY needs x86_64
cglobal_mc %1, mc11, %2, 3,6,8
mov r4, r1
.body:
PRELOAD_V
sub r0, r2
sub r4, r2
mov r5, r2
neg r5
%assign j 0
%rep %2
%assign i (j % 6)
call v_filt%2_ %+ i %+ _10
call h_filt%2_ %+ i %+ _10
%if %2==8 && i==1
movu m5, [r1+r5]
%endif
OP_MOV [r0], m0
SWAP 0,1,2,3,4,5
%assign j j+1
%endrep
ret
%endmacro
MC MC11
;-----------------------------------------------------------------------------
; void ff_h264_qpel_mc31(uint8_t *dst, uint8_t *src, int stride)
;-----------------------------------------------------------------------------
%macro MC31 2
cglobal_mc %1, mc31, %2, 3,6,8
mov r4, r1
add r1, 2
jmp stub_%1_h264_qpel%2_mc11_10 %+ SUFFIX %+ .body
%endmacro
MC MC31
;-----------------------------------------------------------------------------
; void ff_h264_qpel_mc13(uint8_t *dst, uint8_t *src, int stride)
;-----------------------------------------------------------------------------
%macro MC13 2
cglobal_mc %1, mc13, %2, 3,7,12
lea r4, [r1+r2]
jmp stub_%1_h264_qpel%2_mc11_10 %+ SUFFIX %+ .body
%endmacro
MC MC13
;-----------------------------------------------------------------------------
; void ff_h264_qpel_mc33(uint8_t *dst, uint8_t *src, int stride)
;-----------------------------------------------------------------------------
%macro MC33 2
cglobal_mc %1, mc33, %2, 3,6,8
lea r4, [r1+r2]
add r1, 2
jmp stub_%1_h264_qpel%2_mc11_10 %+ SUFFIX %+ .body
%endmacro
MC MC33
;-----------------------------------------------------------------------------
; void ff_h264_qpel_mc22(uint8_t *dst, uint8_t *src, int stride)
;-----------------------------------------------------------------------------
%macro FILT_H2 3
psubw %1, %2 ; a-b
psubw %2, %3 ; b-c
psllw %2, 2
psubw %1, %2 ; a-5*b+4*c
psllw %3, 4
paddw %1, %3 ; a-5*b+20*c
%endmacro
%macro FILT_VNRD 8
movu %6, [r1]
paddw %1, %6
mova %7, %2
paddw %7, %5
mova %8, %3
paddw %8, %4
FILT_H2 %1, %7, %8
%endmacro
%macro HV 1
%if mmsize==16
%define PAD 12
%define COUNT 2
%else
%define PAD 4
%define COUNT 3
%endif
put_hv%1_10:
neg r2 ; This actually saves instructions
lea r1, [r1+r2*2-mmsize+PAD]
lea r4, [rsp+PAD+gprsize]
mov r3d, COUNT
.v_loop:
movu m0, [r1]
sub r1, r2
movu m1, [r1]
sub r1, r2
movu m2, [r1]
sub r1, r2
movu m3, [r1]
sub r1, r2
movu m4, [r1]
sub r1, r2
%assign i 0
%rep %1-1
FILT_VNRD m0, m1, m2, m3, m4, m5, m6, m7
psubw m0, [pad20]
movu [r4+i*mmsize*3], m0
sub r1, r2
SWAP 0,1,2,3,4,5
%assign i i+1
%endrep
FILT_VNRD m0, m1, m2, m3, m4, m5, m6, m7
psubw m0, [pad20]
movu [r4+i*mmsize*3], m0
add r4, mmsize
lea r1, [r1+r2*8+mmsize]
%if %1==8
lea r1, [r1+r2*4]
%endif
dec r3d
jg .v_loop
neg r2
ret
%endmacro
INIT_MMX mmxext
HV 4
INIT_XMM sse2
HV 8
%macro H_LOOP 1
%if num_mmregs > 8
%define s1 m8
%define s2 m9
%define s3 m10
%define d1 m11
%else
%define s1 [tap1]
%define s2 [tap2]
%define s3 [tap3]
%define d1 [depad]
%endif
h%1_loop_op:
movu m1, [r1+mmsize-4]
movu m2, [r1+mmsize-2]
mova m3, [r1+mmsize+0]
movu m4, [r1+mmsize+2]
movu m5, [r1+mmsize+4]
movu m6, [r1+mmsize+6]
%if num_mmregs > 8
pmaddwd m1, s1
pmaddwd m2, s1
pmaddwd m3, s2
pmaddwd m4, s2
pmaddwd m5, s3
pmaddwd m6, s3
paddd m1, d1
paddd m2, d1
%else
mova m0, s1
pmaddwd m1, m0
pmaddwd m2, m0
mova m0, s2
pmaddwd m3, m0
pmaddwd m4, m0
mova m0, s3
pmaddwd m5, m0
pmaddwd m6, m0
mova m0, d1
paddd m1, m0
paddd m2, m0
%endif
paddd m3, m5
paddd m4, m6
paddd m1, m3
paddd m2, m4
psrad m1, 10
psrad m2, 10
pslld m2, 16
pand m1, [pd_0f]
por m1, m2
%if num_mmregs <= 8
pxor m0, m0
%endif
CLIPW m1, m0, m7
add r1, mmsize*3
ret
%endmacro
INIT_MMX mmxext
H_LOOP 4
INIT_XMM sse2
H_LOOP 8
%macro MC22 2
cglobal_mc %1, mc22, %2, 3,7,12
%define PAD mmsize*8*4*2 ; SIZE*16*4*sizeof(pixel)
mov r6, rsp ; backup stack pointer
and rsp, ~(mmsize-1) ; align stack
sub rsp, PAD
call put_hv%2_10
mov r3d, %2
mova m7, [pw_pixel_max]
%if num_mmregs > 8
pxor m0, m0
mova m8, [tap1]
mova m9, [tap2]
mova m10, [tap3]
mova m11, [depad]
%endif
mov r1, rsp
.h_loop:
call h%2_loop_op
OP_MOV [r0], m1
add r0, r2
dec r3d
jg .h_loop
mov rsp, r6 ; restore stack pointer
ret
%endmacro
MC MC22
;-----------------------------------------------------------------------------
; void ff_h264_qpel_mc12(uint8_t *dst, uint8_t *src, int stride)
;-----------------------------------------------------------------------------
%macro MC12 2
cglobal_mc %1, mc12, %2, 3,7,12
%define PAD mmsize*8*4*2 ; SIZE*16*4*sizeof(pixel)
mov r6, rsp ; backup stack pointer
and rsp, ~(mmsize-1) ; align stack
sub rsp, PAD
call put_hv%2_10
xor r4d, r4d
.body:
mov r3d, %2
pxor m0, m0
mova m7, [pw_pixel_max]
%if num_mmregs > 8
mova m8, [tap1]
mova m9, [tap2]
mova m10, [tap3]
mova m11, [depad]
%endif
mov r1, rsp
.h_loop:
call h%2_loop_op
movu m3, [r1+r4-2*mmsize] ; movu needed for mc32, etc
paddw m3, [depad2]
psrlw m3, 5
psubw m3, [unpad]
CLIPW m3, m0, m7
pavgw m1, m3
OP_MOV [r0], m1
add r0, r2
dec r3d
jg .h_loop
mov rsp, r6 ; restore stack pointer
ret
%endmacro
MC MC12
;-----------------------------------------------------------------------------
; void ff_h264_qpel_mc32(uint8_t *dst, uint8_t *src, int stride)
;-----------------------------------------------------------------------------
%macro MC32 2
cglobal_mc %1, mc32, %2, 3,7,12
%define PAD mmsize*8*3*2 ; SIZE*16*3*sizeof(pixel)
mov r6, rsp ; backup stack pointer
and rsp, ~(mmsize-1) ; align stack
sub rsp, PAD
call put_hv%2_10
mov r4d, 2 ; sizeof(pixel)
jmp stub_%1_h264_qpel%2_mc12_10 %+ SUFFIX %+ .body
%endmacro
MC MC32
;-----------------------------------------------------------------------------
; void ff_h264_qpel_mc21(uint8_t *dst, uint8_t *src, int stride)
;-----------------------------------------------------------------------------
%macro H_NRD 1
put_h%1_10:
add rsp, gprsize
mov r3d, %1
xor r4d, r4d
mova m6, [pad20]
.nextrow:
movu m2, [r5-4]
movu m3, [r5-2]
movu m4, [r5+0]
ADDW m2, [r5+6], m5
ADDW m3, [r5+4], m5
ADDW m4, [r5+2], m5
FILT_H2 m2, m3, m4
psubw m2, m6
mova [rsp+r4], m2
add r4d, mmsize*3
add r5, r2
dec r3d
jg .nextrow
sub rsp, gprsize
ret
%endmacro
INIT_MMX mmxext
H_NRD 4
INIT_XMM sse2
H_NRD 8
%macro MC21 2
cglobal_mc %1, mc21, %2, 3,7,12
mov r5, r1
.body:
%define PAD mmsize*8*3*2 ; SIZE*16*3*sizeof(pixel)
mov r6, rsp ; backup stack pointer
and rsp, ~(mmsize-1) ; align stack
sub rsp, PAD
call put_h%2_10
sub rsp, PAD
call put_hv%2_10
mov r4d, PAD-mmsize ; H buffer
jmp stub_%1_h264_qpel%2_mc12_10 %+ SUFFIX %+ .body
%endmacro
MC MC21
;-----------------------------------------------------------------------------
; void ff_h264_qpel_mc23(uint8_t *dst, uint8_t *src, int stride)
;-----------------------------------------------------------------------------
%macro MC23 2
cglobal_mc %1, mc23, %2, 3,7,12
lea r5, [r1+r2]
jmp stub_%1_h264_qpel%2_mc21_10 %+ SUFFIX %+ .body
%endmacro
MC MC23

View File

@@ -0,0 +1,862 @@
;*****************************************************************************
;* MMX/SSE2/SSSE3-optimized H.264 QPEL code
;*****************************************************************************
;* Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
;* Copyright (C) 2012 Daniel Kang
;*
;* Authors: Daniel Kang <daniel.d.kang@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION_RODATA 32
cextern pw_16
cextern pw_5
cextern pb_0
SECTION .text
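; store primitives: op_put*/op_avg* either store the packed result or average
; it with the existing destination; the *h variants operate on half a register
; via movh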
%macro op_avgh 3
movh %3, %2
pavgb %1, %3
movh %2, %1
%endmacro
%macro op_avg 2-3
pavgb %1, %2
mova %2, %1
%endmacro
%macro op_puth 2-3
movh %2, %1
%endmacro
%macro op_put 2-3
mova %2, %1
%endmacro
%macro QPEL4_H_LOWPASS_OP 1
cglobal %1_h264_qpel4_h_lowpass, 4,5 ; dst, src, dstStride, srcStride
movsxdifnidn r2, r2d
movsxdifnidn r3, r3d
pxor m7, m7
mova m4, [pw_5]
mova m5, [pw_16]
mov r4d, 4
.loop:
movh m1, [r1-1]
movh m2, [r1+0]
movh m3, [r1+1]
movh m0, [r1+2]
punpcklbw m1, m7
punpcklbw m2, m7
punpcklbw m3, m7
punpcklbw m0, m7
paddw m1, m0
paddw m2, m3
movh m0, [r1-2]
movh m3, [r1+3]
punpcklbw m0, m7
punpcklbw m3, m7
paddw m0, m3
psllw m2, 2
psubw m2, m1
pmullw m2, m4
paddw m0, m5
paddw m0, m2
psraw m0, 5
packuswb m0, m0
op_%1h m0, [r0], m6
add r0, r2
add r1, r3
dec r4d
jg .loop
REP_RET
%endmacro
INIT_MMX mmxext
QPEL4_H_LOWPASS_OP put
QPEL4_H_LOWPASS_OP avg
%macro QPEL8_H_LOWPASS_OP 1
cglobal %1_h264_qpel8_h_lowpass, 4,5 ; dst, src, dstStride, srcStride
movsxdifnidn r2, r2d
movsxdifnidn r3, r3d
mov r4d, 8
pxor m7, m7
mova m6, [pw_5]
.loop:
mova m0, [r1]
mova m2, [r1+1]
mova m1, m0
mova m3, m2
punpcklbw m0, m7
punpckhbw m1, m7
punpcklbw m2, m7
punpckhbw m3, m7
paddw m0, m2
paddw m1, m3
psllw m0, 2
psllw m1, 2
mova m2, [r1-1]
mova m4, [r1+2]
mova m3, m2
mova m5, m4
punpcklbw m2, m7
punpckhbw m3, m7
punpcklbw m4, m7
punpckhbw m5, m7
paddw m2, m4
paddw m5, m3
psubw m0, m2
psubw m1, m5
pmullw m0, m6
pmullw m1, m6
movd m2, [r1-2]
movd m5, [r1+7]
punpcklbw m2, m7
punpcklbw m5, m7
paddw m2, m3
paddw m4, m5
mova m5, [pw_16]
paddw m2, m5
paddw m4, m5
paddw m0, m2
paddw m1, m4
psraw m0, 5
psraw m1, 5
packuswb m0, m1
op_%1 m0, [r0], m4
add r0, r2
add r1, r3
dec r4d
jg .loop
REP_RET
%endmacro
INIT_MMX mmxext
QPEL8_H_LOWPASS_OP put
QPEL8_H_LOWPASS_OP avg
%macro QPEL8_H_LOWPASS_OP_XMM 1
cglobal %1_h264_qpel8_h_lowpass, 4,5,8 ; dst, src, dstStride, srcStride
movsxdifnidn r2, r2d
movsxdifnidn r3, r3d
mov r4d, 8
pxor m7, m7
mova m6, [pw_5]
.loop:
movu m1, [r1-2]
mova m0, m1
punpckhbw m1, m7
punpcklbw m0, m7
mova m2, m1
mova m3, m1
mova m4, m1
mova m5, m1
palignr m4, m0, 2
palignr m3, m0, 4
palignr m2, m0, 6
palignr m1, m0, 8
palignr m5, m0, 10
paddw m0, m5
paddw m2, m3
paddw m1, m4
psllw m2, 2
psubw m2, m1
paddw m0, [pw_16]
pmullw m2, m6
paddw m2, m0
psraw m2, 5
packuswb m2, m2
op_%1h m2, [r0], m4
add r1, r3
add r0, r2
dec r4d
jne .loop
REP_RET
%endmacro
INIT_XMM ssse3
QPEL8_H_LOWPASS_OP_XMM put
QPEL8_H_LOWPASS_OP_XMM avg
%macro QPEL4_H_LOWPASS_L2_OP 1
cglobal %1_h264_qpel4_h_lowpass_l2, 5,6 ; dst, src, src2, dstStride, srcStride
movsxdifnidn r3, r3d
movsxdifnidn r4, r4d
pxor m7, m7
mova m4, [pw_5]
mova m5, [pw_16]
mov r5d, 4
.loop:
movh m1, [r1-1]
movh m2, [r1+0]
movh m3, [r1+1]
movh m0, [r1+2]
punpcklbw m1, m7
punpcklbw m2, m7
punpcklbw m3, m7
punpcklbw m0, m7
paddw m1, m0
paddw m2, m3
movh m0, [r1-2]
movh m3, [r1+3]
punpcklbw m0, m7
punpcklbw m3, m7
paddw m0, m3
psllw m2, 2
psubw m2, m1
pmullw m2, m4
paddw m0, m5
paddw m0, m2
movh m3, [r2]
psraw m0, 5
packuswb m0, m0
pavgb m0, m3
op_%1h m0, [r0], m6
add r0, r3
add r1, r3
add r2, r4
dec r5d
jg .loop
REP_RET
%endmacro
INIT_MMX mmxext
QPEL4_H_LOWPASS_L2_OP put
QPEL4_H_LOWPASS_L2_OP avg
%macro QPEL8_H_LOWPASS_L2_OP 1
cglobal %1_h264_qpel8_h_lowpass_l2, 5,6 ; dst, src, src2, dstStride, srcStride
movsxdifnidn r3, r3d
movsxdifnidn r4, r4d
mov r5d, 8
pxor m7, m7
mova m6, [pw_5]
.loop:
mova m0, [r1]
mova m2, [r1+1]
mova m1, m0
mova m3, m2
punpcklbw m0, m7
punpckhbw m1, m7
punpcklbw m2, m7
punpckhbw m3, m7
paddw m0, m2
paddw m1, m3
psllw m0, 2
psllw m1, 2
mova m2, [r1-1]
mova m4, [r1+2]
mova m3, m2
mova m5, m4
punpcklbw m2, m7
punpckhbw m3, m7
punpcklbw m4, m7
punpckhbw m5, m7
paddw m2, m4
paddw m5, m3
psubw m0, m2
psubw m1, m5
pmullw m0, m6
pmullw m1, m6
movd m2, [r1-2]
movd m5, [r1+7]
punpcklbw m2, m7
punpcklbw m5, m7
paddw m2, m3
paddw m4, m5
mova m5, [pw_16]
paddw m2, m5
paddw m4, m5
paddw m0, m2
paddw m1, m4
psraw m0, 5
psraw m1, 5
mova m4, [r2]
packuswb m0, m1
pavgb m0, m4
op_%1 m0, [r0], m4
add r0, r3
add r1, r3
add r2, r4
dec r5d
jg .loop
REP_RET
%endmacro
INIT_MMX mmxext
QPEL8_H_LOWPASS_L2_OP put
QPEL8_H_LOWPASS_L2_OP avg
%macro QPEL8_H_LOWPASS_L2_OP_XMM 1
cglobal %1_h264_qpel8_h_lowpass_l2, 5,6,8 ; dst, src, src2, dstStride, src2Stride
movsxdifnidn r3, r3d
movsxdifnidn r4, r4d
mov r5d, 8
pxor m7, m7
mova m6, [pw_5]
.loop:
lddqu m1, [r1-2]
mova m0, m1
punpckhbw m1, m7
punpcklbw m0, m7
mova m2, m1
mova m3, m1
mova m4, m1
mova m5, m1
palignr m4, m0, 2
palignr m3, m0, 4
palignr m2, m0, 6
palignr m1, m0, 8
palignr m5, m0, 10
paddw m0, m5
paddw m2, m3
paddw m1, m4
psllw m2, 2
movh m3, [r2]
psubw m2, m1
paddw m0, [pw_16]
pmullw m2, m6
paddw m2, m0
psraw m2, 5
packuswb m2, m2
pavgb m2, m3
op_%1h m2, [r0], m4
add r1, r3
add r0, r3
add r2, r4
dec r5d
jg .loop
REP_RET
%endmacro
INIT_XMM ssse3
QPEL8_H_LOWPASS_L2_OP_XMM put
QPEL8_H_LOWPASS_L2_OP_XMM avg
; All functions that call this are required to have function arguments of
; dst, src, dstStride, srcStride
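; FILT_V produces one vertically filtered output row from the five rows held
; in m0-m4 plus a newly loaded row, stores it through op_%1h, and rotates the
; registers for the next row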
%macro FILT_V 1
mova m6, m2
movh m5, [r1]
paddw m6, m3
psllw m6, 2
psubw m6, m1
psubw m6, m4
punpcklbw m5, m7
pmullw m6, [pw_5]
paddw m0, [pw_16]
add r1, r3
paddw m0, m5
paddw m6, m0
psraw m6, 5
packuswb m6, m6
op_%1h m6, [r0], m0 ; 1
add r0, r2
SWAP 0, 1, 2, 3, 4, 5
%endmacro
%macro QPEL4_V_LOWPASS_OP 1
cglobal %1_h264_qpel4_v_lowpass, 4,4 ; dst, src, dstStride, srcStride
movsxdifnidn r2, r2d
movsxdifnidn r3, r3d
sub r1, r3
sub r1, r3
pxor m7, m7
movh m0, [r1]
movh m1, [r1+r3]
lea r1, [r1+2*r3]
movh m2, [r1]
movh m3, [r1+r3]
lea r1, [r1+2*r3]
movh m4, [r1]
add r1, r3
punpcklbw m0, m7
punpcklbw m1, m7
punpcklbw m2, m7
punpcklbw m3, m7
punpcklbw m4, m7
FILT_V %1
FILT_V %1
FILT_V %1
FILT_V %1
RET
%endmacro
INIT_MMX mmxext
QPEL4_V_LOWPASS_OP put
QPEL4_V_LOWPASS_OP avg
%macro QPEL8OR16_V_LOWPASS_OP 1
%if cpuflag(sse2)
cglobal %1_h264_qpel8or16_v_lowpass, 5,5,8 ; dst, src, dstStride, srcStride, h
movsxdifnidn r2, r2d
movsxdifnidn r3, r3d
sub r1, r3
sub r1, r3
%else
cglobal %1_h264_qpel8or16_v_lowpass_op, 5,5,8 ; dst, src, dstStride, srcStride, h
movsxdifnidn r2, r2d
movsxdifnidn r3, r3d
%endif
pxor m7, m7
movh m0, [r1]
movh m1, [r1+r3]
lea r1, [r1+2*r3]
movh m2, [r1]
movh m3, [r1+r3]
lea r1, [r1+2*r3]
movh m4, [r1]
add r1, r3
punpcklbw m0, m7
punpcklbw m1, m7
punpcklbw m2, m7
punpcklbw m3, m7
punpcklbw m4, m7
FILT_V %1
FILT_V %1
FILT_V %1
FILT_V %1
FILT_V %1
FILT_V %1
FILT_V %1
FILT_V %1
cmp r4d, 16
jne .end
FILT_V %1
FILT_V %1
FILT_V %1
FILT_V %1
FILT_V %1
FILT_V %1
FILT_V %1
FILT_V %1
.end:
REP_RET
%endmacro
INIT_MMX mmxext
QPEL8OR16_V_LOWPASS_OP put
QPEL8OR16_V_LOWPASS_OP avg
INIT_XMM sse2
QPEL8OR16_V_LOWPASS_OP put
QPEL8OR16_V_LOWPASS_OP avg
; All functions that use this are required to have args:
; src, tmp, srcSize
%macro FILT_HV 1 ; offset
mova m6, m2
movh m5, [r0]
paddw m6, m3
psllw m6, 2
paddw m0, [pw_16]
psubw m6, m1
psubw m6, m4
punpcklbw m5, m7
pmullw m6, [pw_5]
paddw m0, m5
add r0, r2
paddw m6, m0
mova [r1+%1], m6
SWAP 0, 1, 2, 3, 4, 5
%endmacro
%macro QPEL4_HV1_LOWPASS_OP 1
cglobal %1_h264_qpel4_hv_lowpass_v, 3,3 ; src, tmp, srcStride
movsxdifnidn r2, r2d
pxor m7, m7
movh m0, [r0]
movh m1, [r0+r2]
lea r0, [r0+2*r2]
movh m2, [r0]
movh m3, [r0+r2]
lea r0, [r0+2*r2]
movh m4, [r0]
add r0, r2
punpcklbw m0, m7
punpcklbw m1, m7
punpcklbw m2, m7
punpcklbw m3, m7
punpcklbw m4, m7
FILT_HV 0*24
FILT_HV 1*24
FILT_HV 2*24
FILT_HV 3*24
RET
cglobal %1_h264_qpel4_hv_lowpass_h, 3,4 ; tmp, dst, dstStride
movsxdifnidn r2, r2d
mov r3d, 4
.loop:
mova m0, [r0]
paddw m0, [r0+10]
mova m1, [r0+2]
paddw m1, [r0+8]
mova m2, [r0+4]
paddw m2, [r0+6]
psubw m0, m1
psraw m0, 2
psubw m0, m1
paddsw m0, m2
psraw m0, 2
paddw m0, m2
psraw m0, 6
packuswb m0, m0
op_%1h m0, [r1], m7
add r0, 24
add r1, r2
dec r3d
jnz .loop
REP_RET
%endmacro
INIT_MMX mmxext
QPEL4_HV1_LOWPASS_OP put
QPEL4_HV1_LOWPASS_OP avg
%macro QPEL8OR16_HV1_LOWPASS_OP 1
cglobal %1_h264_qpel8or16_hv1_lowpass_op, 4,4,8 ; src, tmp, srcStride, size
movsxdifnidn r2, r2d
pxor m7, m7
movh m0, [r0]
movh m1, [r0+r2]
lea r0, [r0+2*r2]
movh m2, [r0]
movh m3, [r0+r2]
lea r0, [r0+2*r2]
movh m4, [r0]
add r0, r2
punpcklbw m0, m7
punpcklbw m1, m7
punpcklbw m2, m7
punpcklbw m3, m7
punpcklbw m4, m7
FILT_HV 0*48
FILT_HV 1*48
FILT_HV 2*48
FILT_HV 3*48
FILT_HV 4*48
FILT_HV 5*48
FILT_HV 6*48
FILT_HV 7*48
cmp r3d, 16
jne .end
FILT_HV 8*48
FILT_HV 9*48
FILT_HV 10*48
FILT_HV 11*48
FILT_HV 12*48
FILT_HV 13*48
FILT_HV 14*48
FILT_HV 15*48
.end:
REP_RET
%endmacro
INIT_MMX mmxext
QPEL8OR16_HV1_LOWPASS_OP put
QPEL8OR16_HV1_LOWPASS_OP avg
INIT_XMM sse2
QPEL8OR16_HV1_LOWPASS_OP put
%macro QPEL8OR16_HV2_LOWPASS_OP 1
; the unused argument is there to match the ssse3 and mmxext signatures
cglobal %1_h264_qpel8or16_hv2_lowpass_op, 5,5 ; dst, tmp, dstStride, unused, h
movsxdifnidn r2, r2d
.loop:
mova m0, [r1]
mova m3, [r1+8]
mova m1, [r1+2]
mova m4, [r1+10]
paddw m0, m4
paddw m1, m3
paddw m3, [r1+18]
paddw m4, [r1+16]
mova m2, [r1+4]
mova m5, [r1+12]
paddw m2, [r1+6]
paddw m5, [r1+14]
psubw m0, m1
psubw m3, m4
psraw m0, 2
psraw m3, 2
psubw m0, m1
psubw m3, m4
paddsw m0, m2
paddsw m3, m5
psraw m0, 2
psraw m3, 2
paddw m0, m2
paddw m3, m5
psraw m0, 6
psraw m3, 6
packuswb m0, m3
op_%1 m0, [r0], m7
add r1, 48
add r0, r2
dec r4d
jne .loop
REP_RET
%endmacro
INIT_MMX mmxext
QPEL8OR16_HV2_LOWPASS_OP put
QPEL8OR16_HV2_LOWPASS_OP avg
%macro QPEL8OR16_HV2_LOWPASS_OP_XMM 1
cglobal %1_h264_qpel8or16_hv2_lowpass, 5,5,8 ; dst, tmp, dstStride, tmpStride, size
movsxdifnidn r2, r2d
movsxdifnidn r3, r3d
cmp r4d, 16
je .op16
.loop8:
mova m1, [r1+16]
mova m0, [r1]
mova m2, m1
mova m3, m1
mova m4, m1
mova m5, m1
palignr m5, m0, 10
palignr m4, m0, 8
palignr m3, m0, 6
palignr m2, m0, 4
palignr m1, m0, 2
paddw m0, m5
paddw m1, m4
paddw m2, m3
psubw m0, m1
psraw m0, 2
psubw m0, m1
paddw m0, m2
psraw m0, 2
paddw m0, m2
psraw m0, 6
packuswb m0, m0
op_%1h m0, [r0], m7
add r1, 48
add r0, r2
dec r4d
jne .loop8
jmp .done
.op16:
mova m4, [r1+32]
mova m5, [r1+16]
mova m7, [r1]
mova m3, m4
mova m2, m4
mova m1, m4
mova m0, m4
palignr m0, m5, 10
palignr m1, m5, 8
palignr m2, m5, 6
palignr m3, m5, 4
palignr m4, m5, 2
paddw m0, m5
paddw m1, m4
paddw m2, m3
mova m6, m5
mova m4, m5
mova m3, m5
palignr m4, m7, 8
palignr m6, m7, 2
palignr m3, m7, 10
paddw m4, m6
mova m6, m5
palignr m5, m7, 6
palignr m6, m7, 4
paddw m3, m7
paddw m5, m6
psubw m0, m1
psubw m3, m4
psraw m0, 2
psraw m3, 2
psubw m0, m1
psubw m3, m4
paddw m0, m2
paddw m3, m5
psraw m0, 2
psraw m3, 2
paddw m0, m2
paddw m3, m5
psraw m0, 6
psraw m3, 6
packuswb m3, m0
op_%1 m3, [r0], m7
add r1, 48
add r0, r2
dec r4d
jne .op16
.done:
REP_RET
%endmacro
INIT_XMM ssse3
QPEL8OR16_HV2_LOWPASS_OP_XMM put
QPEL8OR16_HV2_LOWPASS_OP_XMM avg
%macro PIXELS4_L2_SHIFT5 1
cglobal %1_pixels4_l2_shift5,6,6 ; dst, src16, src8, dstStride, src8Stride, h
movsxdifnidn r3, r3d
movsxdifnidn r4, r4d
mova m0, [r1]
mova m1, [r1+24]
psraw m0, 5
psraw m1, 5
packuswb m0, m0
packuswb m1, m1
pavgb m0, [r2]
pavgb m1, [r2+r4]
op_%1h m0, [r0], m4
op_%1h m1, [r0+r3], m5
lea r2, [r2+r4*2]
lea r0, [r0+r3*2]
mova m0, [r1+48]
mova m1, [r1+72]
psraw m0, 5
psraw m1, 5
packuswb m0, m0
packuswb m1, m1
pavgb m0, [r2]
pavgb m1, [r2+r4]
op_%1h m0, [r0], m4
op_%1h m1, [r0+r3], m5
RET
%endmacro
INIT_MMX mmxext
PIXELS4_L2_SHIFT5 put
PIXELS4_L2_SHIFT5 avg
%macro PIXELS8_L2_SHIFT5 1
cglobal %1_pixels8_l2_shift5, 6, 6 ; dst, src16, src8, dstStride, src8Stride, h
movsxdifnidn r3, r3d
movsxdifnidn r4, r4d
.loop:
mova m0, [r1]
mova m1, [r1+8]
mova m2, [r1+48]
mova m3, [r1+48+8]
psraw m0, 5
psraw m1, 5
psraw m2, 5
psraw m3, 5
packuswb m0, m1
packuswb m2, m3
pavgb m0, [r2]
pavgb m2, [r2+r4]
op_%1 m0, [r0], m4
op_%1 m2, [r0+r3], m5
lea r2, [r2+2*r4]
add r1, 48*2
lea r0, [r0+2*r3]
sub r5d, 2
jne .loop
REP_RET
%endmacro
INIT_MMX mmxext
PIXELS8_L2_SHIFT5 put
PIXELS8_L2_SHIFT5 avg
%if ARCH_X86_64
%macro QPEL16_H_LOWPASS_L2_OP 1
cglobal %1_h264_qpel16_h_lowpass_l2, 5, 6, 16 ; dst, src, src2, dstStride, src2Stride
movsxdifnidn r3, r3d
movsxdifnidn r4, r4d
mov r5d, 16
pxor m15, m15
mova m14, [pw_5]
mova m13, [pw_16]
.loop:
lddqu m1, [r1+6]
lddqu m7, [r1-2]
mova m0, m1
punpckhbw m1, m15
punpcklbw m0, m15
punpcklbw m7, m15
mova m2, m1
mova m6, m0
mova m3, m1
mova m8, m0
mova m4, m1
mova m9, m0
mova m12, m0
mova m11, m1
palignr m11, m0, 10
palignr m12, m7, 10
palignr m4, m0, 2
palignr m9, m7, 2
palignr m3, m0, 4
palignr m8, m7, 4
palignr m2, m0, 6
palignr m6, m7, 6
paddw m11, m0
palignr m1, m0, 8
palignr m0, m7, 8
paddw m7, m12
paddw m2, m3
paddw m6, m8
paddw m1, m4
paddw m0, m9
psllw m2, 2
psllw m6, 2
psubw m2, m1
psubw m6, m0
paddw m11, m13
paddw m7, m13
pmullw m2, m14
pmullw m6, m14
lddqu m3, [r2]
paddw m2, m11
paddw m6, m7
psraw m2, 5
psraw m6, 5
packuswb m6, m2
pavgb m6, m3
op_%1 m6, [r0], m11
add r1, r3
add r0, r3
add r2, r4
dec r5d
jg .loop
REP_RET
%endmacro
INIT_XMM ssse3
QPEL16_H_LOWPASS_L2_OP put
QPEL16_H_LOWPASS_L2_OP avg
%endif

View File

@@ -0,0 +1,320 @@
;*****************************************************************************
;* SSE2-optimized weighted prediction code
;*****************************************************************************
;* Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
;* Copyright (C) 2010 Eli Friedman <eli.friedman@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION .text
;-----------------------------------------------------------------------------
; biweight pred:
;
; void ff_h264_biweight_16_sse2(uint8_t *dst, uint8_t *src, int stride,
; int height, int log2_denom, int weightd,
; int weights, int offset);
; and
; void ff_h264_weight_16_sse2(uint8_t *dst, int stride, int height,
; int log2_denom, int weight, int offset);
;-----------------------------------------------------------------------------
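; WEIGHT_SETUP broadcasts the weight, log2_denom and a combined offset plus
; rounding bias (((offset*2+1) << log2_denom) >> 1), so each WEIGHT_OP row
; reduces to pmullw + paddsw + psraw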
%macro WEIGHT_SETUP 0
add r5, r5
inc r5
movd m3, r4d
movd m5, r5d
movd m6, r3d
pslld m5, m6
psrld m5, 1
%if mmsize == 16
pshuflw m3, m3, 0
pshuflw m5, m5, 0
punpcklqdq m3, m3
punpcklqdq m5, m5
%else
pshufw m3, m3, 0
pshufw m5, m5, 0
%endif
pxor m7, m7
%endmacro
%macro WEIGHT_OP 2
movh m0, [r0+%1]
movh m1, [r0+%2]
punpcklbw m0, m7
punpcklbw m1, m7
pmullw m0, m3
pmullw m1, m3
paddsw m0, m5
paddsw m1, m5
psraw m0, m6
psraw m1, m6
packuswb m0, m1
%endmacro
INIT_MMX mmxext
cglobal h264_weight_16, 6, 6, 0
WEIGHT_SETUP
.nextrow:
WEIGHT_OP 0, 4
mova [r0 ], m0
WEIGHT_OP 8, 12
mova [r0+8], m0
add r0, r1
dec r2d
jnz .nextrow
REP_RET
%macro WEIGHT_FUNC_MM 2
cglobal h264_weight_%1, 6, 6, %2
WEIGHT_SETUP
.nextrow:
WEIGHT_OP 0, mmsize/2
mova [r0], m0
add r0, r1
dec r2d
jnz .nextrow
REP_RET
%endmacro
INIT_MMX mmxext
WEIGHT_FUNC_MM 8, 0
INIT_XMM sse2
WEIGHT_FUNC_MM 16, 8
%macro WEIGHT_FUNC_HALF_MM 2
cglobal h264_weight_%1, 6, 6, %2
WEIGHT_SETUP
sar r2d, 1
lea r3, [r1*2]
.nextrow:
WEIGHT_OP 0, r1
movh [r0], m0
%if mmsize == 16
movhps [r0+r1], m0
%else
psrlq m0, 32
movh [r0+r1], m0
%endif
add r0, r3
dec r2d
jnz .nextrow
REP_RET
%endmacro
INIT_MMX mmxext
WEIGHT_FUNC_HALF_MM 4, 0
INIT_XMM sse2
WEIGHT_FUNC_HALF_MM 8, 8
%macro BIWEIGHT_SETUP 0
%if ARCH_X86_64
%define off_regd r7d
%else
%define off_regd r3d
%endif
mov off_regd, r7m
add off_regd, 1
or off_regd, 1
add r4, 1
cmp r6d, 128
je .nonnormal
cmp r5, 128
jne .normal
.nonnormal:
sar r5, 1
sar r6, 1
sar off_regd, 1
sub r4, 1
.normal:
%if cpuflag(ssse3)
movd m4, r5d
movd m0, r6d
%else
movd m3, r5d
movd m4, r6d
%endif
movd m5, off_regd
movd m6, r4d
pslld m5, m6
psrld m5, 1
%if cpuflag(ssse3)
punpcklbw m4, m0
pshuflw m4, m4, 0
pshuflw m5, m5, 0
punpcklqdq m4, m4
punpcklqdq m5, m5
%else
%if mmsize == 16
pshuflw m3, m3, 0
pshuflw m4, m4, 0
pshuflw m5, m5, 0
punpcklqdq m3, m3
punpcklqdq m4, m4
punpcklqdq m5, m5
%else
pshufw m3, m3, 0
pshufw m4, m4, 0
pshufw m5, m5, 0
%endif
pxor m7, m7
%endif
%endmacro
%macro BIWEIGHT_STEPA 3
movh m%1, [r0+%3]
movh m%2, [r1+%3]
punpcklbw m%1, m7
punpcklbw m%2, m7
pmullw m%1, m3
pmullw m%2, m4
paddsw m%1, m%2
%endmacro
%macro BIWEIGHT_STEPB 0
paddsw m0, m5
paddsw m1, m5
psraw m0, m6
psraw m1, m6
packuswb m0, m1
%endmacro
INIT_MMX mmxext
cglobal h264_biweight_16, 7, 8, 0
BIWEIGHT_SETUP
movifnidn r3d, r3m
.nextrow:
BIWEIGHT_STEPA 0, 1, 0
BIWEIGHT_STEPA 1, 2, 4
BIWEIGHT_STEPB
mova [r0], m0
BIWEIGHT_STEPA 0, 1, 8
BIWEIGHT_STEPA 1, 2, 12
BIWEIGHT_STEPB
mova [r0+8], m0
add r0, r2
add r1, r2
dec r3d
jnz .nextrow
REP_RET
%macro BIWEIGHT_FUNC_MM 2
cglobal h264_biweight_%1, 7, 8, %2
BIWEIGHT_SETUP
movifnidn r3d, r3m
.nextrow:
BIWEIGHT_STEPA 0, 1, 0
BIWEIGHT_STEPA 1, 2, mmsize/2
BIWEIGHT_STEPB
mova [r0], m0
add r0, r2
add r1, r2
dec r3d
jnz .nextrow
REP_RET
%endmacro
INIT_MMX mmxext
BIWEIGHT_FUNC_MM 8, 0
INIT_XMM sse2
BIWEIGHT_FUNC_MM 16, 8
%macro BIWEIGHT_FUNC_HALF_MM 2
cglobal h264_biweight_%1, 7, 8, %2
BIWEIGHT_SETUP
movifnidn r3d, r3m
sar r3, 1
lea r4, [r2*2]
.nextrow:
BIWEIGHT_STEPA 0, 1, 0
BIWEIGHT_STEPA 1, 2, r2
BIWEIGHT_STEPB
movh [r0], m0
%if mmsize == 16
movhps [r0+r2], m0
%else
psrlq m0, 32
movh [r0+r2], m0
%endif
add r0, r4
add r1, r4
dec r3d
jnz .nextrow
REP_RET
%endmacro
INIT_MMX mmxext
BIWEIGHT_FUNC_HALF_MM 4, 0
INIT_XMM sse2
BIWEIGHT_FUNC_HALF_MM 8, 8
%macro BIWEIGHT_SSSE3_OP 0
pmaddubsw m0, m4
pmaddubsw m2, m4
paddsw m0, m5
paddsw m2, m5
psraw m0, m6
psraw m2, m6
packuswb m0, m2
%endmacro
INIT_XMM ssse3
cglobal h264_biweight_16, 7, 8, 8
BIWEIGHT_SETUP
movifnidn r3d, r3m
.nextrow:
movh m0, [r0]
movh m2, [r0+8]
movh m3, [r1+8]
punpcklbw m0, [r1]
punpcklbw m2, m3
BIWEIGHT_SSSE3_OP
mova [r0], m0
add r0, r2
add r1, r2
dec r3d
jnz .nextrow
REP_RET
INIT_XMM ssse3
cglobal h264_biweight_8, 7, 8, 8
BIWEIGHT_SETUP
movifnidn r3d, r3m
sar r3, 1
lea r4, [r2*2]
.nextrow:
movh m0, [r0]
movh m1, [r1]
movh m2, [r0+r2]
movh m3, [r1+r2]
punpcklbw m0, m1
punpcklbw m2, m3
BIWEIGHT_SSSE3_OP
movh [r0], m0
movhps [r0+r2], m0
add r0, r4
add r1, r4
dec r3d
jnz .nextrow
REP_RET

View File

@@ -0,0 +1,284 @@
;*****************************************************************************
;* MMX/SSE2/AVX-optimized 10-bit H.264 weighted prediction code
;*****************************************************************************
;* Copyright (C) 2005-2011 x264 project
;*
;* Authors: Daniel Kang <daniel.d.kang@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION_RODATA 32
sq_1: dq 1
dq 0
cextern pw_1
cextern pw_1023
%define pw_pixel_max pw_1023
SECTION .text
;-----------------------------------------------------------------------------
; void ff_h264_weight_16_10(uint8_t *dst, int stride, int height,
; int log2_denom, int weight, int offset);
;-----------------------------------------------------------------------------
%macro WEIGHT_PROLOGUE 0
.prologue:
PROLOGUE 0,6,8
movifnidn r0, r0mp
movifnidn r1d, r1m
movifnidn r2d, r2m
movifnidn r4d, r4m
movifnidn r5d, r5m
%endmacro
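; WEIGHT_SETUP packs the weight and the pre-scaled offset into one dword so
; that WEIGHT_OP can apply pixel*weight plus the offset/rounding term with a
; single pmaddwd per four pixels, followed by an arithmetic shift by
; log2_denom+1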
%macro WEIGHT_SETUP 0
mova m0, [pw_1]
movd m2, r3m
pslld m0, m2 ; 1<<log2_denom
SPLATW m0, m0
shl r5, 19 ; *8, move to upper half of dword
lea r5, [r5+r4*2+0x10000]
movd m3, r5d ; weight<<1 | 1+(offset<<(3))
pshufd m3, m3, 0
mova m4, [pw_pixel_max]
paddw m2, [sq_1] ; log2_denom+1
%if notcpuflag(sse4)
pxor m7, m7
%endif
%endmacro
%macro WEIGHT_OP 1-2
%if %0==1
mova m5, [r0+%1]
punpckhwd m6, m5, m0
punpcklwd m5, m0
%else
movq m5, [r0+%1]
movq m6, [r0+%2]
punpcklwd m5, m0
punpcklwd m6, m0
%endif
pmaddwd m5, m3
pmaddwd m6, m3
psrad m5, m2
psrad m6, m2
%if cpuflag(sse4)
packusdw m5, m6
pminsw m5, m4
%else
packssdw m5, m6
CLIPW m5, m7, m4
%endif
%endmacro
%macro WEIGHT_FUNC_DBL 0
cglobal h264_weight_16_10
WEIGHT_PROLOGUE
WEIGHT_SETUP
.nextrow:
WEIGHT_OP 0
mova [r0 ], m5
WEIGHT_OP 16
mova [r0+16], m5
add r0, r1
dec r2d
jnz .nextrow
REP_RET
%endmacro
INIT_XMM sse2
WEIGHT_FUNC_DBL
INIT_XMM sse4
WEIGHT_FUNC_DBL
%macro WEIGHT_FUNC_MM 0
cglobal h264_weight_8_10
WEIGHT_PROLOGUE
WEIGHT_SETUP
.nextrow:
WEIGHT_OP 0
mova [r0], m5
add r0, r1
dec r2d
jnz .nextrow
REP_RET
%endmacro
INIT_XMM sse2
WEIGHT_FUNC_MM
INIT_XMM sse4
WEIGHT_FUNC_MM
%macro WEIGHT_FUNC_HALF_MM 0
cglobal h264_weight_4_10
WEIGHT_PROLOGUE
sar r2d, 1
WEIGHT_SETUP
lea r3, [r1*2]
.nextrow:
WEIGHT_OP 0, r1
movh [r0], m5
movhps [r0+r1], m5
add r0, r3
dec r2d
jnz .nextrow
REP_RET
%endmacro
INIT_XMM sse2
WEIGHT_FUNC_HALF_MM
INIT_XMM sse4
WEIGHT_FUNC_HALF_MM
;-----------------------------------------------------------------------------
; void ff_h264_biweight_16_10(uint8_t *dst, uint8_t *src, int stride,
; int height, int log2_denom, int weightd,
; int weights, int offset);
;-----------------------------------------------------------------------------
%if ARCH_X86_32
DECLARE_REG_TMP 3
%else
DECLARE_REG_TMP 7
%endif
%macro BIWEIGHT_PROLOGUE 0
.prologue:
PROLOGUE 0,8,8
movifnidn r0, r0mp
movifnidn r1, r1mp
movifnidn r2d, r2m
movifnidn r5d, r5m
movifnidn r6d, r6m
movifnidn t0d, r7m
%endmacro
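; BIWEIGHT_SETUP packs weightd and weights into one dword so that BIWEIGHT can
; compute dst*weightd + src*weights with a single pmaddwd per four pixel
; pairs; m5 carries the biased offset and rounding added before the shift by
; log2_denom+1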
%macro BIWEIGHT_SETUP 0
lea t0, [t0*4+1] ; (offset<<2)+1
or t0, 1
shl r6, 16
or r5, r6
movd m4, r5d ; weightd | weights
movd m5, t0d ; (offset+1)|1
movd m6, r4m ; log2_denom
pslld m5, m6 ; (((offset<<2)+1)|1)<<log2_denom
paddd m6, [sq_1]
pshufd m4, m4, 0
pshufd m5, m5, 0
mova m3, [pw_pixel_max]
movifnidn r3d, r3m
%if notcpuflag(sse4)
pxor m7, m7
%endif
%endmacro
%macro BIWEIGHT 1-2
%if %0==1
mova m0, [r0+%1]
mova m1, [r1+%1]
punpckhwd m2, m0, m1
punpcklwd m0, m1
%else
movq m0, [r0+%1]
movq m1, [r1+%1]
punpcklwd m0, m1
movq m2, [r0+%2]
movq m1, [r1+%2]
punpcklwd m2, m1
%endif
pmaddwd m0, m4
pmaddwd m2, m4
paddd m0, m5
paddd m2, m5
psrad m0, m6
psrad m2, m6
%if cpuflag(sse4)
packusdw m0, m2
pminsw m0, m3
%else
packssdw m0, m2
CLIPW m0, m7, m3
%endif
%endmacro
%macro BIWEIGHT_FUNC_DBL 0
cglobal h264_biweight_16_10
BIWEIGHT_PROLOGUE
BIWEIGHT_SETUP
.nextrow:
BIWEIGHT 0
mova [r0 ], m0
BIWEIGHT 16
mova [r0+16], m0
add r0, r2
add r1, r2
dec r3d
jnz .nextrow
REP_RET
%endmacro
INIT_XMM sse2
BIWEIGHT_FUNC_DBL
INIT_XMM sse4
BIWEIGHT_FUNC_DBL
%macro BIWEIGHT_FUNC 0
cglobal h264_biweight_8_10
BIWEIGHT_PROLOGUE
BIWEIGHT_SETUP
.nextrow:
BIWEIGHT 0
mova [r0], m0
add r0, r2
add r1, r2
dec r3d
jnz .nextrow
REP_RET
%endmacro
INIT_XMM sse2
BIWEIGHT_FUNC
INIT_XMM sse4
BIWEIGHT_FUNC
%macro BIWEIGHT_FUNC_HALF 0
cglobal h264_biweight_4_10
BIWEIGHT_PROLOGUE
BIWEIGHT_SETUP
sar r3d, 1
lea r4, [r2*2]
.nextrow:
BIWEIGHT 0, r2
movh [r0 ], m0
movhps [r0+r2], m0
add r0, r4
add r1, r4
dec r3d
jnz .nextrow
REP_RET
%endmacro
INIT_XMM sse2
BIWEIGHT_FUNC_HALF
INIT_XMM sse4
BIWEIGHT_FUNC_HALF

View File

@@ -0,0 +1,117 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/h264chroma.h"
void ff_put_h264_chroma_mc8_rnd_mmx (uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc8_rnd_mmxext(uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc8_rnd_3dnow(uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_put_h264_chroma_mc4_mmx (uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc4_mmxext (uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc4_3dnow (uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_put_h264_chroma_mc2_mmxext (uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc2_mmxext (uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_put_h264_chroma_mc8_rnd_ssse3(uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_put_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc8_rnd_ssse3(uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
void ff_avg_h264_chroma_mc4_ssse3 (uint8_t *dst, uint8_t *src,
int stride, int h, int x, int y);
#define CHROMA_MC(OP, NUM, DEPTH, OPT) \
void ff_ ## OP ## _h264_chroma_mc ## NUM ## _ ## DEPTH ## _ ## OPT \
(uint8_t *dst, uint8_t *src, \
int stride, int h, int x, int y);
CHROMA_MC(put, 2, 10, mmxext)
CHROMA_MC(avg, 2, 10, mmxext)
CHROMA_MC(put, 4, 10, mmxext)
CHROMA_MC(avg, 4, 10, mmxext)
CHROMA_MC(put, 8, 10, sse2)
CHROMA_MC(avg, 8, 10, sse2)
CHROMA_MC(put, 8, 10, avx)
CHROMA_MC(avg, 8, 10, avx)
av_cold void ff_h264chroma_init_x86(H264ChromaContext *c, int bit_depth)
{
int high_bit_depth = bit_depth > 8;
int cpu_flags = av_get_cpu_flags();
if (EXTERNAL_MMX(cpu_flags) && !high_bit_depth) {
c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_rnd_mmx;
c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_mmx;
}
if (EXTERNAL_AMD3DNOW(cpu_flags) && !high_bit_depth) {
c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_rnd_3dnow;
c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_3dnow;
}
if (EXTERNAL_MMXEXT(cpu_flags) && !high_bit_depth) {
c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_rnd_mmxext;
c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_mmxext;
c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_mmxext;
c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_mmxext;
}
if (EXTERNAL_MMXEXT(cpu_flags) && bit_depth > 8 && bit_depth <= 10) {
c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_10_mmxext;
c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_10_mmxext;
c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_10_mmxext;
c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_10_mmxext;
}
if (EXTERNAL_SSE2(cpu_flags) && bit_depth > 8 && bit_depth <= 10) {
c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_sse2;
c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_sse2;
}
if (EXTERNAL_SSSE3(cpu_flags) && !high_bit_depth) {
c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_rnd_ssse3;
c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_rnd_ssse3;
c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_ssse3;
c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_ssse3;
}
if (EXTERNAL_AVX(cpu_flags) && bit_depth > 8 && bit_depth <= 10) {
// AVX implies !cache64.
// TODO: Port cache(32|64) detection from x264.
c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_avx;
c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_avx;
}
}

View File

@@ -0,0 +1,370 @@
/*
* Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/h264dsp.h"
/***********************************/
/* IDCT */
#define IDCT_ADD_FUNC(NUM, DEPTH, OPT) \
void ff_h264_idct ## NUM ## _add_ ## DEPTH ## _ ## OPT(uint8_t *dst, \
int16_t *block, \
int stride);
IDCT_ADD_FUNC(, 8, mmx)
IDCT_ADD_FUNC(, 10, sse2)
IDCT_ADD_FUNC(_dc, 8, mmxext)
IDCT_ADD_FUNC(_dc, 10, mmxext)
IDCT_ADD_FUNC(8_dc, 8, mmxext)
IDCT_ADD_FUNC(8_dc, 10, sse2)
IDCT_ADD_FUNC(8, 8, mmx)
IDCT_ADD_FUNC(8, 8, sse2)
IDCT_ADD_FUNC(8, 10, sse2)
IDCT_ADD_FUNC(, 10, avx)
IDCT_ADD_FUNC(8_dc, 10, avx)
IDCT_ADD_FUNC(8, 10, avx)
#define IDCT_ADD_REP_FUNC(NUM, REP, DEPTH, OPT) \
void ff_h264_idct ## NUM ## _add ## REP ## _ ## DEPTH ## _ ## OPT \
(uint8_t *dst, const int *block_offset, \
int16_t *block, int stride, const uint8_t nnzc[6 * 8]);
IDCT_ADD_REP_FUNC(8, 4, 8, mmx)
IDCT_ADD_REP_FUNC(8, 4, 8, mmxext)
IDCT_ADD_REP_FUNC(8, 4, 8, sse2)
IDCT_ADD_REP_FUNC(8, 4, 10, sse2)
IDCT_ADD_REP_FUNC(8, 4, 10, avx)
IDCT_ADD_REP_FUNC(, 16, 8, mmx)
IDCT_ADD_REP_FUNC(, 16, 8, mmxext)
IDCT_ADD_REP_FUNC(, 16, 8, sse2)
IDCT_ADD_REP_FUNC(, 16, 10, sse2)
IDCT_ADD_REP_FUNC(, 16intra, 8, mmx)
IDCT_ADD_REP_FUNC(, 16intra, 8, mmxext)
IDCT_ADD_REP_FUNC(, 16intra, 8, sse2)
IDCT_ADD_REP_FUNC(, 16intra, 10, sse2)
IDCT_ADD_REP_FUNC(, 16, 10, avx)
IDCT_ADD_REP_FUNC(, 16intra, 10, avx)
#define IDCT_ADD_REP_FUNC2(NUM, REP, DEPTH, OPT) \
void ff_h264_idct ## NUM ## _add ## REP ## _ ## DEPTH ## _ ## OPT \
(uint8_t **dst, const int *block_offset, \
int16_t *block, int stride, const uint8_t nnzc[6 * 8]);
IDCT_ADD_REP_FUNC2(, 8, 8, mmx)
IDCT_ADD_REP_FUNC2(, 8, 8, mmxext)
IDCT_ADD_REP_FUNC2(, 8, 8, sse2)
IDCT_ADD_REP_FUNC2(, 8, 10, sse2)
IDCT_ADD_REP_FUNC2(, 8, 10, avx)
void ff_h264_luma_dc_dequant_idct_mmx(int16_t *output, int16_t *input, int qmul);
void ff_h264_luma_dc_dequant_idct_sse2(int16_t *output, int16_t *input, int qmul);
/***********************************/
/* deblocking */
void ff_h264_loop_filter_strength_mmxext(int16_t bS[2][4][4], uint8_t nnz[40],
int8_t ref[2][40],
int16_t mv[2][40][2],
int bidir, int edges, int step,
int mask_mv0, int mask_mv1, int field);
#define LF_FUNC(DIR, TYPE, DEPTH, OPT) \
void ff_deblock_ ## DIR ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT(uint8_t *pix, \
int stride, \
int alpha, \
int beta, \
int8_t *tc0);
#define LF_IFUNC(DIR, TYPE, DEPTH, OPT) \
void ff_deblock_ ## DIR ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT(uint8_t *pix, \
int stride, \
int alpha, \
int beta);
#define LF_FUNCS(type, depth) \
LF_FUNC(h, chroma, depth, mmxext) \
LF_IFUNC(h, chroma_intra, depth, mmxext) \
LF_FUNC(v, chroma, depth, mmxext) \
LF_IFUNC(v, chroma_intra, depth, mmxext) \
LF_FUNC(h, luma, depth, mmxext) \
LF_IFUNC(h, luma_intra, depth, mmxext) \
LF_FUNC(h, luma, depth, sse2) \
LF_IFUNC(h, luma_intra, depth, sse2) \
LF_FUNC(v, luma, depth, sse2) \
LF_IFUNC(v, luma_intra, depth, sse2) \
LF_FUNC(h, chroma, depth, sse2) \
LF_IFUNC(h, chroma_intra, depth, sse2) \
LF_FUNC(v, chroma, depth, sse2) \
LF_IFUNC(v, chroma_intra, depth, sse2) \
LF_FUNC(h, luma, depth, avx) \
LF_IFUNC(h, luma_intra, depth, avx) \
LF_FUNC(v, luma, depth, avx) \
LF_IFUNC(v, luma_intra, depth, avx) \
LF_FUNC(h, chroma, depth, avx) \
LF_IFUNC(h, chroma_intra, depth, avx) \
LF_FUNC(v, chroma, depth, avx) \
LF_IFUNC(v, chroma_intra, depth, avx)
LF_FUNCS(uint8_t, 8)
LF_FUNCS(uint16_t, 10)
#if ARCH_X86_32 && HAVE_MMXEXT_EXTERNAL
LF_FUNC(v8, luma, 8, mmxext)
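/* On 32-bit x86 the external mmxext kernel filters only 8 pixels per call, so
 * the 16-pixel-wide luma edge is handled by invoking ff_deblock_v8_luma_8_mmxext
 * twice.  A negative tc0 entry marks a 4-pixel segment with filtering disabled,
 * so "(tc0[0] & tc0[1]) >= 0" skips a half only when both of its segments are
 * disabled (the bitwise AND of two negatives keeps the sign bit set). */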
static void deblock_v_luma_8_mmxext(uint8_t *pix, int stride, int alpha,
int beta, int8_t *tc0)
{
if ((tc0[0] & tc0[1]) >= 0)
ff_deblock_v8_luma_8_mmxext(pix + 0, stride, alpha, beta, tc0);
if ((tc0[2] & tc0[3]) >= 0)
ff_deblock_v8_luma_8_mmxext(pix + 8, stride, alpha, beta, tc0 + 2);
}
LF_IFUNC(v8, luma_intra, 8, mmxext)
static void deblock_v_luma_intra_8_mmxext(uint8_t *pix, int stride,
int alpha, int beta)
{
ff_deblock_v8_luma_intra_8_mmxext(pix + 0, stride, alpha, beta);
ff_deblock_v8_luma_intra_8_mmxext(pix + 8, stride, alpha, beta);
}
#endif /* ARCH_X86_32 && HAVE_MMXEXT_EXTERNAL */
LF_FUNC(v, luma, 10, mmxext)
LF_IFUNC(v, luma_intra, 10, mmxext)
/***********************************/
/* weighted prediction */
#define H264_WEIGHT(W, OPT) \
void ff_h264_weight_ ## W ## _ ## OPT(uint8_t *dst, int stride, \
int height, int log2_denom, \
int weight, int offset);
#define H264_BIWEIGHT(W, OPT) \
void ff_h264_biweight_ ## W ## _ ## OPT(uint8_t *dst, uint8_t *src, \
int stride, int height, \
int log2_denom, int weightd, \
int weights, int offset);
#define H264_BIWEIGHT_MMX(W) \
H264_WEIGHT(W, mmxext) \
H264_BIWEIGHT(W, mmxext)
#define H264_BIWEIGHT_MMX_SSE(W) \
H264_BIWEIGHT_MMX(W) \
H264_WEIGHT(W, sse2) \
H264_BIWEIGHT(W, sse2) \
H264_BIWEIGHT(W, ssse3)
H264_BIWEIGHT_MMX_SSE(16)
H264_BIWEIGHT_MMX_SSE(8)
H264_BIWEIGHT_MMX(4)
#define H264_WEIGHT_10(W, DEPTH, OPT) \
void ff_h264_weight_ ## W ## _ ## DEPTH ## _ ## OPT(uint8_t *dst, \
int stride, \
int height, \
int log2_denom, \
int weight, \
int offset);
#define H264_BIWEIGHT_10(W, DEPTH, OPT) \
void ff_h264_biweight_ ## W ## _ ## DEPTH ## _ ## OPT(uint8_t *dst, \
uint8_t *src, \
int stride, \
int height, \
int log2_denom, \
int weightd, \
int weights, \
int offset);
#define H264_BIWEIGHT_10_SSE(W, DEPTH) \
H264_WEIGHT_10(W, DEPTH, sse2) \
H264_WEIGHT_10(W, DEPTH, sse4) \
H264_BIWEIGHT_10(W, DEPTH, sse2) \
H264_BIWEIGHT_10(W, DEPTH, sse4)
H264_BIWEIGHT_10_SSE(16, 10)
H264_BIWEIGHT_10_SSE(8, 10)
H264_BIWEIGHT_10_SSE(4, 10)
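/* Runtime dispatch: the generic h264dsp init fills the context with the C
 * implementations before calling this function, which then overwrites
 * individual pointers with the best x86 version the host CPU supports.  The
 * EXTERNAL_*() blocks inside each bit-depth branch run in order of increasing
 * capability (MMX/MMXEXT, SSE2, SSSE3/SSE4, AVX), so a later block simply
 * overrides the assignments made by an earlier one. */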
av_cold void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth,
const int chroma_format_idc)
{
#if HAVE_YASM
int cpu_flags = av_get_cpu_flags();
if (EXTERNAL_MMXEXT(cpu_flags) && chroma_format_idc <= 1)
c->h264_loop_filter_strength = ff_h264_loop_filter_strength_mmxext;
if (bit_depth == 8) {
if (EXTERNAL_MMX(cpu_flags)) {
c->h264_idct_dc_add =
c->h264_idct_add = ff_h264_idct_add_8_mmx;
c->h264_idct8_dc_add =
c->h264_idct8_add = ff_h264_idct8_add_8_mmx;
c->h264_idct_add16 = ff_h264_idct_add16_8_mmx;
c->h264_idct8_add4 = ff_h264_idct8_add4_8_mmx;
if (chroma_format_idc <= 1)
c->h264_idct_add8 = ff_h264_idct_add8_8_mmx;
c->h264_idct_add16intra = ff_h264_idct_add16intra_8_mmx;
if (cpu_flags & AV_CPU_FLAG_CMOV)
c->h264_luma_dc_dequant_idct = ff_h264_luma_dc_dequant_idct_mmx;
}
if (EXTERNAL_MMXEXT(cpu_flags)) {
c->h264_idct_dc_add = ff_h264_idct_dc_add_8_mmxext;
c->h264_idct8_dc_add = ff_h264_idct8_dc_add_8_mmxext;
c->h264_idct_add16 = ff_h264_idct_add16_8_mmxext;
c->h264_idct8_add4 = ff_h264_idct8_add4_8_mmxext;
if (chroma_format_idc <= 1)
c->h264_idct_add8 = ff_h264_idct_add8_8_mmxext;
c->h264_idct_add16intra = ff_h264_idct_add16intra_8_mmxext;
c->h264_v_loop_filter_chroma = ff_deblock_v_chroma_8_mmxext;
c->h264_v_loop_filter_chroma_intra = ff_deblock_v_chroma_intra_8_mmxext;
if (chroma_format_idc <= 1) {
c->h264_h_loop_filter_chroma = ff_deblock_h_chroma_8_mmxext;
c->h264_h_loop_filter_chroma_intra = ff_deblock_h_chroma_intra_8_mmxext;
}
#if ARCH_X86_32 && HAVE_MMXEXT_EXTERNAL
c->h264_v_loop_filter_luma = deblock_v_luma_8_mmxext;
c->h264_h_loop_filter_luma = ff_deblock_h_luma_8_mmxext;
c->h264_v_loop_filter_luma_intra = deblock_v_luma_intra_8_mmxext;
c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_mmxext;
#endif /* ARCH_X86_32 && HAVE_MMXEXT_EXTERNAL */
c->weight_h264_pixels_tab[0] = ff_h264_weight_16_mmxext;
c->weight_h264_pixels_tab[1] = ff_h264_weight_8_mmxext;
c->weight_h264_pixels_tab[2] = ff_h264_weight_4_mmxext;
c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_mmxext;
c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_mmxext;
c->biweight_h264_pixels_tab[2] = ff_h264_biweight_4_mmxext;
}
if (EXTERNAL_SSE2(cpu_flags)) {
c->h264_idct8_add = ff_h264_idct8_add_8_sse2;
c->h264_idct_add16 = ff_h264_idct_add16_8_sse2;
c->h264_idct8_add4 = ff_h264_idct8_add4_8_sse2;
if (chroma_format_idc <= 1)
c->h264_idct_add8 = ff_h264_idct_add8_8_sse2;
c->h264_idct_add16intra = ff_h264_idct_add16intra_8_sse2;
c->h264_luma_dc_dequant_idct = ff_h264_luma_dc_dequant_idct_sse2;
c->weight_h264_pixels_tab[0] = ff_h264_weight_16_sse2;
c->weight_h264_pixels_tab[1] = ff_h264_weight_8_sse2;
c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_sse2;
c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_sse2;
c->h264_v_loop_filter_luma = ff_deblock_v_luma_8_sse2;
c->h264_h_loop_filter_luma = ff_deblock_h_luma_8_sse2;
c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_sse2;
c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_sse2;
}
if (EXTERNAL_SSSE3(cpu_flags)) {
c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_ssse3;
c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_ssse3;
}
if (EXTERNAL_AVX(cpu_flags)) {
c->h264_v_loop_filter_luma = ff_deblock_v_luma_8_avx;
c->h264_h_loop_filter_luma = ff_deblock_h_luma_8_avx;
c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_avx;
c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_avx;
}
} else if (bit_depth == 10) {
if (EXTERNAL_MMXEXT(cpu_flags)) {
#if ARCH_X86_32
c->h264_v_loop_filter_chroma = ff_deblock_v_chroma_10_mmxext;
c->h264_v_loop_filter_chroma_intra = ff_deblock_v_chroma_intra_10_mmxext;
c->h264_v_loop_filter_luma = ff_deblock_v_luma_10_mmxext;
c->h264_h_loop_filter_luma = ff_deblock_h_luma_10_mmxext;
c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_mmxext;
c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_mmxext;
#endif /* ARCH_X86_32 */
c->h264_idct_dc_add = ff_h264_idct_dc_add_10_mmxext;
}
if (EXTERNAL_SSE2(cpu_flags)) {
c->h264_idct_add = ff_h264_idct_add_10_sse2;
c->h264_idct8_dc_add = ff_h264_idct8_dc_add_10_sse2;
c->h264_idct_add16 = ff_h264_idct_add16_10_sse2;
if (chroma_format_idc <= 1)
c->h264_idct_add8 = ff_h264_idct_add8_10_sse2;
c->h264_idct_add16intra = ff_h264_idct_add16intra_10_sse2;
#if HAVE_ALIGNED_STACK
c->h264_idct8_add = ff_h264_idct8_add_10_sse2;
c->h264_idct8_add4 = ff_h264_idct8_add4_10_sse2;
#endif /* HAVE_ALIGNED_STACK */
c->weight_h264_pixels_tab[0] = ff_h264_weight_16_10_sse2;
c->weight_h264_pixels_tab[1] = ff_h264_weight_8_10_sse2;
c->weight_h264_pixels_tab[2] = ff_h264_weight_4_10_sse2;
c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_10_sse2;
c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_10_sse2;
c->biweight_h264_pixels_tab[2] = ff_h264_biweight_4_10_sse2;
c->h264_v_loop_filter_chroma = ff_deblock_v_chroma_10_sse2;
c->h264_v_loop_filter_chroma_intra = ff_deblock_v_chroma_intra_10_sse2;
#if HAVE_ALIGNED_STACK
c->h264_v_loop_filter_luma = ff_deblock_v_luma_10_sse2;
c->h264_h_loop_filter_luma = ff_deblock_h_luma_10_sse2;
c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_sse2;
c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_sse2;
#endif /* HAVE_ALIGNED_STACK */
}
if (EXTERNAL_SSE4(cpu_flags)) {
c->weight_h264_pixels_tab[0] = ff_h264_weight_16_10_sse4;
c->weight_h264_pixels_tab[1] = ff_h264_weight_8_10_sse4;
c->weight_h264_pixels_tab[2] = ff_h264_weight_4_10_sse4;
c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_10_sse4;
c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_10_sse4;
c->biweight_h264_pixels_tab[2] = ff_h264_biweight_4_10_sse4;
}
if (EXTERNAL_AVX(cpu_flags)) {
c->h264_idct_dc_add =
c->h264_idct_add = ff_h264_idct_add_10_avx;
c->h264_idct8_dc_add = ff_h264_idct8_dc_add_10_avx;
c->h264_idct_add16 = ff_h264_idct_add16_10_avx;
if (chroma_format_idc <= 1)
c->h264_idct_add8 = ff_h264_idct_add8_10_avx;
c->h264_idct_add16intra = ff_h264_idct_add16intra_10_avx;
#if HAVE_ALIGNED_STACK
c->h264_idct8_add = ff_h264_idct8_add_10_avx;
c->h264_idct8_add4 = ff_h264_idct8_add4_10_avx;
#endif /* HAVE_ALIGNED_STACK */
c->h264_v_loop_filter_chroma = ff_deblock_v_chroma_10_avx;
c->h264_v_loop_filter_chroma_intra = ff_deblock_v_chroma_intra_10_avx;
#if HAVE_ALIGNED_STACK
c->h264_v_loop_filter_luma = ff_deblock_v_luma_10_avx;
c->h264_h_loop_filter_luma = ff_deblock_h_luma_10_avx;
c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_avx;
c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_avx;
#endif /* HAVE_ALIGNED_STACK */
}
}
#endif /* HAVE_YASM */
}

@@ -0,0 +1,876 @@
;*****************************************************************************
;* SSE2-optimized HEVC deblocking code
;*****************************************************************************
;* Copyright (C) 2013 VTT
;*
;* Authors: Seppo Tomperi <seppo.tomperi@vtt.fi>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION_RODATA
cextern pw_1023
%define pw_pixel_max_10 pw_1023
pw_pixel_max_12: times 8 dw ((1 << 12)-1)
pw_m2: times 8 dw -2
pd_1 : times 4 dd 1
cextern pw_4
cextern pw_8
cextern pw_m1
SECTION .text
INIT_XMM sse2
; expands to [base],...,[base+7*stride]
%define PASS8ROWS(base, base3, stride, stride3) \
[base], [base+stride], [base+stride*2], [base3], \
[base3+stride], [base3+stride*2], [base3+stride3], [base3+stride*4]
; in: 8 rows of 4 bytes in %1..%8
; out: 4 rows of 8 words in m0..m3
%macro TRANSPOSE4x8B_LOAD 8
movd m0, %1
movd m2, %2
movd m1, %3
movd m3, %4
punpcklbw m0, m2
punpcklbw m1, m3
punpcklwd m0, m1
movd m4, %5
movd m6, %6
movd m5, %7
movd m3, %8
punpcklbw m4, m6
punpcklbw m5, m3
punpcklwd m4, m5
punpckhdq m2, m0, m4
punpckldq m0, m4
pxor m5, m5
punpckhbw m1, m0, m5
punpcklbw m0, m5
punpckhbw m3, m2, m5
punpcklbw m2, m5
%endmacro
; in: 4 rows of 8 words in m0..m3
; out: 8 rows of 4 bytes in %1..%8
%macro TRANSPOSE8x4B_STORE 8
packuswb m0, m2
packuswb m1, m3
SBUTTERFLY bw, 0, 1, 2
SBUTTERFLY wd, 0, 1, 2
movd %1, m0
pshufd m0, m0, 0x39
movd %2, m0
pshufd m0, m0, 0x39
movd %3, m0
pshufd m0, m0, 0x39
movd %4, m0
movd %5, m1
pshufd m1, m1, 0x39
movd %6, m1
pshufd m1, m1, 0x39
movd %7, m1
pshufd m1, m1, 0x39
movd %8, m1
%endmacro
; in: 8 rows of 4 words in %1..%8
; out: 4 rows of 8 words in m0..m3
%macro TRANSPOSE4x8W_LOAD 8
movq m0, %1
movq m2, %2
movq m1, %3
movq m3, %4
punpcklwd m0, m2
punpcklwd m1, m3
punpckhdq m2, m0, m1
punpckldq m0, m1
movq m4, %5
movq m6, %6
movq m5, %7
movq m3, %8
punpcklwd m4, m6
punpcklwd m5, m3
punpckhdq m6, m4, m5
punpckldq m4, m5
punpckhqdq m1, m0, m4
punpcklqdq m0, m4
punpckhqdq m3, m2, m6
punpcklqdq m2, m6
%endmacro
; in: 4 rows of 8 words in m0..m3
; out: 8 rows of 4 words in %1..%8
%macro TRANSPOSE8x4W_STORE 9
TRANSPOSE4x4W 0, 1, 2, 3, 4
pxor m5, m5; zeros reg
CLIPW m0, m5, %9
CLIPW m1, m5, %9
CLIPW m2, m5, %9
CLIPW m3, m5, %9
movq %1, m0
movhps %2, m0
movq %3, m1
movhps %4, m1
movq %5, m2
movhps %6, m2
movq %7, m3
movhps %8, m3
%endmacro
; in: 8 rows of 8 bytes in %1..%8
; out: 8 rows of 8 words in m0..m7
%macro TRANSPOSE8x8B_LOAD 8
movq m7, %1
movq m2, %2
movq m1, %3
movq m3, %4
punpcklbw m7, m2
punpcklbw m1, m3
punpcklwd m3, m7, m1
punpckhwd m7, m1
movq m4, %5
movq m6, %6
movq m5, %7
movq m15, %8
punpcklbw m4, m6
punpcklbw m5, m15
punpcklwd m9, m4, m5
punpckhwd m4, m5
punpckldq m1, m3, m9; 0, 1
punpckhdq m3, m9; 2, 3
punpckldq m5, m7, m4; 4, 5
punpckhdq m7, m4; 6, 7
pxor m13, m13
punpcklbw m0, m1, m13; 0 in 16 bit
punpckhbw m1, m13; 1 in 16 bit
punpcklbw m2, m3, m13; 2
punpckhbw m3, m13; 3
punpcklbw m4, m5, m13; 4
punpckhbw m5, m13; 5
punpcklbw m6, m7, m13; 6
punpckhbw m7, m13; 7
%endmacro
; in: 8 rows of 8 words in m0..m7
; out: 8 rows of 8 bytes in %1..%8
%macro TRANSPOSE8x8B_STORE 8
packuswb m0, m4
packuswb m1, m5
packuswb m2, m6
packuswb m3, m7
TRANSPOSE2x4x4B 0, 1, 2, 3, 4
movq %1, m0
movhps %2, m0
movq %3, m1
movhps %4, m1
movq %5, m2
movhps %6, m2
movq %7, m3
movhps %8, m3
%endmacro
; in: 8 rows of 8 words in %1..%8
; out: 8 rows of 8 words in m0..m7
%macro TRANSPOSE8x8W_LOAD 8
movdqu m0, %1
movdqu m1, %2
movdqu m2, %3
movdqu m3, %4
movdqu m4, %5
movdqu m5, %6
movdqu m6, %7
movdqu m7, %8
TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
%endmacro
; in: 8 rows of 8 words in m0..m7
; out: 8 rows of 8 words in %1..%8
%macro TRANSPOSE8x8W_STORE 9
TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
pxor m8, m8
CLIPW m0, m8, %9
CLIPW m1, m8, %9
CLIPW m2, m8, %9
CLIPW m3, m8, %9
CLIPW m4, m8, %9
CLIPW m5, m8, %9
CLIPW m6, m8, %9
CLIPW m7, m8, %9
movdqu %1, m0
movdqu %2, m1
movdqu %3, m2
movdqu %4, m3
movdqu %5, m4
movdqu %6, m5
movdqu %7, m6
movdqu %8, m7
%endmacro
; in: %2 clobbered
; out: %1
; mask in m11
; clobbers m10
%macro MASKED_COPY 2
pand %2, m11 ; and mask
pandn m10, m11, %1; and -mask
por %2, m10
mova %1, %2
%endmacro
; in: %2 clobbered
; out: %1
; mask in %3, will be clobbered
%macro MASKED_COPY2 3
pand %2, %3 ; and mask
pandn %3, %1; and -mask
por %2, %3
mova %1, %2
%endmacro
ALIGN 16
; input in m0 ... m3 and tcs in r2. Output in m1 and m2
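; Per the HEVC chroma deblocking rules this computes, roughly,
;     delta = av_clip(((q0 - p0) * 4 + p1 - q1 + 4) >> 3, -tc, tc)
;     p0' = p0 + delta,  q0' = q0 - delta
; with m0..m3 holding p1, p0, q0, q1; for %1 > 8 the tc (and -tc) bounds are
; scaled by 1 << (%1 - 8).  Clipping of p0'/q0' to the pixel range is left to
; the callers (packuswb / CLIPW).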
%macro CHROMA_DEBLOCK_BODY 1
psubw m4, m2, m1; q0 - p0
psubw m5, m0, m3; p1 - q1
psllw m4, 2; << 2
paddw m5, m4;
;tc calculations
movq m6, [tcq]; tc0
punpcklwd m6, m6
pshufd m6, m6, 0xA0; tc0, tc1
%if cpuflag(ssse3)
psignw m4, m6, [pw_m1]; -tc0, -tc1
%else
pmullw m4, m6, [pw_m1]; -tc0, -tc1
%endif
;end tc calculations
paddw m5, [pw_4]; +4
psraw m5, 3; >> 3
%if %1 > 8
psllw m4, %1-8; << (BIT_DEPTH - 8)
psllw m6, %1-8; << (BIT_DEPTH - 8)
%endif
pmaxsw m5, m4
pminsw m5, m6
paddw m1, m5; p0 + delta0
psubw m2, m5; q0 - delta0
%endmacro
; input in m0 ... m7, beta in r2, tcs in r3. Output in m1...m6
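; Decision logic, with m0..m7 = p3 p2 p1 p0 q0 q1 q2 q3 and two 4-sample
; segments processed at once (the 0/1 prefixes in the register comments):
;     dp = |p2 - 2*p1 + p0|,  dq = |q2 - 2*q1 + q0|,  d = dp + dq
; the edge is filtered at all only where d0 + d3 < beta; the strong filter is
; chosen where additionally 2*d < (beta >> 2),
; |p3 - p0| + |q3 - q0| < (beta >> 3) and |p0 - q0| < ((5 * tc + 1) >> 1);
; otherwise the weak filter applies
;     delta0 = (9 * (q0 - p0) - 3 * (q1 - p1) + 8) >> 4
; clipped to [-tc, tc] and only where |delta0| < 10 * tc.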
%macro LUMA_DEBLOCK_BODY 2
psllw m9, m2, 1; *2
psubw m10, m1, m9
paddw m10, m3
ABS1 m10, m11 ; 0dp0, 0dp3 , 1dp0, 1dp3
psllw m9, m5, 1; *2
psubw m11, m6, m9
paddw m11, m4
ABS1 m11, m13 ; 0dq0, 0dq3 , 1dq0, 1dq3
;beta calculations
%if %1 > 8
shl betaq, %1 - 8
%endif
movd m13, betad
SPLATW m13, m13, 0
;end beta calculations
paddw m9, m10, m11; 0d0, 0d3 , 1d0, 1d3
pshufhw m14, m9, 0x0f ;0b00001111; 0d3 0d3 0d0 0d0 in high
pshuflw m14, m14, 0x0f ;0b00001111; 1d3 1d3 1d0 1d0 in low
pshufhw m9, m9, 0xf0 ;0b11110000; 0d0 0d0 0d3 0d3
pshuflw m9, m9, 0xf0 ;0b11110000; 1d0 1d0 1d3 1d3
paddw m14, m9; 0d0+0d3, 1d0+1d3
;compare
pcmpgtw m15, m13, m14
movmskps r13, m15 ;filtering mask 0d0 + 0d3 < beta0 (bit 2 or 3) , 1d0 + 1d3 < beta1 (bit 0 or 1)
test r13, r13
je .bypassluma
;weak / strong decision compare to beta_2
psraw m15, m13, 2; beta >> 2
psllw m8, m9, 1;
pcmpgtw m15, m8; (d0 << 1) < beta_2, (d3 << 1) < beta_2
movmskps r6, m15;
;end weak / strong decision
; weak filter nd_p/q calculation
pshufd m8, m10, 0x31
psrld m8, 16
paddw m8, m10
movd r7d, m8
pshufd m8, m8, 0x4E
movd r8d, m8
pshufd m8, m11, 0x31
psrld m8, 16
paddw m8, m11
movd r9d, m8
pshufd m8, m8, 0x4E
movd r10d, m8
; end calc for weak filter
; filtering mask
mov r11, r13
shr r11, 3
movd m15, r11d
and r13, 1
movd m11, r13d
shufps m11, m15, 0
shl r11, 1
or r13, r11
pcmpeqd m11, [pd_1]; filtering mask
;decide between strong and weak filtering
;tc25 calculations
mov r11d, [tcq];
%if %1 > 8
shl r11, %1 - 8
%endif
movd m8, r11d; tc0
mov r3d, [tcq+4];
%if %1 > 8
shl r3, %1 - 8
%endif
add r11d, r3d; tc0 + tc1
jz .bypassluma
movd m9, r3d; tc1
punpcklwd m8, m8
punpcklwd m9, m9
shufps m8, m9, 0; tc0, tc1
mova m9, m8
psllw m8, 2; tc << 2
pavgw m8, m9; tc25 = ((tc * 5 + 1) >> 1)
;end tc25 calculations
;----beta_3 comparison-----
psubw m12, m0, m3; p3 - p0
ABS1 m12, m14; abs(p3 - p0)
psubw m15, m7, m4; q3 - q0
ABS1 m15, m14; abs(q3 - q0)
paddw m12, m15; abs(p3 - p0) + abs(q3 - q0)
pshufhw m12, m12, 0xf0 ;0b11110000;
pshuflw m12, m12, 0xf0 ;0b11110000;
psraw m13, 3; beta >> 3
pcmpgtw m13, m12;
movmskps r11, m13;
and r6, r11; strong mask , beta_2 and beta_3 comparisons
;----beta_3 comparison end-----
;----tc25 comparison---
psubw m12, m3, m4; p0 - q0
ABS1 m12, m14; abs(p0 - q0)
pshufhw m12, m12, 0xf0 ;0b11110000;
pshuflw m12, m12, 0xf0 ;0b11110000;
pcmpgtw m8, m12; tc25 comparisons
movmskps r11, m8;
and r6, r11; strong mask, beta_2, beta_3 and tc25 comparisons
;----tc25 comparison end---
mov r11, r6;
shr r11, 1;
and r6, r11; strong mask, bits 2 and 0
pmullw m14, m9, [pw_m2]; -tc * 2
paddw m9, m9
and r6, 5; 0b101
mov r11, r6; strong mask
shr r6, 2;
movd m12, r6d; store to xmm for mask generation
shl r6, 1
and r11, 1
movd m10, r11d; store to xmm for mask generation
or r6, r11; final strong mask, bits 1 and 0
jz .weakfilter
shufps m10, m12, 0
pcmpeqd m10, [pd_1]; strong mask
mova m13, [pw_4]; 4 in every cell
pand m11, m10; combine filtering mask and strong mask
paddw m12, m2, m3; p1 + p0
paddw m12, m4; p1 + p0 + q0
mova m10, m12; copy
paddw m12, m12; 2*p1 + 2*p0 + 2*q0
paddw m12, m1; p2 + 2*p1 + 2*p0 + 2*q0
paddw m12, m5; p2 + 2*p1 + 2*p0 + 2*q0 + q1
paddw m12, m13; p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4
psraw m12, 3; ((p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4) >> 3)
psubw m12, m3; ((p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4) >> 3) - p0
pmaxsw m12, m14
pminsw m12, m9; av_clip( , -2 * tc, 2 * tc)
paddw m12, m3; p0'
paddw m15, m1, m10; p2 + p1 + p0 + q0
psrlw m13, 1; 2 in every cell
paddw m15, m13; p2 + p1 + p0 + q0 + 2
psraw m15, 2; (p2 + p1 + p0 + q0 + 2) >> 2
psubw m15, m2;((p2 + p1 + p0 + q0 + 2) >> 2) - p1
pmaxsw m15, m14
pminsw m15, m9; av_clip( , -2 * tc, 2 * tc)
paddw m15, m2; p1'
paddw m8, m1, m0; p3 + p2
paddw m8, m8; 2*p3 + 2*p2
paddw m8, m1; 2*p3 + 3*p2
paddw m8, m10; 2*p3 + 3*p2 + p1 + p0 + q0
paddw m13, m13
paddw m8, m13; 2*p3 + 3*p2 + p1 + p0 + q0 + 4
psraw m8, 3; (2*p3 + 3*p2 + p1 + p0 + q0 + 4) >> 3
psubw m8, m1; ((2*p3 + 3*p2 + p1 + p0 + q0 + 4) >> 3) - p2
pmaxsw m8, m14
pminsw m8, m9; av_clip( , -2 * tc, 2 * tc)
paddw m8, m1; p2'
MASKED_COPY m1, m8
paddw m8, m3, m4; p0 + q0
paddw m8, m5; p0 + q0 + q1
paddw m8, m8; 2*p0 + 2*q0 + 2*q1
paddw m8, m2; p1 + 2*p0 + 2*q0 + 2*q1
paddw m8, m6; p1 + 2*p0 + 2*q0 + 2*q1 + q2
paddw m8, m13; p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4
psraw m8, 3; (p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4) >>3
psubw m8, m4;
pmaxsw m8, m14
pminsw m8, m9; av_clip( , -2 * tc, 2 * tc)
paddw m8, m4; q0'
MASKED_COPY m2, m15
paddw m15, m3, m4; p0 + q0
paddw m15, m5; p0 + q0 + q1
mova m10, m15;
paddw m15, m6; p0 + q0 + q1 + q2
psrlw m13, 1; 2 in every cell
paddw m15, m13; p0 + q0 + q1 + q2 + 2
psraw m15, 2; (p0 + q0 + q1 + q2 + 2) >> 2
psubw m15, m5; ((p0 + q0 + q1 + q2 + 2) >> 2) - q1
pmaxsw m15, m14
pminsw m15, m9; av_clip( , -2 * tc, 2 * tc)
paddw m15, m5; q1'
paddw m13, m7; q3 + 2
paddw m13, m6; q3 + q2 + 2
paddw m13, m13; 2*q3 + 2*q2 + 4
paddw m13, m6; 2*q3 + 3*q2 + 4
paddw m13, m10; 2*q3 + 3*q2 + q1 + q0 + p0 + 4
psraw m13, 3; (2*q3 + 3*q2 + q1 + q0 + p0 + 4) >> 3
psubw m13, m6; ((2*q3 + 3*q2 + q1 + q0 + p0 + 4) >> 3) - q2
pmaxsw m13, m14
pminsw m13, m9; av_clip( , -2 * tc, 2 * tc)
paddw m13, m6; q2'
MASKED_COPY m6, m13
MASKED_COPY m5, m15
MASKED_COPY m4, m8
MASKED_COPY m3, m12
.weakfilter:
not r6; strong mask -> weak mask
and r6, r13; final weak filtering mask, bits 0 and 1
jz .store
; weak filtering mask
mov r11, r6
shr r11, 1
movd m12, r11d
and r6, 1
movd m11, r6d
shufps m11, m12, 0
pcmpeqd m11, [pd_1]; filtering mask
mov r13, betaq
shr r13, 1;
add betaq, r13
shr betaq, 3; ((beta + (beta >> 1)) >> 3))
mova m13, [pw_8]
psubw m12, m4, m3 ; q0 - p0
psllw m10, m12, 3; 8 * (q0 - p0)
paddw m12, m10 ; 9 * (q0 - p0)
psubw m10, m5, m2 ; q1 - p1
psllw m8, m10, 1; 2 * ( q1 - p1 )
paddw m10, m8; 3 * ( q1 - p1 )
psubw m12, m10; 9 * (q0 - p0) - 3 * ( q1 - p1 )
paddw m12, m13; + 8
psraw m12, 4; >> 4 , delta0
PABSW m13, m12; abs(delta0)
psllw m10, m9, 2; 8 * tc
paddw m10, m9; 10 * tc
pcmpgtw m10, m13
pand m11, m10
psraw m9, 1; tc * 2 -> tc
psraw m14, 1; -tc * 2 -> -tc
pmaxsw m12, m14
pminsw m12, m9; av_clip(delta0, -tc, tc)
psraw m9, 1; tc -> tc / 2
%if cpuflag(ssse3)
psignw m14, m9, [pw_m1]; -tc / 2
%else
pmullw m14, m9, [pw_m1]; -tc / 2
%endif
pavgw m15, m1, m3; (p2 + p0 + 1) >> 1
psubw m15, m2; ((p2 + p0 + 1) >> 1) - p1
paddw m15, m12; ((p2 + p0 + 1) >> 1) - p1 + delta0
psraw m15, 1; (((p2 + p0 + 1) >> 1) - p1 + delta0) >> 1
pmaxsw m15, m14
pminsw m15, m9; av_clip(deltap1, -tc/2, tc/2)
paddw m15, m2; p1'
;beta calculations
movd m10, betad
SPLATW m10, m10, 0
movd m13, r7d; 1dp0 + 1dp3
movd m8, r8d; 0dp0 + 0dp3
punpcklwd m8, m8
punpcklwd m13, m13
shufps m13, m8, 0;
pcmpgtw m8, m10, m13
pand m8, m11
;end beta calculations
MASKED_COPY2 m2, m15, m8; write p1'
pavgw m8, m6, m4; (q2 + q0 + 1) >> 1
psubw m8, m5; ((q2 + q0 + 1) >> 1) - q1
psubw m8, m12; ((q2 + q0 + 1) >> 1) - q1 - delta0)
psraw m8, 1; ((q2 + q0 + 1) >> 1) - q1 - delta0) >> 1
pmaxsw m8, m14
pminsw m8, m9; av_clip(deltaq1, -tc/2, tc/2)
paddw m8, m5; q1'
movd m13, r9d;
movd m15, r10d;
punpcklwd m15, m15
punpcklwd m13, m13
shufps m13, m15, 0; dq0 + dq3
pcmpgtw m10, m13; compare to ((beta+(beta>>1))>>3)
pand m10, m11
MASKED_COPY2 m5, m8, m10; write q1'
paddw m15, m3, m12 ; p0 + delta0
MASKED_COPY m3, m15
psubw m8, m4, m12 ; q0 - delta0
MASKED_COPY m4, m8
%endmacro
;-----------------------------------------------------------------------------
; void ff_hevc_v_loop_filter_chroma(uint8_t *_pix, ptrdiff_t _stride, int32_t *tc,
; uint8_t *_no_p, uint8_t *_no_q);
;-----------------------------------------------------------------------------
%macro LOOP_FILTER_CHROMA 0
cglobal hevc_v_loop_filter_chroma_8, 3, 5, 7, pix, stride, tc, pix0, r3stride
sub pixq, 2
lea r3strideq, [3*strideq]
mov pix0q, pixq
add pixq, r3strideq
TRANSPOSE4x8B_LOAD PASS8ROWS(pix0q, pixq, strideq, r3strideq)
CHROMA_DEBLOCK_BODY 8
TRANSPOSE8x4B_STORE PASS8ROWS(pix0q, pixq, strideq, r3strideq)
RET
cglobal hevc_v_loop_filter_chroma_10, 3, 5, 7, pix, stride, tc, pix0, r3stride
sub pixq, 4
lea r3strideq, [3*strideq]
mov pix0q, pixq
add pixq, r3strideq
TRANSPOSE4x8W_LOAD PASS8ROWS(pix0q, pixq, strideq, r3strideq)
CHROMA_DEBLOCK_BODY 10
TRANSPOSE8x4W_STORE PASS8ROWS(pix0q, pixq, strideq, r3strideq), [pw_pixel_max_10]
RET
cglobal hevc_v_loop_filter_chroma_12, 3, 5, 7, pix, stride, tc, pix0, r3stride
sub pixq, 4
lea r3strideq, [3*strideq]
mov pix0q, pixq
add pixq, r3strideq
TRANSPOSE4x8W_LOAD PASS8ROWS(pix0q, pixq, strideq, r3strideq)
CHROMA_DEBLOCK_BODY 12
TRANSPOSE8x4W_STORE PASS8ROWS(pix0q, pixq, strideq, r3strideq), [pw_pixel_max_12]
RET
;-----------------------------------------------------------------------------
; void ff_hevc_h_loop_filter_chroma(uint8_t *_pix, ptrdiff_t _stride, int32_t *tc,
; uint8_t *_no_p, uint8_t *_no_q);
;-----------------------------------------------------------------------------
cglobal hevc_h_loop_filter_chroma_8, 3, 4, 7, pix, stride, tc, pix0
mov pix0q, pixq
sub pix0q, strideq
sub pix0q, strideq
movq m0, [pix0q]; p1
movq m1, [pix0q+strideq]; p0
movq m2, [pixq]; q0
movq m3, [pixq+strideq]; q1
pxor m5, m5; zeros reg
punpcklbw m0, m5
punpcklbw m1, m5
punpcklbw m2, m5
punpcklbw m3, m5
CHROMA_DEBLOCK_BODY 8
packuswb m1, m2
movh [pix0q+strideq], m1
movhps [pixq], m1
RET
cglobal hevc_h_loop_filter_chroma_10, 3, 4, 7, pix, stride, tc, pix0
mov pix0q, pixq
sub pix0q, strideq
sub pix0q, strideq
movu m0, [pix0q]; p1
movu m1, [pix0q+strideq]; p0
movu m2, [pixq]; q0
movu m3, [pixq+strideq]; q1
CHROMA_DEBLOCK_BODY 10
pxor m5, m5; zeros reg
CLIPW m1, m5, [pw_pixel_max_10]
CLIPW m2, m5, [pw_pixel_max_10]
movu [pix0q+strideq], m1
movu [pixq], m2
RET
cglobal hevc_h_loop_filter_chroma_12, 3, 4, 7, pix, stride, tc, pix0
mov pix0q, pixq
sub pix0q, strideq
sub pix0q, strideq
movu m0, [pix0q]; p1
movu m1, [pix0q+strideq]; p0
movu m2, [pixq]; q0
movu m3, [pixq+strideq]; q1
CHROMA_DEBLOCK_BODY 12
pxor m5, m5; zeros reg
CLIPW m1, m5, [pw_pixel_max_12]
CLIPW m2, m5, [pw_pixel_max_12]
movu [pix0q+strideq], m1
movu [pixq], m2
RET
%endmacro
INIT_XMM sse2
LOOP_FILTER_CHROMA
INIT_XMM avx
LOOP_FILTER_CHROMA
%if ARCH_X86_64
%macro LOOP_FILTER_LUMA 0
;-----------------------------------------------------------------------------
; void ff_hevc_v_loop_filter_luma(uint8_t *_pix, ptrdiff_t _stride, int beta,
; int32_t *tc, uint8_t *_no_p, uint8_t *_no_q);
;-----------------------------------------------------------------------------
cglobal hevc_v_loop_filter_luma_8, 4, 14, 16, pix, stride, beta, tc, pix0, src3stride
sub pixq, 4
lea pix0q, [3 * r1]
mov src3strideq, pixq
add pixq, pix0q
TRANSPOSE8x8B_LOAD PASS8ROWS(src3strideq, pixq, r1, pix0q)
LUMA_DEBLOCK_BODY 8, v
.store:
TRANSPOSE8x8B_STORE PASS8ROWS(src3strideq, pixq, r1, pix0q)
.bypassluma:
RET
cglobal hevc_v_loop_filter_luma_10, 4, 14, 16, pix, stride, beta, tc, pix0, src3stride
sub pixq, 8
lea pix0q, [3 * strideq]
mov src3strideq, pixq
add pixq, pix0q
TRANSPOSE8x8W_LOAD PASS8ROWS(src3strideq, pixq, strideq, pix0q)
LUMA_DEBLOCK_BODY 10, v
.store:
TRANSPOSE8x8W_STORE PASS8ROWS(src3strideq, pixq, r1, pix0q), [pw_pixel_max_10]
.bypassluma:
RET
cglobal hevc_v_loop_filter_luma_12, 4, 14, 16, pix, stride, beta, tc, pix0, src3stride
sub pixq, 8
lea pix0q, [3 * strideq]
mov src3strideq, pixq
add pixq, pix0q
TRANSPOSE8x8W_LOAD PASS8ROWS(src3strideq, pixq, strideq, pix0q)
LUMA_DEBLOCK_BODY 12, v
.store:
TRANSPOSE8x8W_STORE PASS8ROWS(src3strideq, pixq, r1, pix0q), [pw_pixel_max_12]
.bypassluma:
RET
;-----------------------------------------------------------------------------
; void ff_hevc_h_loop_filter_luma(uint8_t *_pix, ptrdiff_t _stride, int beta,
; int32_t *tc, uint8_t *_no_p, uint8_t *_no_q);
;-----------------------------------------------------------------------------
cglobal hevc_h_loop_filter_luma_8, 4, 14, 16, pix, stride, beta, tc, pix0, src3stride
lea src3strideq, [3 * strideq]
mov pix0q, pixq
sub pix0q, src3strideq
sub pix0q, strideq
movq m0, [pix0q]; p3
movq m1, [pix0q + strideq]; p2
movq m2, [pix0q + 2 * strideq]; p1
movq m3, [pix0q + src3strideq]; p0
movq m4, [pixq]; q0
movq m5, [pixq + strideq]; q1
movq m6, [pixq + 2 * strideq]; q2
movq m7, [pixq + src3strideq]; q3
pxor m8, m8
punpcklbw m0, m8
punpcklbw m1, m8
punpcklbw m2, m8
punpcklbw m3, m8
punpcklbw m4, m8
punpcklbw m5, m8
punpcklbw m6, m8
punpcklbw m7, m8
LUMA_DEBLOCK_BODY 8, h
.store:
packuswb m1, m2
packuswb m3, m4
packuswb m5, m6
movh [pix0q + strideq], m1
movhps [pix0q + 2 * strideq], m1
movh [pix0q + src3strideq], m3
movhps [pixq ], m3
movh [pixq + strideq], m5
movhps [pixq + 2 * strideq], m5
.bypassluma:
RET
cglobal hevc_h_loop_filter_luma_10, 4, 14, 16, pix, stride, beta, tc, pix0, src3stride
lea src3strideq, [3 * strideq]
mov pix0q, pixq
sub pix0q, src3strideq
sub pix0q, strideq
movdqu m0, [pix0q]; p3
movdqu m1, [pix0q + strideq]; p2
movdqu m2, [pix0q + 2 * strideq]; p1
movdqu m3, [pix0q + src3strideq]; p0
movdqu m4, [pixq]; q0
movdqu m5, [pixq + strideq]; q1
movdqu m6, [pixq + 2 * strideq]; q2
movdqu m7, [pixq + src3strideq]; q3
LUMA_DEBLOCK_BODY 10, h
.store:
pxor m8, m8; zeros reg
CLIPW m1, m8, [pw_pixel_max_10]
CLIPW m2, m8, [pw_pixel_max_10]
CLIPW m3, m8, [pw_pixel_max_10]
CLIPW m4, m8, [pw_pixel_max_10]
CLIPW m5, m8, [pw_pixel_max_10]
CLIPW m6, m8, [pw_pixel_max_10]
movdqu [pix0q + strideq], m1; p2
movdqu [pix0q + 2 * strideq], m2; p1
movdqu [pix0q + src3strideq], m3; p0
movdqu [pixq ], m4; q0
movdqu [pixq + strideq], m5; q1
movdqu [pixq + 2 * strideq], m6; q2
.bypassluma:
RET
cglobal hevc_h_loop_filter_luma_12, 4, 14, 16, pix, stride, beta, tc, pix0, src3stride
lea src3strideq, [3 * strideq]
mov pix0q, pixq
sub pix0q, src3strideq
sub pix0q, strideq
movdqu m0, [pix0q]; p3
movdqu m1, [pix0q + strideq]; p2
movdqu m2, [pix0q + 2 * strideq]; p1
movdqu m3, [pix0q + src3strideq]; p0
movdqu m4, [pixq]; q0
movdqu m5, [pixq + strideq]; q1
movdqu m6, [pixq + 2 * strideq]; q2
movdqu m7, [pixq + src3strideq]; q3
LUMA_DEBLOCK_BODY 12, h
.store:
pxor m8, m8; zeros reg
CLIPW m1, m8, [pw_pixel_max_12]
CLIPW m2, m8, [pw_pixel_max_12]
CLIPW m3, m8, [pw_pixel_max_12]
CLIPW m4, m8, [pw_pixel_max_12]
CLIPW m5, m8, [pw_pixel_max_12]
CLIPW m6, m8, [pw_pixel_max_12]
movdqu [pix0q + strideq], m1; p2
movdqu [pix0q + 2 * strideq], m2; p1
movdqu [pix0q + src3strideq], m3; p0
movdqu [pixq ], m4; q0
movdqu [pixq + strideq], m5; q1
movdqu [pixq + 2 * strideq], m6; q2
.bypassluma:
RET
%endmacro
INIT_XMM sse2
LOOP_FILTER_LUMA
INIT_XMM ssse3
LOOP_FILTER_LUMA
INIT_XMM avx
LOOP_FILTER_LUMA
%endif

@@ -0,0 +1,122 @@
; /*
; * SIMD optimized idct functions for HEVC decoding
; * Copyright (c) 2014 Pierre-Edouard LEPERE
; * Copyright (c) 2014 James Almer
; *
; * This file is part of FFmpeg.
; *
; * FFmpeg is free software; you can redistribute it and/or
; * modify it under the terms of the GNU Lesser General Public
; * License as published by the Free Software Foundation; either
; * version 2.1 of the License, or (at your option) any later version.
; *
; * FFmpeg is distributed in the hope that it will be useful,
; * but WITHOUT ANY WARRANTY; without even the implied warranty of
; * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; * Lesser General Public License for more details.
; *
; * You should have received a copy of the GNU Lesser General Public
; * License along with FFmpeg; if not, write to the Free Software
; * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
; */
%include "libavutil/x86/x86util.asm"
SECTION .text
; void ff_hevc_idctHxW_dc_{8,10,12}_<opt>(int16_t *coeffs)
; %1 = HxW
; %2 = number of loops
; %3 = bitdepth
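; A DC-only inverse transform just fills the whole coefficient block with one
; value; the add/sar pair in the two macros below computes
;     dc = (coeffs[0] + 1 + (1 << (14 - bitdepth))) >> (15 - bitdepth)
; which SPLATW then broadcasts and the stores spread over the block.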
%macro IDCT_DC 3
cglobal hevc_idct%1x%1_dc_%3, 1, 2, 1, coeff, tmp
movsx tmpq, word [coeffq]
add tmpw, ((1 << 14-%3) + 1)
sar tmpw, (15-%3)
movd xm0, tmpd
SPLATW m0, xm0
DEFINE_ARGS coeff, cnt
mov cntd, %2
.loop:
mova [coeffq+mmsize*0], m0
mova [coeffq+mmsize*1], m0
mova [coeffq+mmsize*2], m0
mova [coeffq+mmsize*3], m0
mova [coeffq+mmsize*4], m0
mova [coeffq+mmsize*5], m0
mova [coeffq+mmsize*6], m0
mova [coeffq+mmsize*7], m0
add coeffq, mmsize*8
dec cntd
jg .loop
RET
%endmacro
; %1 = HxW
; %2 = bitdepth
%macro IDCT_DC_NL 2 ; No loop
cglobal hevc_idct%1x%1_dc_%2, 1, 2, 1, coeff, tmp
movsx tmpq, word [coeffq]
add tmpw, ((1 << 14-%2) + 1)
sar tmpw, (15-%2)
movd m0, tmpd
SPLATW m0, xm0
mova [coeffq+mmsize*0], m0
mova [coeffq+mmsize*1], m0
mova [coeffq+mmsize*2], m0
mova [coeffq+mmsize*3], m0
%if mmsize == 16
mova [coeffq+mmsize*4], m0
mova [coeffq+mmsize*5], m0
mova [coeffq+mmsize*6], m0
mova [coeffq+mmsize*7], m0
%endif
RET
%endmacro
; 8-bit
INIT_MMX mmxext
IDCT_DC_NL 4, 8
IDCT_DC 8, 2, 8
INIT_XMM sse2
IDCT_DC_NL 8, 8
IDCT_DC 16, 4, 8
IDCT_DC 32, 16, 8
%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
IDCT_DC 16, 2, 8
IDCT_DC 32, 8, 8
%endif ;HAVE_AVX2_EXTERNAL
; 10-bit
INIT_MMX mmxext
IDCT_DC_NL 4, 10
IDCT_DC 8, 2, 10
INIT_XMM sse2
IDCT_DC_NL 8, 10
IDCT_DC 16, 4, 10
IDCT_DC 32, 16, 10
%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
IDCT_DC 16, 2, 10
IDCT_DC 32, 8, 10
%endif ;HAVE_AVX2_EXTERNAL
; 12-bit
INIT_MMX mmxext
IDCT_DC_NL 4, 12
IDCT_DC 8, 2, 12
INIT_XMM sse2
IDCT_DC_NL 8, 12
IDCT_DC 16, 4, 12
IDCT_DC 32, 16, 12
%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
IDCT_DC 16, 2, 12
IDCT_DC 32, 8, 12
%endif ;HAVE_AVX2_EXTERNAL

File diff suppressed because it is too large

@@ -0,0 +1,388 @@
; /*
; * Provide SIMD optimizations for transform_add functions for HEVC decoding
; * Copyright (c) 2014 Pierre-Edouard LEPERE
; *
; * This file is part of FFmpeg.
; *
; * FFmpeg is free software; you can redistribute it and/or
; * modify it under the terms of the GNU Lesser General Public
; * License as published by the Free Software Foundation; either
; * version 2.1 of the License, or (at your option) any later version.
; *
; * FFmpeg is distributed in the hope that it will be useful,
; * but WITHOUT ANY WARRANTY; without even the implied warranty of
; * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
; * Lesser General Public License for more details.
; *
; * You should have received a copy of the GNU Lesser General Public
; * License along with FFmpeg; if not, write to the Free Software
; * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
; */
%include "libavutil/x86/x86util.asm"
SECTION .text
cextern pw_1023
%define max_pixels_10 pw_1023
; The tr_add macros and functions were largely inspired by the x264 project's code in its h264_idct.asm file
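; The 8-bit variants use the usual unsigned-saturation trick: the signed
; residual is split into packuswb(res) and packuswb(-res), and
;     dst = psubusb(paddusb(dst, packuswb(res)), packuswb(-res))
; adds the residual with the required clip to [0, 255] using byte ops only.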
%macro TR_ADD_MMX_4_8 0
mova m2, [r1]
mova m4, [r1+8]
pxor m3, m3
psubw m3, m2
packuswb m2, m2
packuswb m3, m3
pxor m5, m5
psubw m5, m4
packuswb m4, m4
packuswb m5, m5
movh m0, [r0 ]
movh m1, [r0+r2 ]
paddusb m0, m2
paddusb m1, m4
psubusb m0, m3
psubusb m1, m5
movh [r0 ], m0
movh [r0+r2 ], m1
%endmacro
INIT_MMX mmxext
; void ff_hevc_transform_add4_8_mmxext(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride)
cglobal hevc_transform_add4_8, 3, 4, 6
TR_ADD_MMX_4_8
add r1, 16
lea r0, [r0+r2*2]
TR_ADD_MMX_4_8
RET
%macro TR_ADD_SSE_8_8 0
pxor m3, m3
mova m4, [r1]
mova m6, [r1+16]
mova m0, [r1+32]
mova m2, [r1+48]
psubw m5, m3, m4
psubw m7, m3, m6
psubw m1, m3, m0
packuswb m4, m0
packuswb m5, m1
psubw m3, m2
packuswb m6, m2
packuswb m7, m3
movq m0, [r0 ]
movq m1, [r0+r2 ]
movhps m0, [r0+r2*2]
movhps m1, [r0+r3 ]
paddusb m0, m4
paddusb m1, m6
psubusb m0, m5
psubusb m1, m7
movq [r0 ], m0
movq [r0+r2 ], m1
movhps [r0+2*r2], m0
movhps [r0+r3 ], m1
%endmacro
%macro TR_ADD_SSE_16_32_8 3
mova xm2, [r1+%1 ]
mova xm6, [r1+%1+16]
%if cpuflag(avx2)
vinserti128 m2, m2, [r1+%1+32], 1
vinserti128 m6, m6, [r1+%1+48], 1
%endif
%if cpuflag(avx)
psubw m1, m0, m2
psubw m5, m0, m6
%else
mova m1, m0
mova m5, m0
psubw m1, m2
psubw m5, m6
%endif
packuswb m2, m6
packuswb m1, m5
mova xm4, [r1+%1+mmsize*2 ]
mova xm6, [r1+%1+mmsize*2+16]
%if cpuflag(avx2)
vinserti128 m4, m4, [r1+%1+96 ], 1
vinserti128 m6, m6, [r1+%1+112], 1
%endif
%if cpuflag(avx)
psubw m3, m0, m4
psubw m5, m0, m6
%else
mova m3, m0
mova m5, m0
psubw m3, m4
psubw m5, m6
%endif
packuswb m4, m6
packuswb m3, m5
paddusb m2, [%2]
paddusb m4, [%3]
psubusb m2, m1
psubusb m4, m3
mova [%2], m2
mova [%3], m4
%endmacro
%macro TRANSFORM_ADD_8 0
; void ff_hevc_transform_add8_8_<opt>(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride)
cglobal hevc_transform_add8_8, 3, 4, 8
lea r3, [r2*3]
TR_ADD_SSE_8_8
add r1, 64
lea r0, [r0+r2*4]
TR_ADD_SSE_8_8
RET
; void ff_hevc_transform_add16_8_<opt>(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride)
cglobal hevc_transform_add16_8, 3, 4, 7
pxor m0, m0
lea r3, [r2*3]
TR_ADD_SSE_16_32_8 0, r0, r0+r2
TR_ADD_SSE_16_32_8 64, r0+r2*2, r0+r3
%rep 3
add r1, 128
lea r0, [r0+r2*4]
TR_ADD_SSE_16_32_8 0, r0, r0+r2
TR_ADD_SSE_16_32_8 64, r0+r2*2, r0+r3
%endrep
RET
; void ff_hevc_transform_add32_8_<opt>(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride)
cglobal hevc_transform_add32_8, 3, 4, 7
pxor m0, m0
TR_ADD_SSE_16_32_8 0, r0, r0+16
TR_ADD_SSE_16_32_8 64, r0+r2, r0+r2+16
%rep 15
add r1, 128
lea r0, [r0+r2*2]
TR_ADD_SSE_16_32_8 0, r0, r0+16
TR_ADD_SSE_16_32_8 64, r0+r2, r0+r2+16
%endrep
RET
%endmacro
INIT_XMM sse2
TRANSFORM_ADD_8
INIT_XMM avx
TRANSFORM_ADD_8
%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
; void ff_hevc_transform_add32_8_avx2(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride)
cglobal hevc_transform_add32_8, 3, 4, 7
pxor m0, m0
lea r3, [r2*3]
TR_ADD_SSE_16_32_8 0, r0, r0+r2
TR_ADD_SSE_16_32_8 128, r0+r2*2, r0+r3
%rep 7
add r1, 256
lea r0, [r0+r2*4]
TR_ADD_SSE_16_32_8 0, r0, r0+r2
TR_ADD_SSE_16_32_8 128, r0+r2*2, r0+r3
%endrep
RET
%endif
;-----------------------------------------------------------------------------
; void ff_hevc_transform_add_10(pixel *dst, int16_t *block, int stride)
;-----------------------------------------------------------------------------
%macro TR_ADD_SSE_8_10 4
mova m0, [%4]
mova m1, [%4+16]
mova m2, [%4+32]
mova m3, [%4+48]
paddw m0, [%1+0 ]
paddw m1, [%1+%2 ]
paddw m2, [%1+%2*2]
paddw m3, [%1+%3 ]
CLIPW m0, m4, m5
CLIPW m1, m4, m5
CLIPW m2, m4, m5
CLIPW m3, m4, m5
mova [%1+0 ], m0
mova [%1+%2 ], m1
mova [%1+%2*2], m2
mova [%1+%3 ], m3
%endmacro
%macro TR_ADD_MMX4_10 3
mova m0, [%1+0 ]
mova m1, [%1+%2 ]
paddw m0, [%3]
paddw m1, [%3+8]
CLIPW m0, m2, m3
CLIPW m1, m2, m3
mova [%1+0 ], m0
mova [%1+%2 ], m1
%endmacro
%macro TRANS_ADD_SSE_16_10 3
mova m0, [%3]
mova m1, [%3+16]
mova m2, [%3+32]
mova m3, [%3+48]
paddw m0, [%1 ]
paddw m1, [%1+16 ]
paddw m2, [%1+%2 ]
paddw m3, [%1+%2+16]
CLIPW m0, m4, m5
CLIPW m1, m4, m5
CLIPW m2, m4, m5
CLIPW m3, m4, m5
mova [%1 ], m0
mova [%1+16 ], m1
mova [%1+%2 ], m2
mova [%1+%2+16], m3
%endmacro
%macro TRANS_ADD_SSE_32_10 2
mova m0, [%2]
mova m1, [%2+16]
mova m2, [%2+32]
mova m3, [%2+48]
paddw m0, [%1 ]
paddw m1, [%1+16]
paddw m2, [%1+32]
paddw m3, [%1+48]
CLIPW m0, m4, m5
CLIPW m1, m4, m5
CLIPW m2, m4, m5
CLIPW m3, m4, m5
mova [%1 ], m0
mova [%1+16], m1
mova [%1+32], m2
mova [%1+48], m3
%endmacro
%macro TRANS_ADD16_AVX2 4
mova m0, [%4]
mova m1, [%4+32]
mova m2, [%4+64]
mova m3, [%4+96]
paddw m0, [%1+0 ]
paddw m1, [%1+%2 ]
paddw m2, [%1+%2*2]
paddw m3, [%1+%3 ]
CLIPW m0, m4, m5
CLIPW m1, m4, m5
CLIPW m2, m4, m5
CLIPW m3, m4, m5
mova [%1+0 ], m0
mova [%1+%2 ], m1
mova [%1+%2*2], m2
mova [%1+%3 ], m3
%endmacro
%macro TRANS_ADD32_AVX2 3
mova m0, [%3]
mova m1, [%3+32]
mova m2, [%3+64]
mova m3, [%3+96]
paddw m0, [%1 ]
paddw m1, [%1+32 ]
paddw m2, [%1+%2 ]
paddw m3, [%1+%2+32]
CLIPW m0, m4, m5
CLIPW m1, m4, m5
CLIPW m2, m4, m5
CLIPW m3, m4, m5
mova [%1 ], m0
mova [%1+32 ], m1
mova [%1+%2 ], m2
mova [%1+%2+32], m3
%endmacro
INIT_MMX mmxext
cglobal hevc_transform_add4_10,3,4, 6
pxor m2, m2
mova m3, [max_pixels_10]
TR_ADD_MMX4_10 r0, r2, r1
add r1, 16
lea r0, [r0+2*r2]
TR_ADD_MMX4_10 r0, r2, r1
RET
;-----------------------------------------------------------------------------
; void ff_hevc_transform_add_10(pixel *dst, int16_t *block, int stride)
;-----------------------------------------------------------------------------
INIT_XMM sse2
cglobal hevc_transform_add8_10,3,4,6
pxor m4, m4
mova m5, [max_pixels_10]
lea r3, [r2*3]
TR_ADD_SSE_8_10 r0, r2, r3, r1
lea r0, [r0+r2*4]
add r1, 64
TR_ADD_SSE_8_10 r0, r2, r3, r1
RET
cglobal hevc_transform_add16_10,3,4,6
pxor m4, m4
mova m5, [max_pixels_10]
TRANS_ADD_SSE_16_10 r0, r2, r1
%rep 7
lea r0, [r0+r2*2]
add r1, 64
TRANS_ADD_SSE_16_10 r0, r2, r1
%endrep
RET
cglobal hevc_transform_add32_10,3,4,6
pxor m4, m4
mova m5, [max_pixels_10]
TRANS_ADD_SSE_32_10 r0, r1
%rep 31
lea r0, [r0+r2]
add r1, 64
TRANS_ADD_SSE_32_10 r0, r1
%endrep
RET
%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
cglobal hevc_transform_add16_10,3,4,6
pxor m4, m4
mova m5, [max_pixels_10]
lea r3, [r2*3]
TRANS_ADD16_AVX2 r0, r2, r3, r1
%rep 3
lea r0, [r0+r2*4]
add r1, 128
TRANS_ADD16_AVX2 r0, r2, r3, r1
%endrep
RET
cglobal hevc_transform_add32_10,3,4,6
pxor m4, m4
mova m5, [max_pixels_10]
TRANS_ADD32_AVX2 r0, r2, r1
%rep 15
lea r0, [r0+r2*2]
add r1, 128
TRANS_ADD32_AVX2 r0, r2, r1
%endrep
RET
%endif ;HAVE_AVX2_EXTERNAL

@@ -0,0 +1,624 @@
;******************************************************************************
;* SIMD optimized SAO functions for HEVC decoding
;*
;* Copyright (c) 2013 Pierre-Edouard LEPERE
;* Copyright (c) 2014 James Almer
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION_RODATA 32
pw_mask10: times 16 dw 0x03FF
pw_mask12: times 16 dw 0x0FFF
pw_m2: times 16 dw -2
pb_edge_shuffle: times 2 db 1, 2, 0, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
pb_eo: db -1, 0, 1, 0, 0, -1, 0, 1, -1, -1, 1, 1, 1, -1, -1, 1
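; pb_eo stores, for each of the four edge-offset classes, the (x, y) offsets of
; the two neighbours a sample is compared against, laid out as a_x, a_y, b_x,
; b_y per class:
;   class 0 (horizontal): (-1, 0) and ( 1, 0)
;   class 1 (vertical):   ( 0,-1) and ( 0, 1)
;   class 2 (135 deg):    (-1,-1) and ( 1, 1)
;   class 3 (45 deg):     ( 1,-1) and (-1, 1)
; HEVC_SAO_EDGE_FILTER_INIT indexes it as [eo*4] .. [eo*4+3].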
cextern pw_m1
cextern pw_1
cextern pw_2
cextern pb_1
cextern pb_2
SECTION .text
%define MAX_PB_SIZE 64
%define PADDING_SIZE 32 ; AV_INPUT_BUFFER_PADDING_SIZE
;******************************************************************************
;SAO Band Filter
;******************************************************************************
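; Band offset: each sample is classified by its five most significant bits,
;     band = sample >> (bitdepth - 5)
; and an offset is added only when that band is one of the four consecutive
; bands starting at sao_left_class (wrapping modulo 32).  The init macro
; broadcasts the four band indices into m0..m3 and sao_offset_val[1..4] into
; m4..m7 (spilled to the stack on x86-32); the compute macro turns the four
; pcmpeqw matches into a single additive mask.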
%macro HEVC_SAO_BAND_FILTER_INIT 1
and leftq, 31
movd xm0, leftd
add leftq, 1
and leftq, 31
movd xm1, leftd
add leftq, 1
and leftq, 31
movd xm2, leftd
add leftq, 1
and leftq, 31
movd xm3, leftd
SPLATW m0, xm0
SPLATW m1, xm1
SPLATW m2, xm2
SPLATW m3, xm3
%if mmsize > 16
SPLATW m4, [offsetq + 2]
SPLATW m5, [offsetq + 4]
SPLATW m6, [offsetq + 6]
SPLATW m7, [offsetq + 8]
%else
movq m7, [offsetq + 2]
SPLATW m4, m7, 0
SPLATW m5, m7, 1
SPLATW m6, m7, 2
SPLATW m7, m7, 3
%endif
%if ARCH_X86_64
%if %1 > 8
mova m13, [pw_mask %+ %1]
%endif
pxor m14, m14
%else ; ARCH_X86_32
mova [rsp+mmsize*0], m0
mova [rsp+mmsize*1], m1
mova [rsp+mmsize*2], m2
mova [rsp+mmsize*3], m3
mova [rsp+mmsize*4], m4
mova [rsp+mmsize*5], m5
mova [rsp+mmsize*6], m6
pxor m0, m0
%if %1 > 8
mova m1, [pw_mask %+ %1]
%endif
%assign MMSIZE mmsize
%define m14 m0
%define m13 m1
%define m9 m2
%define m8 m3
%endif ; ARCH
DEFINE_ARGS dst, src, dststride, srcstride, offset, height
mov heightd, r7m
%endmacro
%macro HEVC_SAO_BAND_FILTER_COMPUTE 3
psraw %2, %3, %1-5
%if ARCH_X86_64
pcmpeqw m10, %2, m0
pcmpeqw m11, %2, m1
pcmpeqw m12, %2, m2
pcmpeqw %2, m3
pand m10, m4
pand m11, m5
pand m12, m6
pand %2, m7
por m10, m11
por m12, %2
por m10, m12
paddw %3, m10
%else ; ARCH_X86_32
pcmpeqw m4, %2, [rsp+MMSIZE*0]
pcmpeqw m5, %2, [rsp+MMSIZE*1]
pcmpeqw m6, %2, [rsp+MMSIZE*2]
pcmpeqw %2, [rsp+MMSIZE*3]
pand m4, [rsp+MMSIZE*4]
pand m5, [rsp+MMSIZE*5]
pand m6, [rsp+MMSIZE*6]
pand %2, m7
por m4, m5
por m6, %2
por m4, m6
paddw %3, m4
%endif ; ARCH
%endmacro
;void ff_hevc_sao_band_filter_<width>_8_<opt>(uint8_t *_dst, uint8_t *_src, ptrdiff_t _stride_dst, ptrdiff_t _stride_src,
; int16_t *sao_offset_val, int sao_left_class, int width, int height);
%macro HEVC_SAO_BAND_FILTER_8 2
cglobal hevc_sao_band_filter_%1_8, 6, 6, 15, 7*mmsize*ARCH_X86_32, dst, src, dststride, srcstride, offset, left
HEVC_SAO_BAND_FILTER_INIT 8
align 16
.loop:
%if %1 == 8
movq m8, [srcq]
punpcklbw m8, m14
HEVC_SAO_BAND_FILTER_COMPUTE 8, m9, m8
packuswb m8, m14
movq [dstq], m8
%endif ; %1 == 8
%assign i 0
%rep %2
mova m13, [srcq + i]
punpcklbw m8, m13, m14
HEVC_SAO_BAND_FILTER_COMPUTE 8, m9, m8
punpckhbw m13, m14
HEVC_SAO_BAND_FILTER_COMPUTE 8, m9, m13
packuswb m8, m13
mova [dstq + i], m8
%assign i i+mmsize
%endrep
%if %1 == 48
INIT_XMM cpuname
mova m13, [srcq + i]
punpcklbw m8, m13, m14
HEVC_SAO_BAND_FILTER_COMPUTE 8, m9, m8
punpckhbw m13, m14
HEVC_SAO_BAND_FILTER_COMPUTE 8, m9, m13
packuswb m8, m13
mova [dstq + i], m8
%if cpuflag(avx2)
INIT_YMM cpuname
%endif
%endif ; %1 == 48
add dstq, dststrideq ; dst += dststride
add srcq, srcstrideq ; src += srcstride
dec heightd ; cmp height
jnz .loop ; height loop
REP_RET
%endmacro
;void ff_hevc_sao_band_filter_<width>_<depth>_<opt>(uint8_t *_dst, uint8_t *_src, ptrdiff_t _stride_dst, ptrdiff_t _stride_src,
; int16_t *sao_offset_val, int sao_left_class, int width, int height);
%macro HEVC_SAO_BAND_FILTER_16 3
cglobal hevc_sao_band_filter_%2_%1, 6, 6, 15, 7*mmsize*ARCH_X86_32, dst, src, dststride, srcstride, offset, left
HEVC_SAO_BAND_FILTER_INIT %1
align 16
.loop:
%if %2 == 8
movu m8, [srcq]
HEVC_SAO_BAND_FILTER_COMPUTE %1, m9, m8
CLIPW m8, m14, m13
movu [dstq], m8
%endif
%assign i 0
%rep %3
mova m8, [srcq + i]
HEVC_SAO_BAND_FILTER_COMPUTE %1, m9, m8
CLIPW m8, m14, m13
mova [dstq + i], m8
mova m9, [srcq + i + mmsize]
HEVC_SAO_BAND_FILTER_COMPUTE %1, m8, m9
CLIPW m9, m14, m13
mova [dstq + i + mmsize], m9
%assign i i+mmsize*2
%endrep
%if %2 == 48
INIT_XMM cpuname
mova m8, [srcq + i]
HEVC_SAO_BAND_FILTER_COMPUTE %1, m9, m8
CLIPW m8, m14, m13
mova [dstq + i], m8
mova m9, [srcq + i + mmsize]
HEVC_SAO_BAND_FILTER_COMPUTE %1, m8, m9
CLIPW m9, m14, m13
mova [dstq + i + mmsize], m9
%if cpuflag(avx2)
INIT_YMM cpuname
%endif
%endif ; %1 == 48
add dstq, dststrideq
add srcq, srcstrideq
dec heightd
jg .loop
REP_RET
%endmacro
%macro HEVC_SAO_BAND_FILTER_FUNCS 0
HEVC_SAO_BAND_FILTER_8 8, 0
HEVC_SAO_BAND_FILTER_8 16, 1
HEVC_SAO_BAND_FILTER_8 32, 2
HEVC_SAO_BAND_FILTER_8 48, 2
HEVC_SAO_BAND_FILTER_8 64, 4
HEVC_SAO_BAND_FILTER_16 10, 8, 0
HEVC_SAO_BAND_FILTER_16 10, 16, 1
HEVC_SAO_BAND_FILTER_16 10, 32, 2
HEVC_SAO_BAND_FILTER_16 10, 48, 2
HEVC_SAO_BAND_FILTER_16 10, 64, 4
HEVC_SAO_BAND_FILTER_16 12, 8, 0
HEVC_SAO_BAND_FILTER_16 12, 16, 1
HEVC_SAO_BAND_FILTER_16 12, 32, 2
HEVC_SAO_BAND_FILTER_16 12, 48, 2
HEVC_SAO_BAND_FILTER_16 12, 64, 4
%endmacro
INIT_XMM sse2
HEVC_SAO_BAND_FILTER_FUNCS
INIT_XMM avx
HEVC_SAO_BAND_FILTER_FUNCS
%if HAVE_AVX2_EXTERNAL
INIT_XMM avx2
HEVC_SAO_BAND_FILTER_8 8, 0
HEVC_SAO_BAND_FILTER_8 16, 1
INIT_YMM avx2
HEVC_SAO_BAND_FILTER_8 32, 1
HEVC_SAO_BAND_FILTER_8 48, 1
HEVC_SAO_BAND_FILTER_8 64, 2
INIT_XMM avx2
HEVC_SAO_BAND_FILTER_16 10, 8, 0
HEVC_SAO_BAND_FILTER_16 10, 16, 1
INIT_YMM avx2
HEVC_SAO_BAND_FILTER_16 10, 32, 1
HEVC_SAO_BAND_FILTER_16 10, 48, 1
HEVC_SAO_BAND_FILTER_16 10, 64, 2
INIT_XMM avx2
HEVC_SAO_BAND_FILTER_16 12, 8, 0
HEVC_SAO_BAND_FILTER_16 12, 16, 1
INIT_YMM avx2
HEVC_SAO_BAND_FILTER_16 12, 32, 1
HEVC_SAO_BAND_FILTER_16 12, 48, 1
HEVC_SAO_BAND_FILTER_16 12, 64, 2
%endif
;******************************************************************************
;SAO Edge Filter
;******************************************************************************
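; Edge offset: for every sample p with neighbours n0/n1 along the selected
; direction (see pb_eo above) the sum sign(p - n0) + sign(p - n1) in -2..2
; selects one of five offsets; the flat case (sum 0) uses sao_offset_val[0],
; which is expected to be zero.  The 8-bit kernel derives the signs with
; pminub/pcmpeqb and resolves the offset with a pshufb lookup into the offset
; table pre-shuffled by pb_edge_shuffle, followed by a pmaddubsw add; the
; 16-bit kernels compare the sum against -2..2 directly and mask the five
; broadcast offsets.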
%define EDGE_SRCSTRIDE 2 * MAX_PB_SIZE + PADDING_SIZE
%macro HEVC_SAO_EDGE_FILTER_INIT 1
%if WIN64
movsxd eoq, dword eom
%elif ARCH_X86_64
movsxd eoq, eod
%else
mov eoq, r4m
%endif
lea tmp2q, [pb_eo]
movsx a_strideq, byte [tmp2q+eoq*4+1]
movsx b_strideq, byte [tmp2q+eoq*4+3]
imul a_strideq, EDGE_SRCSTRIDE>>%1
imul b_strideq, EDGE_SRCSTRIDE>>%1
movsx tmpq, byte [tmp2q+eoq*4]
add a_strideq, tmpq
movsx tmpq, byte [tmp2q+eoq*4+2]
add b_strideq, tmpq
%endmacro
%macro HEVC_SAO_EDGE_FILTER_COMPUTE_8 1
pminub m4, m1, m2
pminub m5, m1, m3
pcmpeqb m2, m4
pcmpeqb m3, m5
pcmpeqb m4, m1
pcmpeqb m5, m1
psubb m4, m2
psubb m5, m3
paddb m4, m6
paddb m4, m5
pshufb m2, m0, m4
%if %1 > 8
punpckhbw m5, m7, m1
punpckhbw m4, m2, m7
punpcklbw m3, m7, m1
punpcklbw m2, m7
pmaddubsw m5, m4
pmaddubsw m3, m2
packuswb m3, m5
%else
punpcklbw m3, m7, m1
punpcklbw m2, m7
pmaddubsw m3, m2
packuswb m3, m3
%endif
%endmacro
;void ff_hevc_sao_edge_filter_<width>_8_<opt>(uint8_t *_dst, uint8_t *_src, ptrdiff_t stride_dst, int16_t *sao_offset_val,
; int eo, int width, int height);
%macro HEVC_SAO_EDGE_FILTER_8 2-3
%if ARCH_X86_64
cglobal hevc_sao_edge_filter_%1_8, 4, 9, 8, dst, src, dststride, offset, eo, a_stride, b_stride, height, tmp
%define tmp2q heightq
HEVC_SAO_EDGE_FILTER_INIT 0
mov heightd, r6m
%else ; ARCH_X86_32
cglobal hevc_sao_edge_filter_%1_8, 1, 6, 8, dst, src, dststride, a_stride, b_stride, height
%define eoq srcq
%define tmpq heightq
%define tmp2q dststrideq
%define offsetq heightq
HEVC_SAO_EDGE_FILTER_INIT 0
mov srcq, srcm
mov offsetq, r3m
mov dststrideq, dststridem
%endif ; ARCH
%if mmsize > 16
vbroadcasti128 m0, [offsetq]
%else
movu m0, [offsetq]
%endif
mova m1, [pb_edge_shuffle]
packsswb m0, m0
mova m7, [pb_1]
pshufb m0, m1
mova m6, [pb_2]
%if ARCH_X86_32
mov heightd, r6m
%endif
align 16
.loop:
%if %1 == 8
movq m1, [srcq]
movq m2, [srcq + a_strideq]
movq m3, [srcq + b_strideq]
HEVC_SAO_EDGE_FILTER_COMPUTE_8 %1
movq [dstq], m3
%endif
%assign i 0
%rep %2
mova m1, [srcq + i]
movu m2, [srcq + a_strideq + i]
movu m3, [srcq + b_strideq + i]
HEVC_SAO_EDGE_FILTER_COMPUTE_8 %1
mov%3 [dstq + i], m3
%assign i i+mmsize
%endrep
%if %1 == 48
INIT_XMM cpuname
mova m1, [srcq + i]
movu m2, [srcq + a_strideq + i]
movu m3, [srcq + b_strideq + i]
HEVC_SAO_EDGE_FILTER_COMPUTE_8 %1
mova [dstq + i], m3
%if cpuflag(avx2)
INIT_YMM cpuname
%endif
%endif
add dstq, dststrideq
add srcq, EDGE_SRCSTRIDE
dec heightd
jg .loop
RET
%endmacro
%macro PMINUW 4
%if cpuflag(sse4)
pminuw %1, %2, %3
%else
psubusw %4, %2, %3
psubw %1, %2, %4
%endif
%endmacro
%macro HEVC_SAO_EDGE_FILTER_COMPUTE_10 0
PMINUW m4, m1, m2, m6
PMINUW m5, m1, m3, m7
pcmpeqw m2, m4
pcmpeqw m3, m5
pcmpeqw m4, m1
pcmpeqw m5, m1
psubw m4, m2
psubw m5, m3
paddw m4, m5
pcmpeqw m2, m4, [pw_m2]
%if ARCH_X86_64
pcmpeqw m3, m4, m13
pcmpeqw m5, m4, m0
pcmpeqw m6, m4, m14
pcmpeqw m7, m4, m15
pand m2, m8
pand m3, m9
pand m5, m10
pand m6, m11
pand m7, m12
%else
pcmpeqw m3, m4, [pw_m1]
pcmpeqw m5, m4, m0
pcmpeqw m6, m4, [pw_1]
pcmpeqw m7, m4, [pw_2]
pand m2, [rsp+MMSIZE*0]
pand m3, [rsp+MMSIZE*1]
pand m5, [rsp+MMSIZE*2]
pand m6, [rsp+MMSIZE*3]
pand m7, [rsp+MMSIZE*4]
%endif
paddw m2, m3
paddw m5, m6
paddw m2, m7
paddw m2, m1
paddw m2, m5
%endmacro
;void ff_hevc_sao_edge_filter_<width>_<depth>_<opt>(uint8_t *_dst, uint8_t *_src, ptrdiff_t stride_dst, int16_t *sao_offset_val,
; int eo, int width, int height);
%macro HEVC_SAO_EDGE_FILTER_16 3
%if ARCH_X86_64
cglobal hevc_sao_edge_filter_%2_%1, 4, 9, 16, dst, src, dststride, offset, eo, a_stride, b_stride, height, tmp
%define tmp2q heightq
HEVC_SAO_EDGE_FILTER_INIT 1
mov heightd, r6m
add a_strideq, a_strideq
add b_strideq, b_strideq
%else ; ARCH_X86_32
cglobal hevc_sao_edge_filter_%2_%1, 1, 6, 8, 5*mmsize, dst, src, dststride, a_stride, b_stride, height
%assign MMSIZE mmsize
%define eoq srcq
%define tmpq heightq
%define tmp2q dststrideq
%define offsetq heightq
%define m8 m1
%define m9 m2
%define m10 m3
%define m11 m4
%define m12 m5
HEVC_SAO_EDGE_FILTER_INIT 1
mov srcq, srcm
mov offsetq, r3m
mov dststrideq, dststridem
add a_strideq, a_strideq
add b_strideq, b_strideq
%endif ; ARCH
%if cpuflag(avx2)
SPLATW m8, [offsetq+2]
SPLATW m9, [offsetq+4]
SPLATW m10, [offsetq+0]
SPLATW m11, [offsetq+6]
SPLATW m12, [offsetq+8]
%else
movq m10, [offsetq+0]
movd m12, [offsetq+6]
SPLATW m8, xm10, 1
SPLATW m9, xm10, 2
SPLATW m10, xm10, 0
SPLATW m11, xm12, 0
SPLATW m12, xm12, 1
%endif
pxor m0, m0
%if ARCH_X86_64
mova m13, [pw_m1]
mova m14, [pw_1]
mova m15, [pw_2]
%else
mov heightd, r6m
mova [rsp+mmsize*0], m8
mova [rsp+mmsize*1], m9
mova [rsp+mmsize*2], m10
mova [rsp+mmsize*3], m11
mova [rsp+mmsize*4], m12
%endif
align 16
.loop:
%if %2 == 8
mova m1, [srcq]
movu m2, [srcq+a_strideq]
movu m3, [srcq+b_strideq]
HEVC_SAO_EDGE_FILTER_COMPUTE_10
CLIPW m2, m0, [pw_mask %+ %1]
movu [dstq], m2
%endif
%assign i 0
%rep %3
mova m1, [srcq + i]
movu m2, [srcq+a_strideq + i]
movu m3, [srcq+b_strideq + i]
HEVC_SAO_EDGE_FILTER_COMPUTE_10
CLIPW m2, m0, [pw_mask %+ %1]
mova [dstq + i], m2
mova m1, [srcq + i + mmsize]
movu m2, [srcq+a_strideq + i + mmsize]
movu m3, [srcq+b_strideq + i + mmsize]
HEVC_SAO_EDGE_FILTER_COMPUTE_10
CLIPW m2, m0, [pw_mask %+ %1]
mova [dstq + i + mmsize], m2
%assign i i+mmsize*2
%endrep
%if %2 == 48
INIT_XMM cpuname
mova m1, [srcq + i]
movu m2, [srcq+a_strideq + i]
movu m3, [srcq+b_strideq + i]
HEVC_SAO_EDGE_FILTER_COMPUTE_10
CLIPW m2, m0, [pw_mask %+ %1]
mova [dstq + i], m2
mova m1, [srcq + i + mmsize]
movu m2, [srcq+a_strideq + i + mmsize]
movu m3, [srcq+b_strideq + i + mmsize]
HEVC_SAO_EDGE_FILTER_COMPUTE_10
CLIPW m2, m0, [pw_mask %+ %1]
mova [dstq + i + mmsize], m2
%if cpuflag(avx2)
INIT_YMM cpuname
%endif
%endif
add dstq, dststrideq
add srcq, EDGE_SRCSTRIDE
dec heightd
jg .loop
RET
%endmacro
INIT_XMM ssse3
HEVC_SAO_EDGE_FILTER_8 8, 0
HEVC_SAO_EDGE_FILTER_8 16, 1, a
HEVC_SAO_EDGE_FILTER_8 32, 2, a
HEVC_SAO_EDGE_FILTER_8 48, 2, a
HEVC_SAO_EDGE_FILTER_8 64, 4, a
%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
HEVC_SAO_EDGE_FILTER_8 32, 1, a
HEVC_SAO_EDGE_FILTER_8 48, 1, u
HEVC_SAO_EDGE_FILTER_8 64, 2, a
%endif
INIT_XMM sse2
HEVC_SAO_EDGE_FILTER_16 10, 8, 0
HEVC_SAO_EDGE_FILTER_16 10, 16, 1
HEVC_SAO_EDGE_FILTER_16 10, 32, 2
HEVC_SAO_EDGE_FILTER_16 10, 48, 2
HEVC_SAO_EDGE_FILTER_16 10, 64, 4
HEVC_SAO_EDGE_FILTER_16 12, 8, 0
HEVC_SAO_EDGE_FILTER_16 12, 16, 1
HEVC_SAO_EDGE_FILTER_16 12, 32, 2
HEVC_SAO_EDGE_FILTER_16 12, 48, 2
HEVC_SAO_EDGE_FILTER_16 12, 64, 4
%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
HEVC_SAO_EDGE_FILTER_16 10, 32, 1
HEVC_SAO_EDGE_FILTER_16 10, 48, 1
HEVC_SAO_EDGE_FILTER_16 10, 64, 2
HEVC_SAO_EDGE_FILTER_16 12, 32, 1
HEVC_SAO_EDGE_FILTER_16 12, 48, 1
HEVC_SAO_EDGE_FILTER_16 12, 64, 2
%endif

@@ -0,0 +1,261 @@
/*
* HEVC video decoder
*
* Copyright (C) 2012 - 2013 Guillaume Martres
* Copyright (C) 2013 - 2014 Pierre-Edouard Lepere
*
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_X86_HEVCDSP_H
#define AVCODEC_X86_HEVCDSP_H
#include <stddef.h>
#include <stdint.h>
#define idct_dc_proto(size, bitd, opt) \
void ff_hevc_idct##size##_dc_add_##bitd##_##opt(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride)
#define PEL_LINK(dst, idx1, idx2, idx3, name, D, opt) \
dst[idx1][idx2][idx3] = ff_hevc_put_hevc_ ## name ## _ ## D ## _##opt; \
dst ## _bi[idx1][idx2][idx3] = ff_hevc_put_hevc_bi_ ## name ## _ ## D ## _##opt; \
dst ## _uni[idx1][idx2][idx3] = ff_hevc_put_hevc_uni_ ## name ## _ ## D ## _##opt; \
dst ## _uni_w[idx1][idx2][idx3] = ff_hevc_put_hevc_uni_w_ ## name ## _ ## D ## _##opt; \
dst ## _bi_w[idx1][idx2][idx3] = ff_hevc_put_hevc_bi_w_ ## name ## _ ## D ## _##opt
#define PEL_PROTOTYPE(name, D, opt) \
void ff_hevc_put_hevc_ ## name ## _ ## D ## _##opt(int16_t *dst, uint8_t *_src, ptrdiff_t _srcstride, int height, intptr_t mx, intptr_t my,int width); \
void ff_hevc_put_hevc_bi_ ## name ## _ ## D ## _##opt(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride, int16_t *src2, int height, intptr_t mx, intptr_t my, int width); \
void ff_hevc_put_hevc_uni_ ## name ## _ ## D ## _##opt(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride, int height, intptr_t mx, intptr_t my, int width); \
void ff_hevc_put_hevc_uni_w_ ## name ## _ ## D ## _##opt(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride, int height, int denom, int wx, int ox, intptr_t mx, intptr_t my, int width); \
void ff_hevc_put_hevc_bi_w_ ## name ## _ ## D ## _##opt(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride, int16_t *src2, int height, int denom, int wx0, int wx1, int ox0, int ox1, intptr_t mx, intptr_t my, int width)
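/*
 * Naming scheme: ff_hevc_put_hevc_<name><width>_<depth>_<opt> writes the
 * intermediate 16-bit prediction, the _uni variant writes final pixels for
 * uni-prediction, _bi blends with a second int16_t source (src2) for
 * bi-prediction, and the _uni_w/_bi_w variants additionally apply explicit
 * weighted prediction (denom, weight and offset arguments).
 */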
///////////////////////////////////////////////////////////////////////////////
// MC functions
///////////////////////////////////////////////////////////////////////////////
#define EPEL_PROTOTYPES(fname, bitd, opt) \
PEL_PROTOTYPE(fname##4, bitd, opt); \
PEL_PROTOTYPE(fname##6, bitd, opt); \
PEL_PROTOTYPE(fname##8, bitd, opt); \
PEL_PROTOTYPE(fname##12, bitd, opt); \
PEL_PROTOTYPE(fname##16, bitd, opt); \
PEL_PROTOTYPE(fname##24, bitd, opt); \
PEL_PROTOTYPE(fname##32, bitd, opt); \
PEL_PROTOTYPE(fname##48, bitd, opt); \
PEL_PROTOTYPE(fname##64, bitd, opt)
#define QPEL_PROTOTYPES(fname, bitd, opt) \
PEL_PROTOTYPE(fname##4, bitd, opt); \
PEL_PROTOTYPE(fname##8, bitd, opt); \
PEL_PROTOTYPE(fname##12, bitd, opt); \
PEL_PROTOTYPE(fname##16, bitd, opt); \
PEL_PROTOTYPE(fname##24, bitd, opt); \
PEL_PROTOTYPE(fname##32, bitd, opt); \
PEL_PROTOTYPE(fname##48, bitd, opt); \
PEL_PROTOTYPE(fname##64, bitd, opt)
#define WEIGHTING_PROTOTYPE(width, bitd, opt) \
void ff_hevc_put_hevc_uni_w##width##_##bitd##_##opt(uint8_t *dst, ptrdiff_t dststride, int16_t *_src, int height, int denom, int _wx, int _ox); \
void ff_hevc_put_hevc_bi_w##width##_##bitd##_##opt(uint8_t *dst, ptrdiff_t dststride, int16_t *_src, int16_t *_src2, int height, int denom, int _wx0, int _wx1, int _ox0, int _ox1)
#define WEIGHTING_PROTOTYPES(bitd, opt) \
WEIGHTING_PROTOTYPE(2, bitd, opt); \
WEIGHTING_PROTOTYPE(4, bitd, opt); \
WEIGHTING_PROTOTYPE(6, bitd, opt); \
WEIGHTING_PROTOTYPE(8, bitd, opt); \
WEIGHTING_PROTOTYPE(12, bitd, opt); \
WEIGHTING_PROTOTYPE(16, bitd, opt); \
WEIGHTING_PROTOTYPE(24, bitd, opt); \
WEIGHTING_PROTOTYPE(32, bitd, opt); \
WEIGHTING_PROTOTYPE(48, bitd, opt); \
WEIGHTING_PROTOTYPE(64, bitd, opt)
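/* For instance, WEIGHTING_PROTOTYPE(8, 8, sse4) from the list above declares
 *     void ff_hevc_put_hevc_uni_w8_8_sse4(uint8_t *dst, ptrdiff_t dststride,
 *                                         int16_t *_src, int height,
 *                                         int denom, int _wx, int _ox);
 * plus the matching bi-directional ff_hevc_put_hevc_bi_w8_8_sse4() taking two
 * sources and two weight/offset pairs; WEIGHTING_PROTOTYPES() stamps this out
 * for every supported block width. */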
///////////////////////////////////////////////////////////////////////////////
// QPEL_PIXELS EPEL_PIXELS
///////////////////////////////////////////////////////////////////////////////
EPEL_PROTOTYPES(pel_pixels , 8, sse4);
EPEL_PROTOTYPES(pel_pixels , 10, sse4);
EPEL_PROTOTYPES(pel_pixels , 12, sse4);
void ff_hevc_put_hevc_pel_pixels16_8_avx2(int16_t *dst, uint8_t *_src, ptrdiff_t _srcstride, int height, intptr_t mx, intptr_t my,int width);
void ff_hevc_put_hevc_pel_pixels24_8_avx2(int16_t *dst, uint8_t *_src, ptrdiff_t _srcstride, int height, intptr_t mx, intptr_t my,int width);
void ff_hevc_put_hevc_pel_pixels32_8_avx2(int16_t *dst, uint8_t *_src, ptrdiff_t _srcstride, int height, intptr_t mx, intptr_t my,int width);
void ff_hevc_put_hevc_pel_pixels48_8_avx2(int16_t *dst, uint8_t *_src, ptrdiff_t _srcstride, int height, intptr_t mx, intptr_t my,int width);
void ff_hevc_put_hevc_pel_pixels64_8_avx2(int16_t *dst, uint8_t *_src, ptrdiff_t _srcstride, int height, intptr_t mx, intptr_t my,int width);
void ff_hevc_put_hevc_pel_pixels16_10_avx2(int16_t *dst, uint8_t *_src, ptrdiff_t _srcstride, int height, intptr_t mx, intptr_t my,int width);
void ff_hevc_put_hevc_pel_pixels24_10_avx2(int16_t *dst, uint8_t *_src, ptrdiff_t _srcstride, int height, intptr_t mx, intptr_t my,int width);
void ff_hevc_put_hevc_pel_pixels32_10_avx2(int16_t *dst, uint8_t *_src, ptrdiff_t _srcstride, int height, intptr_t mx, intptr_t my,int width);
void ff_hevc_put_hevc_pel_pixels48_10_avx2(int16_t *dst, uint8_t *_src, ptrdiff_t _srcstride, int height, intptr_t mx, intptr_t my,int width);
void ff_hevc_put_hevc_pel_pixels64_10_avx2(int16_t *dst, uint8_t *_src, ptrdiff_t _srcstride, int height, intptr_t mx, intptr_t my,int width);
void ff_hevc_put_hevc_uni_pel_pixels32_8_avx2(uint8_t *dst, ptrdiff_t dststride,uint8_t *_src, ptrdiff_t _srcstride, int height, intptr_t mx, intptr_t my,int width);
void ff_hevc_put_hevc_uni_pel_pixels48_8_avx2(uint8_t *dst, ptrdiff_t dststride,uint8_t *_src, ptrdiff_t _srcstride, int height, intptr_t mx, intptr_t my,int width);
void ff_hevc_put_hevc_uni_pel_pixels64_8_avx2(uint8_t *dst, ptrdiff_t dststride,uint8_t *_src, ptrdiff_t _srcstride, int height, intptr_t mx, intptr_t my,int width);
void ff_hevc_put_hevc_uni_pel_pixels96_8_avx2(uint8_t *dst, ptrdiff_t dststride,uint8_t *_src, ptrdiff_t _srcstride, int height, intptr_t mx, intptr_t my,int width); // used for 10 bit
void ff_hevc_put_hevc_uni_pel_pixels128_8_avx2(uint8_t *dst, ptrdiff_t dststride,uint8_t *_src, ptrdiff_t _srcstride, int height, intptr_t mx, intptr_t my,int width); // used for 10 bit
void ff_hevc_put_hevc_bi_pel_pixels16_8_avx2(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride, int16_t *src2, int height, intptr_t mx, intptr_t my, int width);
void ff_hevc_put_hevc_bi_pel_pixels24_8_avx2(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride, int16_t *src2, int height, intptr_t mx, intptr_t my, int width);
void ff_hevc_put_hevc_bi_pel_pixels32_8_avx2(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride, int16_t *src2, int height, intptr_t mx, intptr_t my, int width);
void ff_hevc_put_hevc_bi_pel_pixels48_8_avx2(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride, int16_t *src2, int height, intptr_t mx, intptr_t my, int width);
void ff_hevc_put_hevc_bi_pel_pixels64_8_avx2(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride, int16_t *src2, int height, intptr_t mx, intptr_t my, int width);
void ff_hevc_put_hevc_bi_pel_pixels16_10_avx2(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride, int16_t *src2, int height, intptr_t mx, intptr_t my, int width);
void ff_hevc_put_hevc_bi_pel_pixels24_10_avx2(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride, int16_t *src2, int height, intptr_t mx, intptr_t my, int width);
void ff_hevc_put_hevc_bi_pel_pixels32_10_avx2(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride, int16_t *src2, int height, intptr_t mx, intptr_t my, int width);
void ff_hevc_put_hevc_bi_pel_pixels48_10_avx2(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride, int16_t *src2, int height, intptr_t mx, intptr_t my, int width);
void ff_hevc_put_hevc_bi_pel_pixels64_10_avx2(uint8_t *_dst, ptrdiff_t _dststride, uint8_t *_src, ptrdiff_t _srcstride, int16_t *src2, int height, intptr_t mx, intptr_t my, int width);
///////////////////////////////////////////////////////////////////////////////
// EPEL
///////////////////////////////////////////////////////////////////////////////
EPEL_PROTOTYPES(epel_h , 8, sse4);
EPEL_PROTOTYPES(epel_h , 10, sse4);
EPEL_PROTOTYPES(epel_h , 12, sse4);
EPEL_PROTOTYPES(epel_v , 8, sse4);
EPEL_PROTOTYPES(epel_v , 10, sse4);
EPEL_PROTOTYPES(epel_v , 12, sse4);
EPEL_PROTOTYPES(epel_hv , 8, sse4);
EPEL_PROTOTYPES(epel_hv , 10, sse4);
EPEL_PROTOTYPES(epel_hv , 12, sse4);
PEL_PROTOTYPE(epel_h16, 8, avx2);
PEL_PROTOTYPE(epel_h24, 8, avx2);
PEL_PROTOTYPE(epel_h32, 8, avx2);
PEL_PROTOTYPE(epel_h48, 8, avx2);
PEL_PROTOTYPE(epel_h64, 8, avx2);
PEL_PROTOTYPE(epel_h16,10, avx2);
PEL_PROTOTYPE(epel_h24,10, avx2);
PEL_PROTOTYPE(epel_h32,10, avx2);
PEL_PROTOTYPE(epel_h48,10, avx2);
PEL_PROTOTYPE(epel_h64,10, avx2);
PEL_PROTOTYPE(epel_v16, 8, avx2);
PEL_PROTOTYPE(epel_v24, 8, avx2);
PEL_PROTOTYPE(epel_v32, 8, avx2);
PEL_PROTOTYPE(epel_v48, 8, avx2);
PEL_PROTOTYPE(epel_v64, 8, avx2);
PEL_PROTOTYPE(epel_v16,10, avx2);
PEL_PROTOTYPE(epel_v24,10, avx2);
PEL_PROTOTYPE(epel_v32,10, avx2);
PEL_PROTOTYPE(epel_v48,10, avx2);
PEL_PROTOTYPE(epel_v64,10, avx2);
PEL_PROTOTYPE(epel_hv16, 8, avx2);
PEL_PROTOTYPE(epel_hv24, 8, avx2);
PEL_PROTOTYPE(epel_hv32, 8, avx2);
PEL_PROTOTYPE(epel_hv48, 8, avx2);
PEL_PROTOTYPE(epel_hv64, 8, avx2);
PEL_PROTOTYPE(epel_hv16,10, avx2);
PEL_PROTOTYPE(epel_hv24,10, avx2);
PEL_PROTOTYPE(epel_hv32,10, avx2);
PEL_PROTOTYPE(epel_hv48,10, avx2);
PEL_PROTOTYPE(epel_hv64,10, avx2);
///////////////////////////////////////////////////////////////////////////////
// QPEL
///////////////////////////////////////////////////////////////////////////////
QPEL_PROTOTYPES(qpel_h , 8, sse4);
QPEL_PROTOTYPES(qpel_h , 10, sse4);
QPEL_PROTOTYPES(qpel_h , 12, sse4);
QPEL_PROTOTYPES(qpel_v, 8, sse4);
QPEL_PROTOTYPES(qpel_v, 10, sse4);
QPEL_PROTOTYPES(qpel_v, 12, sse4);
QPEL_PROTOTYPES(qpel_hv, 8, sse4);
QPEL_PROTOTYPES(qpel_hv, 10, sse4);
QPEL_PROTOTYPES(qpel_hv, 12, sse4);
PEL_PROTOTYPE(qpel_h16, 8, avx2);
PEL_PROTOTYPE(qpel_h24, 8, avx2);
PEL_PROTOTYPE(qpel_h32, 8, avx2);
PEL_PROTOTYPE(qpel_h48, 8, avx2);
PEL_PROTOTYPE(qpel_h64, 8, avx2);
PEL_PROTOTYPE(qpel_h16,10, avx2);
PEL_PROTOTYPE(qpel_h24,10, avx2);
PEL_PROTOTYPE(qpel_h32,10, avx2);
PEL_PROTOTYPE(qpel_h48,10, avx2);
PEL_PROTOTYPE(qpel_h64,10, avx2);
PEL_PROTOTYPE(qpel_v16, 8, avx2);
PEL_PROTOTYPE(qpel_v24, 8, avx2);
PEL_PROTOTYPE(qpel_v32, 8, avx2);
PEL_PROTOTYPE(qpel_v48, 8, avx2);
PEL_PROTOTYPE(qpel_v64, 8, avx2);
PEL_PROTOTYPE(qpel_v16,10, avx2);
PEL_PROTOTYPE(qpel_v24,10, avx2);
PEL_PROTOTYPE(qpel_v32,10, avx2);
PEL_PROTOTYPE(qpel_v48,10, avx2);
PEL_PROTOTYPE(qpel_v64,10, avx2);
PEL_PROTOTYPE(qpel_hv16, 8, avx2);
PEL_PROTOTYPE(qpel_hv24, 8, avx2);
PEL_PROTOTYPE(qpel_hv32, 8, avx2);
PEL_PROTOTYPE(qpel_hv48, 8, avx2);
PEL_PROTOTYPE(qpel_hv64, 8, avx2);
PEL_PROTOTYPE(qpel_hv16,10, avx2);
PEL_PROTOTYPE(qpel_hv24,10, avx2);
PEL_PROTOTYPE(qpel_hv32,10, avx2);
PEL_PROTOTYPE(qpel_hv48,10, avx2);
PEL_PROTOTYPE(qpel_hv64,10, avx2);
WEIGHTING_PROTOTYPES(8, sse4);
WEIGHTING_PROTOTYPES(10, sse4);
WEIGHTING_PROTOTYPES(12, sse4);
///////////////////////////////////////////////////////////////////////////////
// TRANSFORM_ADD
///////////////////////////////////////////////////////////////////////////////
void ff_hevc_transform_add4_8_mmxext(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
void ff_hevc_transform_add8_8_sse2(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
void ff_hevc_transform_add16_8_sse2(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
void ff_hevc_transform_add32_8_sse2(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
void ff_hevc_transform_add8_8_avx(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
void ff_hevc_transform_add16_8_avx(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
void ff_hevc_transform_add32_8_avx(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
void ff_hevc_transform_add32_8_avx2(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
void ff_hevc_transform_add4_10_mmxext(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
void ff_hevc_transform_add8_10_sse2(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
void ff_hevc_transform_add16_10_sse2(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
void ff_hevc_transform_add32_10_sse2(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
void ff_hevc_transform_add16_10_avx2(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
void ff_hevc_transform_add32_10_avx2(uint8_t *dst, int16_t *coeffs, ptrdiff_t stride);
#endif // AVCODEC_X86_HEVCDSP_H

File diff suppressed because it is too large

View File

@@ -0,0 +1,680 @@
;******************************************************************************
;*
;* Copyright (c) 2000-2001 Fabrice Bellard <fabrice@bellard.org>
;* Copyright (c) Nick Kurshev <nickols_k@mail.ru>
;* Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
;* Copyright (c) 2002 Zdenek Kabelac <kabi@informatics.muni.cz>
;* Copyright (c) 2013 Daniel Kang
;*
;* SIMD-optimized halfpel functions
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION_RODATA
cextern pb_1
cextern pw_2
pb_interleave16: db 0, 8, 1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15
pb_interleave8: db 0, 4, 1, 5, 2, 6, 3, 7
cextern pw_8192
SECTION .text
; void ff_put_pixels8_x2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
%macro PUT_PIXELS8_X2 0
%if cpuflag(sse2)
cglobal put_pixels16_x2, 4,5,4
%else
cglobal put_pixels8_x2, 4,5
%endif
lea r4, [r2*2]
.loop:
movu m0, [r1+1]
movu m1, [r1+r2+1]
%if cpuflag(sse2)
movu m2, [r1]
movu m3, [r1+r2]
pavgb m0, m2
pavgb m1, m3
%else
PAVGB m0, [r1]
PAVGB m1, [r1+r2]
%endif
mova [r0], m0
mova [r0+r2], m1
add r1, r4
add r0, r4
movu m0, [r1+1]
movu m1, [r1+r2+1]
%if cpuflag(sse2)
movu m2, [r1]
movu m3, [r1+r2]
pavgb m0, m2
pavgb m1, m3
%else
PAVGB m0, [r1]
PAVGB m1, [r1+r2]
%endif
add r1, r4
mova [r0], m0
mova [r0+r2], m1
add r0, r4
sub r3d, 4
jne .loop
REP_RET
%endmacro
INIT_MMX mmxext
PUT_PIXELS8_X2
INIT_MMX 3dnow
PUT_PIXELS8_X2
; void ff_put_pixels16_x2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
%macro PUT_PIXELS_16 0
cglobal put_pixels16_x2, 4,5
lea r4, [r2*2]
.loop:
mova m0, [r1]
mova m1, [r1+r2]
mova m2, [r1+8]
mova m3, [r1+r2+8]
PAVGB m0, [r1+1]
PAVGB m1, [r1+r2+1]
PAVGB m2, [r1+9]
PAVGB m3, [r1+r2+9]
mova [r0], m0
mova [r0+r2], m1
mova [r0+8], m2
mova [r0+r2+8], m3
add r1, r4
add r0, r4
mova m0, [r1]
mova m1, [r1+r2]
mova m2, [r1+8]
mova m3, [r1+r2+8]
PAVGB m0, [r1+1]
PAVGB m1, [r1+r2+1]
PAVGB m2, [r1+9]
PAVGB m3, [r1+r2+9]
add r1, r4
mova [r0], m0
mova [r0+r2], m1
mova [r0+8], m2
mova [r0+r2+8], m3
add r0, r4
sub r3d, 4
jne .loop
REP_RET
%endmacro
INIT_MMX mmxext
PUT_PIXELS_16
INIT_MMX 3dnow
PUT_PIXELS_16
; The 8_X2 macro can easily be used here
INIT_XMM sse2
PUT_PIXELS8_X2
; void ff_put_no_rnd_pixels8_x2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
%macro PUT_NO_RND_PIXELS8_X2 0
cglobal put_no_rnd_pixels8_x2, 4,5
mova m6, [pb_1]
lea r4, [r2*2]
.loop:
mova m0, [r1]
mova m2, [r1+r2]
mova m1, [r1+1]
mova m3, [r1+r2+1]
add r1, r4
psubusb m0, m6
psubusb m2, m6
PAVGB m0, m1
PAVGB m2, m3
mova [r0], m0
mova [r0+r2], m2
mova m0, [r1]
mova m1, [r1+1]
mova m2, [r1+r2]
mova m3, [r1+r2+1]
add r0, r4
add r1, r4
psubusb m0, m6
psubusb m2, m6
PAVGB m0, m1
PAVGB m2, m3
mova [r0], m0
mova [r0+r2], m2
add r0, r4
sub r3d, 4
jne .loop
REP_RET
%endmacro
INIT_MMX mmxext
PUT_NO_RND_PIXELS8_X2
INIT_MMX 3dnow
PUT_NO_RND_PIXELS8_X2
; void ff_put_no_rnd_pixels8_x2_exact(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
%macro PUT_NO_RND_PIXELS8_X2_EXACT 0
cglobal put_no_rnd_pixels8_x2_exact, 4,5
lea r4, [r2*3]
pcmpeqb m6, m6
.loop:
mova m0, [r1]
mova m2, [r1+r2]
mova m1, [r1+1]
mova m3, [r1+r2+1]
pxor m0, m6
pxor m2, m6
pxor m1, m6
pxor m3, m6
PAVGB m0, m1
PAVGB m2, m3
pxor m0, m6
pxor m2, m6
mova [r0], m0
mova [r0+r2], m2
mova m0, [r1+r2*2]
mova m1, [r1+r2*2+1]
mova m2, [r1+r4]
mova m3, [r1+r4+1]
pxor m0, m6
pxor m1, m6
pxor m2, m6
pxor m3, m6
PAVGB m0, m1
PAVGB m2, m3
pxor m0, m6
pxor m2, m6
mova [r0+r2*2], m0
mova [r0+r4], m2
lea r1, [r1+r2*4]
lea r0, [r0+r2*4]
sub r3d, 4
jg .loop
REP_RET
%endmacro
INIT_MMX mmxext
PUT_NO_RND_PIXELS8_X2_EXACT
INIT_MMX 3dnow
PUT_NO_RND_PIXELS8_X2_EXACT
; void ff_put_pixels8_y2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
%macro PUT_PIXELS8_Y2 0
%if cpuflag(sse2)
cglobal put_pixels16_y2, 4,5,3
%else
cglobal put_pixels8_y2, 4,5
%endif
lea r4, [r2*2]
movu m0, [r1]
sub r0, r2
.loop:
movu m1, [r1+r2]
movu m2, [r1+r4]
add r1, r4
PAVGB m0, m1
PAVGB m1, m2
mova [r0+r2], m0
mova [r0+r4], m1
movu m1, [r1+r2]
movu m0, [r1+r4]
add r0, r4
add r1, r4
PAVGB m2, m1
PAVGB m1, m0
mova [r0+r2], m2
mova [r0+r4], m1
add r0, r4
sub r3d, 4
jne .loop
REP_RET
%endmacro
INIT_MMX mmxext
PUT_PIXELS8_Y2
INIT_MMX 3dnow
PUT_PIXELS8_Y2
; actually, put_pixels16_y2_sse2
INIT_XMM sse2
PUT_PIXELS8_Y2
; void ff_put_no_rnd_pixels8_y2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
%macro PUT_NO_RND_PIXELS8_Y2 0
cglobal put_no_rnd_pixels8_y2, 4,5
mova m6, [pb_1]
lea r4, [r2+r2]
mova m0, [r1]
sub r0, r2
.loop:
mova m1, [r1+r2]
mova m2, [r1+r4]
add r1, r4
psubusb m1, m6
PAVGB m0, m1
PAVGB m1, m2
mova [r0+r2], m0
mova [r0+r4], m1
mova m1, [r1+r2]
mova m0, [r1+r4]
add r0, r4
add r1, r4
psubusb m1, m6
PAVGB m2, m1
PAVGB m1, m0
mova [r0+r2], m2
mova [r0+r4], m1
add r0, r4
sub r3d, 4
jne .loop
REP_RET
%endmacro
INIT_MMX mmxext
PUT_NO_RND_PIXELS8_Y2
INIT_MMX 3dnow
PUT_NO_RND_PIXELS8_Y2
; void ff_put_no_rnd_pixels8_y2_exact(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
%macro PUT_NO_RND_PIXELS8_Y2_EXACT 0
cglobal put_no_rnd_pixels8_y2_exact, 4,5
lea r4, [r2*3]
mova m0, [r1]
pcmpeqb m6, m6
add r1, r2
pxor m0, m6
.loop:
mova m1, [r1]
mova m2, [r1+r2]
pxor m1, m6
pxor m2, m6
PAVGB m0, m1
PAVGB m1, m2
pxor m0, m6
pxor m1, m6
mova [r0], m0
mova [r0+r2], m1
mova m1, [r1+r2*2]
mova m0, [r1+r4]
pxor m1, m6
pxor m0, m6
PAVGB m2, m1
PAVGB m1, m0
pxor m2, m6
pxor m1, m6
mova [r0+r2*2], m2
mova [r0+r4], m1
lea r1, [r1+r2*4]
lea r0, [r0+r2*4]
sub r3d, 4
jg .loop
REP_RET
%endmacro
INIT_MMX mmxext
PUT_NO_RND_PIXELS8_Y2_EXACT
INIT_MMX 3dnow
PUT_NO_RND_PIXELS8_Y2_EXACT
; void ff_avg_pixels8(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
%macro AVG_PIXELS8 0
cglobal avg_pixels8, 4,5
lea r4, [r2*2]
.loop:
mova m0, [r0]
mova m1, [r0+r2]
PAVGB m0, [r1]
PAVGB m1, [r1+r2]
mova [r0], m0
mova [r0+r2], m1
add r1, r4
add r0, r4
mova m0, [r0]
mova m1, [r0+r2]
PAVGB m0, [r1]
PAVGB m1, [r1+r2]
add r1, r4
mova [r0], m0
mova [r0+r2], m1
add r0, r4
sub r3d, 4
jne .loop
REP_RET
%endmacro
INIT_MMX 3dnow
AVG_PIXELS8
; void ff_avg_pixels8_x2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
%macro AVG_PIXELS8_X2 0
%if cpuflag(sse2)
cglobal avg_pixels16_x2, 4,5,4
%else
cglobal avg_pixels8_x2, 4,5
%endif
lea r4, [r2*2]
%if notcpuflag(mmxext)
pcmpeqd m5, m5
paddb m5, m5
%endif
.loop:
movu m0, [r1]
movu m2, [r1+r2]
%if cpuflag(sse2)
movu m1, [r1+1]
movu m3, [r1+r2+1]
pavgb m0, m1
pavgb m2, m3
%else
PAVGB m0, [r1+1], m3, m5
PAVGB m2, [r1+r2+1], m4, m5
%endif
PAVGB m0, [r0], m3, m5
PAVGB m2, [r0+r2], m4, m5
add r1, r4
mova [r0], m0
mova [r0+r2], m2
movu m0, [r1]
movu m2, [r1+r2]
%if cpuflag(sse2)
movu m1, [r1+1]
movu m3, [r1+r2+1]
pavgb m0, m1
pavgb m2, m3
%else
PAVGB m0, [r1+1], m3, m5
PAVGB m2, [r1+r2+1], m4, m5
%endif
add r0, r4
add r1, r4
PAVGB m0, [r0], m3, m5
PAVGB m2, [r0+r2], m4, m5
mova [r0], m0
mova [r0+r2], m2
add r0, r4
sub r3d, 4
jne .loop
REP_RET
%endmacro
INIT_MMX mmx
AVG_PIXELS8_X2
INIT_MMX mmxext
AVG_PIXELS8_X2
INIT_MMX 3dnow
AVG_PIXELS8_X2
; actually avg_pixels16_x2
INIT_XMM sse2
AVG_PIXELS8_X2
; void ff_avg_pixels8_y2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
%macro AVG_PIXELS8_Y2 0
%if cpuflag(sse2)
cglobal avg_pixels16_y2, 4,5,3
%else
cglobal avg_pixels8_y2, 4,5
%endif
lea r4, [r2*2]
movu m0, [r1]
sub r0, r2
.loop:
movu m1, [r1+r2]
movu m2, [r1+r4]
add r1, r4
PAVGB m0, m1
PAVGB m1, m2
PAVGB m0, [r0+r2]
PAVGB m1, [r0+r4]
mova [r0+r2], m0
mova [r0+r4], m1
movu m1, [r1+r2]
movu m0, [r1+r4]
PAVGB m2, m1
PAVGB m1, m0
add r0, r4
add r1, r4
PAVGB m2, [r0+r2]
PAVGB m1, [r0+r4]
mova [r0+r2], m2
mova [r0+r4], m1
add r0, r4
sub r3d, 4
jne .loop
REP_RET
%endmacro
INIT_MMX mmxext
AVG_PIXELS8_Y2
INIT_MMX 3dnow
AVG_PIXELS8_Y2
; actually avg_pixels16_y2
INIT_XMM sse2
AVG_PIXELS8_Y2
; void ff_avg_pixels8_xy2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
; Note this is not correctly rounded, and is therefore used for
; not-bitexact output
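; (Sketch of the difference: pavgb computes (a+b+1)>>1, so averaging the two
; horizontal averages gives roughly ((a+b+1)>>1 + (c+d+1)>>1 + 1)>>1 instead of
; the bit-exact (a+b+c+d+2)>>2; the chained round-ups can land one step high,
; which the psubusb of pb_1 below only partially compensates.)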
%macro AVG_APPROX_PIXELS8_XY2 0
cglobal avg_approx_pixels8_xy2, 4,5
mova m6, [pb_1]
lea r4, [r2*2]
mova m0, [r1]
PAVGB m0, [r1+1]
.loop:
mova m2, [r1+r4]
mova m1, [r1+r2]
psubusb m2, m6
PAVGB m1, [r1+r2+1]
PAVGB m2, [r1+r4+1]
add r1, r4
PAVGB m0, m1
PAVGB m1, m2
PAVGB m0, [r0]
PAVGB m1, [r0+r2]
mova [r0], m0
mova [r0+r2], m1
mova m1, [r1+r2]
mova m0, [r1+r4]
PAVGB m1, [r1+r2+1]
PAVGB m0, [r1+r4+1]
add r0, r4
add r1, r4
PAVGB m2, m1
PAVGB m1, m0
PAVGB m2, [r0]
PAVGB m1, [r0+r2]
mova [r0], m2
mova [r0+r2], m1
add r0, r4
sub r3d, 4
jne .loop
REP_RET
%endmacro
INIT_MMX mmxext
AVG_APPROX_PIXELS8_XY2
INIT_MMX 3dnow
AVG_APPROX_PIXELS8_XY2
; void ff_avg_pixels16_xy2(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
%macro SET_PIXELS_XY2 1
%if cpuflag(sse2)
cglobal %1_pixels16_xy2, 4,5,8
%else
cglobal %1_pixels8_xy2, 4,5
%endif
pxor m7, m7
mova m6, [pw_2]
movu m0, [r1]
movu m4, [r1+1]
mova m1, m0
mova m5, m4
punpcklbw m0, m7
punpcklbw m4, m7
punpckhbw m1, m7
punpckhbw m5, m7
paddusw m4, m0
paddusw m5, m1
xor r4, r4
add r1, r2
.loop:
movu m0, [r1+r4]
movu m2, [r1+r4+1]
mova m1, m0
mova m3, m2
punpcklbw m0, m7
punpcklbw m2, m7
punpckhbw m1, m7
punpckhbw m3, m7
paddusw m0, m2
paddusw m1, m3
paddusw m4, m6
paddusw m5, m6
paddusw m4, m0
paddusw m5, m1
psrlw m4, 2
psrlw m5, 2
%ifidn %1, avg
mova m3, [r0+r4]
packuswb m4, m5
PAVGB m4, m3
%else
packuswb m4, m5
%endif
mova [r0+r4], m4
add r4, r2
movu m2, [r1+r4]
movu m4, [r1+r4+1]
mova m3, m2
mova m5, m4
punpcklbw m2, m7
punpcklbw m4, m7
punpckhbw m3, m7
punpckhbw m5, m7
paddusw m4, m2
paddusw m5, m3
paddusw m0, m6
paddusw m1, m6
paddusw m0, m4
paddusw m1, m5
psrlw m0, 2
psrlw m1, 2
%ifidn %1, avg
mova m3, [r0+r4]
packuswb m0, m1
PAVGB m0, m3
%else
packuswb m0, m1
%endif
mova [r0+r4], m0
add r4, r2
sub r3d, 2
jnz .loop
REP_RET
%endmacro
INIT_MMX mmxext
SET_PIXELS_XY2 avg
INIT_MMX 3dnow
SET_PIXELS_XY2 avg
INIT_XMM sse2
SET_PIXELS_XY2 put
SET_PIXELS_XY2 avg
%macro SSSE3_PIXELS_XY2 1-2
%if %0 == 2 ; sse2
cglobal %1_pixels16_xy2, 4,5,%2
mova m4, [pb_interleave16]
%else
cglobal %1_pixels8_xy2, 4,5
mova m4, [pb_interleave8]
%endif
mova m5, [pb_1]
movu m0, [r1]
movu m1, [r1+1]
pmaddubsw m0, m5
pmaddubsw m1, m5
xor r4, r4
add r1, r2
.loop:
movu m2, [r1+r4]
movu m3, [r1+r4+1]
pmaddubsw m2, m5
pmaddubsw m3, m5
paddusw m0, m2
paddusw m1, m3
pmulhrsw m0, [pw_8192]
pmulhrsw m1, [pw_8192]
%ifidn %1, avg
mova m6, [r0+r4]
packuswb m0, m1
pshufb m0, m4
pavgb m0, m6
%else
packuswb m0, m1
pshufb m0, m4
%endif
mova [r0+r4], m0
add r4, r2
movu m0, [r1+r4]
movu m1, [r1+r4+1]
pmaddubsw m0, m5
pmaddubsw m1, m5
paddusw m2, m0
paddusw m3, m1
pmulhrsw m2, [pw_8192]
pmulhrsw m3, [pw_8192]
%ifidn %1, avg
mova m6, [r0+r4]
packuswb m2, m3
pshufb m2, m4
pavgb m2, m6
%else
packuswb m2, m3
pshufb m2, m4
%endif
mova [r0+r4], m2
add r4, r2
sub r3d, 2
jnz .loop
REP_RET
%endmacro
INIT_MMX ssse3
SSSE3_PIXELS_XY2 put
SSSE3_PIXELS_XY2 avg
INIT_XMM ssse3
SSSE3_PIXELS_XY2 put, 6
SSSE3_PIXELS_XY2 avg, 7

View File

@@ -0,0 +1,53 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_X86_HPELDSP_H
#define AVCODEC_X86_HPELDSP_H
#include <stddef.h>
#include <stdint.h>
void ff_avg_pixels8_x2_mmx(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels8_xy2_mmx(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels8_xy2_mmxext(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels8_xy2_ssse3(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels16_xy2_mmx(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels16_xy2_sse2(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels16_xy2_ssse3(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels8_xy2_mmx(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels8_xy2_ssse3(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels16_xy2_mmx(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels16_xy2_sse2(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels16_xy2_ssse3(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
#endif /* AVCODEC_X86_HPELDSP_H */

View File

@@ -0,0 +1,332 @@
/*
* SIMD-optimized halfpel functions
* Copyright (c) 2000, 2001 Fabrice Bellard
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* MMX optimization by Nick Kurshev <nickols_k@mail.ru>
*/
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/hpeldsp.h"
#include "libavcodec/pixels.h"
#include "fpel.h"
#include "hpeldsp.h"
void ff_put_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels16_x2_mmxext(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels16_x2_3dnow(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels16_x2_sse2(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels16_x2_sse2(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels16_y2_sse2(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels16_y2_sse2(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_x2_exact_mmxext(uint8_t *block,
const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_x2_exact_3dnow(uint8_t *block,
const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_y2_exact_mmxext(uint8_t *block,
const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_put_no_rnd_pixels8_y2_exact_3dnow(uint8_t *block,
const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels8_3dnow(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels8_x2_mmxext(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels8_x2_3dnow(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels8_y2_mmxext(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels8_y2_3dnow(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_pixels8_xy2_3dnow(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_approx_pixels8_xy2_mmxext(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
void ff_avg_approx_pixels8_xy2_3dnow(uint8_t *block, const uint8_t *pixels,
ptrdiff_t line_size, int h);
#define avg_pixels8_mmx ff_avg_pixels8_mmx
#define avg_pixels8_x2_mmx ff_avg_pixels8_x2_mmx
#define avg_pixels16_mmx ff_avg_pixels16_mmx
#define avg_pixels8_xy2_mmx ff_avg_pixels8_xy2_mmx
#define avg_pixels16_xy2_mmx ff_avg_pixels16_xy2_mmx
#define put_pixels8_mmx ff_put_pixels8_mmx
#define put_pixels16_mmx ff_put_pixels16_mmx
#define put_pixels8_xy2_mmx ff_put_pixels8_xy2_mmx
#define put_pixels16_xy2_mmx ff_put_pixels16_xy2_mmx
#define avg_no_rnd_pixels16_mmx ff_avg_pixels16_mmx
#define put_no_rnd_pixels8_mmx ff_put_pixels8_mmx
#define put_no_rnd_pixels16_mmx ff_put_pixels16_mmx
#if HAVE_INLINE_ASM
/***********************************/
/* MMX no rounding */
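/* Sketch of the distinction: with the definitions below the templates build the
 * "no rounding" halfpel functions, i.e. the two-pixel average is (a + b) >> 1
 * instead of the rounded (a + b + 1) >> 1, and the xy2 four-pixel average adds
 * 1 (MOVQ_WONE) rather than 2 (MOVQ_WTWO) before the >> 2. The second
 * instantiation further down swaps in the rounded macros. */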
#define DEF(x, y) x ## _no_rnd_ ## y ## _mmx
#define SET_RND MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)
#define STATIC static
#include "rnd_template.c"
#include "hpeldsp_rnd_template.c"
#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef STATIC
CALL_2X_PIXELS(avg_no_rnd_pixels16_y2_mmx, avg_no_rnd_pixels8_y2_mmx, 8)
CALL_2X_PIXELS(put_no_rnd_pixels16_y2_mmx, put_no_rnd_pixels8_y2_mmx, 8)
CALL_2X_PIXELS(avg_no_rnd_pixels16_xy2_mmx, avg_no_rnd_pixels8_xy2_mmx, 8)
CALL_2X_PIXELS(put_no_rnd_pixels16_xy2_mmx, put_no_rnd_pixels8_xy2_mmx, 8)
/***********************************/
/* MMX rounding */
#define DEF(x, y) x ## _ ## y ## _mmx
#define SET_RND MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)
#include "hpeldsp_rnd_template.c"
#undef DEF
#define DEF(x, y) ff_ ## x ## _ ## y ## _mmx
#define STATIC
#include "rnd_template.c"
#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
CALL_2X_PIXELS(avg_pixels16_y2_mmx, avg_pixels8_y2_mmx, 8)
CALL_2X_PIXELS(put_pixels16_y2_mmx, put_pixels8_y2_mmx, 8)
CALL_2X_PIXELS_EXPORT(ff_avg_pixels16_xy2_mmx, ff_avg_pixels8_xy2_mmx, 8)
CALL_2X_PIXELS_EXPORT(ff_put_pixels16_xy2_mmx, ff_put_pixels8_xy2_mmx, 8)
#endif /* HAVE_INLINE_ASM */
#if HAVE_YASM
#define HPELDSP_AVG_PIXELS16(CPUEXT) \
CALL_2X_PIXELS(put_no_rnd_pixels16_x2 ## CPUEXT, ff_put_no_rnd_pixels8_x2 ## CPUEXT, 8) \
CALL_2X_PIXELS(put_pixels16_y2 ## CPUEXT, ff_put_pixels8_y2 ## CPUEXT, 8) \
CALL_2X_PIXELS(put_no_rnd_pixels16_y2 ## CPUEXT, ff_put_no_rnd_pixels8_y2 ## CPUEXT, 8) \
CALL_2X_PIXELS(avg_pixels16 ## CPUEXT, ff_avg_pixels8 ## CPUEXT, 8) \
CALL_2X_PIXELS(avg_pixels16_x2 ## CPUEXT, ff_avg_pixels8_x2 ## CPUEXT, 8) \
CALL_2X_PIXELS(avg_pixels16_y2 ## CPUEXT, ff_avg_pixels8_y2 ## CPUEXT, 8) \
CALL_2X_PIXELS(avg_pixels16_xy2 ## CPUEXT, ff_avg_pixels8_xy2 ## CPUEXT, 8) \
CALL_2X_PIXELS(avg_approx_pixels16_xy2## CPUEXT, ff_avg_approx_pixels8_xy2## CPUEXT, 8)
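/* CALL_2X_PIXELS (libavcodec/pixels.h) builds each 16-wide function out of the
 * 8-wide one; roughly, for CPUEXT == _mmxext the avg_pixels16 line generates
 *     static void avg_pixels16_mmxext(uint8_t *block, const uint8_t *pixels,
 *                                     ptrdiff_t line_size, int h)
 *     {
 *         ff_avg_pixels8_mmxext(block,     pixels,     line_size, h);
 *         ff_avg_pixels8_mmxext(block + 8, pixels + 8, line_size, h);
 *     }
 * (a sketch of the generated wrapper; the trailing macro argument is the
 * byte offset of the second half). */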
HPELDSP_AVG_PIXELS16(_3dnow)
HPELDSP_AVG_PIXELS16(_mmxext)
#endif /* HAVE_YASM */
#define SET_HPEL_FUNCS_EXT(PFX, IDX, SIZE, CPU) \
if (HAVE_MMX_EXTERNAL) \
c->PFX ## _pixels_tab IDX [0] = PFX ## _pixels ## SIZE ## _ ## CPU;
#if HAVE_MMX_INLINE
#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
do { \
SET_HPEL_FUNCS_EXT(PFX, IDX, SIZE, CPU) \
c->PFX ## _pixels_tab IDX [1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
c->PFX ## _pixels_tab IDX [2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
c->PFX ## _pixels_tab IDX [3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU; \
} while (0)
#else
#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
do { \
SET_HPEL_FUNCS_EXT(PFX, IDX, SIZE, CPU) \
} while (0)
#endif
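/* Concretely, the first call below, SET_HPEL_FUNCS(put, [0], 16, mmx), expands
 * (with inline MMX available) to
 *     c->put_pixels_tab[0][0] = put_pixels16_mmx;      // only if HAVE_MMX_EXTERNAL
 *     c->put_pixels_tab[0][1] = put_pixels16_x2_mmx;
 *     c->put_pixels_tab[0][2] = put_pixels16_y2_mmx;
 *     c->put_pixels_tab[0][3] = put_pixels16_xy2_mmx;
 * i.e. slots [0..3] are the full-pel, half-pel x, half-pel y and half-pel xy
 * variants for 16-wide blocks. */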
static void hpeldsp_init_mmx(HpelDSPContext *c, int flags, int cpu_flags)
{
SET_HPEL_FUNCS(put, [0], 16, mmx);
SET_HPEL_FUNCS(put_no_rnd, [0], 16, mmx);
SET_HPEL_FUNCS(avg, [0], 16, mmx);
SET_HPEL_FUNCS(avg_no_rnd, , 16, mmx);
SET_HPEL_FUNCS(put, [1], 8, mmx);
SET_HPEL_FUNCS(put_no_rnd, [1], 8, mmx);
if (HAVE_MMX_EXTERNAL) {
c->avg_pixels_tab[1][0] = ff_avg_pixels8_mmx;
c->avg_pixels_tab[1][1] = ff_avg_pixels8_x2_mmx;
}
#if HAVE_MMX_INLINE
c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx;
c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_mmx;
#endif
}
static void hpeldsp_init_mmxext(HpelDSPContext *c, int flags, int cpu_flags)
{
#if HAVE_MMXEXT_EXTERNAL
c->put_pixels_tab[0][1] = ff_put_pixels16_x2_mmxext;
c->put_pixels_tab[0][2] = put_pixels16_y2_mmxext;
c->avg_pixels_tab[0][0] = avg_pixels16_mmxext;
c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmxext;
c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmxext;
c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmxext;
c->put_pixels_tab[1][1] = ff_put_pixels8_x2_mmxext;
c->put_pixels_tab[1][2] = ff_put_pixels8_y2_mmxext;
c->avg_pixels_tab[1][0] = ff_avg_pixels8_mmxext;
c->avg_pixels_tab[1][1] = ff_avg_pixels8_x2_mmxext;
c->avg_pixels_tab[1][2] = ff_avg_pixels8_y2_mmxext;
c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_mmxext;
if (!(flags & AV_CODEC_FLAG_BITEXACT)) {
c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmxext;
c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmxext;
c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_mmxext;
c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_mmxext;
c->avg_pixels_tab[0][3] = avg_approx_pixels16_xy2_mmxext;
c->avg_pixels_tab[1][3] = ff_avg_approx_pixels8_xy2_mmxext;
}
if (CONFIG_VP3_DECODER && flags & AV_CODEC_FLAG_BITEXACT) {
c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_exact_mmxext;
c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_exact_mmxext;
}
#endif /* HAVE_MMXEXT_EXTERNAL */
}
static void hpeldsp_init_3dnow(HpelDSPContext *c, int flags, int cpu_flags)
{
#if HAVE_AMD3DNOW_EXTERNAL
c->put_pixels_tab[0][1] = ff_put_pixels16_x2_3dnow;
c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
c->put_pixels_tab[1][1] = ff_put_pixels8_x2_3dnow;
c->put_pixels_tab[1][2] = ff_put_pixels8_y2_3dnow;
c->avg_pixels_tab[1][0] = ff_avg_pixels8_3dnow;
c->avg_pixels_tab[1][1] = ff_avg_pixels8_x2_3dnow;
c->avg_pixels_tab[1][2] = ff_avg_pixels8_y2_3dnow;
c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_3dnow;
if (!(flags & AV_CODEC_FLAG_BITEXACT)){
c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_3dnow;
c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_3dnow;
c->avg_pixels_tab[0][3] = avg_approx_pixels16_xy2_3dnow;
c->avg_pixels_tab[1][3] = ff_avg_approx_pixels8_xy2_3dnow;
}
if (CONFIG_VP3_DECODER && flags & AV_CODEC_FLAG_BITEXACT) {
c->put_no_rnd_pixels_tab[1][1] = ff_put_no_rnd_pixels8_x2_exact_3dnow;
c->put_no_rnd_pixels_tab[1][2] = ff_put_no_rnd_pixels8_y2_exact_3dnow;
}
#endif /* HAVE_AMD3DNOW_EXTERNAL */
}
static void hpeldsp_init_sse2(HpelDSPContext *c, int flags, int cpu_flags)
{
#if HAVE_SSE2_EXTERNAL
if (!(cpu_flags & AV_CPU_FLAG_SSE2SLOW)) {
// these functions are slower than mmx on AMD, but faster on Intel
c->put_pixels_tab[0][0] = ff_put_pixels16_sse2;
c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_sse2;
c->put_pixels_tab[0][1] = ff_put_pixels16_x2_sse2;
c->put_pixels_tab[0][2] = ff_put_pixels16_y2_sse2;
c->put_pixels_tab[0][3] = ff_put_pixels16_xy2_sse2;
c->avg_pixels_tab[0][0] = ff_avg_pixels16_sse2;
c->avg_pixels_tab[0][1] = ff_avg_pixels16_x2_sse2;
c->avg_pixels_tab[0][2] = ff_avg_pixels16_y2_sse2;
c->avg_pixels_tab[0][3] = ff_avg_pixels16_xy2_sse2;
}
#endif /* HAVE_SSE2_EXTERNAL */
}
static void hpeldsp_init_ssse3(HpelDSPContext *c, int flags, int cpu_flags)
{
#if HAVE_SSSE3_EXTERNAL
c->put_pixels_tab[0][3] = ff_put_pixels16_xy2_ssse3;
c->avg_pixels_tab[0][3] = ff_avg_pixels16_xy2_ssse3;
c->put_pixels_tab[1][3] = ff_put_pixels8_xy2_ssse3;
c->avg_pixels_tab[1][3] = ff_avg_pixels8_xy2_ssse3;
#endif
}
av_cold void ff_hpeldsp_init_x86(HpelDSPContext *c, int flags)
{
int cpu_flags = av_get_cpu_flags();
if (INLINE_MMX(cpu_flags))
hpeldsp_init_mmx(c, flags, cpu_flags);
if (EXTERNAL_AMD3DNOW(cpu_flags))
hpeldsp_init_3dnow(c, flags, cpu_flags);
if (EXTERNAL_MMXEXT(cpu_flags))
hpeldsp_init_mmxext(c, flags, cpu_flags);
if (EXTERNAL_SSE2(cpu_flags))
hpeldsp_init_sse2(c, flags, cpu_flags);
if (EXTERNAL_SSSE3(cpu_flags))
hpeldsp_init_ssse3(c, flags, cpu_flags);
}

View File

@@ -0,0 +1,202 @@
/*
* SIMD-optimized halfpel functions are compiled twice for rnd/no_rnd
* Copyright (c) 2000, 2001 Fabrice Bellard
* Copyright (c) 2003-2004 Michael Niedermayer <michaelni@gmx.at>
*
* MMX optimization by Nick Kurshev <nickols_k@mail.ru>
* mostly rewritten by Michael Niedermayer <michaelni@gmx.at>
* and improved by Zdenek Kabelac <kabi@users.sf.net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stddef.h>
#include <stdint.h>
// put_pixels
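/* Each DEF(put, pixels8_x2) here becomes put_pixels8_x2_mmx or
 * put_no_rnd_pixels8_x2_mmx, depending on which DEF/PAVGBP/PAVGB definitions
 * the including file (hpeldsp_init.c) has in effect when it pulls in this
 * template, so the same source yields both the rounded and the no-rounding
 * variants. */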
static void DEF(put, pixels8_x2)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
MOVQ_BFE(mm6);
__asm__ volatile(
"lea (%3, %3), %%"REG_a" \n\t"
".p2align 3 \n\t"
"1: \n\t"
"movq (%1), %%mm0 \n\t"
"movq 1(%1), %%mm1 \n\t"
"movq (%1, %3), %%mm2 \n\t"
"movq 1(%1, %3), %%mm3 \n\t"
PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
"movq %%mm4, (%2) \n\t"
"movq %%mm5, (%2, %3) \n\t"
"add %%"REG_a", %1 \n\t"
"add %%"REG_a", %2 \n\t"
"movq (%1), %%mm0 \n\t"
"movq 1(%1), %%mm1 \n\t"
"movq (%1, %3), %%mm2 \n\t"
"movq 1(%1, %3), %%mm3 \n\t"
PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
"movq %%mm4, (%2) \n\t"
"movq %%mm5, (%2, %3) \n\t"
"add %%"REG_a", %1 \n\t"
"add %%"REG_a", %2 \n\t"
"subl $4, %0 \n\t"
"jnz 1b \n\t"
:"+g"(h), "+S"(pixels), "+D"(block)
:"r"((x86_reg)line_size)
:REG_a, "memory");
}
static void DEF(put, pixels16_x2)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
MOVQ_BFE(mm6);
__asm__ volatile(
"lea (%3, %3), %%"REG_a" \n\t"
".p2align 3 \n\t"
"1: \n\t"
"movq (%1), %%mm0 \n\t"
"movq 1(%1), %%mm1 \n\t"
"movq (%1, %3), %%mm2 \n\t"
"movq 1(%1, %3), %%mm3 \n\t"
PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
"movq %%mm4, (%2) \n\t"
"movq %%mm5, (%2, %3) \n\t"
"movq 8(%1), %%mm0 \n\t"
"movq 9(%1), %%mm1 \n\t"
"movq 8(%1, %3), %%mm2 \n\t"
"movq 9(%1, %3), %%mm3 \n\t"
PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
"movq %%mm4, 8(%2) \n\t"
"movq %%mm5, 8(%2, %3) \n\t"
"add %%"REG_a", %1 \n\t"
"add %%"REG_a", %2 \n\t"
"movq (%1), %%mm0 \n\t"
"movq 1(%1), %%mm1 \n\t"
"movq (%1, %3), %%mm2 \n\t"
"movq 1(%1, %3), %%mm3 \n\t"
PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
"movq %%mm4, (%2) \n\t"
"movq %%mm5, (%2, %3) \n\t"
"movq 8(%1), %%mm0 \n\t"
"movq 9(%1), %%mm1 \n\t"
"movq 8(%1, %3), %%mm2 \n\t"
"movq 9(%1, %3), %%mm3 \n\t"
PAVGBP(%%mm0, %%mm1, %%mm4, %%mm2, %%mm3, %%mm5)
"movq %%mm4, 8(%2) \n\t"
"movq %%mm5, 8(%2, %3) \n\t"
"add %%"REG_a", %1 \n\t"
"add %%"REG_a", %2 \n\t"
"subl $4, %0 \n\t"
"jnz 1b \n\t"
:"+g"(h), "+S"(pixels), "+D"(block)
:"r"((x86_reg)line_size)
:REG_a, "memory");
}
static void DEF(put, pixels8_y2)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
MOVQ_BFE(mm6);
__asm__ volatile(
"lea (%3, %3), %%"REG_a" \n\t"
"movq (%1), %%mm0 \n\t"
".p2align 3 \n\t"
"1: \n\t"
"movq (%1, %3), %%mm1 \n\t"
"movq (%1, %%"REG_a"),%%mm2 \n\t"
PAVGBP(%%mm1, %%mm0, %%mm4, %%mm2, %%mm1, %%mm5)
"movq %%mm4, (%2) \n\t"
"movq %%mm5, (%2, %3) \n\t"
"add %%"REG_a", %1 \n\t"
"add %%"REG_a", %2 \n\t"
"movq (%1, %3), %%mm1 \n\t"
"movq (%1, %%"REG_a"),%%mm0 \n\t"
PAVGBP(%%mm1, %%mm2, %%mm4, %%mm0, %%mm1, %%mm5)
"movq %%mm4, (%2) \n\t"
"movq %%mm5, (%2, %3) \n\t"
"add %%"REG_a", %1 \n\t"
"add %%"REG_a", %2 \n\t"
"subl $4, %0 \n\t"
"jnz 1b \n\t"
:"+g"(h), "+S"(pixels), "+D"(block)
:"r"((x86_reg)line_size)
:REG_a, "memory");
}
static void DEF(avg, pixels16_x2)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
MOVQ_BFE(mm6);
__asm__ volatile(
".p2align 3 \n\t"
"1: \n\t"
"movq (%1), %%mm0 \n\t"
"movq 1(%1), %%mm1 \n\t"
"movq (%2), %%mm3 \n\t"
PAVGB(%%mm0, %%mm1, %%mm2, %%mm6)
PAVGB_MMX(%%mm3, %%mm2, %%mm0, %%mm6)
"movq %%mm0, (%2) \n\t"
"movq 8(%1), %%mm0 \n\t"
"movq 9(%1), %%mm1 \n\t"
"movq 8(%2), %%mm3 \n\t"
PAVGB(%%mm0, %%mm1, %%mm2, %%mm6)
PAVGB_MMX(%%mm3, %%mm2, %%mm0, %%mm6)
"movq %%mm0, 8(%2) \n\t"
"add %3, %1 \n\t"
"add %3, %2 \n\t"
"subl $1, %0 \n\t"
"jnz 1b \n\t"
:"+g"(h), "+S"(pixels), "+D"(block)
:"r"((x86_reg)line_size)
:"memory");
}
static void DEF(avg, pixels8_y2)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
MOVQ_BFE(mm6);
__asm__ volatile(
"lea (%3, %3), %%"REG_a" \n\t"
"movq (%1), %%mm0 \n\t"
".p2align 3 \n\t"
"1: \n\t"
"movq (%1, %3), %%mm1 \n\t"
"movq (%1, %%"REG_a"), %%mm2 \n\t"
PAVGBP(%%mm1, %%mm0, %%mm4, %%mm2, %%mm1, %%mm5)
"movq (%2), %%mm3 \n\t"
PAVGB_MMX(%%mm3, %%mm4, %%mm0, %%mm6)
"movq (%2, %3), %%mm3 \n\t"
PAVGB_MMX(%%mm3, %%mm5, %%mm1, %%mm6)
"movq %%mm0, (%2) \n\t"
"movq %%mm1, (%2, %3) \n\t"
"add %%"REG_a", %1 \n\t"
"add %%"REG_a", %2 \n\t"
"movq (%1, %3), %%mm1 \n\t"
"movq (%1, %%"REG_a"), %%mm0 \n\t"
PAVGBP(%%mm1, %%mm2, %%mm4, %%mm0, %%mm1, %%mm5)
"movq (%2), %%mm3 \n\t"
PAVGB_MMX(%%mm3, %%mm4, %%mm2, %%mm6)
"movq (%2, %3), %%mm3 \n\t"
PAVGB_MMX(%%mm3, %%mm5, %%mm1, %%mm6)
"movq %%mm2, (%2) \n\t"
"movq %%mm1, (%2, %3) \n\t"
"add %%"REG_a", %1 \n\t"
"add %%"REG_a", %2 \n\t"
"subl $4, %0 \n\t"
"jnz 1b \n\t"
:"+g"(h), "+S"(pixels), "+D"(block)
:"r"((x86_reg)line_size)
:REG_a, "memory");
}

View File

@@ -0,0 +1,253 @@
;******************************************************************************
;* SIMD-optimized HuffYUV functions
;* Copyright (c) 2008 Loren Merritt
;* Copyright (c) 2014 Christophe Gisquet
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION_RODATA
cextern pb_15
pb_zzzzzzzz77777777: times 8 db -1
pb_7: times 8 db 7
pb_zzzz3333zzzzbbbb: db -1,-1,-1,-1,3,3,3,3,-1,-1,-1,-1,11,11,11,11
pb_zz11zz55zz99zzdd: db -1,-1,1,1,-1,-1,5,5,-1,-1,9,9,-1,-1,13,13
SECTION .text
; void ff_add_hfyu_median_pred_mmxext(uint8_t *dst, const uint8_t *top,
; const uint8_t *diff, int w,
; int *left, int *left_top)
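; Scalar sketch of what the SIMD below reconstructs, per byte:
;     pred   = mid_pred(l, t, l + t - tl)   ; median of left, top, left+top-topleft
;     dst[i] = pred + diff[i]
;     tl = t;  l = dst[i]
; with l/tl carried across iterations by the shift/por shuffling in the loop.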
%macro HFYU_MEDIAN 0
cglobal add_hfyu_median_pred, 6,6,8, dst, top, diff, w, left, left_top
movu m0, [topq]
mova m2, m0
movd m4, [left_topq]
LSHIFT m2, 1
mova m1, m0
por m4, m2
movd m3, [leftq]
psubb m0, m4 ; t-tl
add dstq, wq
add topq, wq
add diffq, wq
neg wq
jmp .skip
.loop:
movu m4, [topq+wq]
mova m0, m4
LSHIFT m4, 1
por m4, m1
mova m1, m0 ; t
psubb m0, m4 ; t-tl
.skip:
movu m2, [diffq+wq]
%assign i 0
%rep mmsize
mova m4, m0
paddb m4, m3 ; t-tl+l
mova m5, m3
pmaxub m3, m1
pminub m5, m1
pminub m3, m4
pmaxub m3, m5 ; median
paddb m3, m2 ; +residual
%if i==0
mova m7, m3
LSHIFT m7, mmsize-1
%else
mova m6, m3
RSHIFT m7, 1
LSHIFT m6, mmsize-1
por m7, m6
%endif
%if i<mmsize-1
RSHIFT m0, 1
RSHIFT m1, 1
RSHIFT m2, 1
%endif
%assign i i+1
%endrep
movu [dstq+wq], m7
add wq, mmsize
jl .loop
movzx r2d, byte [dstq-1]
mov [leftq], r2d
movzx r2d, byte [topq-1]
mov [left_topq], r2d
RET
%endmacro
%if ARCH_X86_32
INIT_MMX mmxext
HFYU_MEDIAN
%endif
INIT_XMM sse2
HFYU_MEDIAN
%macro ADD_HFYU_LEFT_LOOP 2 ; %1 = dst_is_aligned, %2 = src_is_aligned
add srcq, wq
add dstq, wq
neg wq
%%.loop:
%if %2
mova m1, [srcq+wq]
%else
movu m1, [srcq+wq]
%endif
mova m2, m1
psllw m1, 8
paddb m1, m2
mova m2, m1
pshufb m1, m3
paddb m1, m2
pshufb m0, m5
mova m2, m1
pshufb m1, m4
paddb m1, m2
%if mmsize == 16
mova m2, m1
pshufb m1, m6
paddb m1, m2
%endif
paddb m0, m1
%if %1
mova [dstq+wq], m0
%else
movq [dstq+wq], m0
movhps [dstq+wq+8], m0
%endif
add wq, mmsize
jl %%.loop
mov eax, mmsize-1
sub eax, wd
movd m1, eax
pshufb m0, m1
movd eax, m0
RET
%endmacro
; int ff_add_hfyu_left_pred(uint8_t *dst, const uint8_t *src, int w, int left)
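; Scalar sketch: starting from the passed-in left value,
;     for (i = 0; i < w; i++) { left = (left + src[i]) & 0xff; dst[i] = left; }
; and the returned int is the final left byte. The psllw/pshufb/paddb sequence
; in ADD_HFYU_LEFT_LOOP computes this running sum as a logarithmic prefix sum
; inside each register instead of byte by byte.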
INIT_MMX ssse3
cglobal add_hfyu_left_pred, 3,3,7, dst, src, w, left
.skip_prologue:
mova m5, [pb_7]
mova m4, [pb_zzzz3333zzzzbbbb]
mova m3, [pb_zz11zz55zz99zzdd]
movd m0, leftm
psllq m0, 56
ADD_HFYU_LEFT_LOOP 1, 1
INIT_XMM sse4
cglobal add_hfyu_left_pred, 3,3,7, dst, src, w, left
mova m5, [pb_15]
mova m6, [pb_zzzzzzzz77777777]
mova m4, [pb_zzzz3333zzzzbbbb]
mova m3, [pb_zz11zz55zz99zzdd]
movd m0, leftm
pslldq m0, 15
test srcq, 15
jnz .src_unaligned
test dstq, 15
jnz .dst_unaligned
ADD_HFYU_LEFT_LOOP 1, 1
.dst_unaligned:
ADD_HFYU_LEFT_LOOP 0, 1
.src_unaligned:
ADD_HFYU_LEFT_LOOP 0, 0
%macro ADD_BYTES 0
cglobal add_bytes, 3,4,2, dst, src, w, size
mov sizeq, wq
and sizeq, -2*mmsize
jz .2
add dstq, sizeq
add srcq, sizeq
neg sizeq
.1:
mova m0, [srcq + sizeq]
mova m1, [srcq + sizeq + mmsize]
paddb m0, [dstq + sizeq]
paddb m1, [dstq + sizeq + mmsize]
mova [dstq + sizeq], m0
mova [dstq + sizeq + mmsize], m1
add sizeq, 2*mmsize
jl .1
.2:
and wq, 2*mmsize-1
jz .end
add dstq, wq
add srcq, wq
neg wq
.3:
mov sizeb, [srcq + wq]
add [dstq + wq], sizeb
inc wq
jl .3
.end:
REP_RET
%endmacro
%if ARCH_X86_32
INIT_MMX mmx
ADD_BYTES
%endif
INIT_XMM sse2
ADD_BYTES
; void add_hfyu_left_pred_bgr32(uint8_t *dst, const uint8_t *src,
; intptr_t w, uint8_t *left)
%macro LEFT_BGR32 0
cglobal add_hfyu_left_pred_bgr32, 4,4,3, dst, src, w, left
shl wq, 2
movd m0, [leftq]
lea dstq, [dstq + wq]
lea srcq, [srcq + wq]
LSHIFT m0, mmsize-4
neg wq
.loop:
movu m1, [srcq+wq]
mova m2, m1
%if mmsize == 8
punpckhdq m0, m0
%endif
LSHIFT m1, 4
paddb m1, m2
%if mmsize == 16
pshufd m0, m0, q3333
mova m2, m1
LSHIFT m1, 8
paddb m1, m2
%endif
paddb m0, m1
movu [dstq+wq], m0
add wq, mmsize
jl .loop
movd m0, [dstq-4]
movd [leftq], m0
REP_RET
%endmacro
%if ARCH_X86_32
INIT_MMX mmx
LEFT_BGR32
%endif
INIT_XMM sse2
LEFT_BGR32

View File

@@ -0,0 +1,117 @@
/*
* Copyright (c) 2009 Loren Merritt <lorenm@u.washington.edu>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/huffyuvdsp.h"
void ff_add_bytes_mmx(uint8_t *dst, uint8_t *src, intptr_t w);
void ff_add_bytes_sse2(uint8_t *dst, uint8_t *src, intptr_t w);
void ff_add_hfyu_median_pred_mmxext(uint8_t *dst, const uint8_t *top,
const uint8_t *diff, intptr_t w,
int *left, int *left_top);
void ff_add_hfyu_median_pred_sse2(uint8_t *dst, const uint8_t *top,
const uint8_t *diff, intptr_t w,
int *left, int *left_top);
int ff_add_hfyu_left_pred_ssse3(uint8_t *dst, const uint8_t *src,
intptr_t w, int left);
int ff_add_hfyu_left_pred_sse4(uint8_t *dst, const uint8_t *src,
intptr_t w, int left);
void ff_add_hfyu_left_pred_bgr32_mmx(uint8_t *dst, const uint8_t *src,
intptr_t w, uint8_t *left);
void ff_add_hfyu_left_pred_bgr32_sse2(uint8_t *dst, const uint8_t *src,
intptr_t w, uint8_t *left);
#if HAVE_INLINE_ASM && HAVE_7REGS && ARCH_X86_32
static void add_hfyu_median_pred_cmov(uint8_t *dst, const uint8_t *top,
const uint8_t *diff, intptr_t w,
int *left, int *left_top)
{
x86_reg w2 = -w;
x86_reg x;
int l = *left & 0xff;
int tl = *left_top & 0xff;
int t;
__asm__ volatile (
"mov %7, %3 \n"
"1: \n"
"movzbl (%3, %4), %2 \n"
"mov %2, %k3 \n"
"sub %b1, %b3 \n"
"add %b0, %b3 \n"
"mov %2, %1 \n"
"cmp %0, %2 \n"
"cmovg %0, %2 \n"
"cmovg %1, %0 \n"
"cmp %k3, %0 \n"
"cmovg %k3, %0 \n"
"mov %7, %3 \n"
"cmp %2, %0 \n"
"cmovl %2, %0 \n"
"add (%6, %4), %b0 \n"
"mov %b0, (%5, %4) \n"
"inc %4 \n"
"jl 1b \n"
: "+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
: "r"(dst + w), "r"(diff + w), "rm"(top + w)
);
*left = l;
*left_top = tl;
}
#endif
av_cold void ff_huffyuvdsp_init_x86(HuffYUVDSPContext *c)
{
int cpu_flags = av_get_cpu_flags();
#if HAVE_INLINE_ASM && HAVE_7REGS && ARCH_X86_32
if (cpu_flags & AV_CPU_FLAG_CMOV)
c->add_hfyu_median_pred = add_hfyu_median_pred_cmov;
#endif
if (ARCH_X86_32 && EXTERNAL_MMX(cpu_flags)) {
c->add_bytes = ff_add_bytes_mmx;
c->add_hfyu_left_pred_bgr32 = ff_add_hfyu_left_pred_bgr32_mmx;
}
if (ARCH_X86_32 && EXTERNAL_MMXEXT(cpu_flags)) {
/* slower than cmov version on AMD */
if (!(cpu_flags & AV_CPU_FLAG_3DNOW))
c->add_hfyu_median_pred = ff_add_hfyu_median_pred_mmxext;
}
if (EXTERNAL_SSE2(cpu_flags)) {
c->add_bytes = ff_add_bytes_sse2;
c->add_hfyu_median_pred = ff_add_hfyu_median_pred_sse2;
c->add_hfyu_left_pred_bgr32 = ff_add_hfyu_left_pred_bgr32_sse2;
}
if (EXTERNAL_SSSE3(cpu_flags)) {
c->add_hfyu_left_pred = ff_add_hfyu_left_pred_ssse3;
if (cpu_flags & AV_CPU_FLAG_SSE4) // not really SSE4, just slow on Conroe
c->add_hfyu_left_pred = ff_add_hfyu_left_pred_sse4;
}
}

View File

@@ -0,0 +1,114 @@
/*
* SIMD-optimized HuffYUV encoding functions
* Copyright (c) 2000, 2001 Fabrice Bellard
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
* MMX optimization by Nick Kurshev <nickols_k@mail.ru>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/huffyuvencdsp.h"
#include "libavcodec/mathops.h"
#if HAVE_INLINE_ASM
static void diff_bytes_mmx(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w)
{
x86_reg i = 0;
if (w >= 16)
__asm__ volatile (
"1: \n\t"
"movq (%2, %0), %%mm0 \n\t"
"movq (%1, %0), %%mm1 \n\t"
"psubb %%mm0, %%mm1 \n\t"
"movq %%mm1, (%3, %0) \n\t"
"movq 8(%2, %0), %%mm0 \n\t"
"movq 8(%1, %0), %%mm1 \n\t"
"psubb %%mm0, %%mm1 \n\t"
"movq %%mm1, 8(%3, %0) \n\t"
"add $16, %0 \n\t"
"cmp %4, %0 \n\t"
" jb 1b \n\t"
: "+r" (i)
: "r" (src1), "r" (src2), "r" (dst), "r" ((x86_reg) w - 15));
for (; i < w; i++)
dst[i + 0] = src1[i + 0] - src2[i + 0];
}
static void sub_hfyu_median_pred_mmxext(uint8_t *dst, const uint8_t *src1,
const uint8_t *src2, int w,
int *left, int *left_top)
{
x86_reg i = 0;
uint8_t l, lt;
__asm__ volatile (
"movq (%1, %0), %%mm0 \n\t" // LT
"psllq $8, %%mm0 \n\t"
"1: \n\t"
"movq (%1, %0), %%mm1 \n\t" // T
"movq -1(%2, %0), %%mm2 \n\t" // L
"movq (%2, %0), %%mm3 \n\t" // X
"movq %%mm2, %%mm4 \n\t" // L
"psubb %%mm0, %%mm2 \n\t"
"paddb %%mm1, %%mm2 \n\t" // L + T - LT
"movq %%mm4, %%mm5 \n\t" // L
"pmaxub %%mm1, %%mm4 \n\t" // max(T, L)
"pminub %%mm5, %%mm1 \n\t" // min(T, L)
"pminub %%mm2, %%mm4 \n\t"
"pmaxub %%mm1, %%mm4 \n\t"
"psubb %%mm4, %%mm3 \n\t" // dst - pred
"movq %%mm3, (%3, %0) \n\t"
"add $8, %0 \n\t"
"movq -1(%1, %0), %%mm0 \n\t" // LT
"cmp %4, %0 \n\t"
" jb 1b \n\t"
: "+r" (i)
: "r" (src1), "r" (src2), "r" (dst), "r" ((x86_reg) w));
l = *left;
lt = *left_top;
dst[0] = src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt) & 0xFF);
*left_top = src1[w - 1];
*left = src2[w - 1];
}
#endif /* HAVE_INLINE_ASM */
av_cold void ff_huffyuvencdsp_init_x86(HuffYUVEncDSPContext *c)
{
#if HAVE_INLINE_ASM
int cpu_flags = av_get_cpu_flags();
if (INLINE_MMX(cpu_flags)) {
c->diff_bytes = diff_bytes_mmx;
}
if (INLINE_MMXEXT(cpu_flags)) {
c->sub_hfyu_median_pred = sub_hfyu_median_pred_mmxext;
}
#endif /* HAVE_INLINE_ASM */
}

View File

@@ -0,0 +1,183 @@
;******************************************************************************
;* SIMD-optimized IDCT-related routines
;* Copyright (c) 2008 Loren Merritt
;* Copyright (c) 2003-2013 Michael Niedermayer
;* Copyright (c) 2013 Daniel Kang
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION_RODATA
cextern pb_80
SECTION .text
;--------------------------------------------------------------------------
;void ff_put_signed_pixels_clamped(const int16_t *block, uint8_t *pixels,
; ptrdiff_t line_size)
;--------------------------------------------------------------------------
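; Per pixel this is pixels[i] = av_clip_uint8(block[i] + 128) (a sketch of the C
; reference): packsswb clamps the signed 16-bit coefficients to [-128,127] and
; the paddb with pb_80 re-biases that range into [0,255].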
%macro PUT_SIGNED_PIXELS_CLAMPED_HALF 1
mova m1, [blockq+mmsize*0+%1]
mova m2, [blockq+mmsize*2+%1]
%if mmsize == 8
mova m3, [blockq+mmsize*4+%1]
mova m4, [blockq+mmsize*6+%1]
%endif
packsswb m1, [blockq+mmsize*1+%1]
packsswb m2, [blockq+mmsize*3+%1]
%if mmsize == 8
packsswb m3, [blockq+mmsize*5+%1]
packsswb m4, [blockq+mmsize*7+%1]
%endif
paddb m1, m0
paddb m2, m0
%if mmsize == 8
paddb m3, m0
paddb m4, m0
movq [pixelsq+lsizeq*0], m1
movq [pixelsq+lsizeq*1], m2
movq [pixelsq+lsizeq*2], m3
movq [pixelsq+lsize3q ], m4
%else
movq [pixelsq+lsizeq*0], m1
movhps [pixelsq+lsizeq*1], m1
movq [pixelsq+lsizeq*2], m2
movhps [pixelsq+lsize3q ], m2
%endif
%endmacro
%macro PUT_SIGNED_PIXELS_CLAMPED 1
cglobal put_signed_pixels_clamped, 3, 4, %1, block, pixels, lsize, lsize3
mova m0, [pb_80]
lea lsize3q, [lsizeq*3]
PUT_SIGNED_PIXELS_CLAMPED_HALF 0
lea pixelsq, [pixelsq+lsizeq*4]
PUT_SIGNED_PIXELS_CLAMPED_HALF 64
RET
%endmacro
INIT_MMX mmx
PUT_SIGNED_PIXELS_CLAMPED 0
INIT_XMM sse2
PUT_SIGNED_PIXELS_CLAMPED 3
;--------------------------------------------------------------------------
; void ff_put_pixels_clamped(const int16_t *block, uint8_t *pixels,
; ptrdiff_t line_size);
;--------------------------------------------------------------------------
; %1 = block offset
%macro PUT_PIXELS_CLAMPED_HALF 1
mova m0, [blockq+mmsize*0+%1]
mova m1, [blockq+mmsize*2+%1]
%if mmsize == 8
mova m2, [blockq+mmsize*4+%1]
mova m3, [blockq+mmsize*6+%1]
%endif
packuswb m0, [blockq+mmsize*1+%1]
packuswb m1, [blockq+mmsize*3+%1]
%if mmsize == 8
packuswb m2, [blockq+mmsize*5+%1]
packuswb m3, [blockq+mmsize*7+%1]
movq [pixelsq], m0
movq [lsizeq+pixelsq], m1
movq [2*lsizeq+pixelsq], m2
movq [lsize3q+pixelsq], m3
%else
movq [pixelsq], m0
movhps [lsizeq+pixelsq], m0
movq [2*lsizeq+pixelsq], m1
movhps [lsize3q+pixelsq], m1
%endif
%endmacro
%macro PUT_PIXELS_CLAMPED 0
cglobal put_pixels_clamped, 3, 4, 2, block, pixels, lsize, lsize3
lea lsize3q, [lsizeq*3]
PUT_PIXELS_CLAMPED_HALF 0
lea pixelsq, [pixelsq+lsizeq*4]
PUT_PIXELS_CLAMPED_HALF 64
RET
%endmacro
INIT_MMX mmx
PUT_PIXELS_CLAMPED
INIT_XMM sse2
PUT_PIXELS_CLAMPED
;--------------------------------------------------------------------------
; void ff_add_pixels_clamped(const int16_t *block, uint8_t *pixels,
; ptrdiff_t line_size);
;--------------------------------------------------------------------------
; %1 = block offset
%macro ADD_PIXELS_CLAMPED 1
mova m0, [blockq+mmsize*0+%1]
mova m1, [blockq+mmsize*1+%1]
%if mmsize == 8
mova m5, [blockq+mmsize*2+%1]
mova m6, [blockq+mmsize*3+%1]
%endif
movq m2, [pixelsq]
movq m3, [pixelsq+lsizeq]
%if mmsize == 8
mova m7, m2
punpcklbw m2, m4
punpckhbw m7, m4
paddsw m0, m2
paddsw m1, m7
mova m7, m3
punpcklbw m3, m4
punpckhbw m7, m4
paddsw m5, m3
paddsw m6, m7
%else
punpcklbw m2, m4
punpcklbw m3, m4
paddsw m0, m2
paddsw m1, m3
%endif
packuswb m0, m1
%if mmsize == 8
packuswb m5, m6
movq [pixelsq], m0
movq [pixelsq+lsizeq], m5
%else
movq [pixelsq], m0
movhps [pixelsq+lsizeq], m0
%endif
%endmacro
%macro ADD_PIXELS_CLAMPED 0
cglobal add_pixels_clamped, 3, 3, 5, block, pixels, lsize
pxor m4, m4
ADD_PIXELS_CLAMPED 0
lea pixelsq, [pixelsq+lsizeq*2]
ADD_PIXELS_CLAMPED 32
lea pixelsq, [pixelsq+lsizeq*2]
ADD_PIXELS_CLAMPED 64
lea pixelsq, [pixelsq+lsizeq*2]
ADD_PIXELS_CLAMPED 96
RET
%endmacro
INIT_MMX mmx
ADD_PIXELS_CLAMPED
INIT_XMM sse2
ADD_PIXELS_CLAMPED
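For reference, the three entry points above each move one 8x8 block of 16-bit coefficients into 8-bit pixels with saturation; the pack/add instruction pairs are the SIMD counterparts of the scalar loops below. This is an illustrative sketch with hypothetical *_ref names, not FFmpeg's own C fallbacks.
#include <stdint.h>
#include <stddef.h>
static uint8_t clip_u8(int v) { return v < 0 ? 0 : v > 255 ? 255 : v; }
static void put_pixels_clamped_ref(const int16_t *block, uint8_t *pixels,
                                   ptrdiff_t line_size)
{
    for (int y = 0; y < 8; y++, block += 8, pixels += line_size)
        for (int x = 0; x < 8; x++)
            pixels[x] = clip_u8(block[x]);                /* packuswb */
}
static void put_signed_pixels_clamped_ref(const int16_t *block, uint8_t *pixels,
                                          ptrdiff_t line_size)
{
    for (int y = 0; y < 8; y++, block += 8, pixels += line_size)
        for (int x = 0; x < 8; x++)
            pixels[x] = clip_u8(block[x] + 128);          /* packsswb + paddb 0x80 */
}
static void add_pixels_clamped_ref(const int16_t *block, uint8_t *pixels,
                                   ptrdiff_t line_size)
{
    for (int y = 0; y < 8; y++, block += 8, pixels += line_size)
        for (int x = 0; x < 8; x++)
            pixels[x] = clip_u8(pixels[x] + block[x]);    /* paddsw + packuswb */
}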

View File

@@ -0,0 +1,38 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_X86_IDCTDSP_H
#define AVCODEC_X86_IDCTDSP_H
#include <stdint.h>
#include <stddef.h>
void ff_add_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
ptrdiff_t line_size);
void ff_add_pixels_clamped_sse2(const int16_t *block, uint8_t *pixels,
ptrdiff_t line_size);
void ff_put_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
ptrdiff_t line_size);
void ff_put_pixels_clamped_sse2(const int16_t *block, uint8_t *pixels,
ptrdiff_t line_size);
void ff_put_signed_pixels_clamped_mmx(const int16_t *block, uint8_t *pixels,
ptrdiff_t line_size);
void ff_put_signed_pixels_clamped_sse2(const int16_t *block, uint8_t *pixels,
ptrdiff_t line_size);
#endif /* AVCODEC_X86_IDCTDSP_H */

View File

@@ -0,0 +1,88 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/idctdsp.h"
#include "idctdsp.h"
#include "simple_idct.h"
/* Input permutation for the simple_idct_mmx */
static const uint8_t simple_mmx_permutation[64] = {
0x00, 0x08, 0x04, 0x09, 0x01, 0x0C, 0x05, 0x0D,
0x10, 0x18, 0x14, 0x19, 0x11, 0x1C, 0x15, 0x1D,
0x20, 0x28, 0x24, 0x29, 0x21, 0x2C, 0x25, 0x2D,
0x12, 0x1A, 0x16, 0x1B, 0x13, 0x1E, 0x17, 0x1F,
0x02, 0x0A, 0x06, 0x0B, 0x03, 0x0E, 0x07, 0x0F,
0x30, 0x38, 0x34, 0x39, 0x31, 0x3C, 0x35, 0x3D,
0x22, 0x2A, 0x26, 0x2B, 0x23, 0x2E, 0x27, 0x2F,
0x32, 0x3A, 0x36, 0x3B, 0x33, 0x3E, 0x37, 0x3F,
};
static const uint8_t idct_sse2_row_perm[8] = { 0, 4, 1, 5, 2, 6, 3, 7 };
av_cold int ff_init_scantable_permutation_x86(uint8_t *idct_permutation,
enum idct_permutation_type perm_type)
{
int i;
switch (perm_type) {
case FF_IDCT_PERM_SIMPLE:
for (i = 0; i < 64; i++)
idct_permutation[i] = simple_mmx_permutation[i];
return 1;
case FF_IDCT_PERM_SSE2:
for (i = 0; i < 64; i++)
idct_permutation[i] = (i & 0x38) | idct_sse2_row_perm[i & 7];
return 1;
}
return 0;
}
av_cold void ff_idctdsp_init_x86(IDCTDSPContext *c, AVCodecContext *avctx,
unsigned high_bit_depth)
{
int cpu_flags = av_get_cpu_flags();
if (INLINE_MMX(cpu_flags)) {
if (!high_bit_depth &&
avctx->lowres == 0 &&
(avctx->idct_algo == FF_IDCT_AUTO ||
avctx->idct_algo == FF_IDCT_SIMPLEAUTO ||
avctx->idct_algo == FF_IDCT_SIMPLEMMX)) {
c->idct_put = ff_simple_idct_put_mmx;
c->idct_add = ff_simple_idct_add_mmx;
c->idct = ff_simple_idct_mmx;
c->perm_type = FF_IDCT_PERM_SIMPLE;
}
}
if (EXTERNAL_MMX(cpu_flags)) {
c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
c->put_pixels_clamped = ff_put_pixels_clamped_mmx;
c->add_pixels_clamped = ff_add_pixels_clamped_mmx;
}
if (EXTERNAL_SSE2(cpu_flags)) {
c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_sse2;
c->put_pixels_clamped = ff_put_pixels_clamped_sse2;
c->add_pixels_clamped = ff_add_pixels_clamped_sse2;
}
}
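As a quick illustration of the FF_IDCT_PERM_SSE2 branch above: the permutation only reorders coefficients within each 8-coefficient row of the 8x8 block, while the row index (i & 0x38) is left untouched. A small sketch using the same formula as the code; build_sse2_perm is a hypothetical helper name, not part of FFmpeg.
#include <stdint.h>
#include <stdio.h>
/* Same mapping as the FF_IDCT_PERM_SSE2 case: keep the row (i & 0x38),
 * shuffle the column according to idct_sse2_row_perm. */
static void build_sse2_perm(uint8_t perm[64])
{
    static const uint8_t row_perm[8] = { 0, 4, 1, 5, 2, 6, 3, 7 };
    for (int i = 0; i < 64; i++)
        perm[i] = (i & 0x38) | row_perm[i & 7];
}
int main(void)
{
    uint8_t perm[64];
    build_sse2_perm(perm);
    for (int i = 0; i < 8; i++)          /* first row: 0 4 1 5 2 6 3 7 */
        printf("%d ", perm[i]);
    return 0;
}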

View File

@@ -0,0 +1,735 @@
;******************************************************************************
;* 36 point SSE-optimized IMDCT transform
;* Copyright (c) 2011 Vitor Sessak
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION_RODATA
align 16
ps_mask: dd 0, ~0, ~0, ~0
ps_mask2: dd 0, ~0, 0, ~0
ps_mask3: dd 0, 0, 0, ~0
ps_mask4: dd 0, ~0, 0, 0
ps_val1: dd -0.5, -0.5, -0.8660254038, -0.8660254038
ps_val2: dd 1.0, 1.0, 0.8660254038, 0.8660254038
ps_val3: dd 0.1736481777, 0.1736481777, 0.3420201433, 0.3420201433
ps_val4: dd -0.7660444431, -0.7660444431, 0.8660254038, 0.8660254038
ps_val5: dd -0.9396926208, -0.9396926208, -0.9848077530, -0.9848077530
ps_val6: dd 0.5, 0.5, -0.6427876097, -0.6427876097
ps_val7: dd 1.0, 1.0, -0.6427876097, -0.6427876097
ps_p1p1m1m1: dd 0, 0, 0x80000000, 0x80000000
ps_p1m1p1m1: dd 0, 0x80000000, 0, 0x80000000
ps_cosh: dd 1.0, 0.50190991877167369479, 1.0, 5.73685662283492756461
dd 1.0, 0.51763809020504152469, 1.0, 1.93185165257813657349
dd 1.0, 0.55168895948124587824, -1.0, -1.18310079157624925896
dd 1.0, 0.61038729438072803416, -1.0, -0.87172339781054900991
dd 1.0, 0.70710678118654752439, 0.0, 0.0
ps_cosh_sse3: dd 1.0, -0.50190991877167369479, 1.0, -5.73685662283492756461
dd 1.0, -0.51763809020504152469, 1.0, -1.93185165257813657349
dd 1.0, -0.55168895948124587824, -1.0, 1.18310079157624925896
dd 1.0, -0.61038729438072803416, -1.0, 0.87172339781054900991
dd 1.0, -0.70710678118654752439, 0.0, 0.0
costabs: times 4 dd 0.98480773
times 4 dd 0.93969262
times 4 dd 0.86602539
times 4 dd -0.76604444
times 4 dd -0.64278764
times 4 dd 0.50000000
times 4 dd -0.50000000
times 4 dd -0.34202015
times 4 dd -0.17364818
times 4 dd 0.50190992
times 4 dd 0.51763808
times 4 dd 0.55168896
times 4 dd 0.61038726
times 4 dd 0.70710677
times 4 dd 0.87172341
times 4 dd 1.18310082
times 4 dd 1.93185163
times 4 dd 5.73685646
%define SBLIMIT 32
SECTION .text
%macro PSHUFD 3
%if cpuflag(sse2) && notcpuflag(avx)
pshufd %1, %2, %3
%else
shufps %1, %2, %2, %3
%endif
%endmacro
; input %2={x1,x2,x3,x4}, %3={y1,y2,y3,y4}
; output %1={x3,x4,y1,y2}
%macro BUILDINVHIGHLOW 3
%if cpuflag(avx)
shufps %1, %2, %3, 0x4e
%else
movlhps %1, %3
movhlps %1, %2
%endif
%endmacro
; input %2={x1,x2,x3,x4}, %3={y1,y2,y3,y4}
; output %1={x4,y1,y2,y3}
%macro ROTLEFT 3
%if cpuflag(ssse3)
palignr %1, %3, %2, 12
%else
BUILDINVHIGHLOW %1, %2, %3
shufps %1, %1, %3, 0x99
%endif
%endmacro
%macro INVERTHL 2
%if cpuflag(sse2)
PSHUFD %1, %2, 0x4e
%else
movhlps %1, %2
movlhps %1, %2
%endif
%endmacro
%macro BUTTERF 3
INVERTHL %2, %1
xorps %1, [ps_p1p1m1m1]
addps %1, %2
%if cpuflag(sse3)
mulps %1, %1, [ps_cosh_sse3 + %3]
PSHUFD %2, %1, 0xb1
addsubps %1, %1, %2
%else
mulps %1, [ps_cosh + %3]
PSHUFD %2, %1, 0xb1
xorps %1, [ps_p1m1p1m1]
addps %1, %2
%endif
%endmacro
%macro BUTTERF2 3
%if cpuflag(sse3)
mulps %1, %1, [ps_cosh_sse3 + %3]
PSHUFD %2, %1, 0xe1
addsubps %1, %1, %2
%else
mulps %1, [ps_cosh + %3]
PSHUFD %2, %1, 0xe1
xorps %1, [ps_p1m1p1m1]
addps %1, %2
%endif
%endmacro
%macro STORE 4
movhlps %2, %1
movss [%3 ], %1
movss [%3 + 2*%4], %2
shufps %1, %1, 0xb1
movss [%3 + %4], %1
movhlps %2, %1
movss [%3 + 3*%4], %2
%endmacro
%macro LOAD 4
movlps %1, [%3 ]
movhps %1, [%3 + %4]
movlps %2, [%3 + 2*%4]
movhps %2, [%3 + 3*%4]
shufps %1, %2, 0x88
%endmacro
%macro LOADA64 2
%if cpuflag(avx)
movu %1, [%2]
%else
movlps %1, [%2]
movhps %1, [%2 + 8]
%endif
%endmacro
%macro DEFINE_IMDCT 0
cglobal imdct36_float, 4,4,9, out, buf, in, win
; for(i=17;i>=1;i--) in[i] += in[i-1];
LOADA64 m0, inq
LOADA64 m1, inq + 16
ROTLEFT m5, m0, m1
PSHUFD m6, m0, 0x93
andps m6, m6, [ps_mask]
addps m0, m0, m6
LOADA64 m2, inq + 32
ROTLEFT m7, m1, m2
addps m1, m1, m5
LOADA64 m3, inq + 48
ROTLEFT m5, m2, m3
xorps m4, m4, m4
movlps m4, [inq+64]
BUILDINVHIGHLOW m6, m3, m4
shufps m6, m6, m4, 0xa9
addps m4, m4, m6
addps m2, m2, m7
addps m3, m3, m5
; for(i=17;i>=3;i-=2) in[i] += in[i-2];
movlhps m5, m5, m0
andps m5, m5, [ps_mask3]
BUILDINVHIGHLOW m7, m0, m1
andps m7, m7, [ps_mask2]
addps m0, m0, m5
BUILDINVHIGHLOW m6, m1, m2
andps m6, m6, [ps_mask2]
addps m1, m1, m7
BUILDINVHIGHLOW m7, m2, m3
andps m7, m7, [ps_mask2]
addps m2, m2, m6
movhlps m6, m6, m3
andps m6, m6, [ps_mask4]
addps m3, m3, m7
addps m4, m4, m6
; Populate tmp[]
movlhps m6, m1, m5 ; zero out high values
subps m6, m6, m4
subps m5, m0, m3
%if ARCH_X86_64
SWAP m5, m8
%endif
mulps m7, m2, [ps_val1]
%if ARCH_X86_64
mulps m5, m8, [ps_val2]
%else
mulps m5, m5, [ps_val2]
%endif
addps m7, m7, m5
mulps m5, m6, [ps_val1]
subps m7, m7, m5
%if ARCH_X86_64
SWAP m5, m8
%else
subps m5, m0, m3
%endif
subps m5, m5, m6
addps m5, m5, m2
shufps m6, m4, m3, 0xe4
subps m6, m6, m2
mulps m6, m6, [ps_val3]
addps m4, m4, m1
mulps m4, m4, [ps_val4]
shufps m1, m1, m0, 0xe4
addps m1, m1, m2
mulps m1, m1, [ps_val5]
mulps m3, m3, [ps_val6]
mulps m0, m0, [ps_val7]
addps m0, m0, m3
xorps m2, m1, [ps_p1p1m1m1]
subps m2, m2, m4
addps m2, m2, m0
addps m3, m4, m0
subps m3, m3, m6
xorps m3, m3, [ps_p1p1m1m1]
shufps m0, m0, m4, 0xe4
subps m0, m0, m1
addps m0, m0, m6
BUILDINVHIGHLOW m4, m2, m3
shufps m3, m3, m2, 0x4e
; we have tmp = {SwAPLH(m0), SwAPLH(m7), m3, m4, m5}
BUTTERF m0, m1, 0
BUTTERF m7, m2, 16
BUTTERF m3, m6, 32
BUTTERF m4, m1, 48
BUTTERF2 m5, m1, 64
; permutes:
; m0 0 1 2 3 => 2 6 10 14 m1
; m7 4 5 6 7 => 3 7 11 15 m2
; m3 8 9 10 11 => 17 13 9 5 m3
; m4 12 13 14 15 => 16 12 8 4 m5
; m5 16 17 xx xx => 0 1 xx xx m0
unpckhps m1, m0, m7
unpckhps m6, m3, m4
movhlps m2, m6, m1
movlhps m1, m1, m6
unpcklps m5, m5, m4
unpcklps m3, m3, m7
movhlps m4, m3, m5
movlhps m5, m5, m3
SWAP m4, m3
; permutation done
PSHUFD m6, m2, 0xb1
movss m4, [bufq + 4*68]
movss m7, [bufq + 4*64]
unpcklps m7, m7, m4
mulps m6, m6, [winq + 16*4]
addps m6, m6, m7
movss [outq + 64*SBLIMIT], m6
shufps m6, m6, m6, 0xb1
movss [outq + 68*SBLIMIT], m6
mulps m6, m3, [winq + 4*4]
LOAD m4, m7, bufq + 4*16, 16
addps m6, m6, m4
STORE m6, m7, outq + 16*SBLIMIT, 4*SBLIMIT
shufps m4, m0, m3, 0xb5
mulps m4, m4, [winq + 8*4]
LOAD m7, m6, bufq + 4*32, 16
addps m4, m4, m7
STORE m4, m6, outq + 32*SBLIMIT, 4*SBLIMIT
shufps m3, m3, m2, 0xb1
mulps m3, m3, [winq + 12*4]
LOAD m7, m6, bufq + 4*48, 16
addps m3, m3, m7
STORE m3, m7, outq + 48*SBLIMIT, 4*SBLIMIT
mulps m2, m2, [winq]
LOAD m6, m7, bufq, 16
addps m2, m2, m6
STORE m2, m7, outq, 4*SBLIMIT
mulps m4, m1, [winq + 20*4]
STORE m4, m7, bufq, 16
mulps m3, m5, [winq + 24*4]
STORE m3, m7, bufq + 4*16, 16
shufps m0, m0, m5, 0xb0
mulps m0, m0, [winq + 28*4]
STORE m0, m7, bufq + 4*32, 16
shufps m5, m5, m1, 0xb1
mulps m5, m5, [winq + 32*4]
STORE m5, m7, bufq + 4*48, 16
shufps m1, m1, m1, 0xb1
mulps m1, m1, [winq + 36*4]
movss [bufq + 4*64], m1
shufps m1, m1, 0xb1
movss [bufq + 4*68], m1
RET
%endmacro
%if ARCH_X86_32
INIT_XMM sse
DEFINE_IMDCT
%endif
INIT_XMM sse2
DEFINE_IMDCT
INIT_XMM sse3
DEFINE_IMDCT
INIT_XMM ssse3
DEFINE_IMDCT
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
DEFINE_IMDCT
%endif
INIT_XMM sse
%if ARCH_X86_64
%define SPILL SWAP
%define UNSPILL SWAP
%define SPILLED(x) m %+ x
%else
%define SPILLED(x) [tmpq+(x-8)*16 + 32*4]
%macro SPILL 2 ; xmm#, mempos
movaps SPILLED(%2), m%1
%endmacro
%macro UNSPILL 2
movaps m%1, SPILLED(%2)
%endmacro
%endif
%macro DEFINE_FOUR_IMDCT 0
cglobal four_imdct36_float, 5,5,16, out, buf, in, win, tmp
movlps m0, [inq+64]
movhps m0, [inq+64 + 72]
movlps m3, [inq+64 + 2*72]
movhps m3, [inq+64 + 3*72]
shufps m5, m0, m3, 0xdd
shufps m0, m0, m3, 0x88
mova m1, [inq+48]
movu m6, [inq+48 + 72]
mova m7, [inq+48 + 2*72]
movu m3, [inq+48 + 3*72]
TRANSPOSE4x4PS 1, 6, 7, 3, 4
addps m4, m6, m7
mova [tmpq+4*28], m4
addps m7, m3
addps m6, m1
addps m3, m0
addps m0, m5
addps m0, m7
addps m7, m6
mova [tmpq+4*12], m7
SPILL 3, 12
mova m4, [inq+32]
movu m5, [inq+32 + 72]
mova m2, [inq+32 + 2*72]
movu m7, [inq+32 + 3*72]
TRANSPOSE4x4PS 4, 5, 2, 7, 3
addps m1, m7
SPILL 1, 11
addps m3, m5, m2
SPILL 3, 13
addps m7, m2
addps m5, m4
addps m6, m7
mova [tmpq], m6
addps m7, m5
mova [tmpq+4*16], m7
mova m2, [inq+16]
movu m7, [inq+16 + 72]
mova m1, [inq+16 + 2*72]
movu m6, [inq+16 + 3*72]
TRANSPOSE4x4PS 2, 7, 1, 6, 3
addps m4, m6
addps m6, m1
addps m1, m7
addps m7, m2
addps m5, m6
SPILL 5, 15
addps m6, m7
mulps m6, [costabs + 16*2]
mova [tmpq+4*8], m6
SPILL 1, 10
SPILL 0, 14
mova m1, [inq]
movu m6, [inq + 72]
mova m3, [inq + 2*72]
movu m5, [inq + 3*72]
TRANSPOSE4x4PS 1, 6, 3, 5, 0
addps m2, m5
addps m5, m3
addps m7, m5
addps m3, m6
addps m6, m1
SPILL 7, 8
addps m5, m6
SPILL 6, 9
addps m6, m4, SPILLED(12)
subps m6, m2
UNSPILL 7, 11
SPILL 5, 11
subps m5, m1, m7
mulps m7, [costabs + 16*5]
addps m7, m1
mulps m0, m6, [costabs + 16*6]
addps m0, m5
mova [tmpq+4*24], m0
addps m6, m5
mova [tmpq+4*4], m6
addps m6, m4, m2
mulps m6, [costabs + 16*1]
subps m4, SPILLED(12)
mulps m4, [costabs + 16*8]
addps m2, SPILLED(12)
mulps m2, [costabs + 16*3]
subps m5, m7, m6
subps m5, m2
addps m6, m7
addps m6, m4
addps m7, m2
subps m7, m4
mova [tmpq+4*20], m7
mova m2, [tmpq+4*28]
mova [tmpq+4*28], m5
UNSPILL 7, 13
subps m5, m7, m2
mulps m5, [costabs + 16*7]
UNSPILL 1, 10
mulps m1, [costabs + 16*2]
addps m4, m3, m2
mulps m4, [costabs + 16*4]
addps m2, m7
addps m7, m3
mulps m7, [costabs]
subps m3, m2
mulps m3, [costabs + 16*2]
addps m2, m7, m5
addps m2, m1
SPILL 2, 10
addps m7, m4
subps m7, m1
SPILL 7, 12
subps m5, m4
subps m5, m1
UNSPILL 0, 14
SPILL 5, 13
addps m1, m0, SPILLED(15)
subps m1, SPILLED(8)
mova m4, [costabs + 16*5]
mulps m4, [tmpq]
UNSPILL 2, 9
addps m4, m2
subps m2, [tmpq]
mulps m5, m1, [costabs + 16*6]
addps m5, m2
SPILL 5, 9
addps m2, m1
SPILL 2, 14
UNSPILL 5, 15
subps m7, m5, m0
addps m5, SPILLED(8)
mulps m5, [costabs + 16*1]
mulps m7, [costabs + 16*8]
addps m0, SPILLED(8)
mulps m0, [costabs + 16*3]
subps m2, m4, m5
subps m2, m0
SPILL 2, 15
addps m5, m4
addps m5, m7
addps m4, m0
subps m4, m7
SPILL 4, 8
mova m7, [tmpq+4*16]
mova m2, [tmpq+4*12]
addps m0, m7, m2
subps m0, SPILLED(11)
mulps m0, [costabs + 16*2]
addps m4, m7, SPILLED(11)
mulps m4, [costabs]
subps m7, m2
mulps m7, [costabs + 16*7]
addps m2, SPILLED(11)
mulps m2, [costabs + 16*4]
addps m1, m7, [tmpq+4*8]
addps m1, m4
addps m4, m2
subps m4, [tmpq+4*8]
SPILL 4, 11
subps m7, m2
subps m7, [tmpq+4*8]
addps m4, m6, SPILLED(10)
subps m6, SPILLED(10)
addps m2, m5, m1
mulps m2, [costabs + 16*9]
subps m5, m1
mulps m5, [costabs + 16*17]
subps m1, m4, m2
addps m4, m2
mulps m2, m1, [winq+4*36]
addps m2, [bufq+4*36]
mova [outq+1152], m2
mulps m1, [winq+4*32]
addps m1, [bufq+4*32]
mova [outq+1024], m1
mulps m1, m4, [winq+4*116]
mova [bufq+4*36], m1
mulps m4, [winq+4*112]
mova [bufq+4*32], m4
addps m2, m6, m5
subps m6, m5
mulps m1, m6, [winq+4*68]
addps m1, [bufq+4*68]
mova [outq+2176], m1
mulps m6, [winq]
addps m6, [bufq]
mova [outq], m6
mulps m1, m2, [winq+4*148]
mova [bufq+4*68], m1
mulps m2, [winq+4*80]
mova [bufq], m2
addps m5, m3, [tmpq+4*24]
mova m2, [tmpq+4*24]
subps m2, m3
mova m1, SPILLED(9)
subps m1, m0
mulps m1, [costabs + 16*10]
addps m0, SPILLED(9)
mulps m0, [costabs + 16*16]
addps m6, m5, m1
subps m5, m1
mulps m3, m5, [winq+4*40]
addps m3, [bufq+4*40]
mova [outq+1280], m3
mulps m5, [winq+4*28]
addps m5, [bufq+4*28]
mova [outq+896], m5
mulps m1, m6, [winq+4*120]
mova [bufq+4*40], m1
mulps m6, [winq+4*108]
mova [bufq+4*28], m6
addps m1, m2, m0
subps m2, m0
mulps m5, m2, [winq+4*64]
addps m5, [bufq+4*64]
mova [outq+2048], m5
mulps m2, [winq+4*4]
addps m2, [bufq+4*4]
mova [outq+128], m2
mulps m0, m1, [winq+4*144]
mova [bufq+4*64], m0
mulps m1, [winq+4*84]
mova [bufq+4*4], m1
mova m1, [tmpq+4*28]
mova m5, m1
addps m1, SPILLED(13)
subps m5, SPILLED(13)
UNSPILL 3, 15
addps m2, m7, m3
mulps m2, [costabs + 16*11]
subps m3, m7
mulps m3, [costabs + 16*15]
addps m0, m2, m1
subps m1, m2
SWAP m0, m2
mulps m6, m1, [winq+4*44]
addps m6, [bufq+4*44]
mova [outq+1408], m6
mulps m1, [winq+4*24]
addps m1, [bufq+4*24]
mova [outq+768], m1
mulps m0, m2, [winq+4*124]
mova [bufq+4*44], m0
mulps m2, [winq+4*104]
mova [bufq+4*24], m2
addps m0, m5, m3
subps m5, m3
mulps m1, m5, [winq+4*60]
addps m1, [bufq+4*60]
mova [outq+1920], m1
mulps m5, [winq+4*8]
addps m5, [bufq+4*8]
mova [outq+256], m5
mulps m1, m0, [winq+4*140]
mova [bufq+4*60], m1
mulps m0, [winq+4*88]
mova [bufq+4*8], m0
mova m1, [tmpq+4*20]
addps m1, SPILLED(12)
mova m2, [tmpq+4*20]
subps m2, SPILLED(12)
UNSPILL 7, 8
subps m0, m7, SPILLED(11)
addps m7, SPILLED(11)
mulps m4, m7, [costabs + 16*12]
mulps m0, [costabs + 16*14]
addps m5, m1, m4
subps m1, m4
mulps m7, m1, [winq+4*48]
addps m7, [bufq+4*48]
mova [outq+1536], m7
mulps m1, [winq+4*20]
addps m1, [bufq+4*20]
mova [outq+640], m1
mulps m1, m5, [winq+4*128]
mova [bufq+4*48], m1
mulps m5, [winq+4*100]
mova [bufq+4*20], m5
addps m6, m2, m0
subps m2, m0
mulps m1, m2, [winq+4*56]
addps m1, [bufq+4*56]
mova [outq+1792], m1
mulps m2, [winq+4*12]
addps m2, [bufq+4*12]
mova [outq+384], m2
mulps m0, m6, [winq+4*136]
mova [bufq+4*56], m0
mulps m6, [winq+4*92]
mova [bufq+4*12], m6
UNSPILL 0, 14
mulps m0, [costabs + 16*13]
mova m3, [tmpq+4*4]
addps m2, m0, m3
subps m3, m0
mulps m0, m3, [winq+4*52]
addps m0, [bufq+4*52]
mova [outq+1664], m0
mulps m3, [winq+4*16]
addps m3, [bufq+4*16]
mova [outq+512], m3
mulps m0, m2, [winq+4*132]
mova [bufq+4*52], m0
mulps m2, [winq+4*96]
mova [bufq+4*16], m2
RET
%endmacro
INIT_XMM sse
DEFINE_FOUR_IMDCT
%if HAVE_AVX_EXTERNAL
INIT_XMM avx
DEFINE_FOUR_IMDCT
%endif
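The two scalar loops quoted in the comments of the 36-point IMDCT above (the running sums over the 18 input samples) are the only part that is easy to restate outside SIMD; everything after them is the factored transform itself. A plain-C sketch of just that preprocessing step (illustrative only, operating on an 18-element input as the comments imply):
/* The vectorized code performs these two passes with shuffled/masked adds:
 *   for (i = 17; i >= 1; i--)  in[i] += in[i - 1];
 *   for (i = 17; i >= 3; i -= 2) in[i] += in[i - 2];
 */
static void imdct36_preprocess_ref(float in[18])
{
    for (int i = 17; i >= 1; i--)
        in[i] += in[i - 1];
    for (int i = 17; i >= 3; i -= 2)
        in[i] += in[i - 2];
}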

View File

@@ -0,0 +1,100 @@
/*
* inline assembly helper macros
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_X86_INLINE_ASM_H
#define AVCODEC_X86_INLINE_ASM_H
#include "constants.h"
#define MOVQ_WONE(regd) \
__asm__ volatile ( \
"pcmpeqd %%" #regd ", %%" #regd " \n\t" \
"psrlw $15, %%" #regd ::)
#define JUMPALIGN() __asm__ volatile (".p2align 3"::)
#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%"#regd", %%"#regd ::)
#define MOVQ_BFE(regd) \
__asm__ volatile ( \
"pcmpeqd %%"#regd", %%"#regd" \n\t" \
"paddb %%"#regd", %%"#regd" \n\t" ::)
#ifndef PIC
#define MOVQ_WTWO(regd) __asm__ volatile ("movq %0, %%"#regd" \n\t" :: "m"(ff_pw_2))
#else
// for shared libraries it's better to access constants this way
// pcmpeqd -> -1
#define MOVQ_WTWO(regd) \
__asm__ volatile ( \
"pcmpeqd %%"#regd", %%"#regd" \n\t" \
"psrlw $15, %%"#regd" \n\t" \
"psllw $1, %%"#regd" \n\t"::)
#endif
// using regr as temporary and for the output result
// the first argument is unmodified and the second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
"movq "#rega", "#regr" \n\t" \
"pand "#regb", "#regr" \n\t" \
"pxor "#rega", "#regb" \n\t" \
"pand "#regfe", "#regb" \n\t" \
"psrlq $1, "#regb" \n\t" \
"paddb "#regb", "#regr" \n\t"
#define PAVGB_MMX(rega, regb, regr, regfe) \
"movq "#rega", "#regr" \n\t" \
"por "#regb", "#regr" \n\t" \
"pxor "#rega", "#regb" \n\t" \
"pand "#regfe", "#regb" \n\t" \
"psrlq $1, "#regb" \n\t" \
"psubb "#regb", "#regr" \n\t"
// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
"movq "#rega", "#regr" \n\t" \
"movq "#regc", "#regp" \n\t" \
"pand "#regb", "#regr" \n\t" \
"pand "#regd", "#regp" \n\t" \
"pxor "#rega", "#regb" \n\t" \
"pxor "#regc", "#regd" \n\t" \
"pand %%mm6, "#regb" \n\t" \
"pand %%mm6, "#regd" \n\t" \
"psrlq $1, "#regb" \n\t" \
"psrlq $1, "#regd" \n\t" \
"paddb "#regb", "#regr" \n\t" \
"paddb "#regd", "#regp" \n\t"
#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
"movq "#rega", "#regr" \n\t" \
"movq "#regc", "#regp" \n\t" \
"por "#regb", "#regr" \n\t" \
"por "#regd", "#regp" \n\t" \
"pxor "#rega", "#regb" \n\t" \
"pxor "#regc", "#regd" \n\t" \
"pand %%mm6, "#regb" \n\t" \
"pand %%mm6, "#regd" \n\t" \
"psrlq $1, "#regd" \n\t" \
"psrlq $1, "#regb" \n\t" \
"psubb "#regb", "#regr" \n\t" \
"psubb "#regd", "#regp" \n\t"
#endif /* AVCODEC_X86_INLINE_ASM_H */
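The two PAVGB macro families above rely on the classic branch-free byte-average identities: (a & b) + (((a ^ b) & 0xfe) >> 1) averages rounding down, and (a | b) - (((a ^ b) & 0xfe) >> 1) averages rounding up. The 0xfe mask (regfe / mm6) keeps the low bit of each byte from bleeding into its neighbour when the whole 64-bit register is shifted by one. A scalar sketch of the same identities (illustrative helpers, not part of this header):
#include <assert.h>
#include <stdint.h>
/* Byte averages without widening, as in PAVGB_MMX_NO_RND / PAVGB_MMX.
 * Per single byte the & 0xfe is a no-op; in the packed SIMD form it stops
 * the shifted-out bit of one byte from entering the next byte. */
static uint8_t avg_down(uint8_t a, uint8_t b) /* floor((a + b) / 2) */
{
    return (a & b) + (((a ^ b) & 0xfe) >> 1);
}
static uint8_t avg_up(uint8_t a, uint8_t b)   /* (a + b + 1) / 2 */
{
    return (a | b) - (((a ^ b) & 0xfe) >> 1);
}
int main(void)
{
    for (int a = 0; a < 256; a++)
        for (int b = 0; b < 256; b++) {
            assert(avg_down(a, b) == (a + b) / 2);
            assert(avg_up(a, b)   == (a + b + 1) / 2);
        }
    return 0;
}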

View File

@@ -0,0 +1,144 @@
;******************************************************************************
;* SIMD-optimized JPEG2000 DSP functions
;* Copyright (c) 2014 Nicolas Bertrand
;* Copyright (c) 2015 James Almer
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION_RODATA 32
pf_ict0: times 8 dd 1.402
pf_ict1: times 8 dd 0.34413
pf_ict2: times 8 dd 0.71414
pf_ict3: times 8 dd 1.772
SECTION .text
;***********************************************************************
; ff_ict_float_<opt>(float *src0, float *src1, float *src2, int csize)
;***********************************************************************
%macro ICT_FLOAT 1
cglobal ict_float, 4, 4, %1, src0, src1, src2, csize
shl csized, 2
add src0q, csizeq
add src1q, csizeq
add src2q, csizeq
neg csizeq
movaps m6, [pf_ict0]
movaps m7, [pf_ict1]
%define ICT0 m6
%define ICT1 m7
%if ARCH_X86_64
movaps m8, [pf_ict2]
%define ICT2 m8
%if cpuflag(avx)
movaps m3, [pf_ict3]
%define ICT3 m3
%else
movaps m9, [pf_ict3]
%define ICT3 m9
%endif
%else ; ARCH_X86_32
%define ICT2 [pf_ict2]
%if cpuflag(avx)
movaps m3, [pf_ict3]
%define ICT3 m3
%else
%define ICT3 [pf_ict3]
%endif
%endif ; ARCH
align 16
.loop:
movaps m0, [src0q+csizeq]
movaps m1, [src1q+csizeq]
movaps m2, [src2q+csizeq]
%if cpuflag(avx)
mulps m5, m1, ICT1
mulps m4, m2, ICT0
mulps m1, m1, ICT3
mulps m2, m2, ICT2
subps m5, m0, m5
%else ; sse
movaps m3, m1
movaps m4, m2
movaps m5, m0
mulps m3, ICT1
mulps m4, ICT0
mulps m1, ICT3
mulps m2, ICT2
subps m5, m3
%endif
addps m4, m4, m0
addps m0, m0, m1
subps m5, m5, m2
movaps [src0q+csizeq], m4
movaps [src2q+csizeq], m0
movaps [src1q+csizeq], m5
add csizeq, mmsize
jl .loop
REP_RET
%endmacro
INIT_XMM sse
ICT_FLOAT 10
INIT_YMM avx
ICT_FLOAT 9
;***************************************************************************
; ff_rct_int_<opt>(int32_t *src0, int32_t *src1, int32_t *src2, int csize)
;***************************************************************************
%macro RCT_INT 0
cglobal rct_int, 4, 4, 4, src0, src1, src2, csize
shl csized, 2
add src0q, csizeq
add src1q, csizeq
add src2q, csizeq
neg csizeq
align 16
.loop:
mova m1, [src1q+csizeq]
mova m2, [src2q+csizeq]
mova m0, [src0q+csizeq]
paddd m3, m1, m2
psrad m3, 2
psubd m0, m3
paddd m1, m0
paddd m2, m0
mova [src1q+csizeq], m0
mova [src2q+csizeq], m1
mova [src0q+csizeq], m2
add csizeq, mmsize
jl .loop
REP_RET
%endmacro
INIT_XMM sse2
RCT_INT
%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2
RCT_INT
%endif
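Read back from the vector loops above, the per-element arithmetic is the usual pair of JPEG 2000 colour transforms: the irreversible transform (ICT) uses the four constants declared in the RODATA section, and the reversible transform (RCT) is the integer lifting form. A scalar sketch of one loop iteration per element (illustrative *_ref names; the channel rotation on store mirrors the order of the stores in the loops):
#include <stdint.h>
/* ICT: src0 = Y, src1 = Cb, src2 = Cr on input; written back as
 * src0 <- R, src1 <- G, src2 <- B, matching the movaps stores. */
static void ict_float_ref(float *src0, float *src1, float *src2, int csize)
{
    for (int i = 0; i < csize; i++) {
        float y = src0[i], cb = src1[i], cr = src2[i];
        src0[i] = y + 1.402f   * cr;                   /* R */
        src1[i] = y - 0.34413f * cb - 0.71414f * cr;   /* G */
        src2[i] = y + 1.772f   * cb;                   /* B */
    }
}
/* RCT: integer lifting, the paddd/psubd/psrad sequence in the loop above. */
static void rct_int_ref(int32_t *src0, int32_t *src1, int32_t *src2, int csize)
{
    for (int i = 0; i < csize; i++) {
        int32_t y = src0[i], cb = src1[i], cr = src2[i];
        int32_t g = y - ((cb + cr) >> 2);
        src1[i] = g;              /* stored to src1 */
        src2[i] = cb + g;         /* stored to src2 */
        src0[i] = cr + g;         /* stored to src0 */
    }
}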

View File

@@ -0,0 +1,50 @@
/*
* SIMD optimized JPEG 2000 DSP functions
* Copyright (c) 2015 James Almer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/jpeg2000dsp.h"
void ff_ict_float_sse(void *src0, void *src1, void *src2, int csize);
void ff_ict_float_avx(void *src0, void *src1, void *src2, int csize);
void ff_rct_int_sse2 (void *src0, void *src1, void *src2, int csize);
void ff_rct_int_avx2 (void *src0, void *src1, void *src2, int csize);
av_cold void ff_jpeg2000dsp_init_x86(Jpeg2000DSPContext *c)
{
int cpu_flags = av_get_cpu_flags();
if (EXTERNAL_SSE(cpu_flags)) {
c->mct_decode[FF_DWT97] = ff_ict_float_sse;
}
if (EXTERNAL_SSE2(cpu_flags)) {
c->mct_decode[FF_DWT53] = ff_rct_int_sse2;
}
if (EXTERNAL_AVX_FAST(cpu_flags)) {
c->mct_decode[FF_DWT97] = ff_ict_float_avx;
}
if (EXTERNAL_AVX2(cpu_flags)) {
c->mct_decode[FF_DWT53] = ff_rct_int_avx2;
}
}

View File

@@ -0,0 +1,157 @@
;******************************************************************************
;* Copyright (c) 2008 Loren Merritt
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION .text
%macro SCALARPRODUCT 0
; int ff_scalarproduct_and_madd_int16(int16_t *v1, int16_t *v2, int16_t *v3,
; int order, int mul)
cglobal scalarproduct_and_madd_int16, 4,4,8, v1, v2, v3, order, mul
shl orderq, 1
movd m7, mulm
%if mmsize == 16
pshuflw m7, m7, 0
punpcklqdq m7, m7
%else
pshufw m7, m7, 0
%endif
pxor m6, m6
add v1q, orderq
add v2q, orderq
add v3q, orderq
neg orderq
.loop:
movu m0, [v2q + orderq]
movu m1, [v2q + orderq + mmsize]
mova m4, [v1q + orderq]
mova m5, [v1q + orderq + mmsize]
movu m2, [v3q + orderq]
movu m3, [v3q + orderq + mmsize]
pmaddwd m0, m4
pmaddwd m1, m5
pmullw m2, m7
pmullw m3, m7
paddd m6, m0
paddd m6, m1
paddw m2, m4
paddw m3, m5
mova [v1q + orderq], m2
mova [v1q + orderq + mmsize], m3
add orderq, mmsize*2
jl .loop
HADDD m6, m0
movd eax, m6
RET
%endmacro
INIT_MMX mmxext
SCALARPRODUCT
INIT_XMM sse2
SCALARPRODUCT
%macro SCALARPRODUCT_LOOP 1
align 16
.loop%1:
sub orderq, mmsize*2
%if %1
mova m1, m4
mova m4, [v2q + orderq]
mova m0, [v2q + orderq + mmsize]
palignr m1, m0, %1
palignr m0, m4, %1
mova m3, m5
mova m5, [v3q + orderq]
mova m2, [v3q + orderq + mmsize]
palignr m3, m2, %1
palignr m2, m5, %1
%else
mova m0, [v2q + orderq]
mova m1, [v2q + orderq + mmsize]
mova m2, [v3q + orderq]
mova m3, [v3q + orderq + mmsize]
%endif
%define t0 [v1q + orderq]
%define t1 [v1q + orderq + mmsize]
%if ARCH_X86_64
mova m8, t0
mova m9, t1
%define t0 m8
%define t1 m9
%endif
pmaddwd m0, t0
pmaddwd m1, t1
pmullw m2, m7
pmullw m3, m7
paddw m2, t0
paddw m3, t1
paddd m6, m0
paddd m6, m1
mova [v1q + orderq], m2
mova [v1q + orderq + mmsize], m3
jg .loop%1
%if %1
jmp .end
%endif
%endmacro
; int ff_scalarproduct_and_madd_int16(int16_t *v1, int16_t *v2, int16_t *v3,
; int order, int mul)
INIT_XMM ssse3
cglobal scalarproduct_and_madd_int16, 4,5,10, v1, v2, v3, order, mul
shl orderq, 1
movd m7, mulm
pshuflw m7, m7, 0
punpcklqdq m7, m7
pxor m6, m6
mov r4d, v2d
and r4d, 15
and v2q, ~15
and v3q, ~15
mova m4, [v2q + orderq]
mova m5, [v3q + orderq]
; linear is faster than branch tree or jump table, because the branches taken are cyclic (i.e. predictable)
cmp r4d, 0
je .loop0
cmp r4d, 2
je .loop2
cmp r4d, 4
je .loop4
cmp r4d, 6
je .loop6
cmp r4d, 8
je .loop8
cmp r4d, 10
je .loop10
cmp r4d, 12
je .loop12
SCALARPRODUCT_LOOP 14
SCALARPRODUCT_LOOP 12
SCALARPRODUCT_LOOP 10
SCALARPRODUCT_LOOP 8
SCALARPRODUCT_LOOP 6
SCALARPRODUCT_LOOP 4
SCALARPRODUCT_LOOP 2
SCALARPRODUCT_LOOP 0
.end:
HADDD m6, m0
movd eax, m6
RET
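All three variants above compute the same thing; only the load alignment handling differs. A plain-C sketch of the contract (illustrative only, with the same 16-bit wraparound that the pmullw/paddw pair gives):
#include <stdint.h>
/* Returns sum(v1[i] * v2[i]) over the *original* v1, and updates
 * v1[i] += mul * v3[i] in 16-bit arithmetic, as the SIMD loops do. */
static int32_t scalarproduct_and_madd_int16_ref(int16_t *v1, const int16_t *v2,
                                                const int16_t *v3,
                                                int order, int mul)
{
    int32_t res = 0;
    for (int i = 0; i < order; i++) {
        res  += v1[i] * v2[i];
        v1[i] = (int16_t)(v1[i] + mul * v3[i]);   /* wraps like pmullw/paddw */
    }
    return res;
}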

View File

@@ -0,0 +1,49 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/lossless_audiodsp.h"
int32_t ff_scalarproduct_and_madd_int16_mmxext(int16_t *v1, const int16_t *v2,
const int16_t *v3,
int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2,
const int16_t *v3,
int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2,
const int16_t *v3,
int order, int mul);
av_cold void ff_llauddsp_init_x86(LLAudDSPContext *c)
{
#if HAVE_YASM
int cpu_flags = av_get_cpu_flags();
if (EXTERNAL_MMXEXT(cpu_flags))
c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmxext;
if (EXTERNAL_SSE2(cpu_flags))
c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
if (EXTERNAL_SSSE3(cpu_flags) &&
!(cpu_flags & (AV_CPU_FLAG_SSE42 | AV_CPU_FLAG_3DNOW))) // cachesplit
c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
#endif
}

View File

@@ -0,0 +1,294 @@
;******************************************************************************
;* SIMD lossless video DSP utils
;* Copyright (c) 2008 Loren Merritt
;* Copyright (c) 2014 Michael Niedermayer
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION_RODATA
pb_ef: times 8 db 14,15
pb_67: times 8 db 6, 7
pb_zzzz2323zzzzabab: db -1,-1,-1,-1, 2, 3, 2, 3,-1,-1,-1,-1,10,11,10,11
pb_zzzzzzzz67676767: db -1,-1,-1,-1,-1,-1,-1,-1, 6, 7, 6, 7, 6, 7, 6, 7
SECTION .text
%macro INT16_LOOP 2 ; %1 = a/u (aligned/unaligned), %2 = add/sub
movd m4, maskd
SPLATW m4, m4
add wd, wd
test wq, 2*mmsize - 1
jz %%.tomainloop
push tmpq
%%.wordloop:
sub wq, 2
%ifidn %2, add
mov tmpw, [srcq+wq]
add tmpw, [dstq+wq]
%else
mov tmpw, [src1q+wq]
sub tmpw, [src2q+wq]
%endif
and tmpw, maskw
mov [dstq+wq], tmpw
test wq, 2*mmsize - 1
jnz %%.wordloop
pop tmpq
%%.tomainloop:
%ifidn %2, add
add srcq, wq
%else
add src1q, wq
add src2q, wq
%endif
add dstq, wq
neg wq
jz %%.end
%%.loop:
%ifidn %2, add
mov%1 m0, [srcq+wq]
mov%1 m1, [dstq+wq]
mov%1 m2, [srcq+wq+mmsize]
mov%1 m3, [dstq+wq+mmsize]
%else
mov%1 m0, [src1q+wq]
mov%1 m1, [src2q+wq]
mov%1 m2, [src1q+wq+mmsize]
mov%1 m3, [src2q+wq+mmsize]
%endif
p%2w m0, m1
p%2w m2, m3
pand m0, m4
pand m2, m4
mov%1 [dstq+wq] , m0
mov%1 [dstq+wq+mmsize], m2
add wq, 2*mmsize
jl %%.loop
%%.end:
RET
%endmacro
INIT_MMX mmx
cglobal add_int16, 4,4,5, dst, src, mask, w, tmp
INT16_LOOP a, add
INIT_XMM sse2
cglobal add_int16, 4,4,5, dst, src, mask, w, tmp
test srcq, mmsize-1
jnz .unaligned
test dstq, mmsize-1
jnz .unaligned
INT16_LOOP a, add
.unaligned:
INT16_LOOP u, add
INIT_MMX mmx
cglobal diff_int16, 5,5,5, dst, src1, src2, mask, w, tmp
INT16_LOOP a, sub
INIT_XMM sse2
cglobal diff_int16, 5,5,5, dst, src1, src2, mask, w, tmp
test src1q, mmsize-1
jnz .unaligned
test src2q, mmsize-1
jnz .unaligned
test dstq, mmsize-1
jnz .unaligned
INT16_LOOP a, sub
.unaligned:
INT16_LOOP u, sub
%macro ADD_HFYU_LEFT_LOOP_INT16 2 ; %1 = dst alignment (a/u), %2 = src alignment (a/u)
add wd, wd
add srcq, wq
add dstq, wq
neg wq
%%.loop:
mov%2 m1, [srcq+wq]
mova m2, m1
pslld m1, 16
paddw m1, m2
mova m2, m1
pshufb m1, m3
paddw m1, m2
pshufb m0, m5
%if mmsize == 16
mova m2, m1
pshufb m1, m4
paddw m1, m2
%endif
paddw m0, m1
pand m0, m7
%ifidn %1, a
mova [dstq+wq], m0
%else
movq [dstq+wq], m0
movhps [dstq+wq+8], m0
%endif
add wq, mmsize
jl %%.loop
mov eax, mmsize-1
sub eax, wd
mov wd, eax
shl wd, 8
lea eax, [wd+eax-1]
movd m1, eax
pshufb m0, m1
movd eax, m0
RET
%endmacro
; int add_hfyu_left_pred_int16(uint16_t *dst, const uint16_t *src, unsigned mask, int w, int left)
INIT_MMX ssse3
cglobal add_hfyu_left_pred_int16, 4,4,8, dst, src, mask, w, left
.skip_prologue:
mova m5, [pb_67]
mova m3, [pb_zzzz2323zzzzabab]
movd m0, leftm
psllq m0, 48
movd m7, maskm
SPLATW m7, m7
ADD_HFYU_LEFT_LOOP_INT16 a, a
INIT_XMM sse4
cglobal add_hfyu_left_pred_int16, 4,4,8, dst, src, mask, w, left
mova m5, [pb_ef]
mova m4, [pb_zzzzzzzz67676767]
mova m3, [pb_zzzz2323zzzzabab]
movd m0, leftm
pslldq m0, 14
movd m7, maskm
SPLATW m7, m7
test srcq, 15
jnz .src_unaligned
test dstq, 15
jnz .dst_unaligned
ADD_HFYU_LEFT_LOOP_INT16 a, a
.dst_unaligned:
ADD_HFYU_LEFT_LOOP_INT16 u, a
.src_unaligned:
ADD_HFYU_LEFT_LOOP_INT16 u, u
; void add_hfyu_median_pred_int16_mmxext(uint16_t *dst, const uint16_t *top, const uint16_t *diff, unsigned mask, int w, int *left, int *left_top)
INIT_MMX mmxext
cglobal add_hfyu_median_pred_int16, 7,7,0, dst, top, diff, mask, w, left, left_top
add wd, wd
movd mm6, maskd
SPLATW mm6, mm6
movq mm0, [topq]
movq mm2, mm0
movd mm4, [left_topq]
psllq mm2, 16
movq mm1, mm0
por mm4, mm2
movd mm3, [leftq]
psubw mm0, mm4 ; t-tl
add dstq, wq
add topq, wq
add diffq, wq
neg wq
jmp .skip
.loop:
movq mm4, [topq+wq]
movq mm0, mm4
psllq mm4, 16
por mm4, mm1
movq mm1, mm0 ; t
psubw mm0, mm4 ; t-tl
.skip:
movq mm2, [diffq+wq]
%assign i 0
%rep 4
movq mm4, mm0
paddw mm4, mm3 ; t-tl+l
pand mm4, mm6
movq mm5, mm3
pmaxsw mm3, mm1
pminsw mm5, mm1
pminsw mm3, mm4
pmaxsw mm3, mm5 ; median
paddw mm3, mm2 ; +residual
pand mm3, mm6
%if i==0
movq mm7, mm3
psllq mm7, 48
%else
movq mm4, mm3
psrlq mm7, 16
psllq mm4, 48
por mm7, mm4
%endif
%if i<3
psrlq mm0, 16
psrlq mm1, 16
psrlq mm2, 16
%endif
%assign i i+1
%endrep
movq [dstq+wq], mm7
add wq, 8
jl .loop
movzx r2d, word [dstq-2]
mov [leftq], r2d
movzx r2d, word [topq-2]
mov [left_topq], r2d
RET
cglobal sub_hfyu_median_pred_int16, 7,7,0, dst, src1, src2, mask, w, left, left_top
add wd, wd
movd mm7, maskd
SPLATW mm7, mm7
movq mm0, [src1q]
movq mm2, [src2q]
psllq mm0, 16
psllq mm2, 16
movd mm6, [left_topq]
por mm0, mm6
movd mm6, [leftq]
por mm2, mm6
xor maskq, maskq
.loop:
movq mm1, [src1q + maskq]
movq mm3, [src2q + maskq]
movq mm4, mm2
psubw mm2, mm0
paddw mm2, mm1
pand mm2, mm7
movq mm5, mm4
pmaxsw mm4, mm1
pminsw mm1, mm5
pminsw mm4, mm2
pmaxsw mm4, mm1
psubw mm3, mm4
pand mm3, mm7
movq [dstq + maskq], mm3
add maskq, 8
movq mm0, [src1q + maskq - 2]
movq mm2, [src2q + maskq - 2]
cmp maskq, wq
jb .loop
movzx maskd, word [src1q + wq - 2]
mov [left_topq], maskd
movzx maskd, word [src2q + wq - 2]
mov [leftq], maskd
RET
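The INT16_LOOP macro at the top of this file implements both the add and diff entry points; in scalar terms each is a masked add or subtract over w 16-bit samples, where the mask carries the active bit depth. A plain-C sketch (illustrative only, matching the prototypes in the init file):
#include <stdint.h>
static void add_int16_ref(uint16_t *dst, const uint16_t *src,
                          unsigned mask, int w)
{
    for (int i = 0; i < w; i++)
        dst[i] = (dst[i] + src[i]) & mask;      /* paddw + pand */
}
static void diff_int16_ref(uint16_t *dst, const uint16_t *src1,
                           const uint16_t *src2, unsigned mask, int w)
{
    for (int i = 0; i < w; i++)
        dst[i] = (src1[i] - src2[i]) & mask;    /* psubw + pand */
}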

View File

@@ -0,0 +1,62 @@
/*
* Lossless video DSP utils
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "../lossless_videodsp.h"
#include "libavutil/pixdesc.h"
#include "libavutil/x86/cpu.h"
void ff_add_int16_mmx(uint16_t *dst, const uint16_t *src, unsigned mask, int w);
void ff_add_int16_sse2(uint16_t *dst, const uint16_t *src, unsigned mask, int w);
void ff_diff_int16_mmx (uint16_t *dst, const uint16_t *src1, const uint16_t *src2, unsigned mask, int w);
void ff_diff_int16_sse2(uint16_t *dst, const uint16_t *src1, const uint16_t *src2, unsigned mask, int w);
int ff_add_hfyu_left_pred_int16_ssse3(uint16_t *dst, const uint16_t *src, unsigned mask, int w, unsigned acc);
int ff_add_hfyu_left_pred_int16_sse4(uint16_t *dst, const uint16_t *src, unsigned mask, int w, unsigned acc);
void ff_add_hfyu_median_pred_int16_mmxext(uint16_t *dst, const uint16_t *top, const uint16_t *diff, unsigned mask, int w, int *left, int *left_top);
void ff_sub_hfyu_median_pred_int16_mmxext(uint16_t *dst, const uint16_t *src1, const uint16_t *src2, unsigned mask, int w, int *left, int *left_top);
void ff_llviddsp_init_x86(LLVidDSPContext *c, AVCodecContext *avctx)
{
int cpu_flags = av_get_cpu_flags();
const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(avctx->pix_fmt);
if (EXTERNAL_MMX(cpu_flags)) {
c->add_int16 = ff_add_int16_mmx;
c->diff_int16 = ff_diff_int16_mmx;
}
if (EXTERNAL_MMXEXT(cpu_flags) && pix_desc->comp[0].depth_minus1<15) {
c->add_hfyu_median_pred_int16 = ff_add_hfyu_median_pred_int16_mmxext;
c->sub_hfyu_median_pred_int16 = ff_sub_hfyu_median_pred_int16_mmxext;
}
if (EXTERNAL_SSE2(cpu_flags)) {
c->add_int16 = ff_add_int16_sse2;
c->diff_int16 = ff_diff_int16_sse2;
}
if (EXTERNAL_SSSE3(cpu_flags)) {
c->add_hfyu_left_pred_int16 = ff_add_hfyu_left_pred_int16_ssse3;
}
if (EXTERNAL_SSE4(cpu_flags)) {
c->add_hfyu_left_pred_int16 = ff_add_hfyu_left_pred_int16_sse4;
}
}

View File

@@ -0,0 +1,162 @@
/*
* SIMD-optimized LPC functions
* Copyright (c) 2007 Loren Merritt
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/mem.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/lpc.h"
DECLARE_ASM_CONST(16, double, pd_1)[2] = { 1.0, 1.0 };
DECLARE_ASM_CONST(16, double, pd_2)[2] = { 2.0, 2.0 };
#if HAVE_SSE2_INLINE
static void lpc_apply_welch_window_sse2(const int32_t *data, int len,
double *w_data)
{
double c = 2.0 / (len-1.0);
int n2 = len>>1;
x86_reg i = -n2*sizeof(int32_t);
x86_reg j = n2*sizeof(int32_t);
__asm__ volatile(
"movsd %4, %%xmm7 \n\t"
"movapd "MANGLE(pd_1)", %%xmm6 \n\t"
"movapd "MANGLE(pd_2)", %%xmm5 \n\t"
"movlhps %%xmm7, %%xmm7 \n\t"
"subpd %%xmm5, %%xmm7 \n\t"
"addsd %%xmm6, %%xmm7 \n\t"
"test $1, %5 \n\t"
"jz 2f \n\t"
#define WELCH(MOVPD, offset)\
"1: \n\t"\
"movapd %%xmm7, %%xmm1 \n\t"\
"mulpd %%xmm1, %%xmm1 \n\t"\
"movapd %%xmm6, %%xmm0 \n\t"\
"subpd %%xmm1, %%xmm0 \n\t"\
"pshufd $0x4e, %%xmm0, %%xmm1 \n\t"\
"cvtpi2pd (%3,%0), %%xmm2 \n\t"\
"cvtpi2pd "#offset"*4(%3,%1), %%xmm3 \n\t"\
"mulpd %%xmm0, %%xmm2 \n\t"\
"mulpd %%xmm1, %%xmm3 \n\t"\
"movapd %%xmm2, (%2,%0,2) \n\t"\
MOVPD" %%xmm3, "#offset"*8(%2,%1,2) \n\t"\
"subpd %%xmm5, %%xmm7 \n\t"\
"sub $8, %1 \n\t"\
"add $8, %0 \n\t"\
"jl 1b \n\t"\
WELCH("movupd", -1)
"jmp 3f \n\t"
"2: \n\t"
WELCH("movapd", -2)
"3: \n\t"
:"+&r"(i), "+&r"(j)
:"r"(w_data+n2), "r"(data+n2), "m"(c), "r"(len)
NAMED_CONSTRAINTS_ARRAY_ADD(pd_1,pd_2)
XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3",
"%xmm5", "%xmm6", "%xmm7")
);
#undef WELCH
}
static void lpc_compute_autocorr_sse2(const double *data, int len, int lag,
double *autoc)
{
int j;
if((x86_reg)data & 15)
data++;
for(j=0; j<lag; j+=2){
x86_reg i = -len*sizeof(double);
if(j == lag-2) {
__asm__ volatile(
"movsd "MANGLE(pd_1)", %%xmm0 \n\t"
"movsd "MANGLE(pd_1)", %%xmm1 \n\t"
"movsd "MANGLE(pd_1)", %%xmm2 \n\t"
"1: \n\t"
"movapd (%2,%0), %%xmm3 \n\t"
"movupd -8(%3,%0), %%xmm4 \n\t"
"movapd (%3,%0), %%xmm5 \n\t"
"mulpd %%xmm3, %%xmm4 \n\t"
"mulpd %%xmm3, %%xmm5 \n\t"
"mulpd -16(%3,%0), %%xmm3 \n\t"
"addpd %%xmm4, %%xmm1 \n\t"
"addpd %%xmm5, %%xmm0 \n\t"
"addpd %%xmm3, %%xmm2 \n\t"
"add $16, %0 \n\t"
"jl 1b \n\t"
"movhlps %%xmm0, %%xmm3 \n\t"
"movhlps %%xmm1, %%xmm4 \n\t"
"movhlps %%xmm2, %%xmm5 \n\t"
"addsd %%xmm3, %%xmm0 \n\t"
"addsd %%xmm4, %%xmm1 \n\t"
"addsd %%xmm5, %%xmm2 \n\t"
"movsd %%xmm0, (%1) \n\t"
"movsd %%xmm1, 8(%1) \n\t"
"movsd %%xmm2, 16(%1) \n\t"
:"+&r"(i)
:"r"(autoc+j), "r"(data+len), "r"(data+len-j)
NAMED_CONSTRAINTS_ARRAY_ADD(pd_1)
:"memory"
);
} else {
__asm__ volatile(
"movsd "MANGLE(pd_1)", %%xmm0 \n\t"
"movsd "MANGLE(pd_1)", %%xmm1 \n\t"
"1: \n\t"
"movapd (%3,%0), %%xmm3 \n\t"
"movupd -8(%4,%0), %%xmm4 \n\t"
"mulpd %%xmm3, %%xmm4 \n\t"
"mulpd (%4,%0), %%xmm3 \n\t"
"addpd %%xmm4, %%xmm1 \n\t"
"addpd %%xmm3, %%xmm0 \n\t"
"add $16, %0 \n\t"
"jl 1b \n\t"
"movhlps %%xmm0, %%xmm3 \n\t"
"movhlps %%xmm1, %%xmm4 \n\t"
"addsd %%xmm3, %%xmm0 \n\t"
"addsd %%xmm4, %%xmm1 \n\t"
"movsd %%xmm0, %1 \n\t"
"movsd %%xmm1, %2 \n\t"
:"+&r"(i), "=m"(autoc[j]), "=m"(autoc[j+1])
:"r"(data+len), "r"(data+len-j)
NAMED_CONSTRAINTS_ARRAY_ADD(pd_1)
);
}
}
}
#endif /* HAVE_SSE2_INLINE */
av_cold void ff_lpc_init_x86(LPCContext *c)
{
#if HAVE_SSE2_INLINE
int cpu_flags = av_get_cpu_flags();
if (HAVE_SSE2_INLINE && cpu_flags & (AV_CPU_FLAG_SSE2 | AV_CPU_FLAG_SSE2SLOW)) {
c->lpc_apply_welch_window = lpc_apply_welch_window_sse2;
c->lpc_compute_autocorr = lpc_compute_autocorr_sse2;
}
#endif /* HAVE_SSE2_INLINE */
}
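For orientation, lpc_compute_autocorr_sse2 above evaluates two (or, on the last pass, three) lags per trip through its inner loop; the quantity it accumulates is the ordinary autocorrelation of the windowed samples, with each accumulator seeded from pd_1, i.e. starting at 1.0. A textbook scalar sketch, not a byte-exact restatement of the SIMD code:
/* Roughly what the SSE2 loops accumulate: one autocorrelation value per lag,
 * with each sum seeded at 1.0 before the multiply-add pass. */
static void lpc_compute_autocorr_ref(const double *data, int len, int lag,
                                     double *autoc)
{
    for (int j = 0; j < lag; j++) {
        double sum = 1.0;
        for (int i = j; i < len; i++)
            sum += data[i] * data[i - j];
        autoc[j] = sum;
    }
}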

View File

@@ -0,0 +1,133 @@
/*
* simple math operations
* Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_X86_MATHOPS_H
#define AVCODEC_X86_MATHOPS_H
#include "config.h"
#include "libavutil/common.h"
#include "libavutil/x86/asm.h"
#if HAVE_INLINE_ASM
#if ARCH_X86_32
#define MULL MULL
static av_always_inline av_const int MULL(int a, int b, unsigned shift)
{
int rt, dummy;
__asm__ (
"imull %3 \n\t"
"shrdl %4, %%edx, %%eax \n\t"
:"=a"(rt), "=d"(dummy)
:"a"(a), "rm"(b), "ci"((uint8_t)shift)
);
return rt;
}
#define MULH MULH
static av_always_inline av_const int MULH(int a, int b)
{
int rt, dummy;
__asm__ (
"imull %3"
:"=d"(rt), "=a"(dummy)
:"a"(a), "rm"(b)
);
return rt;
}
#define MUL64 MUL64
static av_always_inline av_const int64_t MUL64(int a, int b)
{
int64_t rt;
__asm__ (
"imull %2"
:"=A"(rt)
:"a"(a), "rm"(b)
);
return rt;
}
#endif /* ARCH_X86_32 */
#if HAVE_I686
/* median of 3 */
#define mid_pred mid_pred
static inline av_const int mid_pred(int a, int b, int c)
{
int i=b;
__asm__ (
"cmp %2, %1 \n\t"
"cmovg %1, %0 \n\t"
"cmovg %2, %1 \n\t"
"cmp %3, %1 \n\t"
"cmovl %3, %1 \n\t"
"cmp %1, %0 \n\t"
"cmovg %1, %0 \n\t"
:"+&r"(i), "+&r"(a)
:"r"(b), "r"(c)
);
return i;
}
#if HAVE_6REGS
#define COPY3_IF_LT(x, y, a, b, c, d)\
__asm__ volatile(\
"cmpl %0, %3 \n\t"\
"cmovl %3, %0 \n\t"\
"cmovl %4, %1 \n\t"\
"cmovl %5, %2 \n\t"\
: "+&r" (x), "+&r" (a), "+r" (c)\
: "r" (y), "r" (b), "r" (d)\
);
#endif /* HAVE_6REGS */
#endif /* HAVE_I686 */
#define MASK_ABS(mask, level) \
__asm__ ("cdq \n\t" \
"xorl %1, %0 \n\t" \
"subl %1, %0 \n\t" \
: "+a"(level), "=&d"(mask))
// avoid +32 for shift optimization (gcc should do that ...)
#define NEG_SSR32 NEG_SSR32
static inline int32_t NEG_SSR32( int32_t a, int8_t s){
__asm__ ("sarl %1, %0\n\t"
: "+r" (a)
: "ic" ((uint8_t)(-s))
);
return a;
}
#define NEG_USR32 NEG_USR32
static inline uint32_t NEG_USR32(uint32_t a, int8_t s){
__asm__ ("shrl %1, %0\n\t"
: "+r" (a)
: "ic" ((uint8_t)(-s))
);
return a;
}
#endif /* HAVE_INLINE_ASM */
#endif /* AVCODEC_X86_MATHOPS_H */
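The cmov chain in mid_pred above is a branch-free median of three; in portable C the same result is usually written with min/max. A hypothetical helper, not part of this header:
static inline int mid_pred_ref(int a, int b, int c)
{
    int mn = a < b ? a : b;           /* min(a, b) */
    int mx = a < b ? b : a;           /* max(a, b) */
    int t  = mx < c ? mx : c;         /* min(max(a, b), c) */
    return mn > t ? mn : t;           /* max(min(a, b), min(max(a, b), c)) */
}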

View File

@@ -0,0 +1,934 @@
;*****************************************************************************
;* SIMD-optimized motion compensation estimation
;*****************************************************************************
;* Copyright (c) 2000, 2001 Fabrice Bellard
;* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;*****************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION_RODATA
cextern pb_1
cextern pb_80
SECTION .text
%macro DIFF_PIXELS_1 4
movh %1, %3
movh %2, %4
punpcklbw %2, %1
punpcklbw %1, %1
psubw %1, %2
%endmacro
; %1=uint8_t *pix1, %2=uint8_t *pix2, %3=static offset, %4=stride, %5=stride*3
; %6=temporary storage location
; this macro requires $mmsize stack space (aligned) on %6 (except on SSE+x86-64)
%macro DIFF_PIXELS_8 6
DIFF_PIXELS_1 m0, m7, [%1 +%3], [%2 +%3]
DIFF_PIXELS_1 m1, m7, [%1+%4 +%3], [%2+%4 +%3]
DIFF_PIXELS_1 m2, m7, [%1+%4*2+%3], [%2+%4*2+%3]
add %1, %5
add %2, %5
DIFF_PIXELS_1 m3, m7, [%1 +%3], [%2 +%3]
DIFF_PIXELS_1 m4, m7, [%1+%4 +%3], [%2+%4 +%3]
DIFF_PIXELS_1 m5, m7, [%1+%4*2+%3], [%2+%4*2+%3]
DIFF_PIXELS_1 m6, m7, [%1+%5 +%3], [%2+%5 +%3]
%ifdef m8
DIFF_PIXELS_1 m7, m8, [%1+%4*4+%3], [%2+%4*4+%3]
%else
mova [%6], m0
DIFF_PIXELS_1 m7, m0, [%1+%4*4+%3], [%2+%4*4+%3]
mova m0, [%6]
%endif
sub %1, %5
sub %2, %5
%endmacro
%macro HADAMARD8 0
SUMSUB_BADC w, 0, 1, 2, 3
SUMSUB_BADC w, 4, 5, 6, 7
SUMSUB_BADC w, 0, 2, 1, 3
SUMSUB_BADC w, 4, 6, 5, 7
SUMSUB_BADC w, 0, 4, 1, 5
SUMSUB_BADC w, 2, 6, 3, 7
%endmacro
%macro ABS1_SUM 3
ABS1 %1, %2
paddusw %3, %1
%endmacro
%macro ABS2_SUM 6
ABS2 %1, %2, %3, %4
paddusw %5, %1
paddusw %6, %2
%endmacro
%macro ABS_SUM_8x8_64 1
ABS2 m0, m1, m8, m9
ABS2_SUM m2, m3, m8, m9, m0, m1
ABS2_SUM m4, m5, m8, m9, m0, m1
ABS2_SUM m6, m7, m8, m9, m0, m1
paddusw m0, m1
%endmacro
%macro ABS_SUM_8x8_32 1
mova [%1], m7
ABS1 m0, m7
ABS1 m1, m7
ABS1_SUM m2, m7, m0
ABS1_SUM m3, m7, m1
ABS1_SUM m4, m7, m0
ABS1_SUM m5, m7, m1
ABS1_SUM m6, m7, m0
mova m2, [%1]
ABS1_SUM m2, m7, m1
paddusw m0, m1
%endmacro
; FIXME: HSUM saturates at 64k, while an 8x8 hadamard or dct block can get up to
; about 100k on extreme inputs. But that's very unlikely to occur in natural video,
; and it's even more unlikely to not have any alternative mvs/modes with lower cost.
%macro HSUM 3
%if cpuflag(sse2)
movhlps %2, %1
paddusw %1, %2
pshuflw %2, %1, 0xE
paddusw %1, %2
pshuflw %2, %1, 0x1
paddusw %1, %2
movd %3, %1
%elif cpuflag(mmxext)
pshufw %2, %1, 0xE
paddusw %1, %2
pshufw %2, %1, 0x1
paddusw %1, %2
movd %3, %1
%elif cpuflag(mmx)
mova %2, %1
psrlq %1, 32
paddusw %1, %2
mova %2, %1
psrlq %1, 16
paddusw %1, %2
movd %3, %1
%endif
%endmacro
%macro STORE4 5
mova [%1+mmsize*0], %2
mova [%1+mmsize*1], %3
mova [%1+mmsize*2], %4
mova [%1+mmsize*3], %5
%endmacro
%macro LOAD4 5
mova %2, [%1+mmsize*0]
mova %3, [%1+mmsize*1]
mova %4, [%1+mmsize*2]
mova %5, [%1+mmsize*3]
%endmacro
%macro hadamard8_16_wrapper 2
cglobal hadamard8_diff, 4, 4, %1
%ifndef m8
%assign pad %2*mmsize-(4+stack_offset&(mmsize-1))
SUB rsp, pad
%endif
call hadamard8x8_diff %+ SUFFIX
%ifndef m8
ADD rsp, pad
%endif
RET
cglobal hadamard8_diff16, 5, 6, %1
%ifndef m8
%assign pad %2*mmsize-(4+stack_offset&(mmsize-1))
SUB rsp, pad
%endif
call hadamard8x8_diff %+ SUFFIX
mov r5d, eax
add r1, 8
add r2, 8
call hadamard8x8_diff %+ SUFFIX
add r5d, eax
cmp r4d, 16
jne .done
lea r1, [r1+r3*8-8]
lea r2, [r2+r3*8-8]
call hadamard8x8_diff %+ SUFFIX
add r5d, eax
add r1, 8
add r2, 8
call hadamard8x8_diff %+ SUFFIX
add r5d, eax
.done:
mov eax, r5d
%ifndef m8
ADD rsp, pad
%endif
RET
%endmacro
%macro HADAMARD8_DIFF 0-1
%if cpuflag(sse2)
hadamard8x8_diff %+ SUFFIX:
lea r0, [r3*3]
DIFF_PIXELS_8 r1, r2, 0, r3, r0, rsp+gprsize
HADAMARD8
%if ARCH_X86_64
TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, 8
%else
TRANSPOSE8x8W 0, 1, 2, 3, 4, 5, 6, 7, [rsp+gprsize], [rsp+mmsize+gprsize]
%endif
HADAMARD8
ABS_SUM_8x8 rsp+gprsize
HSUM m0, m1, eax
and eax, 0xFFFF
ret
hadamard8_16_wrapper %1, 3
%elif cpuflag(mmx)
ALIGN 16
; int ff_hadamard8_diff_ ## cpu(MpegEncContext *s, uint8_t *src1,
; uint8_t *src2, ptrdiff_t stride, int h)
; r0 = void *s = unused, int h = unused (always 8)
; note how r1, r2 and r3 are not clobbered in this function, so the 16x16
; version can simply call this 2x2 times (and that's why we access rsp+gprsize
; everywhere, which is the rsp of the calling function)
hadamard8x8_diff %+ SUFFIX:
lea r0, [r3*3]
; first 4x8 pixels
DIFF_PIXELS_8 r1, r2, 0, r3, r0, rsp+gprsize+0x60
HADAMARD8
mova [rsp+gprsize+0x60], m7
TRANSPOSE4x4W 0, 1, 2, 3, 7
STORE4 rsp+gprsize, m0, m1, m2, m3
mova m7, [rsp+gprsize+0x60]
TRANSPOSE4x4W 4, 5, 6, 7, 0
STORE4 rsp+gprsize+0x40, m4, m5, m6, m7
; second 4x8 pixels
DIFF_PIXELS_8 r1, r2, 4, r3, r0, rsp+gprsize+0x60
HADAMARD8
mova [rsp+gprsize+0x60], m7
TRANSPOSE4x4W 0, 1, 2, 3, 7
STORE4 rsp+gprsize+0x20, m0, m1, m2, m3
mova m7, [rsp+gprsize+0x60]
TRANSPOSE4x4W 4, 5, 6, 7, 0
LOAD4 rsp+gprsize+0x40, m0, m1, m2, m3
HADAMARD8
ABS_SUM_8x8_32 rsp+gprsize+0x60
mova [rsp+gprsize+0x60], m0
LOAD4 rsp+gprsize , m0, m1, m2, m3
LOAD4 rsp+gprsize+0x20, m4, m5, m6, m7
HADAMARD8
ABS_SUM_8x8_32 rsp+gprsize
paddusw m0, [rsp+gprsize+0x60]
HSUM m0, m1, eax
and rax, 0xFFFF
ret
hadamard8_16_wrapper 0, 14
%endif
%endmacro
INIT_MMX mmx
HADAMARD8_DIFF
INIT_MMX mmxext
HADAMARD8_DIFF
INIT_XMM sse2
%if ARCH_X86_64
%define ABS_SUM_8x8 ABS_SUM_8x8_64
%else
%define ABS_SUM_8x8 ABS_SUM_8x8_32
%endif
HADAMARD8_DIFF 10
INIT_XMM ssse3
%define ABS_SUM_8x8 ABS_SUM_8x8_64
HADAMARD8_DIFF 9
; int ff_sse*_*(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
; ptrdiff_t line_size, int h)
%macro SUM_SQUARED_ERRORS 1
cglobal sse%1, 5,5,8, v, pix1, pix2, lsize, h
%if %1 == mmsize
shr hd, 1
%endif
pxor m0, m0 ; mm0 = 0
pxor m7, m7 ; mm7 holds the sum
.next2lines: ; FIXME why are these unaligned movs? pix1[] is aligned
movu m1, [pix1q] ; m1 = pix1[0][0-15], [0-7] for mmx
movu m2, [pix2q] ; m2 = pix2[0][0-15], [0-7] for mmx
%if %1 == mmsize
movu m3, [pix1q+lsizeq] ; m3 = pix1[1][0-15], [0-7] for mmx
movu m4, [pix2q+lsizeq] ; m4 = pix2[1][0-15], [0-7] for mmx
%else ; %1 / 2 == mmsize; mmx only
mova m3, [pix1q+8] ; m3 = pix1[0][8-15]
mova m4, [pix2q+8] ; m4 = pix2[0][8-15]
%endif
; todo: mm1-mm2, mm3-mm4
; algo: subtract mm1 from mm2 with saturation and vice versa
; OR the result to get the absolute difference
mova m5, m1
mova m6, m3
psubusb m1, m2
psubusb m3, m4
psubusb m2, m5
psubusb m4, m6
por m2, m1
por m4, m3
; now convert to 16-bit vectors so we can square them
mova m1, m2
mova m3, m4
punpckhbw m2, m0
punpckhbw m4, m0
punpcklbw m1, m0 ; mm1 not spread over (mm1,mm2)
punpcklbw m3, m0 ; mm4 not spread over (mm3,mm4)
pmaddwd m2, m2
pmaddwd m4, m4
pmaddwd m1, m1
pmaddwd m3, m3
paddd m1, m2
paddd m3, m4
paddd m7, m1
paddd m7, m3
%if %1 == mmsize
lea pix1q, [pix1q + 2*lsizeq]
lea pix2q, [pix2q + 2*lsizeq]
%else
add pix1q, lsizeq
add pix2q, lsizeq
%endif
dec hd
jnz .next2lines
HADDD m7, m1
movd eax, m7 ; return value
RET
%endmacro
INIT_MMX mmx
SUM_SQUARED_ERRORS 8
INIT_MMX mmx
SUM_SQUARED_ERRORS 16
INIT_XMM sse2
SUM_SQUARED_ERRORS 16
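; In scalar terms, each of the three sse* entry points above returns
;     sum over h rows of sum over 8 (or 16) columns of
;         (pix1[x] - pix2[x]) * (pix1[x] - pix2[x])
; i.e. the sum of squared differences of the two blocks: the psubusb/por
; pair forms |pix1 - pix2| per byte, and pmaddwd squares and pair-sums it.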
;-----------------------------------------------
;int ff_sum_abs_dctelem(int16_t *block)
;-----------------------------------------------
; %1 = number of xmm registers used
; %2 = number of inline loops
%macro SUM_ABS_DCTELEM 2
cglobal sum_abs_dctelem, 1, 1, %1, block
pxor m0, m0
pxor m1, m1
%assign %%i 0
%rep %2
mova m2, [blockq+mmsize*(0+%%i)]
mova m3, [blockq+mmsize*(1+%%i)]
mova m4, [blockq+mmsize*(2+%%i)]
mova m5, [blockq+mmsize*(3+%%i)]
ABS1_SUM m2, m6, m0
ABS1_SUM m3, m6, m1
ABS1_SUM m4, m6, m0
ABS1_SUM m5, m6, m1
%assign %%i %%i+4
%endrep
paddusw m0, m1
HSUM m0, m1, eax
and eax, 0xFFFF
RET
%endmacro
INIT_MMX mmx
SUM_ABS_DCTELEM 0, 4
INIT_MMX mmxext
SUM_ABS_DCTELEM 0, 4
INIT_XMM sse2
SUM_ABS_DCTELEM 7, 2
INIT_XMM ssse3
SUM_ABS_DCTELEM 6, 2
;------------------------------------------------------------------------------
; int ff_hf_noise*_mmx(uint8_t *pix1, ptrdiff_t lsize, int h)
;------------------------------------------------------------------------------
; %1 = 8/16. %2-5=m#
%macro HF_NOISE_PART1 5
mova m%2, [pix1q]
%if %1 == 8
mova m%3, m%2
psllq m%2, 8
psrlq m%3, 8
psrlq m%2, 8
%else
mova m%3, [pix1q+1]
%endif
mova m%4, m%2
mova m%5, m%3
punpcklbw m%2, m7
punpcklbw m%3, m7
punpckhbw m%4, m7
punpckhbw m%5, m7
psubw m%2, m%3
psubw m%4, m%5
%endmacro
; %1-2 = m#
%macro HF_NOISE_PART2 4
psubw m%1, m%3
psubw m%2, m%4
pxor m3, m3
pxor m1, m1
pcmpgtw m3, m%1
pcmpgtw m1, m%2
pxor m%1, m3
pxor m%2, m1
psubw m%1, m3
psubw m%2, m1
paddw m%2, m%1
paddw m6, m%2
%endmacro
; %1 = 8/16
%macro HF_NOISE 1
cglobal hf_noise%1, 3,3,0, pix1, lsize, h
sub hd, 2
pxor m7, m7
pxor m6, m6
HF_NOISE_PART1 %1, 0, 1, 2, 3
add pix1q, lsizeq
HF_NOISE_PART1 %1, 4, 1, 5, 3
HF_NOISE_PART2 0, 2, 4, 5
add pix1q, lsizeq
.loop:
HF_NOISE_PART1 %1, 0, 1, 2, 3
HF_NOISE_PART2 4, 5, 0, 2
add pix1q, lsizeq
HF_NOISE_PART1 %1, 4, 1, 5, 3
HF_NOISE_PART2 0, 2, 4, 5
add pix1q, lsizeq
sub hd, 2
jne .loop
mova m0, m6
punpcklwd m0, m7
punpckhwd m6, m7
paddd m6, m0
mova m0, m6
psrlq m6, 32
paddd m0, m6
movd eax, m0 ; eax = result of hf_noise8;
REP_RET ; return eax;
%endmacro
INIT_MMX mmx
HF_NOISE 8
HF_NOISE 16
;---------------------------------------------------------------------------------------
;int ff_sad_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
;---------------------------------------------------------------------------------------
;%1 = 8/16
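; Plain SAD via psadbw, two rows per loop iteration. Rough scalar sketch
; ("bw" stands for the macro's 8/16 width parameter):
;     sum = 0;
;     for (y = 0; y < h; y++, pix1 += stride, pix2 += stride)
;         for (x = 0; x < bw; x++)
;             sum += FFABS(pix1[x] - pix2[x]);
;     return sum;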
%macro SAD 1
cglobal sad%1, 5, 5, 3, v, pix1, pix2, stride, h
movu m2, [pix2q]
movu m1, [pix2q+strideq]
psadbw m2, [pix1q]
psadbw m1, [pix1q+strideq]
paddw m2, m1
%if %1 != mmsize
movu m0, [pix2q+8]
movu m1, [pix2q+strideq+8]
psadbw m0, [pix1q+8]
psadbw m1, [pix1q+strideq+8]
paddw m2, m0
paddw m2, m1
%endif
sub hd, 2
align 16
.loop:
lea pix1q, [pix1q+strideq*2]
lea pix2q, [pix2q+strideq*2]
movu m0, [pix2q]
movu m1, [pix2q+strideq]
psadbw m0, [pix1q]
psadbw m1, [pix1q+strideq]
paddw m2, m0
paddw m2, m1
%if %1 != mmsize
movu m0, [pix2q+8]
movu m1, [pix2q+strideq+8]
psadbw m0, [pix1q+8]
psadbw m1, [pix1q+strideq+8]
paddw m2, m0
paddw m2, m1
%endif
sub hd, 2
jg .loop
%if mmsize == 16
movhlps m0, m2
paddw m2, m0
%endif
movd eax, m2
RET
%endmacro
INIT_MMX mmxext
SAD 8
SAD 16
INIT_XMM sse2
SAD 16
;------------------------------------------------------------------------------------------
;int ff_sad_x2_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
;------------------------------------------------------------------------------------------
;%1 = 8/16
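; SAD against the horizontal half-pel interpolation of pix2: each reference
; pixel is replaced by pavgb(pix2[x], pix2[x+1]) before the psadbw with pix1.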
%macro SAD_X2 1
cglobal sad%1_x2, 5, 5, 5, v, pix1, pix2, stride, h
movu m0, [pix2q]
movu m2, [pix2q+strideq]
%if mmsize == 16
movu m3, [pix2q+1]
movu m4, [pix2q+strideq+1]
pavgb m0, m3
pavgb m2, m4
%else
pavgb m0, [pix2q+1]
pavgb m2, [pix2q+strideq+1]
%endif
psadbw m0, [pix1q]
psadbw m2, [pix1q+strideq]
paddw m0, m2
%if %1 != mmsize
movu m1, [pix2q+8]
movu m2, [pix2q+strideq+8]
pavgb m1, [pix2q+9]
pavgb m2, [pix2q+strideq+9]
psadbw m1, [pix1q+8]
psadbw m2, [pix1q+strideq+8]
paddw m0, m1
paddw m0, m2
%endif
sub hd, 2
align 16
.loop:
lea pix1q, [pix1q+2*strideq]
lea pix2q, [pix2q+2*strideq]
movu m1, [pix2q]
movu m2, [pix2q+strideq]
%if mmsize == 16
movu m3, [pix2q+1]
movu m4, [pix2q+strideq+1]
pavgb m1, m3
pavgb m2, m4
%else
pavgb m1, [pix2q+1]
pavgb m2, [pix2q+strideq+1]
%endif
psadbw m1, [pix1q]
psadbw m2, [pix1q+strideq]
paddw m0, m1
paddw m0, m2
%if %1 != mmsize
movu m1, [pix2q+8]
movu m2, [pix2q+strideq+8]
pavgb m1, [pix2q+9]
pavgb m2, [pix2q+strideq+9]
psadbw m1, [pix1q+8]
psadbw m2, [pix1q+strideq+8]
paddw m0, m1
paddw m0, m2
%endif
sub hd, 2
jg .loop
%if mmsize == 16
movhlps m1, m0
paddw m0, m1
%endif
movd eax, m0
RET
%endmacro
INIT_MMX mmxext
SAD_X2 8
SAD_X2 16
INIT_XMM sse2
SAD_X2 16
;------------------------------------------------------------------------------------------
;int ff_sad_y2_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
;------------------------------------------------------------------------------------------
;%1 = 8/16
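; SAD against the vertical half-pel interpolation of pix2: each reference
; pixel is pavgb(pix2[y][x], pix2[y+1][x]); the last raw source row is
; carried in a register across iterations so each row is loaded only once.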
%macro SAD_Y2 1
cglobal sad%1_y2, 5, 5, 4, v, pix1, pix2, stride, h
movu m1, [pix2q]
movu m0, [pix2q+strideq]
movu m3, [pix2q+2*strideq]
pavgb m1, m0
pavgb m0, m3
psadbw m1, [pix1q]
psadbw m0, [pix1q+strideq]
paddw m0, m1
mova m1, m3
%if %1 != mmsize
movu m4, [pix2q+8]
movu m5, [pix2q+strideq+8]
movu m6, [pix2q+2*strideq+8]
pavgb m4, m5
pavgb m5, m6
psadbw m4, [pix1q+8]
psadbw m5, [pix1q+strideq+8]
paddw m0, m4
paddw m0, m5
mova m4, m6
%endif
add pix2q, strideq
sub hd, 2
align 16
.loop:
lea pix1q, [pix1q+2*strideq]
lea pix2q, [pix2q+2*strideq]
movu m2, [pix2q]
movu m3, [pix2q+strideq]
pavgb m1, m2
pavgb m2, m3
psadbw m1, [pix1q]
psadbw m2, [pix1q+strideq]
paddw m0, m1
paddw m0, m2
mova m1, m3
%if %1 != mmsize
movu m5, [pix2q+8]
movu m6, [pix2q+strideq+8]
pavgb m4, m5
pavgb m5, m6
psadbw m4, [pix1q+8]
psadbw m5, [pix1q+strideq+8]
paddw m0, m4
paddw m0, m5
mova m4, m6
%endif
sub hd, 2
jg .loop
%if mmsize == 16
movhlps m1, m0
paddw m0, m1
%endif
movd eax, m0
RET
%endmacro
INIT_MMX mmxext
SAD_Y2 8
SAD_Y2 16
INIT_XMM sse2
SAD_Y2 16
;-------------------------------------------------------------------------------------------
;int ff_sad_approx_xy2_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
;-------------------------------------------------------------------------------------------
;%1 = 8/16
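; Approximate SAD against the diagonal (x+y) half-pel position: rows are
; first averaged horizontally, then consecutive averaged rows are averaged
; vertically. The psubusb with pb_1 trims the double round-up of chaining
; two pavgb ops, which is why this variant is only hooked up when
; AV_CODEC_FLAG_BITEXACT is not set (see me_cmp_init.c).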
%macro SAD_APPROX_XY2 1
cglobal sad%1_approx_xy2, 5, 5, 7, v, pix1, pix2, stride, h
mova m4, [pb_1]
movu m1, [pix2q]
movu m0, [pix2q+strideq]
movu m3, [pix2q+2*strideq]
%if mmsize == 16
movu m5, [pix2q+1]
movu m6, [pix2q+strideq+1]
movu m2, [pix2q+2*strideq+1]
pavgb m1, m5
pavgb m0, m6
pavgb m3, m2
%else
pavgb m1, [pix2q+1]
pavgb m0, [pix2q+strideq+1]
pavgb m3, [pix2q+2*strideq+1]
%endif
psubusb m0, m4
pavgb m1, m0
pavgb m0, m3
psadbw m1, [pix1q]
psadbw m0, [pix1q+strideq]
paddw m0, m1
mova m1, m3
%if %1 != mmsize
movu m5, [pix2q+8]
movu m6, [pix2q+strideq+8]
movu m7, [pix2q+2*strideq+8]
pavgb m5, [pix2q+1+8]
pavgb m6, [pix2q+strideq+1+8]
pavgb m7, [pix2q+2*strideq+1+8]
psubusb m6, m4
pavgb m5, m6
pavgb m6, m7
psadbw m5, [pix1q+8]
psadbw m6, [pix1q+strideq+8]
paddw m0, m5
paddw m0, m6
mova m5, m7
%endif
add pix2q, strideq
sub hd, 2
align 16
.loop:
lea pix1q, [pix1q+2*strideq]
lea pix2q, [pix2q+2*strideq]
movu m2, [pix2q]
movu m3, [pix2q+strideq]
%if mmsize == 16
movu m5, [pix2q+1]
movu m6, [pix2q+strideq+1]
pavgb m2, m5
pavgb m3, m6
%else
pavgb m2, [pix2q+1]
pavgb m3, [pix2q+strideq+1]
%endif
psubusb m2, m4
pavgb m1, m2
pavgb m2, m3
psadbw m1, [pix1q]
psadbw m2, [pix1q+strideq]
paddw m0, m1
paddw m0, m2
mova m1, m3
%if %1 != mmsize
movu m6, [pix2q+8]
movu m7, [pix2q+strideq+8]
pavgb m6, [pix2q+8+1]
pavgb m7, [pix2q+strideq+8+1]
psubusb m6, m4
pavgb m5, m6
pavgb m6, m7
psadbw m5, [pix1q+8]
psadbw m6, [pix1q+strideq+8]
paddw m0, m5
paddw m0, m6
mova m5, m7
%endif
sub hd, 2
jg .loop
%if mmsize == 16
movhlps m1, m0
paddw m0, m1
%endif
movd eax, m0
RET
%endmacro
INIT_MMX mmxext
SAD_APPROX_XY2 8
SAD_APPROX_XY2 16
INIT_XMM sse2
SAD_APPROX_XY2 16
;--------------------------------------------------------------------
;int ff_vsad_intra(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
; ptrdiff_t line_size, int h);
;--------------------------------------------------------------------
; %1 = 8/16
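; Intra vertical SAD: accumulates |pix1[y][x] - pix1[y+1][x]| over the
; block, i.e. the activity between consecutive lines of one plane
; (pix2 is unused).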
%macro VSAD_INTRA 1
cglobal vsad_intra%1, 5, 5, 3, v, pix1, pix2, lsize, h
mova m0, [pix1q]
%if %1 == mmsize
mova m2, [pix1q+lsizeq]
psadbw m0, m2
%else
mova m2, [pix1q+lsizeq]
mova m3, [pix1q+8]
mova m4, [pix1q+lsizeq+8]
psadbw m0, m2
psadbw m3, m4
paddw m0, m3
%endif
sub hd, 2
.loop:
lea pix1q, [pix1q + 2*lsizeq]
%if %1 == mmsize
mova m1, [pix1q]
psadbw m2, m1
paddw m0, m2
mova m2, [pix1q+lsizeq]
psadbw m1, m2
paddw m0, m1
%else
mova m1, [pix1q]
mova m3, [pix1q+8]
psadbw m2, m1
psadbw m4, m3
paddw m0, m2
paddw m0, m4
mova m2, [pix1q+lsizeq]
mova m4, [pix1q+lsizeq+8]
psadbw m1, m2
psadbw m3, m4
paddw m0, m1
paddw m0, m3
%endif
sub hd, 2
jg .loop
%if mmsize == 16
pshufd m1, m0, 0xe
paddd m0, m1
%endif
movd eax, m0
RET
%endmacro
INIT_MMX mmxext
VSAD_INTRA 8
VSAD_INTRA 16
INIT_XMM sse2
VSAD_INTRA 16
;---------------------------------------------------------------------
;int ff_vsad_approx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
; ptrdiff_t line_size, int h);
;---------------------------------------------------------------------
; %1 = 8/16
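; Approximate inter vertical SAD: forms the per-line difference pix1 - pix2
; with wrapping psubb, biases it by 0x80 so psadbw can treat it as unsigned,
; and sums the absolute change of that difference between consecutive lines.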
%macro VSAD_APPROX 1
cglobal vsad%1_approx, 5, 5, 5, v, pix1, pix2, lsize, h
mova m1, [pb_80]
mova m0, [pix1q]
%if %1 == mmsize ; vsad8_mmxext, vsad16_sse2
mova m4, [pix1q+lsizeq]
%if mmsize == 16
movu m3, [pix2q]
movu m2, [pix2q+lsizeq]
psubb m0, m3
psubb m4, m2
%else
psubb m0, [pix2q]
psubb m4, [pix2q+lsizeq]
%endif
pxor m0, m1
pxor m4, m1
psadbw m0, m4
%else ; vsad16_mmxext
mova m3, [pix1q+8]
psubb m0, [pix2q]
psubb m3, [pix2q+8]
pxor m0, m1
pxor m3, m1
mova m4, [pix1q+lsizeq]
mova m5, [pix1q+lsizeq+8]
psubb m4, [pix2q+lsizeq]
psubb m5, [pix2q+lsizeq+8]
pxor m4, m1
pxor m5, m1
psadbw m0, m4
psadbw m3, m5
paddw m0, m3
%endif
sub hd, 2
.loop:
lea pix1q, [pix1q + 2*lsizeq]
lea pix2q, [pix2q + 2*lsizeq]
mova m2, [pix1q]
%if %1 == mmsize ; vsad8_mmxext, vsad16_sse2
%if mmsize == 16
movu m3, [pix2q]
psubb m2, m3
%else
psubb m2, [pix2q]
%endif
pxor m2, m1
psadbw m4, m2
paddw m0, m4
mova m4, [pix1q+lsizeq]
movu m3, [pix2q+lsizeq]
psubb m4, m3
pxor m4, m1
psadbw m2, m4
paddw m0, m2
%else ; vsad16_mmxext
mova m3, [pix1q+8]
psubb m2, [pix2q]
psubb m3, [pix2q+8]
pxor m2, m1
pxor m3, m1
psadbw m4, m2
psadbw m5, m3
paddw m0, m4
paddw m0, m5
mova m4, [pix1q+lsizeq]
mova m5, [pix1q+lsizeq+8]
psubb m4, [pix2q+lsizeq]
psubb m5, [pix2q+lsizeq+8]
pxor m4, m1
pxor m5, m1
psadbw m2, m4
psadbw m3, m5
paddw m0, m2
paddw m0, m3
%endif
sub hd, 2
jg .loop
%if mmsize == 16
pshufd m1, m0, 0xe
paddd m0, m1
%endif
movd eax, m0
RET
%endmacro
INIT_MMX mmxext
VSAD_APPROX 8
VSAD_APPROX 16
INIT_XMM sse2
VSAD_APPROX 16

View File

@@ -0,0 +1,651 @@
/*
* SIMD-optimized motion estimation
* Copyright (c) 2000, 2001 Fabrice Bellard
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
* MMX optimization by Nick Kurshev <nickols_k@mail.ru>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/me_cmp.h"
#include "libavcodec/mpegvideo.h"
int ff_sum_abs_dctelem_mmx(int16_t *block);
int ff_sum_abs_dctelem_mmxext(int16_t *block);
int ff_sum_abs_dctelem_sse2(int16_t *block);
int ff_sum_abs_dctelem_ssse3(int16_t *block);
int ff_sse8_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h);
int ff_sse16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h);
int ff_sse16_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h);
int ff_hf_noise8_mmx(uint8_t *pix1, ptrdiff_t stride, int h);
int ff_hf_noise16_mmx(uint8_t *pix1, ptrdiff_t stride, int h);
int ff_sad8_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h);
int ff_sad16_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h);
int ff_sad16_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h);
int ff_sad8_x2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h);
int ff_sad16_x2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h);
int ff_sad16_x2_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h);
int ff_sad8_y2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h);
int ff_sad16_y2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h);
int ff_sad16_y2_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h);
int ff_sad8_approx_xy2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h);
int ff_sad16_approx_xy2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h);
int ff_sad16_approx_xy2_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h);
int ff_vsad_intra8_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h);
int ff_vsad_intra16_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h);
int ff_vsad_intra16_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h);
int ff_vsad8_approx_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h);
int ff_vsad16_approx_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h);
int ff_vsad16_approx_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h);
#define hadamard_func(cpu) \
int ff_hadamard8_diff_ ## cpu(MpegEncContext *s, uint8_t *src1, \
uint8_t *src2, ptrdiff_t stride, int h); \
int ff_hadamard8_diff16_ ## cpu(MpegEncContext *s, uint8_t *src1, \
uint8_t *src2, ptrdiff_t stride, int h);
hadamard_func(mmx)
hadamard_func(mmxext)
hadamard_func(sse2)
hadamard_func(ssse3)
#if HAVE_YASM
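/* NSSE ("noise preserving" SSE) comparators: the plain SSE score plus the
 * absolute difference in high-frequency noise between the two blocks
 * (ff_hf_noise*), weighted by avctx->nsse_weight (8 when no context is set). */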
static int nsse16_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
                      ptrdiff_t stride, int h)
{
    int score1, score2;

    if (c)
        score1 = c->mecc.sse[0](c, pix1, pix2, stride, h);
    else
        score1 = ff_sse16_mmx(c, pix1, pix2, stride, h);
    score2 = ff_hf_noise16_mmx(pix1, stride, h) + ff_hf_noise8_mmx(pix1+8, stride, h)
           - ff_hf_noise16_mmx(pix2, stride, h) - ff_hf_noise8_mmx(pix2+8, stride, h);

    if (c)
        return score1 + FFABS(score2) * c->avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}

static int nsse8_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
                     ptrdiff_t stride, int h)
{
    int score1 = ff_sse8_mmx(c, pix1, pix2, stride, h);
    int score2 = ff_hf_noise8_mmx(pix1, stride, h) -
                 ff_hf_noise8_mmx(pix2, stride, h);

    if (c)
        return score1 + FFABS(score2) * c->avctx->nsse_weight;
    else
        return score1 + FFABS(score2) * 8;
}
#endif /* HAVE_YASM */
#if HAVE_INLINE_ASM
static int vsad_intra16_mmx(MpegEncContext *v, uint8_t *pix, uint8_t *dummy,
ptrdiff_t stride, int h)
{
int tmp;
av_assert2((((int) pix) & 7) == 0);
av_assert2((stride & 7) == 0);
#define SUM(in0, in1, out0, out1) \
"movq (%0), %%mm2\n" \
"movq 8(%0), %%mm3\n" \
"add %2,%0\n" \
"movq %%mm2, " #out0 "\n" \
"movq %%mm3, " #out1 "\n" \
"psubusb " #in0 ", %%mm2\n" \
"psubusb " #in1 ", %%mm3\n" \
"psubusb " #out0 ", " #in0 "\n" \
"psubusb " #out1 ", " #in1 "\n" \
"por %%mm2, " #in0 "\n" \
"por %%mm3, " #in1 "\n" \
"movq " #in0 ", %%mm2\n" \
"movq " #in1 ", %%mm3\n" \
"punpcklbw %%mm7, " #in0 "\n" \
"punpcklbw %%mm7, " #in1 "\n" \
"punpckhbw %%mm7, %%mm2\n" \
"punpckhbw %%mm7, %%mm3\n" \
"paddw " #in1 ", " #in0 "\n" \
"paddw %%mm3, %%mm2\n" \
"paddw %%mm2, " #in0 "\n" \
"paddw " #in0 ", %%mm6\n"
__asm__ volatile (
"movl %3, %%ecx\n"
"pxor %%mm6, %%mm6\n"
"pxor %%mm7, %%mm7\n"
"movq (%0), %%mm0\n"
"movq 8(%0), %%mm1\n"
"add %2, %0\n"
"jmp 2f\n"
"1:\n"
SUM(%%mm4, %%mm5, %%mm0, %%mm1)
"2:\n"
SUM(%%mm0, %%mm1, %%mm4, %%mm5)
"subl $2, %%ecx\n"
"jnz 1b\n"
"movq %%mm6, %%mm0\n"
"psrlq $32, %%mm6\n"
"paddw %%mm6, %%mm0\n"
"movq %%mm0, %%mm6\n"
"psrlq $16, %%mm0\n"
"paddw %%mm6, %%mm0\n"
"movd %%mm0, %1\n"
: "+r" (pix), "=r" (tmp)
: "r" (stride), "m" (h)
: "%ecx");
return tmp & 0xFFFF;
}
#undef SUM
static int vsad16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h)
{
int tmp;
av_assert2((((int) pix1) & 7) == 0);
av_assert2((((int) pix2) & 7) == 0);
av_assert2((stride & 7) == 0);
#define SUM(in0, in1, out0, out1) \
"movq (%0), %%mm2\n" \
"movq (%1), " #out0 "\n" \
"movq 8(%0), %%mm3\n" \
"movq 8(%1), " #out1 "\n" \
"add %3, %0\n" \
"add %3, %1\n" \
"psubb " #out0 ", %%mm2\n" \
"psubb " #out1 ", %%mm3\n" \
"pxor %%mm7, %%mm2\n" \
"pxor %%mm7, %%mm3\n" \
"movq %%mm2, " #out0 "\n" \
"movq %%mm3, " #out1 "\n" \
"psubusb " #in0 ", %%mm2\n" \
"psubusb " #in1 ", %%mm3\n" \
"psubusb " #out0 ", " #in0 "\n" \
"psubusb " #out1 ", " #in1 "\n" \
"por %%mm2, " #in0 "\n" \
"por %%mm3, " #in1 "\n" \
"movq " #in0 ", %%mm2\n" \
"movq " #in1 ", %%mm3\n" \
"punpcklbw %%mm7, " #in0 "\n" \
"punpcklbw %%mm7, " #in1 "\n" \
"punpckhbw %%mm7, %%mm2\n" \
"punpckhbw %%mm7, %%mm3\n" \
"paddw " #in1 ", " #in0 "\n" \
"paddw %%mm3, %%mm2\n" \
"paddw %%mm2, " #in0 "\n" \
"paddw " #in0 ", %%mm6\n"
__asm__ volatile (
"movl %4, %%ecx\n"
"pxor %%mm6, %%mm6\n"
"pcmpeqw %%mm7, %%mm7\n"
"psllw $15, %%mm7\n"
"packsswb %%mm7, %%mm7\n"
"movq (%0), %%mm0\n"
"movq (%1), %%mm2\n"
"movq 8(%0), %%mm1\n"
"movq 8(%1), %%mm3\n"
"add %3, %0\n"
"add %3, %1\n"
"psubb %%mm2, %%mm0\n"
"psubb %%mm3, %%mm1\n"
"pxor %%mm7, %%mm0\n"
"pxor %%mm7, %%mm1\n"
"jmp 2f\n"
"1:\n"
SUM(%%mm4, %%mm5, %%mm0, %%mm1)
"2:\n"
SUM(%%mm0, %%mm1, %%mm4, %%mm5)
"subl $2, %%ecx\n"
"jnz 1b\n"
"movq %%mm6, %%mm0\n"
"psrlq $32, %%mm6\n"
"paddw %%mm6, %%mm0\n"
"movq %%mm0, %%mm6\n"
"psrlq $16, %%mm0\n"
"paddw %%mm6, %%mm0\n"
"movd %%mm0, %2\n"
: "+r" (pix1), "+r" (pix2), "=r" (tmp)
: "r" (stride), "m" (h)
: "%ecx");
return tmp & 0x7FFF;
}
#undef SUM
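/* Rounding constants loaded into mm5 before the averaging steps of the
 * half-pel SAD helpers below: round_tab[1] for the 2-tap x2/y2 cases,
 * round_tab[2] for the 4-tap xy2 case. */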
DECLARE_ASM_CONST(8, uint64_t, round_tab)[3] = {
0x0000000000000000ULL,
0x0001000100010001ULL,
0x0002000200020002ULL,
};
static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2,
ptrdiff_t stride, int h)
{
x86_reg len = -stride * h;
__asm__ volatile (
".p2align 4 \n\t"
"1: \n\t"
"movq (%1, %%"REG_a"), %%mm0 \n\t"
"movq (%2, %%"REG_a"), %%mm2 \n\t"
"movq (%2, %%"REG_a"), %%mm4 \n\t"
"add %3, %%"REG_a" \n\t"
"psubusb %%mm0, %%mm2 \n\t"
"psubusb %%mm4, %%mm0 \n\t"
"movq (%1, %%"REG_a"), %%mm1 \n\t"
"movq (%2, %%"REG_a"), %%mm3 \n\t"
"movq (%2, %%"REG_a"), %%mm5 \n\t"
"psubusb %%mm1, %%mm3 \n\t"
"psubusb %%mm5, %%mm1 \n\t"
"por %%mm2, %%mm0 \n\t"
"por %%mm1, %%mm3 \n\t"
"movq %%mm0, %%mm1 \n\t"
"movq %%mm3, %%mm2 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpckhbw %%mm7, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm3 \n\t"
"punpckhbw %%mm7, %%mm2 \n\t"
"paddw %%mm1, %%mm0 \n\t"
"paddw %%mm3, %%mm2 \n\t"
"paddw %%mm2, %%mm0 \n\t"
"paddw %%mm0, %%mm6 \n\t"
"add %3, %%"REG_a" \n\t"
" js 1b \n\t"
: "+a" (len)
: "r" (blk1 - len), "r" (blk2 - len), "r" (stride));
}
static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2,
ptrdiff_t stride, int h)
{
x86_reg len = -stride * h;
__asm__ volatile (
".p2align 4 \n\t"
"1: \n\t"
"movq (%1, %%"REG_a"), %%mm0 \n\t"
"movq (%2, %%"REG_a"), %%mm1 \n\t"
"movq (%1, %%"REG_a"), %%mm2 \n\t"
"movq (%2, %%"REG_a"), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"punpckhbw %%mm7, %%mm2 \n\t"
"punpckhbw %%mm7, %%mm3 \n\t"
"paddw %%mm0, %%mm1 \n\t"
"paddw %%mm2, %%mm3 \n\t"
"movq (%3, %%"REG_a"), %%mm4 \n\t"
"movq (%3, %%"REG_a"), %%mm2 \n\t"
"paddw %%mm5, %%mm1 \n\t"
"paddw %%mm5, %%mm3 \n\t"
"psrlw $1, %%mm1 \n\t"
"psrlw $1, %%mm3 \n\t"
"packuswb %%mm3, %%mm1 \n\t"
"psubusb %%mm1, %%mm4 \n\t"
"psubusb %%mm2, %%mm1 \n\t"
"por %%mm4, %%mm1 \n\t"
"movq %%mm1, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpckhbw %%mm7, %%mm1 \n\t"
"paddw %%mm1, %%mm0 \n\t"
"paddw %%mm0, %%mm6 \n\t"
"add %4, %%"REG_a" \n\t"
" js 1b \n\t"
: "+a" (len)
: "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len),
"r" (stride));
}
static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2,
ptrdiff_t stride, int h)
{
x86_reg len = -stride * h;
__asm__ volatile (
"movq (%1, %%"REG_a"), %%mm0 \n\t"
"movq 1(%1, %%"REG_a"), %%mm2 \n\t"
"movq %%mm0, %%mm1 \n\t"
"movq %%mm2, %%mm3 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpckhbw %%mm7, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpckhbw %%mm7, %%mm3 \n\t"
"paddw %%mm2, %%mm0 \n\t"
"paddw %%mm3, %%mm1 \n\t"
".p2align 4 \n\t"
"1: \n\t"
"movq (%2, %%"REG_a"), %%mm2 \n\t"
"movq 1(%2, %%"REG_a"), %%mm4 \n\t"
"movq %%mm2, %%mm3 \n\t"
"movq %%mm4, %%mm5 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpckhbw %%mm7, %%mm3 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
"punpckhbw %%mm7, %%mm5 \n\t"
"paddw %%mm4, %%mm2 \n\t"
"paddw %%mm5, %%mm3 \n\t"
"movq %5, %%mm5 \n\t"
"paddw %%mm2, %%mm0 \n\t"
"paddw %%mm3, %%mm1 \n\t"
"paddw %%mm5, %%mm0 \n\t"
"paddw %%mm5, %%mm1 \n\t"
"movq (%3, %%"REG_a"), %%mm4 \n\t"
"movq (%3, %%"REG_a"), %%mm5 \n\t"
"psrlw $2, %%mm0 \n\t"
"psrlw $2, %%mm1 \n\t"
"packuswb %%mm1, %%mm0 \n\t"
"psubusb %%mm0, %%mm4 \n\t"
"psubusb %%mm5, %%mm0 \n\t"
"por %%mm4, %%mm0 \n\t"
"movq %%mm0, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpckhbw %%mm7, %%mm4 \n\t"
"paddw %%mm0, %%mm6 \n\t"
"paddw %%mm4, %%mm6 \n\t"
"movq %%mm2, %%mm0 \n\t"
"movq %%mm3, %%mm1 \n\t"
"add %4, %%"REG_a" \n\t"
" js 1b \n\t"
: "+a" (len)
: "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len),
"r" (stride), "m" (round_tab[2]));
}
static inline int sum_mmx(void)
{
int ret;
__asm__ volatile (
"movq %%mm6, %%mm0 \n\t"
"psrlq $32, %%mm6 \n\t"
"paddw %%mm0, %%mm6 \n\t"
"movq %%mm6, %%mm0 \n\t"
"psrlq $16, %%mm6 \n\t"
"paddw %%mm0, %%mm6 \n\t"
"movd %%mm6, %0 \n\t"
: "=r" (ret));
return ret & 0xFFFF;
}
static inline void sad8_x2a_mmx(uint8_t *blk1, uint8_t *blk2,
ptrdiff_t stride, int h)
{
sad8_2_mmx(blk1, blk1 + 1, blk2, stride, h);
}
static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2,
ptrdiff_t stride, int h)
{
sad8_2_mmx(blk1, blk1 + stride, blk2, stride, h);
}
#define PIX_SAD(suf) \
static int sad8_ ## suf(MpegEncContext *v, uint8_t *blk2, \
uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
av_assert2(h == 8); \
__asm__ volatile ( \
"pxor %%mm7, %%mm7 \n\t" \
"pxor %%mm6, %%mm6 \n\t" \
:); \
\
sad8_1_ ## suf(blk1, blk2, stride, 8); \
\
return sum_ ## suf(); \
} \
\
static int sad8_x2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
av_assert2(h == 8); \
__asm__ volatile ( \
"pxor %%mm7, %%mm7 \n\t" \
"pxor %%mm6, %%mm6 \n\t" \
"movq %0, %%mm5 \n\t" \
:: "m" (round_tab[1])); \
\
sad8_x2a_ ## suf(blk1, blk2, stride, 8); \
\
return sum_ ## suf(); \
} \
\
static int sad8_y2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
av_assert2(h == 8); \
__asm__ volatile ( \
"pxor %%mm7, %%mm7 \n\t" \
"pxor %%mm6, %%mm6 \n\t" \
"movq %0, %%mm5 \n\t" \
:: "m" (round_tab[1])); \
\
sad8_y2a_ ## suf(blk1, blk2, stride, 8); \
\
return sum_ ## suf(); \
} \
\
static int sad8_xy2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
av_assert2(h == 8); \
__asm__ volatile ( \
"pxor %%mm7, %%mm7 \n\t" \
"pxor %%mm6, %%mm6 \n\t" \
::); \
\
sad8_4_ ## suf(blk1, blk2, stride, 8); \
\
return sum_ ## suf(); \
} \
\
static int sad16_ ## suf(MpegEncContext *v, uint8_t *blk2, \
uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
__asm__ volatile ( \
"pxor %%mm7, %%mm7 \n\t" \
"pxor %%mm6, %%mm6 \n\t" \
:); \
\
sad8_1_ ## suf(blk1, blk2, stride, h); \
sad8_1_ ## suf(blk1 + 8, blk2 + 8, stride, h); \
\
return sum_ ## suf(); \
} \
\
static int sad16_x2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
__asm__ volatile ( \
"pxor %%mm7, %%mm7 \n\t" \
"pxor %%mm6, %%mm6 \n\t" \
"movq %0, %%mm5 \n\t" \
:: "m" (round_tab[1])); \
\
sad8_x2a_ ## suf(blk1, blk2, stride, h); \
sad8_x2a_ ## suf(blk1 + 8, blk2 + 8, stride, h); \
\
return sum_ ## suf(); \
} \
\
static int sad16_y2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
__asm__ volatile ( \
"pxor %%mm7, %%mm7 \n\t" \
"pxor %%mm6, %%mm6 \n\t" \
"movq %0, %%mm5 \n\t" \
:: "m" (round_tab[1])); \
\
sad8_y2a_ ## suf(blk1, blk2, stride, h); \
sad8_y2a_ ## suf(blk1 + 8, blk2 + 8, stride, h); \
\
return sum_ ## suf(); \
} \
\
static int sad16_xy2_ ## suf(MpegEncContext *v, uint8_t *blk2, \
uint8_t *blk1, ptrdiff_t stride, int h) \
{ \
__asm__ volatile ( \
"pxor %%mm7, %%mm7 \n\t" \
"pxor %%mm6, %%mm6 \n\t" \
::); \
\
sad8_4_ ## suf(blk1, blk2, stride, h); \
sad8_4_ ## suf(blk1 + 8, blk2 + 8, stride, h); \
\
return sum_ ## suf(); \
} \
PIX_SAD(mmx)
#endif /* HAVE_INLINE_ASM */
av_cold void ff_me_cmp_init_x86(MECmpContext *c, AVCodecContext *avctx)
{
    int cpu_flags = av_get_cpu_flags();

#if HAVE_INLINE_ASM
    if (INLINE_MMX(cpu_flags)) {
        c->pix_abs[0][0] = sad16_mmx;
        c->pix_abs[0][1] = sad16_x2_mmx;
        c->pix_abs[0][2] = sad16_y2_mmx;
        c->pix_abs[0][3] = sad16_xy2_mmx;
        c->pix_abs[1][0] = sad8_mmx;
        c->pix_abs[1][1] = sad8_x2_mmx;
        c->pix_abs[1][2] = sad8_y2_mmx;
        c->pix_abs[1][3] = sad8_xy2_mmx;

        c->sad[0] = sad16_mmx;
        c->sad[1] = sad8_mmx;

        c->vsad[4] = vsad_intra16_mmx;

        if (!(avctx->flags & AV_CODEC_FLAG_BITEXACT)) {
            c->vsad[0] = vsad16_mmx;
        }
    }
#endif /* HAVE_INLINE_ASM */

    if (EXTERNAL_MMX(cpu_flags)) {
        c->hadamard8_diff[0] = ff_hadamard8_diff16_mmx;
        c->hadamard8_diff[1] = ff_hadamard8_diff_mmx;
        c->sum_abs_dctelem   = ff_sum_abs_dctelem_mmx;
        c->sse[0]            = ff_sse16_mmx;
        c->sse[1]            = ff_sse8_mmx;
#if HAVE_YASM
        c->nsse[0]           = nsse16_mmx;
        c->nsse[1]           = nsse8_mmx;
#endif
    }

    if (EXTERNAL_MMXEXT(cpu_flags)) {
        c->hadamard8_diff[0] = ff_hadamard8_diff16_mmxext;
        c->hadamard8_diff[1] = ff_hadamard8_diff_mmxext;
        c->sum_abs_dctelem   = ff_sum_abs_dctelem_mmxext;

        c->sad[0] = ff_sad16_mmxext;
        c->sad[1] = ff_sad8_mmxext;

        c->pix_abs[0][0] = ff_sad16_mmxext;
        c->pix_abs[0][1] = ff_sad16_x2_mmxext;
        c->pix_abs[0][2] = ff_sad16_y2_mmxext;
        c->pix_abs[1][0] = ff_sad8_mmxext;
        c->pix_abs[1][1] = ff_sad8_x2_mmxext;
        c->pix_abs[1][2] = ff_sad8_y2_mmxext;

        c->vsad[4] = ff_vsad_intra16_mmxext;
        c->vsad[5] = ff_vsad_intra8_mmxext;

        if (!(avctx->flags & AV_CODEC_FLAG_BITEXACT)) {
            c->pix_abs[0][3] = ff_sad16_approx_xy2_mmxext;
            c->pix_abs[1][3] = ff_sad8_approx_xy2_mmxext;

            c->vsad[0] = ff_vsad16_approx_mmxext;
            c->vsad[1] = ff_vsad8_approx_mmxext;
        }
    }

    if (EXTERNAL_SSE2(cpu_flags)) {
        c->sse[0]          = ff_sse16_sse2;
        c->sum_abs_dctelem = ff_sum_abs_dctelem_sse2;

#if HAVE_ALIGNED_STACK
        c->hadamard8_diff[0] = ff_hadamard8_diff16_sse2;
        c->hadamard8_diff[1] = ff_hadamard8_diff_sse2;
#endif
        if (!(cpu_flags & AV_CPU_FLAG_SSE2SLOW) && avctx->codec_id != AV_CODEC_ID_SNOW) {
            c->sad[0]        = ff_sad16_sse2;
            c->pix_abs[0][0] = ff_sad16_sse2;
            c->pix_abs[0][1] = ff_sad16_x2_sse2;
            c->pix_abs[0][2] = ff_sad16_y2_sse2;

            c->vsad[4] = ff_vsad_intra16_sse2;
            if (!(avctx->flags & AV_CODEC_FLAG_BITEXACT)) {
                c->pix_abs[0][3] = ff_sad16_approx_xy2_sse2;
                c->vsad[0]       = ff_vsad16_approx_sse2;
            }
        }
    }

    if (EXTERNAL_SSSE3(cpu_flags)) {
        c->sum_abs_dctelem = ff_sum_abs_dctelem_ssse3;
#if HAVE_ALIGNED_STACK
        c->hadamard8_diff[0] = ff_hadamard8_diff16_ssse3;
        c->hadamard8_diff[1] = ff_hadamard8_diff_ssse3;
#endif
    }
}

View File

@@ -0,0 +1,196 @@
;******************************************************************************
;* SIMD-optimized MLP DSP functions
;* Copyright (c) 2014 James Almer <jamrial@gmail.com>
;*
;* This file is part of FFmpeg.
;*
;* FFmpeg is free software; you can redistribute it and/or
;* modify it under the terms of the GNU Lesser General Public
;* License as published by the Free Software Foundation; either
;* version 2.1 of the License, or (at your option) any later version.
;*
;* FFmpeg is distributed in the hope that it will be useful,
;* but WITHOUT ANY WARRANTY; without even the implied warranty of
;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
;* Lesser General Public License for more details.
;*
;* You should have received a copy of the GNU Lesser General Public
;* License along with FFmpeg; if not, write to the Free Software
;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
;******************************************************************************
%include "libavutil/x86/x86util.asm"
SECTION .text
%if ARCH_X86_64
%macro SHLX 2
%if cpuflag(bmi2)
shlx %1, %1, %2q
%else
shl %1, %2b
%endif
%endmacro
%macro REMATRIX 0
movdqa m0, [samplesq]
movdqa m1, [coeffsq ]
pshufd m2, m0, q2301
pshufd m3, m1, q2301
pmuldq m0, m1
pmuldq m3, m2
paddq m0, m3
%if notcpuflag(avx2)
movdqa m1, [samplesq + 16]
movdqa m2, [coeffsq + 16]
pshufd m3, m1, q2301
pshufd m4, m2, q2301
pmuldq m1, m2
pmuldq m4, m3
paddq m0, m1
paddq m0, m4
%else
vextracti128 xm1, m0, 1
paddq xm0, xm1
%endif
%endmacro
%macro LOOP_END 0
pshufd xm1, xm0, q0032
paddq xm0, xm1
movq accumq, xm0
movzx blsbsd, byte [blsbs_ptrq] ; load *bypassed_lsbs
sar accumq, 14 ; accum >>= 14
and accumd, maskd ; accum &= mask
add accumd, blsbsd ; accum += *bypassed_lsbs
mov [samplesq + dest_chq], accumd ; samples[dest_ch] = accum
add blsbs_ptrq, 8 ; bypassed_lsbs += MAX_CHANNELS;
add samplesq, 32 ; samples += MAX_CHANNELS;
cmp blsbs_ptrq, cntq
%endmacro
%macro LOOP_SHIFT_END 0
pshufd xm1, xm0, q0032
paddq xm0, xm1
movq accumq, xm0
and indexd, auspd ; index &= access_unit_size_pow2;
movsx noiseq, byte [noise_bufferq + indexq] ; load noise_buffer[index]
add indexd, index2d ; index += index2
SHLX noiseq, mns ; noise_buffer[index] <<= matrix_noise_shift
add accumq, noiseq ; accum += noise_buffer[index]
movzx noised, byte [blsbs_ptrq] ; load *bypassed_lsbs (reuse tmp noise register)
sar accumq, 14 ; accum >>= 14
and accumd, maskd ; accum &= mask
add accumd, noised ; accum += *bypassed_lsbs
mov [samplesq + dest_chq], accumd ; samples[dest_ch] = accum
add blsbs_ptrq, 8 ; bypassed_lsbs += MAX_CHANNELS;
add samplesq, 32 ; samples += MAX_CHANNELS;
cmp blsbs_ptrq, cntq
%endmacro
;void ff_mlp_rematrix_channel(int32_t *samples, const int32_t *coeffs,
; const uint8_t *bypassed_lsbs, const int8_t *noise_buffer,
; int index, unsigned int dest_ch, uint16_t blockpos,
; unsigned int maxchan, int matrix_noise_shift,
; int access_unit_size_pow2, int32_t mask)
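; For every output sample the kernel computes
;     accum = (sum over channels of samples[ch] * coeffs[ch]) >> 14
; optionally adding dither from noise_buffer (shifted left by
; matrix_noise_shift + 7) before the shift, then masks it, adds the
; bypassed LSB and stores it back into samples[dest_ch].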
%macro MLP_REMATRIX_CHANNEL 0
cglobal mlp_rematrix_channel, 0, 13, 5, samples, coeffs, blsbs_ptr, blsbs, \
index, dest_ch, blockpos, maxchan, mns, \
accum, mask, cnt
mov mnsd, mnsm ; load matrix_noise_shift
movzx blockposq, word blockposm ; load and zero extend blockpos (16bit)
mov maxchand, maxchanm ; load maxchan
mov maskd, maskm ; load mask
%if WIN64
mov dest_chd, dest_chm ; load dest_chd (not needed on UNIX64)
%endif
shl dest_chd, 2
lea cntq, [blsbs_ptrq + blockposq*8]
test mnsd, mnsd ; is matrix_noise_shift != 0?
jne .shift ; jump if true
cmp maxchand, 4 ; is maxchan < 4?
jl .loop4 ; jump if true
align 16
.loop8:
; Process 5 or more channels
REMATRIX
LOOP_END
jne .loop8
RET
align 16
.loop4:
; Process up to 4 channels
movdqa xm0, [samplesq]
movdqa xm1, [coeffsq ]
pshufd xm2, xm0, q2301
pshufd xm3, xm1, q2301
pmuldq xm0, xm1
pmuldq xm3, xm2
paddq xm0, xm3
LOOP_END
jne .loop4
RET
.shift:
%if WIN64
mov indexd, indexm ; load index (not needed on UNIX64)
%endif
mov r9d, r9m ; load access_unit_size_pow2
%if cpuflag(bmi2)
; bmi2 has shift functions that accept any gpr, not just cl, so keep things in place.
DEFINE_ARGS samples, coeffs, blsbs_ptr, noise_buffer, \
index, dest_ch, accum, index2, mns, \
ausp, mask, cnt, noise
add mnsd, 7 ; matrix_noise_shift += 7
%else ; sse4
mov r6, rcx ; move rcx elsewhere so we can use cl for matrix_noise_shift
%if WIN64
; r0 = rcx
DEFINE_ARGS mns, coeffs, blsbs_ptr, noise_buffer, index, dest_ch, samples, \
index2, accum, ausp, mask, cnt, noise
%else ; UNIX64
; r3 = rcx
DEFINE_ARGS samples, coeffs, blsbs_ptr, mns, index, dest_ch, noise_buffer, \
index2, accum, ausp, mask, cnt, noise
%endif
lea mnsd, [r8 + 7] ; rcx = matrix_noise_shift + 7
%endif ; cpuflag
sub auspd, 1 ; access_unit_size_pow2 -= 1
cmp r7d, 4 ; is maxchan < 4?
lea index2q, [indexq*2 + 1] ; index2 = 2 * index + 1;
jl .loop4_shift ; jump if maxchan < 4
align 16
.loop8_shift:
; Process 5 or more channels
REMATRIX
LOOP_SHIFT_END
jne .loop8_shift
RET
align 16
.loop4_shift:
; Process up to 4 channels
movdqa xm0, [samplesq]
movdqa xm1, [coeffsq ]
pshufd xm2, xm0, q2301
pshufd xm3, xm1, q2301
pmuldq xm0, xm1
pmuldq xm3, xm2
paddq xm0, xm3
LOOP_SHIFT_END
jne .loop4_shift
RET
%endmacro
INIT_XMM sse4
MLP_REMATRIX_CHANNEL
%if HAVE_AVX2_EXTERNAL
INIT_YMM avx2, bmi2
MLP_REMATRIX_CHANNEL
%endif
%endif ; ARCH_X86_64

View File

@@ -0,0 +1,204 @@
/*
* MLP DSP functions x86-optimized
* Copyright (c) 2009 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/mlpdsp.h"
#include "libavcodec/mlp.h"
#define REMATRIX_CHANNEL_FUNC(opt) \
void ff_mlp_rematrix_channel_##opt(int32_t *samples, \
const int32_t *coeffs, \
const uint8_t *bypassed_lsbs, \
const int8_t *noise_buffer, \
int index, \
unsigned int dest_ch, \
uint16_t blockpos, \
unsigned int maxchan, \
int matrix_noise_shift, \
int access_unit_size_pow2, \
int32_t mask);
REMATRIX_CHANNEL_FUNC(sse4)
REMATRIX_CHANNEL_FUNC(avx2_bmi2)
#if HAVE_7REGS && HAVE_INLINE_ASM && HAVE_INLINE_ASM_NONLOCAL_LABELS
extern char ff_mlp_firorder_8;
extern char ff_mlp_firorder_7;
extern char ff_mlp_firorder_6;
extern char ff_mlp_firorder_5;
extern char ff_mlp_firorder_4;
extern char ff_mlp_firorder_3;
extern char ff_mlp_firorder_2;
extern char ff_mlp_firorder_1;
extern char ff_mlp_firorder_0;
extern char ff_mlp_iirorder_4;
extern char ff_mlp_iirorder_3;
extern char ff_mlp_iirorder_2;
extern char ff_mlp_iirorder_1;
extern char ff_mlp_iirorder_0;
static const void * const firtable[9] = { &ff_mlp_firorder_0, &ff_mlp_firorder_1,
&ff_mlp_firorder_2, &ff_mlp_firorder_3,
&ff_mlp_firorder_4, &ff_mlp_firorder_5,
&ff_mlp_firorder_6, &ff_mlp_firorder_7,
&ff_mlp_firorder_8 };
static const void * const iirtable[5] = { &ff_mlp_iirorder_0, &ff_mlp_iirorder_1,
&ff_mlp_iirorder_2, &ff_mlp_iirorder_3,
&ff_mlp_iirorder_4 };
#if ARCH_X86_64
#define MLPMUL(label, offset, offs, offc) \
LABEL_MANGLE(label)": \n\t" \
"movslq "offset"+"offs"(%0), %%rax\n\t" \
"movslq "offset"+"offc"(%1), %%rdx\n\t" \
"imul %%rdx, %%rax\n\t" \
"add %%rax, %%rsi\n\t"
#define FIRMULREG(label, offset, firc)\
LABEL_MANGLE(label)": \n\t" \
"movslq "#offset"(%0), %%rax\n\t" \
"imul %"#firc", %%rax\n\t" \
"add %%rax, %%rsi\n\t"
#define CLEAR_ACCUM \
"xor %%rsi, %%rsi\n\t"
#define SHIFT_ACCUM \
"shr %%cl, %%rsi\n\t"
#define ACCUM "%%rdx"
#define RESULT "%%rsi"
#define RESULT32 "%%esi"
#else /* if ARCH_X86_32 */
#define MLPMUL(label, offset, offs, offc) \
LABEL_MANGLE(label)": \n\t" \
"mov "offset"+"offs"(%0), %%eax\n\t" \
"imull "offset"+"offc"(%1) \n\t" \
"add %%eax , %%esi\n\t" \
"adc %%edx , %%ecx\n\t"
#define FIRMULREG(label, offset, firc) \
MLPMUL(label, #offset, "0", "0")
#define CLEAR_ACCUM \
"xor %%esi, %%esi\n\t" \
"xor %%ecx, %%ecx\n\t"
#define SHIFT_ACCUM \
"mov %%ecx, %%edx\n\t" \
"mov %%esi, %%eax\n\t" \
"movzbl %7 , %%ecx\n\t" \
"shrd %%cl, %%edx, %%eax\n\t" \
#define ACCUM "%%edx"
#define RESULT "%%eax"
#define RESULT32 "%%eax"
#endif /* !ARCH_X86_64 */
#define BINC AV_STRINGIFY(4* MAX_CHANNELS)
#define IOFFS AV_STRINGIFY(4*(MAX_FIR_ORDER + MAX_BLOCKSIZE))
#define IOFFC AV_STRINGIFY(4* MAX_FIR_ORDER)
#define FIRMUL(label, offset) MLPMUL(label, #offset, "0", "0")
#define IIRMUL(label, offset) MLPMUL(label, #offset, IOFFS, IOFFC)
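/* mlp_filter_channel_x86 runs the combined FIR/IIR prediction filter over a
 * block of samples. firjump/iirjump point into an unrolled multiply-
 * accumulate chain (the ff_mlp_*order_* labels), so jumping to the label
 * matching the requested order executes exactly that many taps per sample. */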
static void mlp_filter_channel_x86(int32_t *state, const int32_t *coeff,
int firorder, int iirorder,
unsigned int filter_shift, int32_t mask,
int blocksize, int32_t *sample_buffer)
{
const void *firjump = firtable[firorder];
const void *iirjump = iirtable[iirorder];
blocksize = -blocksize;
__asm__ volatile(
"1: \n\t"
CLEAR_ACCUM
"jmp *%5 \n\t"
FIRMUL (ff_mlp_firorder_8, 0x1c )
FIRMUL (ff_mlp_firorder_7, 0x18 )
FIRMUL (ff_mlp_firorder_6, 0x14 )
FIRMUL (ff_mlp_firorder_5, 0x10 )
FIRMUL (ff_mlp_firorder_4, 0x0c )
FIRMUL (ff_mlp_firorder_3, 0x08 )
FIRMUL (ff_mlp_firorder_2, 0x04 )
FIRMULREG(ff_mlp_firorder_1, 0x00, 8)
LABEL_MANGLE(ff_mlp_firorder_0)":\n\t"
"jmp *%6 \n\t"
IIRMUL (ff_mlp_iirorder_4, 0x0c )
IIRMUL (ff_mlp_iirorder_3, 0x08 )
IIRMUL (ff_mlp_iirorder_2, 0x04 )
IIRMUL (ff_mlp_iirorder_1, 0x00 )
LABEL_MANGLE(ff_mlp_iirorder_0)":\n\t"
SHIFT_ACCUM
"mov "RESULT" ,"ACCUM" \n\t"
"add (%2) ,"RESULT" \n\t"
"and %4 ,"RESULT" \n\t"
"sub $4 , %0 \n\t"
"mov "RESULT32", (%0) \n\t"
"mov "RESULT32", (%2) \n\t"
"add $"BINC" , %2 \n\t"
"sub "ACCUM" ,"RESULT" \n\t"
"mov "RESULT32","IOFFS"(%0) \n\t"
"incl %3 \n\t"
"js 1b \n\t"
: /* 0*/"+r"(state),
/* 1*/"+r"(coeff),
/* 2*/"+r"(sample_buffer),
#if ARCH_X86_64
/* 3*/"+r"(blocksize)
: /* 4*/"r"((x86_reg)mask), /* 5*/"r"(firjump),
/* 6*/"r"(iirjump) , /* 7*/"c"(filter_shift)
, /* 8*/"r"((int64_t)coeff[0])
: "rax", "rdx", "rsi"
#else /* ARCH_X86_32 */
/* 3*/"+m"(blocksize)
: /* 4*/"m"( mask), /* 5*/"m"(firjump),
/* 6*/"m"(iirjump) , /* 7*/"m"(filter_shift)
: "eax", "edx", "esi", "ecx"
#endif /* !ARCH_X86_64 */
);
}
#endif /* HAVE_7REGS && HAVE_INLINE_ASM */
av_cold void ff_mlpdsp_init_x86(MLPDSPContext *c)
{
    int cpu_flags = av_get_cpu_flags();

#if HAVE_7REGS && HAVE_INLINE_ASM && HAVE_INLINE_ASM_NONLOCAL_LABELS
    if (INLINE_MMX(cpu_flags))
        c->mlp_filter_channel = mlp_filter_channel_x86;
#endif

    if (ARCH_X86_64 && EXTERNAL_SSE4(cpu_flags))
        c->mlp_rematrix_channel = ff_mlp_rematrix_channel_sse4;
    if (ARCH_X86_64 && EXTERNAL_AVX2(cpu_flags) && cpu_flags & AV_CPU_FLAG_BMI2)
        c->mlp_rematrix_channel = ff_mlp_rematrix_channel_avx2_bmi2;
}

View File

@@ -0,0 +1,289 @@
/*
* SIMD-optimized MP3 decoding functions
* Copyright (c) 2010 Vitor Sessak
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/internal.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/mpegaudiodsp.h"
#define DECL(CPU)\
static void imdct36_blocks_ ## CPU(float *out, float *buf, float *in, int count, int switch_point, int block_type);\
void ff_imdct36_float_ ## CPU(float *out, float *buf, float *in, float *win);
#if HAVE_YASM
#if ARCH_X86_32
DECL(sse)
#endif
DECL(sse2)
DECL(sse3)
DECL(ssse3)
DECL(avx)
#endif /* HAVE_YASM */
void ff_four_imdct36_float_sse(float *out, float *buf, float *in, float *win,
float *tmpbuf);
void ff_four_imdct36_float_avx(float *out, float *buf, float *in, float *win,
float *tmpbuf);
DECLARE_ALIGNED(16, static float, mdct_win_sse)[2][4][4*40];
#if HAVE_6REGS && HAVE_SSE_INLINE
#define MACS(rt, ra, rb) rt+=(ra)*(rb)
#define MLSS(rt, ra, rb) rt-=(ra)*(rb)
#define SUM8(op, sum, w, p) \
{ \
op(sum, (w)[0 * 64], (p)[0 * 64]); \
op(sum, (w)[1 * 64], (p)[1 * 64]); \
op(sum, (w)[2 * 64], (p)[2 * 64]); \
op(sum, (w)[3 * 64], (p)[3 * 64]); \
op(sum, (w)[4 * 64], (p)[4 * 64]); \
op(sum, (w)[5 * 64], (p)[5 * 64]); \
op(sum, (w)[6 * 64], (p)[6 * 64]); \
op(sum, (w)[7 * 64], (p)[7 * 64]); \
}
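/* apply_window computes two windowed sums per group of four outputs, each
 * over 8 taps spaced 64 floats apart (cf. SUM8 above), walking the buffers
 * with a negative byte offset that counts up to zero so one register serves
 * as both loop counter and index. */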
static void apply_window(const float *buf, const float *win1,
const float *win2, float *sum1, float *sum2, int len)
{
x86_reg count = - 4*len;
const float *win1a = win1+len;
const float *win2a = win2+len;
const float *bufa = buf+len;
float *sum1a = sum1+len;
float *sum2a = sum2+len;
#define MULT(a, b) \
"movaps " #a "(%1,%0), %%xmm1 \n\t" \
"movaps " #a "(%3,%0), %%xmm2 \n\t" \
"mulps %%xmm2, %%xmm1 \n\t" \
"subps %%xmm1, %%xmm0 \n\t" \
"mulps " #b "(%2,%0), %%xmm2 \n\t" \
"subps %%xmm2, %%xmm4 \n\t" \
__asm__ volatile(
"1: \n\t"
"xorps %%xmm0, %%xmm0 \n\t"
"xorps %%xmm4, %%xmm4 \n\t"
MULT( 0, 0)
MULT( 256, 64)
MULT( 512, 128)
MULT( 768, 192)
MULT(1024, 256)
MULT(1280, 320)
MULT(1536, 384)
MULT(1792, 448)
"movaps %%xmm0, (%4,%0) \n\t"
"movaps %%xmm4, (%5,%0) \n\t"
"add $16, %0 \n\t"
"jl 1b \n\t"
:"+&r"(count)
:"r"(win1a), "r"(win2a), "r"(bufa), "r"(sum1a), "r"(sum2a)
);
#undef MULT
}
static void apply_window_mp3(float *in, float *win, int *unused, float *out,
int incr)
{
LOCAL_ALIGNED_16(float, suma, [17]);
LOCAL_ALIGNED_16(float, sumb, [17]);
LOCAL_ALIGNED_16(float, sumc, [17]);
LOCAL_ALIGNED_16(float, sumd, [17]);
float sum;
/* copy to avoid wrap */
__asm__ volatile(
"movaps 0(%0), %%xmm0 \n\t" \
"movaps 16(%0), %%xmm1 \n\t" \
"movaps 32(%0), %%xmm2 \n\t" \
"movaps 48(%0), %%xmm3 \n\t" \
"movaps %%xmm0, 0(%1) \n\t" \
"movaps %%xmm1, 16(%1) \n\t" \
"movaps %%xmm2, 32(%1) \n\t" \
"movaps %%xmm3, 48(%1) \n\t" \
"movaps 64(%0), %%xmm0 \n\t" \
"movaps 80(%0), %%xmm1 \n\t" \
"movaps 96(%0), %%xmm2 \n\t" \
"movaps 112(%0), %%xmm3 \n\t" \
"movaps %%xmm0, 64(%1) \n\t" \
"movaps %%xmm1, 80(%1) \n\t" \
"movaps %%xmm2, 96(%1) \n\t" \
"movaps %%xmm3, 112(%1) \n\t"
::"r"(in), "r"(in+512)
:"memory"
);
apply_window(in + 16, win , win + 512, suma, sumc, 16);
apply_window(in + 32, win + 48, win + 640, sumb, sumd, 16);
SUM8(MACS, suma[0], win + 32, in + 48);
sumc[ 0] = 0;
sumb[16] = 0;
sumd[16] = 0;
#define SUMS(suma, sumb, sumc, sumd, out1, out2) \
"movups " #sumd "(%4), %%xmm0 \n\t" \
"shufps $0x1b, %%xmm0, %%xmm0 \n\t" \
"subps " #suma "(%1), %%xmm0 \n\t" \
"movaps %%xmm0," #out1 "(%0) \n\t" \
\
"movups " #sumc "(%3), %%xmm0 \n\t" \
"shufps $0x1b, %%xmm0, %%xmm0 \n\t" \
"addps " #sumb "(%2), %%xmm0 \n\t" \
"movaps %%xmm0," #out2 "(%0) \n\t"
if (incr == 1) {
__asm__ volatile(
SUMS( 0, 48, 4, 52, 0, 112)
SUMS(16, 32, 20, 36, 16, 96)
SUMS(32, 16, 36, 20, 32, 80)
SUMS(48, 0, 52, 4, 48, 64)
:"+&r"(out)
:"r"(&suma[0]), "r"(&sumb[0]), "r"(&sumc[0]), "r"(&sumd[0])
:"memory"
);
out += 16*incr;
} else {
int j;
float *out2 = out + 32 * incr;
out[0 ] = -suma[ 0];
out += incr;
out2 -= incr;
for(j=1;j<16;j++) {
*out = -suma[ j] + sumd[16-j];
*out2 = sumb[16-j] + sumc[ j];
out += incr;
out2 -= incr;
}
}
sum = 0;
SUM8(MLSS, sum, win + 16 + 32, in + 32);
*out = sum;
}
#endif /* HAVE_6REGS && HAVE_SSE_INLINE */
#if HAVE_YASM
#define DECL_IMDCT_BLOCKS(CPU1, CPU2) \
static void imdct36_blocks_ ## CPU1(float *out, float *buf, float *in, \
int count, int switch_point, int block_type) \
{ \
int align_end = count - (count & 3); \
int j; \
for (j = 0; j < align_end; j+= 4) { \
LOCAL_ALIGNED_16(float, tmpbuf, [1024]); \
float *win = mdct_win_sse[switch_point && j < 4][block_type]; \
/* apply window & overlap with previous buffer */ \
\
/* select window */ \
ff_four_imdct36_float_ ## CPU2(out, buf, in, win, tmpbuf); \
in += 4*18; \
buf += 4*18; \
out += 4; \
} \
for (; j < count; j++) { \
/* apply window & overlap with previous buffer */ \
\
/* select window */ \
int win_idx = (switch_point && j < 2) ? 0 : block_type; \
float *win = ff_mdct_win_float[win_idx + (4 & -(j & 1))]; \
\
ff_imdct36_float_ ## CPU1(out, buf, in, win); \
\
in += 18; \
buf++; \
out++; \
} \
}
#if HAVE_SSE
#if ARCH_X86_32
DECL_IMDCT_BLOCKS(sse,sse)
#endif
DECL_IMDCT_BLOCKS(sse2,sse)
DECL_IMDCT_BLOCKS(sse3,sse)
DECL_IMDCT_BLOCKS(ssse3,sse)
#endif
#if HAVE_AVX_EXTERNAL
DECL_IMDCT_BLOCKS(avx,avx)
#endif
#endif /* HAVE_YASM */
av_cold void ff_mpadsp_init_x86(MPADSPContext *s)
{
    int cpu_flags = av_get_cpu_flags();
    int i, j;

    for (j = 0; j < 4; j++) {
        for (i = 0; i < 40; i ++) {
            mdct_win_sse[0][j][4*i    ] = ff_mdct_win_float[j    ][i];
            mdct_win_sse[0][j][4*i + 1] = ff_mdct_win_float[j + 4][i];
            mdct_win_sse[0][j][4*i + 2] = ff_mdct_win_float[j    ][i];
            mdct_win_sse[0][j][4*i + 3] = ff_mdct_win_float[j + 4][i];
            mdct_win_sse[1][j][4*i    ] = ff_mdct_win_float[0    ][i];
            mdct_win_sse[1][j][4*i + 1] = ff_mdct_win_float[4    ][i];
            mdct_win_sse[1][j][4*i + 2] = ff_mdct_win_float[j    ][i];
            mdct_win_sse[1][j][4*i + 3] = ff_mdct_win_float[j + 4][i];
        }
    }

#if HAVE_6REGS && HAVE_SSE_INLINE
    if (INLINE_SSE(cpu_flags)) {
        s->apply_window_float = apply_window_mp3;
    }
#endif /* HAVE_SSE_INLINE */

#if HAVE_YASM
#if HAVE_SSE
#if ARCH_X86_32
    if (EXTERNAL_SSE(cpu_flags)) {
        s->imdct36_blocks_float = imdct36_blocks_sse;
    }
#endif
    if (EXTERNAL_SSE2(cpu_flags)) {
        s->imdct36_blocks_float = imdct36_blocks_sse2;
    }
    if (EXTERNAL_SSE3(cpu_flags)) {
        s->imdct36_blocks_float = imdct36_blocks_sse3;
    }
    if (EXTERNAL_SSSE3(cpu_flags)) {
        s->imdct36_blocks_float = imdct36_blocks_ssse3;
    }
#endif
#if HAVE_AVX_EXTERNAL
    if (EXTERNAL_AVX(cpu_flags)) {
        s->imdct36_blocks_float = imdct36_blocks_avx;
    }
#endif
#endif /* HAVE_YASM */
}

View File

@@ -0,0 +1,462 @@
/*
* Optimized for ia32 CPUs by Nick Kurshev <nickols_k@mail.ru>
* h263, mpeg1, mpeg2 dequantizer & draw_edges by Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/mpegvideo.h"
#if HAVE_MMX_INLINE
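/* The MMX dequantizers below mirror the C versions in mpegvideo.c. For the
 * H.263 variants the per-coefficient operation is roughly:
 *     if (block[i]) {
 *         level    = block[i] * qmul;                // qmul = 2 * qscale
 *         block[i] = block[i] > 0 ? level + qadd : level - qadd;
 *     }
 * with qadd = (qscale - 1) | 1 (0 for AIC intra blocks) and the intra DC
 * coefficient rescaled separately via y_dc_scale / c_dc_scale. */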
static void dct_unquantize_h263_intra_mmx(MpegEncContext *s,
int16_t *block, int n, int qscale)
{
x86_reg level, qmul, qadd, nCoeffs;
qmul = qscale << 1;
av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
if (!s->h263_aic) {
if (n < 4)
level = block[0] * s->y_dc_scale;
else
level = block[0] * s->c_dc_scale;
qadd = (qscale - 1) | 1;
}else{
qadd = 0;
level= block[0];
}
if(s->ac_pred)
nCoeffs=63;
else
nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
__asm__ volatile(
"movd %1, %%mm6 \n\t" //qmul
"packssdw %%mm6, %%mm6 \n\t"
"packssdw %%mm6, %%mm6 \n\t"
"movd %2, %%mm5 \n\t" //qadd
"pxor %%mm7, %%mm7 \n\t"
"packssdw %%mm5, %%mm5 \n\t"
"packssdw %%mm5, %%mm5 \n\t"
"psubw %%mm5, %%mm7 \n\t"
"pxor %%mm4, %%mm4 \n\t"
".p2align 4 \n\t"
"1: \n\t"
"movq (%0, %3), %%mm0 \n\t"
"movq 8(%0, %3), %%mm1 \n\t"
"pmullw %%mm6, %%mm0 \n\t"
"pmullw %%mm6, %%mm1 \n\t"
"movq (%0, %3), %%mm2 \n\t"
"movq 8(%0, %3), %%mm3 \n\t"
"pcmpgtw %%mm4, %%mm2 \n\t" // block[i] < 0 ? -1 : 0
"pcmpgtw %%mm4, %%mm3 \n\t" // block[i] < 0 ? -1 : 0
"pxor %%mm2, %%mm0 \n\t"
"pxor %%mm3, %%mm1 \n\t"
"paddw %%mm7, %%mm0 \n\t"
"paddw %%mm7, %%mm1 \n\t"
"pxor %%mm0, %%mm2 \n\t"
"pxor %%mm1, %%mm3 \n\t"
"pcmpeqw %%mm7, %%mm0 \n\t" // block[i] == 0 ? -1 : 0
"pcmpeqw %%mm7, %%mm1 \n\t" // block[i] == 0 ? -1 : 0
"pandn %%mm2, %%mm0 \n\t"
"pandn %%mm3, %%mm1 \n\t"
"movq %%mm0, (%0, %3) \n\t"
"movq %%mm1, 8(%0, %3) \n\t"
"add $16, %3 \n\t"
"jng 1b \n\t"
::"r" (block+nCoeffs), "rm"(qmul), "rm" (qadd), "r" (2*(-nCoeffs))
: "memory"
);
block[0]= level;
}
static void dct_unquantize_h263_inter_mmx(MpegEncContext *s,
int16_t *block, int n, int qscale)
{
x86_reg qmul, qadd, nCoeffs;
qmul = qscale << 1;
qadd = (qscale - 1) | 1;
av_assert2(s->block_last_index[n]>=0 || s->h263_aic);
nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
__asm__ volatile(
"movd %1, %%mm6 \n\t" //qmul
"packssdw %%mm6, %%mm6 \n\t"
"packssdw %%mm6, %%mm6 \n\t"
"movd %2, %%mm5 \n\t" //qadd
"pxor %%mm7, %%mm7 \n\t"
"packssdw %%mm5, %%mm5 \n\t"
"packssdw %%mm5, %%mm5 \n\t"
"psubw %%mm5, %%mm7 \n\t"
"pxor %%mm4, %%mm4 \n\t"
".p2align 4 \n\t"
"1: \n\t"
"movq (%0, %3), %%mm0 \n\t"
"movq 8(%0, %3), %%mm1 \n\t"
"pmullw %%mm6, %%mm0 \n\t"
"pmullw %%mm6, %%mm1 \n\t"
"movq (%0, %3), %%mm2 \n\t"
"movq 8(%0, %3), %%mm3 \n\t"
"pcmpgtw %%mm4, %%mm2 \n\t" // block[i] < 0 ? -1 : 0
"pcmpgtw %%mm4, %%mm3 \n\t" // block[i] < 0 ? -1 : 0
"pxor %%mm2, %%mm0 \n\t"
"pxor %%mm3, %%mm1 \n\t"
"paddw %%mm7, %%mm0 \n\t"
"paddw %%mm7, %%mm1 \n\t"
"pxor %%mm0, %%mm2 \n\t"
"pxor %%mm1, %%mm3 \n\t"
"pcmpeqw %%mm7, %%mm0 \n\t" // block[i] == 0 ? -1 : 0
"pcmpeqw %%mm7, %%mm1 \n\t" // block[i] == 0 ? -1 : 0
"pandn %%mm2, %%mm0 \n\t"
"pandn %%mm3, %%mm1 \n\t"
"movq %%mm0, (%0, %3) \n\t"
"movq %%mm1, 8(%0, %3) \n\t"
"add $16, %3 \n\t"
"jng 1b \n\t"
::"r" (block+nCoeffs), "rm"(qmul), "rm" (qadd), "r" (2*(-nCoeffs))
: "memory"
);
}
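/* MPEG-1 intra dequant: per nonzero coefficient, roughly
 *     level = (|block[i]| * qscale * quant_matrix[i]) >> 3
 * with the result forced odd (the psubw/por against mm7 below) before the
 * sign is restored; the DC coefficient is rescaled separately. */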
static void dct_unquantize_mpeg1_intra_mmx(MpegEncContext *s,
int16_t *block, int n, int qscale)
{
x86_reg nCoeffs;
const uint16_t *quant_matrix;
int block0;
av_assert2(s->block_last_index[n]>=0);
nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ]+1;
if (n < 4)
block0 = block[0] * s->y_dc_scale;
else
block0 = block[0] * s->c_dc_scale;
/* XXX: only mpeg1 */
quant_matrix = s->intra_matrix;
__asm__ volatile(
"pcmpeqw %%mm7, %%mm7 \n\t"
"psrlw $15, %%mm7 \n\t"
"movd %2, %%mm6 \n\t"
"packssdw %%mm6, %%mm6 \n\t"
"packssdw %%mm6, %%mm6 \n\t"
"mov %3, %%"REG_a" \n\t"
".p2align 4 \n\t"
"1: \n\t"
"movq (%0, %%"REG_a"), %%mm0 \n\t"
"movq 8(%0, %%"REG_a"), %%mm1 \n\t"
"movq (%1, %%"REG_a"), %%mm4 \n\t"
"movq 8(%1, %%"REG_a"), %%mm5 \n\t"
"pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i]
"pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i]
"pxor %%mm2, %%mm2 \n\t"
"pxor %%mm3, %%mm3 \n\t"
"pcmpgtw %%mm0, %%mm2 \n\t" // block[i] < 0 ? -1 : 0
"pcmpgtw %%mm1, %%mm3 \n\t" // block[i] < 0 ? -1 : 0
"pxor %%mm2, %%mm0 \n\t"
"pxor %%mm3, %%mm1 \n\t"
"psubw %%mm2, %%mm0 \n\t" // abs(block[i])
"psubw %%mm3, %%mm1 \n\t" // abs(block[i])
"pmullw %%mm4, %%mm0 \n\t" // abs(block[i])*q
"pmullw %%mm5, %%mm1 \n\t" // abs(block[i])*q
"pxor %%mm4, %%mm4 \n\t"
"pxor %%mm5, %%mm5 \n\t" // FIXME slow
"pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
"pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0
"psraw $3, %%mm0 \n\t"
"psraw $3, %%mm1 \n\t"
"psubw %%mm7, %%mm0 \n\t"
"psubw %%mm7, %%mm1 \n\t"
"por %%mm7, %%mm0 \n\t"
"por %%mm7, %%mm1 \n\t"
"pxor %%mm2, %%mm0 \n\t"
"pxor %%mm3, %%mm1 \n\t"
"psubw %%mm2, %%mm0 \n\t"
"psubw %%mm3, %%mm1 \n\t"
"pandn %%mm0, %%mm4 \n\t"
"pandn %%mm1, %%mm5 \n\t"
"movq %%mm4, (%0, %%"REG_a") \n\t"
"movq %%mm5, 8(%0, %%"REG_a") \n\t"
"add $16, %%"REG_a" \n\t"
"js 1b \n\t"
::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "g" (-2*nCoeffs)
: "%"REG_a, "memory"
);
block[0]= block0;
}
static void dct_unquantize_mpeg1_inter_mmx(MpegEncContext *s,
int16_t *block, int n, int qscale)
{
x86_reg nCoeffs;
const uint16_t *quant_matrix;
av_assert2(s->block_last_index[n]>=0);
nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ]+1;
quant_matrix = s->inter_matrix;
__asm__ volatile(
"pcmpeqw %%mm7, %%mm7 \n\t"
"psrlw $15, %%mm7 \n\t"
"movd %2, %%mm6 \n\t"
"packssdw %%mm6, %%mm6 \n\t"
"packssdw %%mm6, %%mm6 \n\t"
"mov %3, %%"REG_a" \n\t"
".p2align 4 \n\t"
"1: \n\t"
"movq (%0, %%"REG_a"), %%mm0 \n\t"
"movq 8(%0, %%"REG_a"), %%mm1 \n\t"
"movq (%1, %%"REG_a"), %%mm4 \n\t"
"movq 8(%1, %%"REG_a"), %%mm5 \n\t"
"pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i]
"pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i]
"pxor %%mm2, %%mm2 \n\t"
"pxor %%mm3, %%mm3 \n\t"
"pcmpgtw %%mm0, %%mm2 \n\t" // block[i] < 0 ? -1 : 0
"pcmpgtw %%mm1, %%mm3 \n\t" // block[i] < 0 ? -1 : 0
"pxor %%mm2, %%mm0 \n\t"
"pxor %%mm3, %%mm1 \n\t"
"psubw %%mm2, %%mm0 \n\t" // abs(block[i])
"psubw %%mm3, %%mm1 \n\t" // abs(block[i])
"paddw %%mm0, %%mm0 \n\t" // abs(block[i])*2
"paddw %%mm1, %%mm1 \n\t" // abs(block[i])*2
"paddw %%mm7, %%mm0 \n\t" // abs(block[i])*2 + 1
"paddw %%mm7, %%mm1 \n\t" // abs(block[i])*2 + 1
"pmullw %%mm4, %%mm0 \n\t" // (abs(block[i])*2 + 1)*q
"pmullw %%mm5, %%mm1 \n\t" // (abs(block[i])*2 + 1)*q
"pxor %%mm4, %%mm4 \n\t"
"pxor %%mm5, %%mm5 \n\t" // FIXME slow
"pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
"pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0
"psraw $4, %%mm0 \n\t"
"psraw $4, %%mm1 \n\t"
"psubw %%mm7, %%mm0 \n\t"
"psubw %%mm7, %%mm1 \n\t"
"por %%mm7, %%mm0 \n\t"
"por %%mm7, %%mm1 \n\t"
"pxor %%mm2, %%mm0 \n\t"
"pxor %%mm3, %%mm1 \n\t"
"psubw %%mm2, %%mm0 \n\t"
"psubw %%mm3, %%mm1 \n\t"
"pandn %%mm0, %%mm4 \n\t"
"pandn %%mm1, %%mm5 \n\t"
"movq %%mm4, (%0, %%"REG_a") \n\t"
"movq %%mm5, 8(%0, %%"REG_a") \n\t"
"add $16, %%"REG_a" \n\t"
"js 1b \n\t"
::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "g" (-2*nCoeffs)
: "%"REG_a, "memory"
);
}
static void dct_unquantize_mpeg2_intra_mmx(MpegEncContext *s,
int16_t *block, int n, int qscale)
{
x86_reg nCoeffs;
const uint16_t *quant_matrix;
int block0;
av_assert2(s->block_last_index[n]>=0);
if(s->alternate_scan) nCoeffs= 63; //FIXME
else nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];
if (n < 4)
block0 = block[0] * s->y_dc_scale;
else
block0 = block[0] * s->c_dc_scale;
quant_matrix = s->intra_matrix;
__asm__ volatile(
"pcmpeqw %%mm7, %%mm7 \n\t"
"psrlw $15, %%mm7 \n\t"
"movd %2, %%mm6 \n\t"
"packssdw %%mm6, %%mm6 \n\t"
"packssdw %%mm6, %%mm6 \n\t"
"mov %3, %%"REG_a" \n\t"
".p2align 4 \n\t"
"1: \n\t"
"movq (%0, %%"REG_a"), %%mm0 \n\t"
"movq 8(%0, %%"REG_a"), %%mm1 \n\t"
"movq (%1, %%"REG_a"), %%mm4 \n\t"
"movq 8(%1, %%"REG_a"), %%mm5 \n\t"
"pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i]
"pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i]
"pxor %%mm2, %%mm2 \n\t"
"pxor %%mm3, %%mm3 \n\t"
"pcmpgtw %%mm0, %%mm2 \n\t" // block[i] < 0 ? -1 : 0
"pcmpgtw %%mm1, %%mm3 \n\t" // block[i] < 0 ? -1 : 0
"pxor %%mm2, %%mm0 \n\t"
"pxor %%mm3, %%mm1 \n\t"
"psubw %%mm2, %%mm0 \n\t" // abs(block[i])
"psubw %%mm3, %%mm1 \n\t" // abs(block[i])
"pmullw %%mm4, %%mm0 \n\t" // abs(block[i])*q
"pmullw %%mm5, %%mm1 \n\t" // abs(block[i])*q
"pxor %%mm4, %%mm4 \n\t"
"pxor %%mm5, %%mm5 \n\t" // FIXME slow
"pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
"pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0
"psraw $3, %%mm0 \n\t"
"psraw $3, %%mm1 \n\t"
"pxor %%mm2, %%mm0 \n\t"
"pxor %%mm3, %%mm1 \n\t"
"psubw %%mm2, %%mm0 \n\t"
"psubw %%mm3, %%mm1 \n\t"
"pandn %%mm0, %%mm4 \n\t"
"pandn %%mm1, %%mm5 \n\t"
"movq %%mm4, (%0, %%"REG_a") \n\t"
"movq %%mm5, 8(%0, %%"REG_a") \n\t"
"add $16, %%"REG_a" \n\t"
"jng 1b \n\t"
::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "g" (-2*nCoeffs)
: "%"REG_a, "memory"
);
block[0]= block0;
//Note, we do not do mismatch control for intra as errors cannot accumulate
}
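/* MPEG-2 inter dequant: like the MPEG-1 inter version but without forcing
 * each value odd; instead mm7 collects the parity of all outputs and the
 * trailing code flips the LSB of block[63] based on that parity
 * (MPEG-2 mismatch control). */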
static void dct_unquantize_mpeg2_inter_mmx(MpegEncContext *s,
int16_t *block, int n, int qscale)
{
x86_reg nCoeffs;
const uint16_t *quant_matrix;
av_assert2(s->block_last_index[n]>=0);
if(s->alternate_scan) nCoeffs= 63; //FIXME
else nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];
quant_matrix = s->inter_matrix;
__asm__ volatile(
"pcmpeqw %%mm7, %%mm7 \n\t"
"psrlq $48, %%mm7 \n\t"
"movd %2, %%mm6 \n\t"
"packssdw %%mm6, %%mm6 \n\t"
"packssdw %%mm6, %%mm6 \n\t"
"mov %3, %%"REG_a" \n\t"
".p2align 4 \n\t"
"1: \n\t"
"movq (%0, %%"REG_a"), %%mm0 \n\t"
"movq 8(%0, %%"REG_a"), %%mm1 \n\t"
"movq (%1, %%"REG_a"), %%mm4 \n\t"
"movq 8(%1, %%"REG_a"), %%mm5 \n\t"
"pmullw %%mm6, %%mm4 \n\t" // q=qscale*quant_matrix[i]
"pmullw %%mm6, %%mm5 \n\t" // q=qscale*quant_matrix[i]
"pxor %%mm2, %%mm2 \n\t"
"pxor %%mm3, %%mm3 \n\t"
"pcmpgtw %%mm0, %%mm2 \n\t" // block[i] < 0 ? -1 : 0
"pcmpgtw %%mm1, %%mm3 \n\t" // block[i] < 0 ? -1 : 0
"pxor %%mm2, %%mm0 \n\t"
"pxor %%mm3, %%mm1 \n\t"
"psubw %%mm2, %%mm0 \n\t" // abs(block[i])
"psubw %%mm3, %%mm1 \n\t" // abs(block[i])
"paddw %%mm0, %%mm0 \n\t" // abs(block[i])*2
"paddw %%mm1, %%mm1 \n\t" // abs(block[i])*2
"pmullw %%mm4, %%mm0 \n\t" // abs(block[i])*2*q
"pmullw %%mm5, %%mm1 \n\t" // abs(block[i])*2*q
"paddw %%mm4, %%mm0 \n\t" // (abs(block[i])*2 + 1)*q
"paddw %%mm5, %%mm1 \n\t" // (abs(block[i])*2 + 1)*q
"pxor %%mm4, %%mm4 \n\t"
"pxor %%mm5, %%mm5 \n\t" // FIXME slow
"pcmpeqw (%0, %%"REG_a"), %%mm4 \n\t" // block[i] == 0 ? -1 : 0
"pcmpeqw 8(%0, %%"REG_a"), %%mm5\n\t" // block[i] == 0 ? -1 : 0
"psrlw $4, %%mm0 \n\t"
"psrlw $4, %%mm1 \n\t"
"pxor %%mm2, %%mm0 \n\t"
"pxor %%mm3, %%mm1 \n\t"
"psubw %%mm2, %%mm0 \n\t"
"psubw %%mm3, %%mm1 \n\t"
"pandn %%mm0, %%mm4 \n\t"
"pandn %%mm1, %%mm5 \n\t"
"pxor %%mm4, %%mm7 \n\t"
"pxor %%mm5, %%mm7 \n\t"
"movq %%mm4, (%0, %%"REG_a") \n\t"
"movq %%mm5, 8(%0, %%"REG_a") \n\t"
"add $16, %%"REG_a" \n\t"
"jng 1b \n\t"
"movd 124(%0, %3), %%mm0 \n\t"
"movq %%mm7, %%mm6 \n\t"
"psrlq $32, %%mm7 \n\t"
"pxor %%mm6, %%mm7 \n\t"
"movq %%mm7, %%mm6 \n\t"
"psrlq $16, %%mm7 \n\t"
"pxor %%mm6, %%mm7 \n\t"
"pslld $31, %%mm7 \n\t"
"psrlq $15, %%mm7 \n\t"
"pxor %%mm7, %%mm0 \n\t"
"movd %%mm0, 124(%0, %3) \n\t"
::"r" (block+nCoeffs), "r"(quant_matrix+nCoeffs), "rm" (qscale), "r" (-2*nCoeffs)
: "%"REG_a, "memory"
);
}
#endif /* HAVE_MMX_INLINE */
av_cold void ff_mpv_common_init_x86(MpegEncContext *s)
{
#if HAVE_MMX_INLINE
int cpu_flags = av_get_cpu_flags();
if (INLINE_MMX(cpu_flags)) {
s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_mmx;
s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_mmx;
s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_mmx;
s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_mmx;
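        /* dct_unquantize_mpeg2_intra_mmx skips MPEG-2 mismatch control (see the
         * note in that function), so it is only installed when bit-exact
         * operation was not requested. */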
if (!(s->avctx->flags & AV_CODEC_FLAG_BITEXACT))
s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_mmx;
s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_mmx;
}
#endif /* HAVE_MMX_INLINE */
}

View File

@@ -0,0 +1,161 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/mpegvideodsp.h"
#include "libavcodec/videodsp.h"
#if HAVE_INLINE_ASM
static void gmc_mmx(uint8_t *dst, uint8_t *src,
int stride, int h, int ox, int oy,
int dxx, int dxy, int dyx, int dyy,
int shift, int r, int width, int height)
{
const int w = 8;
const int ix = ox >> (16 + shift);
const int iy = oy >> (16 + shift);
const int oxs = ox >> 4;
const int oys = oy >> 4;
const int dxxs = dxx >> 4;
const int dxys = dxy >> 4;
const int dyxs = dyx >> 4;
const int dyys = dyy >> 4;
const uint16_t r4[4] = { r, r, r, r };
const uint16_t dxy4[4] = { dxys, dxys, dxys, dxys };
const uint16_t dyy4[4] = { dyys, dyys, dyys, dyys };
const uint64_t shift2 = 2 * shift;
#define MAX_STRIDE 4096U
#define MAX_H 8U
uint8_t edge_buf[(MAX_H + 1) * MAX_STRIDE];
int x, y;
const int dxw = (dxx - (1 << (16 + shift))) * (w - 1);
const int dyh = (dyy - (1 << (16 + shift))) * (h - 1);
const int dxh = dxy * (h - 1);
const int dyw = dyx * (w - 1);
int need_emu = (unsigned) ix >= width - w ||
(unsigned) iy >= height - h;
if ( // non-constant fullpel offset (3% of blocks)
((ox ^ (ox + dxw)) | (ox ^ (ox + dxh)) | (ox ^ (ox + dxw + dxh)) |
(oy ^ (oy + dyw)) | (oy ^ (oy + dyh)) | (oy ^ (oy + dyw + dyh))) >> (16 + shift) ||
// uses more than 16 bits of subpel mv (only at huge resolution)
(dxx | dxy | dyx | dyy) & 15 ||
(need_emu && (h > MAX_H || stride > MAX_STRIDE))) {
// FIXME could still use mmx for some of the rows
ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy,
shift, r, width, height);
return;
}
src += ix + iy * stride;
if (need_emu) {
ff_emulated_edge_mc_8(edge_buf, src, stride, stride, w + 1, h + 1, ix, iy, width, height);
src = edge_buf;
}
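    /* Per output pixel the inner asm computes the usual bilinear blend, with
     * s = 1 << shift and dx/dy the fractional part of the source position
     * (a sketch of the scalar form):
     *
     *     dst[x + y * stride] = ( src[0]          * (s - dx) * (s - dy)
     *                           + src[1]          * dx       * (s - dy)
     *                           + src[stride]     * (s - dx) * dy
     *                           + src[stride + 1] * dx       * dy
     *                           + r ) >> (2 * shift);
     *
     * dx4[]/dy4[] below track those fractional positions for four neighbouring
     * output pixels and are advanced by dxy4/dyy4 once per output row. */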
__asm__ volatile (
"movd %0, %%mm6 \n\t"
"pxor %%mm7, %%mm7 \n\t"
"punpcklwd %%mm6, %%mm6 \n\t"
"punpcklwd %%mm6, %%mm6 \n\t"
:: "r" (1 << shift));
for (x = 0; x < w; x += 4) {
uint16_t dx4[4] = { oxs - dxys + dxxs * (x + 0),
oxs - dxys + dxxs * (x + 1),
oxs - dxys + dxxs * (x + 2),
oxs - dxys + dxxs * (x + 3) };
uint16_t dy4[4] = { oys - dyys + dyxs * (x + 0),
oys - dyys + dyxs * (x + 1),
oys - dyys + dyxs * (x + 2),
oys - dyys + dyxs * (x + 3) };
for (y = 0; y < h; y++) {
__asm__ volatile (
"movq %0, %%mm4 \n\t"
"movq %1, %%mm5 \n\t"
"paddw %2, %%mm4 \n\t"
"paddw %3, %%mm5 \n\t"
"movq %%mm4, %0 \n\t"
"movq %%mm5, %1 \n\t"
"psrlw $12, %%mm4 \n\t"
"psrlw $12, %%mm5 \n\t"
: "+m" (*dx4), "+m" (*dy4)
: "m" (*dxy4), "m" (*dyy4));
__asm__ volatile (
"movq %%mm6, %%mm2 \n\t"
"movq %%mm6, %%mm1 \n\t"
"psubw %%mm4, %%mm2 \n\t"
"psubw %%mm5, %%mm1 \n\t"
"movq %%mm2, %%mm0 \n\t"
"movq %%mm4, %%mm3 \n\t"
"pmullw %%mm1, %%mm0 \n\t" // (s - dx) * (s - dy)
"pmullw %%mm5, %%mm3 \n\t" // dx * dy
"pmullw %%mm5, %%mm2 \n\t" // (s - dx) * dy
"pmullw %%mm4, %%mm1 \n\t" // dx * (s - dy)
"movd %4, %%mm5 \n\t"
"movd %3, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm5 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
"pmullw %%mm5, %%mm3 \n\t" // src[1, 1] * dx * dy
"pmullw %%mm4, %%mm2 \n\t" // src[0, 1] * (s - dx) * dy
"movd %2, %%mm5 \n\t"
"movd %1, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm5 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
"pmullw %%mm5, %%mm1 \n\t" // src[1, 0] * dx * (s - dy)
"pmullw %%mm4, %%mm0 \n\t" // src[0, 0] * (s - dx) * (s - dy)
"paddw %5, %%mm1 \n\t"
"paddw %%mm3, %%mm2 \n\t"
"paddw %%mm1, %%mm0 \n\t"
"paddw %%mm2, %%mm0 \n\t"
"psrlw %6, %%mm0 \n\t"
"packuswb %%mm0, %%mm0 \n\t"
"movd %%mm0, %0 \n\t"
: "=m" (dst[x + y * stride])
: "m" (src[0]), "m" (src[1]),
"m" (src[stride]), "m" (src[stride + 1]),
"m" (*r4), "m" (shift2));
src += stride;
}
src += 4 - h * stride;
}
}
#endif /* HAVE_INLINE_ASM */
av_cold void ff_mpegvideodsp_init_x86(MpegVideoDSPContext *c)
{
#if HAVE_INLINE_ASM
int cpu_flags = av_get_cpu_flags();
if (INLINE_MMX(cpu_flags))
c->gmc = gmc_mmx;
#endif /* HAVE_INLINE_ASM */
}

View File

@@ -0,0 +1,235 @@
/*
* The simplest mpeg encoder (well, it was the simplest!)
* Copyright (c) 2000,2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/dct.h"
#include "libavcodec/mpegvideo.h"
/* not permutated inverse zigzag_direct + 1 for MMX quantizer */
DECLARE_ALIGNED(16, static uint16_t, inv_zigzag_direct16)[64];
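/* mpegvideoenc_template.c is included once per instruction set below; the
 * COMPILE_TEMPLATE_* macros select the SIMD flavour inside the template,
 * RENAME() gives each dct_quantize variant a unique suffix (_mmx, _mmxext,
 * _sse2, _ssse3) and RENAME_FDCT() selects the matching forward DCT. */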
#if HAVE_6REGS
#if HAVE_MMX_INLINE
#define COMPILE_TEMPLATE_MMXEXT 0
#define COMPILE_TEMPLATE_SSE2 0
#define COMPILE_TEMPLATE_SSSE3 0
#define RENAME(a) a ## _mmx
#define RENAME_FDCT(a) a ## _mmx
#include "mpegvideoenc_template.c"
#endif /* HAVE_MMX_INLINE */
#if HAVE_MMXEXT_INLINE
#undef COMPILE_TEMPLATE_SSSE3
#undef COMPILE_TEMPLATE_SSE2
#undef COMPILE_TEMPLATE_MMXEXT
#define COMPILE_TEMPLATE_MMXEXT 1
#define COMPILE_TEMPLATE_SSE2 0
#define COMPILE_TEMPLATE_SSSE3 0
#undef RENAME
#undef RENAME_FDCT
#define RENAME(a) a ## _mmxext
#define RENAME_FDCT(a) a ## _mmxext
#include "mpegvideoenc_template.c"
#endif /* HAVE_MMXEXT_INLINE */
#if HAVE_SSE2_INLINE
#undef COMPILE_TEMPLATE_MMXEXT
#undef COMPILE_TEMPLATE_SSE2
#undef COMPILE_TEMPLATE_SSSE3
#define COMPILE_TEMPLATE_MMXEXT 0
#define COMPILE_TEMPLATE_SSE2 1
#define COMPILE_TEMPLATE_SSSE3 0
#undef RENAME
#undef RENAME_FDCT
#define RENAME(a) a ## _sse2
#define RENAME_FDCT(a) a ## _sse2
#include "mpegvideoenc_template.c"
#endif /* HAVE_SSE2_INLINE */
#if HAVE_SSSE3_INLINE
#undef COMPILE_TEMPLATE_MMXEXT
#undef COMPILE_TEMPLATE_SSE2
#undef COMPILE_TEMPLATE_SSSE3
#define COMPILE_TEMPLATE_MMXEXT 0
#define COMPILE_TEMPLATE_SSE2 1
#define COMPILE_TEMPLATE_SSSE3 1
#undef RENAME
#undef RENAME_FDCT
#define RENAME(a) a ## _ssse3
#define RENAME_FDCT(a) a ## _sse2
#include "mpegvideoenc_template.c"
#endif /* HAVE_SSSE3_INLINE */
#endif /* HAVE_6REGS */
#if HAVE_INLINE_ASM
static void denoise_dct_mmx(MpegEncContext *s, int16_t *block){
const int intra= s->mb_intra;
int *sum= s->dct_error_sum[intra];
uint16_t *offset= s->dct_offset[intra];
s->dct_count[intra]++;
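    /* Scalar sketch of the loop below (the asm handles 8 coefficients per
     * iteration):
     *
     *     for (i = 0; i < 64; i++) {
     *         int level = FFABS(block[i]);
     *         sum[i]   += level;                                  // error statistics
     *         block[i]  = (block[i] < 0 ? -1 : 1) * FFMAX(level - offset[i], 0);
     *     }
     */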
__asm__ volatile(
"pxor %%mm7, %%mm7 \n\t"
"1: \n\t"
"pxor %%mm0, %%mm0 \n\t"
"pxor %%mm1, %%mm1 \n\t"
"movq (%0), %%mm2 \n\t"
"movq 8(%0), %%mm3 \n\t"
"pcmpgtw %%mm2, %%mm0 \n\t"
"pcmpgtw %%mm3, %%mm1 \n\t"
"pxor %%mm0, %%mm2 \n\t"
"pxor %%mm1, %%mm3 \n\t"
"psubw %%mm0, %%mm2 \n\t"
"psubw %%mm1, %%mm3 \n\t"
"movq %%mm2, %%mm4 \n\t"
"movq %%mm3, %%mm5 \n\t"
"psubusw (%2), %%mm2 \n\t"
"psubusw 8(%2), %%mm3 \n\t"
"pxor %%mm0, %%mm2 \n\t"
"pxor %%mm1, %%mm3 \n\t"
"psubw %%mm0, %%mm2 \n\t"
"psubw %%mm1, %%mm3 \n\t"
"movq %%mm2, (%0) \n\t"
"movq %%mm3, 8(%0) \n\t"
"movq %%mm4, %%mm2 \n\t"
"movq %%mm5, %%mm3 \n\t"
"punpcklwd %%mm7, %%mm4 \n\t"
"punpckhwd %%mm7, %%mm2 \n\t"
"punpcklwd %%mm7, %%mm5 \n\t"
"punpckhwd %%mm7, %%mm3 \n\t"
"paddd (%1), %%mm4 \n\t"
"paddd 8(%1), %%mm2 \n\t"
"paddd 16(%1), %%mm5 \n\t"
"paddd 24(%1), %%mm3 \n\t"
"movq %%mm4, (%1) \n\t"
"movq %%mm2, 8(%1) \n\t"
"movq %%mm5, 16(%1) \n\t"
"movq %%mm3, 24(%1) \n\t"
"add $16, %0 \n\t"
"add $32, %1 \n\t"
"add $16, %2 \n\t"
"cmp %3, %0 \n\t"
" jb 1b \n\t"
: "+r" (block), "+r" (sum), "+r" (offset)
: "r"(block+64)
);
}
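/* Same algorithm as denoise_dct_mmx above, but processing 16 coefficients per
 * iteration with SSE2 registers. */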
static void denoise_dct_sse2(MpegEncContext *s, int16_t *block){
const int intra= s->mb_intra;
int *sum= s->dct_error_sum[intra];
uint16_t *offset= s->dct_offset[intra];
s->dct_count[intra]++;
__asm__ volatile(
"pxor %%xmm7, %%xmm7 \n\t"
"1: \n\t"
"pxor %%xmm0, %%xmm0 \n\t"
"pxor %%xmm1, %%xmm1 \n\t"
"movdqa (%0), %%xmm2 \n\t"
"movdqa 16(%0), %%xmm3 \n\t"
"pcmpgtw %%xmm2, %%xmm0 \n\t"
"pcmpgtw %%xmm3, %%xmm1 \n\t"
"pxor %%xmm0, %%xmm2 \n\t"
"pxor %%xmm1, %%xmm3 \n\t"
"psubw %%xmm0, %%xmm2 \n\t"
"psubw %%xmm1, %%xmm3 \n\t"
"movdqa %%xmm2, %%xmm4 \n\t"
"movdqa %%xmm3, %%xmm5 \n\t"
"psubusw (%2), %%xmm2 \n\t"
"psubusw 16(%2), %%xmm3 \n\t"
"pxor %%xmm0, %%xmm2 \n\t"
"pxor %%xmm1, %%xmm3 \n\t"
"psubw %%xmm0, %%xmm2 \n\t"
"psubw %%xmm1, %%xmm3 \n\t"
"movdqa %%xmm2, (%0) \n\t"
"movdqa %%xmm3, 16(%0) \n\t"
"movdqa %%xmm4, %%xmm6 \n\t"
"movdqa %%xmm5, %%xmm0 \n\t"
"punpcklwd %%xmm7, %%xmm4 \n\t"
"punpckhwd %%xmm7, %%xmm6 \n\t"
"punpcklwd %%xmm7, %%xmm5 \n\t"
"punpckhwd %%xmm7, %%xmm0 \n\t"
"paddd (%1), %%xmm4 \n\t"
"paddd 16(%1), %%xmm6 \n\t"
"paddd 32(%1), %%xmm5 \n\t"
"paddd 48(%1), %%xmm0 \n\t"
"movdqa %%xmm4, (%1) \n\t"
"movdqa %%xmm6, 16(%1) \n\t"
"movdqa %%xmm5, 32(%1) \n\t"
"movdqa %%xmm0, 48(%1) \n\t"
"add $32, %0 \n\t"
"add $64, %1 \n\t"
"add $32, %2 \n\t"
"cmp %3, %0 \n\t"
" jb 1b \n\t"
: "+r" (block), "+r" (sum), "+r" (offset)
: "r"(block+64)
XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3",
"%xmm4", "%xmm5", "%xmm6", "%xmm7")
);
}
#endif /* HAVE_INLINE_ASM */
av_cold void ff_dct_encode_init_x86(MpegEncContext *s)
{
const int dct_algo = s->avctx->dct_algo;
int i;
for (i = 0; i < 64; i++)
inv_zigzag_direct16[ff_zigzag_direct[i]] = i + 1;
if (dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX) {
#if HAVE_MMX_INLINE
int cpu_flags = av_get_cpu_flags();
if (INLINE_MMX(cpu_flags)) {
#if HAVE_6REGS
s->dct_quantize = dct_quantize_mmx;
#endif
s->denoise_dct = denoise_dct_mmx;
}
#endif
#if HAVE_6REGS && HAVE_MMXEXT_INLINE
if (INLINE_MMXEXT(cpu_flags))
s->dct_quantize = dct_quantize_mmxext;
#endif
#if HAVE_SSE2_INLINE
if (INLINE_SSE2(cpu_flags)) {
#if HAVE_6REGS
s->dct_quantize = dct_quantize_sse2;
#endif
s->denoise_dct = denoise_dct_sse2;
}
#endif
#if HAVE_6REGS && HAVE_SSSE3_INLINE
if (INLINE_SSSE3(cpu_flags))
s->dct_quantize = dct_quantize_ssse3;
#endif
}
}

View File

@@ -0,0 +1,109 @@
/*
* QNS functions are compiled 3 times for MMX/3DNOW/SSSE3
* Copyright (c) 2004 Michael Niedermayer
*
* MMX optimization by Michael Niedermayer <michaelni@gmx.at>
* 3DNow! and SSSE3 optimization by Zuxy Meng <zuxy.meng@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/x86/asm.h"
#include "inline_asm.h"
#define MAX_ABS (512 >> (SCALE_OFFSET>0 ? SCALE_OFFSET : 0))
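/* try_8x8basis() estimates, in weighted squared form, how much error remains
 * if basis[] scaled by 'scale' is added to the residual rem[]; add_8x8basis()
 * actually applies that update, with a plain C fallback for large scales.
 * DEF(), SET_RND(), PMULHRW() and PHADDD() are expected to be defined by the
 * file including this template, selecting the MMX, 3DNow! or SSSE3 variant
 * (see the header comment above). */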
static int DEF(try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale)
{
x86_reg i=0;
av_assert2(FFABS(scale) < MAX_ABS);
scale<<= 16 + SCALE_OFFSET - BASIS_SHIFT + RECON_SHIFT;
SET_RND(mm6);
__asm__ volatile(
"pxor %%mm7, %%mm7 \n\t"
"movd %4, %%mm5 \n\t"
"punpcklwd %%mm5, %%mm5 \n\t"
"punpcklwd %%mm5, %%mm5 \n\t"
".p2align 4 \n\t"
"1: \n\t"
"movq (%1, %0), %%mm0 \n\t"
"movq 8(%1, %0), %%mm1 \n\t"
PMULHRW(%%mm0, %%mm1, %%mm5, %%mm6)
"paddw (%2, %0), %%mm0 \n\t"
"paddw 8(%2, %0), %%mm1 \n\t"
"psraw $6, %%mm0 \n\t"
"psraw $6, %%mm1 \n\t"
"pmullw (%3, %0), %%mm0 \n\t"
"pmullw 8(%3, %0), %%mm1 \n\t"
"pmaddwd %%mm0, %%mm0 \n\t"
"pmaddwd %%mm1, %%mm1 \n\t"
"paddd %%mm1, %%mm0 \n\t"
"psrld $4, %%mm0 \n\t"
"paddd %%mm0, %%mm7 \n\t"
"add $16, %0 \n\t"
"cmp $128, %0 \n\t" //FIXME optimize & bench
" jb 1b \n\t"
PHADDD(%%mm7, %%mm6)
"psrld $2, %%mm7 \n\t"
"movd %%mm7, %0 \n\t"
: "+r" (i)
: "r"(basis), "r"(rem), "r"(weight), "g"(scale)
);
return i;
}
static void DEF(add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale)
{
x86_reg i=0;
if(FFABS(scale) < MAX_ABS){
scale<<= 16 + SCALE_OFFSET - BASIS_SHIFT + RECON_SHIFT;
SET_RND(mm6);
__asm__ volatile(
"movd %3, %%mm5 \n\t"
"punpcklwd %%mm5, %%mm5 \n\t"
"punpcklwd %%mm5, %%mm5 \n\t"
".p2align 4 \n\t"
"1: \n\t"
"movq (%1, %0), %%mm0 \n\t"
"movq 8(%1, %0), %%mm1 \n\t"
PMULHRW(%%mm0, %%mm1, %%mm5, %%mm6)
"paddw (%2, %0), %%mm0 \n\t"
"paddw 8(%2, %0), %%mm1 \n\t"
"movq %%mm0, (%2, %0) \n\t"
"movq %%mm1, 8(%2, %0) \n\t"
"add $16, %0 \n\t"
"cmp $128, %0 \n\t" // FIXME optimize & bench
" jb 1b \n\t"
: "+r" (i)
: "r"(basis), "r"(rem), "g"(scale)
);
}else{
for(i=0; i<8*8; i++){
rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT);
}
}
}

View File

@@ -0,0 +1,371 @@
/*
* MPEG video MMX templates
*
* Copyright (c) 2002 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include "libavutil/internal.h"
#include "libavutil/x86/asm.h"
#include "libavcodec/mpegvideo.h"
#include "fdct.h"
#undef MMREG_WIDTH
#undef MM
#undef MOVQ
#undef SPREADW
#undef PMAXW
#undef PMAX
#undef SAVE_SIGN
#undef RESTORE_SIGN
#if COMPILE_TEMPLATE_SSE2
#define MMREG_WIDTH "16"
#define MM "%%xmm"
#define MOVQ "movdqa"
#define SPREADW(a) \
"pshuflw $0, "a", "a" \n\t"\
"punpcklwd "a", "a" \n\t"
#define PMAXW(a,b) "pmaxsw "a", "b" \n\t"
#define PMAX(a,b) \
"movhlps "a", "b" \n\t"\
PMAXW(b, a)\
"pshuflw $0x0E, "a", "b" \n\t"\
PMAXW(b, a)\
"pshuflw $0x01, "a", "b" \n\t"\
PMAXW(b, a)
#else
#define MMREG_WIDTH "8"
#define MM "%%mm"
#define MOVQ "movq"
#if COMPILE_TEMPLATE_MMXEXT
#define SPREADW(a) "pshufw $0, "a", "a" \n\t"
#define PMAXW(a,b) "pmaxsw "a", "b" \n\t"
#define PMAX(a,b) \
"pshufw $0x0E, "a", "b" \n\t"\
PMAXW(b, a)\
"pshufw $0x01, "a", "b" \n\t"\
PMAXW(b, a)
#else
#define SPREADW(a) \
"punpcklwd "a", "a" \n\t"\
"punpcklwd "a", "a" \n\t"
#define PMAXW(a,b) \
"psubusw "a", "b" \n\t"\
"paddw "a", "b" \n\t"
#define PMAX(a,b) \
"movq "a", "b" \n\t"\
"psrlq $32, "a" \n\t"\
PMAXW(b, a)\
"movq "a", "b" \n\t"\
"psrlq $16, "a" \n\t"\
PMAXW(b, a)
#endif
#endif
#if COMPILE_TEMPLATE_SSSE3
#define SAVE_SIGN(a,b) \
"movdqa "b", "a" \n\t"\
"pabsw "b", "b" \n\t"
#define RESTORE_SIGN(a,b) \
"psignw "a", "b" \n\t"
#else
#define SAVE_SIGN(a,b) \
"pxor "a", "a" \n\t"\
"pcmpgtw "b", "a" \n\t" /* block[i] <= 0 ? 0xFF : 0x00 */\
"pxor "a", "b" \n\t"\
"psubw "a", "b" \n\t" /* ABS(block[i]) */
#define RESTORE_SIGN(a,b) \
"pxor "a", "b" \n\t"\
"psubw "a", "b" \n\t" // out=((ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16)*sign(block[i])
#endif
static int RENAME(dct_quantize)(MpegEncContext *s,
int16_t *block, int n,
int qscale, int *overflow)
{
x86_reg last_non_zero_p1;
    int level=0, q; // =0 only to silence gcc's "may be used uninitialized" warning
const uint16_t *qmat, *bias;
LOCAL_ALIGNED_16(int16_t, temp_block, [64]);
av_assert2((7&(int)(&temp_block[0])) == 0); //did gcc align it correctly?
//s->fdct (block);
RENAME_FDCT(ff_fdct)(block); // cannot be anything else ...
if(s->dct_error_sum)
s->denoise_dct(s, block);
if (s->mb_intra) {
int dummy;
if (n < 4){
q = s->y_dc_scale;
bias = s->q_intra_matrix16[qscale][1];
qmat = s->q_intra_matrix16[qscale][0];
}else{
q = s->c_dc_scale;
bias = s->q_chroma_intra_matrix16[qscale][1];
qmat = s->q_chroma_intra_matrix16[qscale][0];
}
/* note: block[0] is assumed to be positive */
if (!s->h263_aic) {
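            /* level = ((block[0] >> 2) + q) / (2 * q) computed without a
             * division: multiply by the 2^32/(2*q) reciprocal from ff_inverse[]
             * and keep the high 32 bits in %edx. */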
__asm__ volatile (
"mul %%ecx \n\t"
: "=d" (level), "=a"(dummy)
: "a" ((block[0]>>2) + q), "c" (ff_inverse[q<<1])
);
} else
/* For AIC we skip quant/dequant of INTRADC */
level = (block[0] + 4)>>3;
block[0]=0; //avoid fake overflow
// temp_block[0] = (block[0] + (q >> 1)) / q;
last_non_zero_p1 = 1;
} else {
last_non_zero_p1 = 0;
bias = s->q_inter_matrix16[qscale][1];
qmat = s->q_inter_matrix16[qscale][0];
}
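    /* Both asm variants below implement, per coefficient (a rough sketch using
     * the same shorthand as the comments inside):
     *
     *     level         = ((ABS(block[i]) + bias) * qmat) >> 16;
     *     temp_block[i] = level * sign(block[i]);
     *     block[i]      = 0;
     *
     * while OR-ing every level together for the overflow test and tracking the
     * largest inv_zigzag_direct16[] value seen for a non-zero level
     * (last_non_zero_p1). The first variant broadcasts the flat H.263/H.261
     * qmat[0]/bias[0] once; the second reloads qmat[i]/bias[i] for every group
     * of coefficients. */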
if((s->out_format == FMT_H263 || s->out_format == FMT_H261) && s->mpeg_quant==0){
__asm__ volatile(
"movd %%"REG_a", "MM"3 \n\t" // last_non_zero_p1
SPREADW(MM"3")
"pxor "MM"7, "MM"7 \n\t" // 0
"pxor "MM"4, "MM"4 \n\t" // 0
MOVQ" (%2), "MM"5 \n\t" // qmat[0]
"pxor "MM"6, "MM"6 \n\t"
"psubw (%3), "MM"6 \n\t" // -bias[0]
"mov $-128, %%"REG_a" \n\t"
".p2align 4 \n\t"
"1: \n\t"
MOVQ" (%1, %%"REG_a"), "MM"0 \n\t" // block[i]
SAVE_SIGN(MM"1", MM"0") // ABS(block[i])
"psubusw "MM"6, "MM"0 \n\t" // ABS(block[i]) + bias[0]
"pmulhw "MM"5, "MM"0 \n\t" // (ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16
"por "MM"0, "MM"4 \n\t"
RESTORE_SIGN(MM"1", MM"0") // out=((ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16)*sign(block[i])
MOVQ" "MM"0, (%5, %%"REG_a") \n\t"
"pcmpeqw "MM"7, "MM"0 \n\t" // out==0 ? 0xFF : 0x00
MOVQ" (%4, %%"REG_a"), "MM"1 \n\t"
MOVQ" "MM"7, (%1, %%"REG_a") \n\t" // 0
"pandn "MM"1, "MM"0 \n\t"
PMAXW(MM"0", MM"3")
"add $"MMREG_WIDTH", %%"REG_a" \n\t"
" js 1b \n\t"
PMAX(MM"3", MM"0")
"movd "MM"3, %%"REG_a" \n\t"
"movzbl %%al, %%eax \n\t" // last_non_zero_p1
: "+a" (last_non_zero_p1)
: "r" (block+64), "r" (qmat), "r" (bias),
"r" (inv_zigzag_direct16 + 64), "r" (temp_block + 64)
XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3",
"%xmm4", "%xmm5", "%xmm6", "%xmm7")
);
    }else{ // per-coefficient quantization matrices (MPEG-1/2/4 quant)
__asm__ volatile(
"movd %%"REG_a", "MM"3 \n\t" // last_non_zero_p1
SPREADW(MM"3")
"pxor "MM"7, "MM"7 \n\t" // 0
"pxor "MM"4, "MM"4 \n\t" // 0
"mov $-128, %%"REG_a" \n\t"
".p2align 4 \n\t"
"1: \n\t"
MOVQ" (%1, %%"REG_a"), "MM"0 \n\t" // block[i]
SAVE_SIGN(MM"1", MM"0") // ABS(block[i])
MOVQ" (%3, %%"REG_a"), "MM"6 \n\t" // bias[0]
"paddusw "MM"6, "MM"0 \n\t" // ABS(block[i]) + bias[0]
MOVQ" (%2, %%"REG_a"), "MM"5 \n\t" // qmat[i]
"pmulhw "MM"5, "MM"0 \n\t" // (ABS(block[i])*qmat[0] + bias[0]*qmat[0])>>16
"por "MM"0, "MM"4 \n\t"
RESTORE_SIGN(MM"1", MM"0") // out=((ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16)*sign(block[i])
MOVQ" "MM"0, (%5, %%"REG_a") \n\t"
"pcmpeqw "MM"7, "MM"0 \n\t" // out==0 ? 0xFF : 0x00
MOVQ" (%4, %%"REG_a"), "MM"1 \n\t"
MOVQ" "MM"7, (%1, %%"REG_a") \n\t" // 0
"pandn "MM"1, "MM"0 \n\t"
PMAXW(MM"0", MM"3")
"add $"MMREG_WIDTH", %%"REG_a" \n\t"
" js 1b \n\t"
PMAX(MM"3", MM"0")
"movd "MM"3, %%"REG_a" \n\t"
"movzbl %%al, %%eax \n\t" // last_non_zero_p1
: "+a" (last_non_zero_p1)
: "r" (block+64), "r" (qmat+64), "r" (bias+64),
"r" (inv_zigzag_direct16 + 64), "r" (temp_block + 64)
XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3",
"%xmm4", "%xmm5", "%xmm6", "%xmm7")
);
}
__asm__ volatile(
"movd %1, "MM"1 \n\t" // max_qcoeff
SPREADW(MM"1")
"psubusw "MM"1, "MM"4 \n\t"
"packuswb "MM"4, "MM"4 \n\t"
#if COMPILE_TEMPLATE_SSE2
"packsswb "MM"4, "MM"4 \n\t"
#endif
"movd "MM"4, %0 \n\t" // *overflow
: "=g" (*overflow)
: "g" (s->max_qcoeff)
);
if(s->mb_intra) block[0]= level;
else block[0]= temp_block[0];
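    /* Copy the quantized coefficients from temp_block[] back into block[] in
     * the IDCT permutation order expected by the rest of the encoder; the
     * copies follow the zigzag scan so everything beyond last_non_zero_p1 can
     * be skipped with a goto. */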
if (s->idsp.perm_type == FF_IDCT_PERM_SIMPLE) {
if(last_non_zero_p1 <= 1) goto end;
block[0x08] = temp_block[0x01]; block[0x10] = temp_block[0x08];
block[0x20] = temp_block[0x10];
if(last_non_zero_p1 <= 4) goto end;
block[0x18] = temp_block[0x09]; block[0x04] = temp_block[0x02];
block[0x09] = temp_block[0x03];
if(last_non_zero_p1 <= 7) goto end;
block[0x14] = temp_block[0x0A]; block[0x28] = temp_block[0x11];
block[0x12] = temp_block[0x18]; block[0x02] = temp_block[0x20];
if(last_non_zero_p1 <= 11) goto end;
block[0x1A] = temp_block[0x19]; block[0x24] = temp_block[0x12];
block[0x19] = temp_block[0x0B]; block[0x01] = temp_block[0x04];
block[0x0C] = temp_block[0x05];
if(last_non_zero_p1 <= 16) goto end;
block[0x11] = temp_block[0x0C]; block[0x29] = temp_block[0x13];
block[0x16] = temp_block[0x1A]; block[0x0A] = temp_block[0x21];
block[0x30] = temp_block[0x28]; block[0x22] = temp_block[0x30];
block[0x38] = temp_block[0x29]; block[0x06] = temp_block[0x22];
if(last_non_zero_p1 <= 24) goto end;
block[0x1B] = temp_block[0x1B]; block[0x21] = temp_block[0x14];
block[0x1C] = temp_block[0x0D]; block[0x05] = temp_block[0x06];
block[0x0D] = temp_block[0x07]; block[0x15] = temp_block[0x0E];
block[0x2C] = temp_block[0x15]; block[0x13] = temp_block[0x1C];
if(last_non_zero_p1 <= 32) goto end;
block[0x0B] = temp_block[0x23]; block[0x34] = temp_block[0x2A];
block[0x2A] = temp_block[0x31]; block[0x32] = temp_block[0x38];
block[0x3A] = temp_block[0x39]; block[0x26] = temp_block[0x32];
block[0x39] = temp_block[0x2B]; block[0x03] = temp_block[0x24];
if(last_non_zero_p1 <= 40) goto end;
block[0x1E] = temp_block[0x1D]; block[0x25] = temp_block[0x16];
block[0x1D] = temp_block[0x0F]; block[0x2D] = temp_block[0x17];
block[0x17] = temp_block[0x1E]; block[0x0E] = temp_block[0x25];
block[0x31] = temp_block[0x2C]; block[0x2B] = temp_block[0x33];
if(last_non_zero_p1 <= 48) goto end;
block[0x36] = temp_block[0x3A]; block[0x3B] = temp_block[0x3B];
block[0x23] = temp_block[0x34]; block[0x3C] = temp_block[0x2D];
block[0x07] = temp_block[0x26]; block[0x1F] = temp_block[0x1F];
block[0x0F] = temp_block[0x27]; block[0x35] = temp_block[0x2E];
if(last_non_zero_p1 <= 56) goto end;
block[0x2E] = temp_block[0x35]; block[0x33] = temp_block[0x3C];
block[0x3E] = temp_block[0x3D]; block[0x27] = temp_block[0x36];
block[0x3D] = temp_block[0x2F]; block[0x2F] = temp_block[0x37];
block[0x37] = temp_block[0x3E]; block[0x3F] = temp_block[0x3F];
}else if(s->idsp.perm_type == FF_IDCT_PERM_LIBMPEG2){
if(last_non_zero_p1 <= 1) goto end;
block[0x04] = temp_block[0x01];
block[0x08] = temp_block[0x08]; block[0x10] = temp_block[0x10];
if(last_non_zero_p1 <= 4) goto end;
block[0x0C] = temp_block[0x09]; block[0x01] = temp_block[0x02];
block[0x05] = temp_block[0x03];
if(last_non_zero_p1 <= 7) goto end;
block[0x09] = temp_block[0x0A]; block[0x14] = temp_block[0x11];
block[0x18] = temp_block[0x18]; block[0x20] = temp_block[0x20];
if(last_non_zero_p1 <= 11) goto end;
block[0x1C] = temp_block[0x19];
block[0x11] = temp_block[0x12]; block[0x0D] = temp_block[0x0B];
block[0x02] = temp_block[0x04]; block[0x06] = temp_block[0x05];
if(last_non_zero_p1 <= 16) goto end;
block[0x0A] = temp_block[0x0C]; block[0x15] = temp_block[0x13];
block[0x19] = temp_block[0x1A]; block[0x24] = temp_block[0x21];
block[0x28] = temp_block[0x28]; block[0x30] = temp_block[0x30];
block[0x2C] = temp_block[0x29]; block[0x21] = temp_block[0x22];
if(last_non_zero_p1 <= 24) goto end;
block[0x1D] = temp_block[0x1B]; block[0x12] = temp_block[0x14];
block[0x0E] = temp_block[0x0D]; block[0x03] = temp_block[0x06];
block[0x07] = temp_block[0x07]; block[0x0B] = temp_block[0x0E];
block[0x16] = temp_block[0x15]; block[0x1A] = temp_block[0x1C];
if(last_non_zero_p1 <= 32) goto end;
block[0x25] = temp_block[0x23]; block[0x29] = temp_block[0x2A];
block[0x34] = temp_block[0x31]; block[0x38] = temp_block[0x38];
block[0x3C] = temp_block[0x39]; block[0x31] = temp_block[0x32];
block[0x2D] = temp_block[0x2B]; block[0x22] = temp_block[0x24];
if(last_non_zero_p1 <= 40) goto end;
block[0x1E] = temp_block[0x1D]; block[0x13] = temp_block[0x16];
block[0x0F] = temp_block[0x0F]; block[0x17] = temp_block[0x17];
block[0x1B] = temp_block[0x1E]; block[0x26] = temp_block[0x25];
block[0x2A] = temp_block[0x2C]; block[0x35] = temp_block[0x33];
if(last_non_zero_p1 <= 48) goto end;
block[0x39] = temp_block[0x3A]; block[0x3D] = temp_block[0x3B];
block[0x32] = temp_block[0x34]; block[0x2E] = temp_block[0x2D];
block[0x23] = temp_block[0x26]; block[0x1F] = temp_block[0x1F];
block[0x27] = temp_block[0x27]; block[0x2B] = temp_block[0x2E];
if(last_non_zero_p1 <= 56) goto end;
block[0x36] = temp_block[0x35]; block[0x3A] = temp_block[0x3C];
block[0x3E] = temp_block[0x3D]; block[0x33] = temp_block[0x36];
block[0x2F] = temp_block[0x2F]; block[0x37] = temp_block[0x37];
block[0x3B] = temp_block[0x3E]; block[0x3F] = temp_block[0x3F];
}else{
if(last_non_zero_p1 <= 1) goto end;
block[0x01] = temp_block[0x01];
block[0x08] = temp_block[0x08]; block[0x10] = temp_block[0x10];
if(last_non_zero_p1 <= 4) goto end;
block[0x09] = temp_block[0x09]; block[0x02] = temp_block[0x02];
block[0x03] = temp_block[0x03];
if(last_non_zero_p1 <= 7) goto end;
block[0x0A] = temp_block[0x0A]; block[0x11] = temp_block[0x11];
block[0x18] = temp_block[0x18]; block[0x20] = temp_block[0x20];
if(last_non_zero_p1 <= 11) goto end;
block[0x19] = temp_block[0x19];
block[0x12] = temp_block[0x12]; block[0x0B] = temp_block[0x0B];
block[0x04] = temp_block[0x04]; block[0x05] = temp_block[0x05];
if(last_non_zero_p1 <= 16) goto end;
block[0x0C] = temp_block[0x0C]; block[0x13] = temp_block[0x13];
block[0x1A] = temp_block[0x1A]; block[0x21] = temp_block[0x21];
block[0x28] = temp_block[0x28]; block[0x30] = temp_block[0x30];
block[0x29] = temp_block[0x29]; block[0x22] = temp_block[0x22];
if(last_non_zero_p1 <= 24) goto end;
block[0x1B] = temp_block[0x1B]; block[0x14] = temp_block[0x14];
block[0x0D] = temp_block[0x0D]; block[0x06] = temp_block[0x06];
block[0x07] = temp_block[0x07]; block[0x0E] = temp_block[0x0E];
block[0x15] = temp_block[0x15]; block[0x1C] = temp_block[0x1C];
if(last_non_zero_p1 <= 32) goto end;
block[0x23] = temp_block[0x23]; block[0x2A] = temp_block[0x2A];
block[0x31] = temp_block[0x31]; block[0x38] = temp_block[0x38];
block[0x39] = temp_block[0x39]; block[0x32] = temp_block[0x32];
block[0x2B] = temp_block[0x2B]; block[0x24] = temp_block[0x24];
if(last_non_zero_p1 <= 40) goto end;
block[0x1D] = temp_block[0x1D]; block[0x16] = temp_block[0x16];
block[0x0F] = temp_block[0x0F]; block[0x17] = temp_block[0x17];
block[0x1E] = temp_block[0x1E]; block[0x25] = temp_block[0x25];
block[0x2C] = temp_block[0x2C]; block[0x33] = temp_block[0x33];
if(last_non_zero_p1 <= 48) goto end;
block[0x3A] = temp_block[0x3A]; block[0x3B] = temp_block[0x3B];
block[0x34] = temp_block[0x34]; block[0x2D] = temp_block[0x2D];
block[0x26] = temp_block[0x26]; block[0x1F] = temp_block[0x1F];
block[0x27] = temp_block[0x27]; block[0x2E] = temp_block[0x2E];
if(last_non_zero_p1 <= 56) goto end;
block[0x35] = temp_block[0x35]; block[0x3C] = temp_block[0x3C];
block[0x3D] = temp_block[0x3D]; block[0x36] = temp_block[0x36];
block[0x2F] = temp_block[0x2F]; block[0x37] = temp_block[0x37];
block[0x3E] = temp_block[0x3E]; block[0x3F] = temp_block[0x3F];
}
end:
return last_non_zero_p1 - 1;
}

Some files were not shown because too many files have changed in this diff.