#define LOOP_FILTER(EXT) \
void ff_vc1_v_loop_filter4_ ## EXT(uint8_t *src, int stride, int pq); \
void ff_vc1_h_loop_filter4_ ## EXT(uint8_t *src, int stride, int pq); \
void ff_vc1_v_loop_filter8_ ## EXT(uint8_t *src, int stride, int pq); \
void ff_vc1_h_loop_filter8_ ## EXT(uint8_t *src, int stride, int pq); \
\
static void vc1_v_loop_filter16_ ## EXT(uint8_t *src, int stride, int pq) \
{ \
    /* a 16-pixel vertical filter is two 8-pixel filters side by side */ \
    ff_vc1_v_loop_filter8_ ## EXT(src,     stride, pq); \
    ff_vc1_v_loop_filter8_ ## EXT(src + 8, stride, pq); \
} \
\
static void vc1_h_loop_filter16_ ## EXT(uint8_t *src, int stride, int pq) \
{ \
    /* a 16-pixel horizontal filter is two 8-pixel filters stacked vertically */ \
    ff_vc1_h_loop_filter8_ ## EXT(src,              stride, pq); \
    ff_vc1_h_loop_filter8_ ## EXT(src + 8 * stride, stride, pq); \
}

static void vc1_h_loop_filter16_sse4(uint8_t *src, int stride, int pq)
{
    /* only an 8-pixel horizontal filter exists for SSE4 here, so the
     * 16-pixel variant is composed by hand rather than via LOOP_FILTER() */
    ff_vc1_h_loop_filter8_sse4(src,              stride, pq);
    ff_vc1_h_loop_filter8_sse4(src + 8 * stride, stride, pq);
}
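LOOP_FILTER(EXT) thus both declares the 4- and 8-pixel assembly filters for a given instruction-set suffix and synthesizes the 16-pixel wrappers from two 8-pixel calls. As a hedged illustration only (the exact suffixes and the guarding configure macro depend on the FFmpeg version and build options), the init file would typically expand it once per suffix that has the full set of assembly implementations:

#if HAVE_X86ASM   /* assumption: the name of the configure guard varies across versions */
LOOP_FILTER(mmxext)
LOOP_FILTER(sse2)
LOOP_FILTER(ssse3)
#endif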
#define DECLARE_FUNCTION(OP, DEPTH, INSN) \
static void OP##vc1_mspel_mc00_##DEPTH##INSN(uint8_t *dst, \
                                             const uint8_t *src, ptrdiff_t stride, int rnd) \
{ \
    ff_ ## OP ## pixels ## DEPTH ## INSN(dst, src, stride, DEPTH); \
}

/* no-rounding chroma motion compensation, implemented in assembly */
void ff_put_vc1_chroma_mc8_nornd_mmx   (uint8_t *dst, uint8_t *src, ptrdiff_t stride,
                                        int h, int x, int y);
void ff_avg_vc1_chroma_mc8_nornd_mmxext(uint8_t *dst, uint8_t *src, ptrdiff_t stride,
                                        int h, int x, int y);
void ff_avg_vc1_chroma_mc8_nornd_3dnow (uint8_t *dst, uint8_t *src, ptrdiff_t stride,
                                        int h, int x, int y);
void ff_put_vc1_chroma_mc8_nornd_ssse3 (uint8_t *dst, uint8_t *src, ptrdiff_t stride,
                                        int h, int x, int y);
void ff_avg_vc1_chroma_mc8_nornd_ssse3 (uint8_t *dst, uint8_t *src, ptrdiff_t stride,
                                        int h, int x, int y);
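DECLARE_FUNCTION(OP, DEPTH, INSN) above generates thin mc00 (no sub-pel shift) wrappers around the hpeldsp copy/average routines; those wrappers are what end up in slot 0 of the mspel tables listed further down. The expansion below is a sketch for illustration, not a verbatim excerpt; the real file expands the macro for every put/avg, 8/16-pixel, instruction-set combination it supports, behind the matching configure guards:

DECLARE_FUNCTION(put_, 8,  _mmx)   /* expands to put_vc1_mspel_mc00_8_mmx(), which calls ff_put_pixels8_mmx()  */
DECLARE_FUNCTION(put_, 16, _mmx)   /* expands to put_vc1_mspel_mc00_16_mmx(), which calls ff_put_pixels16_mmx() */

/* later, during init (sketch; assumes row [0] holds the 16x16 functions,
 * row [1] the 8x8 ones, and slot 0 the unshifted mc00 case) */
dsp->put_vc1_mspel_pixels_tab[0][0] = put_vc1_mspel_mc00_16_mmx;
dsp->put_vc1_mspel_pixels_tab[1][0] = put_vc1_mspel_mc00_8_mmx;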
#define ASSIGN_LF(EXT) \
        dsp->vc1_v_loop_filter4  = ff_vc1_v_loop_filter4_ ## EXT; \
        dsp->vc1_h_loop_filter4  = ff_vc1_h_loop_filter4_ ## EXT; \
        dsp->vc1_v_loop_filter8  = ff_vc1_v_loop_filter8_ ## EXT; \
        dsp->vc1_h_loop_filter8  = ff_vc1_h_loop_filter8_ ## EXT; \
        dsp->vc1_v_loop_filter16 = vc1_v_loop_filter16_ ## EXT; \
        dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_ ## EXT

#define EXTERNAL_MMX(flags)
void(* vc1_v_loop_filter16)(uint8_t *src, int stride, int pq)
#define DECLARE_FUNCTION(a, b)
Macro to ease bicubic filter interpolation functions declarations.
void(* vc1_h_loop_filter16)(uint8_t *src, int stride, int pq)
static atomic_int cpu_flags
vc1op_pixels_func put_vc1_mspel_pixels_tab[2][16]
void ff_vc1dsp_init_mmx(VC1DSPContext *dsp)
void ff_vc1_inv_trans_8x8_dc_mmxext(uint8_t *dest, ptrdiff_t linesize, int16_t *block)
void ff_put_vc1_chroma_mc8_nornd_ssse3(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int h, int x, int y)
#define EXTERNAL_SSE4(flags)
void(* vc1_inv_trans_4x8_dc)(uint8_t *dest, ptrdiff_t stride, int16_t *block)
#define EXTERNAL_SSE2(flags)
void(* vc1_h_loop_filter8)(uint8_t *src, int stride, int pq)
#define INLINE_MMX(flags)
void(* vc1_v_loop_filter8)(uint8_t *src, int stride, int pq)
void(* vc1_inv_trans_8x8_dc)(uint8_t *dest, ptrdiff_t stride, int16_t *block)
h264_chroma_mc_func avg_no_rnd_vc1_chroma_pixels_tab[3]
void ff_vc1dsp_init_mmxext(VC1DSPContext *dsp)
av_cold void ff_vc1dsp_init_x86(VC1DSPContext *dsp)
void ff_avg_vc1_chroma_mc8_nornd_ssse3(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int h, int x, int y)
vc1op_pixels_func avg_vc1_mspel_pixels_tab[2][16]
void(* vc1_inv_trans_8x4_dc)(uint8_t *dest, ptrdiff_t stride, int16_t *block)
#define EXTERNAL_SSSE3(flags)
void ff_vc1_inv_trans_8x4_dc_mmxext(uint8_t *dest, ptrdiff_t linesize, int16_t *block)
void ff_put_vc1_chroma_mc8_nornd_mmx(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int h, int x, int y)
int av_get_cpu_flags(void)
Return the flags which specify extensions supported by the CPU.
h264_chroma_mc_func put_no_rnd_vc1_chroma_pixels_tab[3]
void(* vc1_inv_trans_4x4_dc)(uint8_t *dest, ptrdiff_t stride, int16_t *block)
#define EXTERNAL_MMXEXT(flags)
#define INLINE_MMXEXT(flags)
void ff_avg_vc1_chroma_mc8_nornd_3dnow(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int h, int x, int y)
void ff_avg_vc1_chroma_mc8_nornd_mmxext(uint8_t *dst, uint8_t *src, ptrdiff_t stride, int h, int x, int y)
void ff_vc1_inv_trans_4x8_dc_mmxext(uint8_t *dest, ptrdiff_t linesize, int16_t *block)
#define EXTERNAL_AMD3DNOW(flags)
void ff_vc1_inv_trans_4x4_dc_mmxext(uint8_t *dest, ptrdiff_t linesize, int16_t *block)
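Tying the page together: ff_vc1dsp_init_x86() reads av_get_cpu_flags() and uses the EXTERNAL_* checks above to install the fastest available implementations into the VC1DSPContext. The following is a condensed sketch built only from the symbols listed on this page, not a verbatim copy of the function; the exact set of assignments differs between FFmpeg versions:

av_cold void ff_vc1dsp_init_x86(VC1DSPContext *dsp)
{
    int cpu_flags = av_get_cpu_flags();

    if (EXTERNAL_MMX(cpu_flags))
        dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = ff_put_vc1_chroma_mc8_nornd_mmx;

    if (EXTERNAL_AMD3DNOW(cpu_flags))
        dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = ff_avg_vc1_chroma_mc8_nornd_3dnow;

    if (EXTERNAL_MMXEXT(cpu_flags)) {
        ASSIGN_LF(mmxext);   /* 4-, 8- and 16-pixel loop filters */
        dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = ff_avg_vc1_chroma_mc8_nornd_mmxext;

        dsp->vc1_inv_trans_8x8_dc = ff_vc1_inv_trans_8x8_dc_mmxext;
        dsp->vc1_inv_trans_4x8_dc = ff_vc1_inv_trans_4x8_dc_mmxext;
        dsp->vc1_inv_trans_8x4_dc = ff_vc1_inv_trans_8x4_dc_mmxext;
        dsp->vc1_inv_trans_4x4_dc = ff_vc1_inv_trans_4x4_dc_mmxext;
    }

    if (EXTERNAL_SSSE3(cpu_flags)) {
        ASSIGN_LF(ssse3);
        dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = ff_put_vc1_chroma_mc8_nornd_ssse3;
        dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = ff_avg_vc1_chroma_mc8_nornd_ssse3;
    }

    if (EXTERNAL_SSE4(cpu_flags)) {
        /* only the 8-pixel horizontal filter has an SSE4 version */
        dsp->vc1_h_loop_filter8  = ff_vc1_h_loop_filter8_sse4;
        dsp->vc1_h_loop_filter16 = vc1_h_loop_filter16_sse4;
    }
}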