// main steps of 8x8 transform
#define STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_rnd) \
do { \
    t0 = vec_sl(vec_add(s0, s4), vec_2); \
    t0 = vec_add(vec_sl(t0, vec_1), t0); \
    t0 = vec_add(t0, vec_rnd); \
    t1 = vec_sl(vec_sub(s0, s4), vec_2); \
    t1 = vec_add(vec_sl(t1, vec_1), t1); \
    t1 = vec_add(t1, vec_rnd); \
    t2 = vec_add(vec_sl(s6, vec_2), vec_sl(s6, vec_1)); \
    t2 = vec_add(t2, vec_sl(s2, vec_4)); \
    t3 = vec_add(vec_sl(s2, vec_2), vec_sl(s2, vec_1)); \
    t3 = vec_sub(t3, vec_sl(s6, vec_4)); \
    t4 = vec_add(t0, t2); \
    t5 = vec_add(t1, t3); \
    t6 = vec_sub(t1, t3); \
    t7 = vec_sub(t0, t2); \
    t0 = vec_sl(vec_add(s1, s3), vec_4); \
    t0 = vec_add(t0, vec_sl(s5, vec_3)); \
    t0 = vec_add(t0, vec_sl(s7, vec_2)); \
    t0 = vec_add(t0, vec_sub(s5, s3)); \
    t1 = vec_sl(vec_sub(s1, s5), vec_4); \
    t1 = vec_sub(t1, vec_sl(s7, vec_3)); \
    t1 = vec_sub(t1, vec_sl(s3, vec_2)); \
    t1 = vec_sub(t1, vec_add(s1, s7)); \
    t2 = vec_sl(vec_sub(s7, s3), vec_4); \
    t2 = vec_add(t2, vec_sl(s1, vec_3)); \
    t2 = vec_add(t2, vec_sl(s5, vec_2)); \
    t2 = vec_add(t2, vec_sub(s1, s7)); \
    t3 = vec_sl(vec_sub(s5, s7), vec_4); \
    t3 = vec_sub(t3, vec_sl(s3, vec_3)); \
    t3 = vec_add(t3, vec_sl(s1, vec_2)); \
    t3 = vec_sub(t3, vec_add(s3, s5)); \
    s0 = vec_add(t4, t0); \
    s1 = vec_add(t5, t1); \
    s2 = vec_add(t6, t2); \
    s3 = vec_add(t7, t3); \
    s4 = vec_sub(t7, t3); \
    s5 = vec_sub(t6, t2); \
    s6 = vec_sub(t5, t1); \
    s7 = vec_sub(t4, t0); \
} while (0)

#define SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7) \
do { \
    s0 = vec_sra(s0, vec_3); \
    s1 = vec_sra(s1, vec_3); \
    s2 = vec_sra(s2, vec_3); \
    s3 = vec_sra(s3, vec_3); \
    s4 = vec_sra(s4, vec_3); \
    s5 = vec_sra(s5, vec_3); \
    s6 = vec_sra(s6, vec_3); \
    s7 = vec_sra(s7, vec_3); \
} while (0)

#define SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7) \
do { \
    s0 = vec_sra(s0, vec_7); \
    s1 = vec_sra(s1, vec_7); \
    s2 = vec_sra(s2, vec_7); \
    s3 = vec_sra(s3, vec_7); \
    s4 = vec_sra(vec_add(s4, vec_1s), vec_7); \
    s5 = vec_sra(vec_add(s5, vec_1s), vec_7); \
    s6 = vec_sra(vec_add(s6, vec_1s), vec_7); \
    s7 = vec_sra(vec_add(s7, vec_1s), vec_7); \
} while (0)

// main steps of 4x4 transform
#define STEP4(s0, s1, s2, s3, vec_rnd) \
do { \
    t1 = vec_add(vec_sl(s0, vec_4), s0); \
    t1 = vec_add(t1, vec_rnd); \
    t2 = vec_add(vec_sl(s2, vec_4), s2); \
    t0 = vec_add(t1, t2); \
    t1 = vec_sub(t1, t2); \
    t3 = vec_sl(vec_sub(s3, s1), vec_1); \
    t3 = vec_add(t3, vec_sl(t3, vec_2)); \
    t2 = vec_add(t3, vec_sl(s1, vec_5)); \
    t3 = vec_add(t3, vec_sl(s3, vec_3)); \
    t3 = vec_add(t3, vec_sl(s3, vec_2)); \
    s0 = vec_add(t0, t2); \
    s1 = vec_sub(t1, t3); \
    s2 = vec_add(t1, t3); \
    s3 = vec_sub(t0, t2); \
} while (0)

#define SHIFT_HOR4(s0, s1, s2, s3) \
    s0 = vec_sra(s0, vec_3); \
    s1 = vec_sra(s1, vec_3); \
    s2 = vec_sra(s2, vec_3); \
    s3 = vec_sra(s3, vec_3);

#define SHIFT_VERT4(s0, s1, s2, s3) \
    s0 = vec_sra(s0, vec_7); \
    s1 = vec_sra(s1, vec_7); \
    s2 = vec_sra(s2, vec_7); \
    s3 = vec_sra(s3, vec_7);

/** Do inverse transform on 8x8 block */
static void vc1_inv_trans_8x8_altivec(int16_t block[64])
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4)); /* 4 << 4 = 64 */
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector signed int vec_1s = vec_splat_s32(1);
    const vector unsigned int vec_1 = vec_splat_u32(1);
    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
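    /* first pass (SHIFT_HOR8): rounding constant 4, results shifted right by 3 */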
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
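    /* second pass (SHIFT_VERT8): rounding constant 64, results shifted right by 7;
       an extra +1 is added to s4-s7 before the shift */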
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_64);
    SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_64);
    SHIFT_VERT8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);

    vec_st(src0,   0, block);
    vec_st(src1,  16, block);
    vec_st(src2,  32, block);
    vec_st(src3,  48, block);
    vec_st(src4,  64, block);
    vec_st(src5,  80, block);
    vec_st(src6,  96, block);
    vec_st(src7, 112, block);
}
/** Do inverse transform on 8x4 part of block */
static void vc1_inv_trans_8x4_altivec(uint8_t *dest, ptrdiff_t stride,
                                      int16_t *block)
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4)); /* 4 << 4 = 64 */
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_5 = vec_splat_u32(5);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector unsigned int vec_1 = vec_splat_u32(1);
    vector unsigned char tmp;
    vector signed short tmp2, tmp3;
    vector unsigned char perm0, perm1, p0, p1, p;
    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
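    /* horizontal 8-point pass: rounding constant 4, results shifted right by 3 */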
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

    s0 = vec_unpackh(src0);
    s1 = vec_unpackh(src1);
    s2 = vec_unpackh(src2);
    s3 = vec_unpackh(src3);
    s8 = vec_unpackl(src0);
    s9 = vec_unpackl(src1);
    sA = vec_unpackl(src2);
    sB = vec_unpackl(src3);
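    /* vertical 4-point pass: rounding constant 64, results shifted right by 7 */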
    STEP4(s0, s1, s2, s3, vec_64);
    SHIFT_VERT4(s0, s1, s2, s3);
    STEP4(s8, s9, sA, sB, vec_64);
    SHIFT_VERT4(s8, s9, sA, sB);
    src0 = vec_pack(s0, s8);
    src1 = vec_pack(s1, s9);
    src2 = vec_pack(s2, sA);
    src3 = vec_pack(s3, sB);
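
    /* add reconstructed elements to the destination: 8 pixels per row, 4 rows */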
    p0 = vec_lvsl (0, dest);
    p1 = vec_lvsl (stride, dest);
    p = vec_splat_u8 (-1);
    perm0 = vec_mergeh (p, p0);
    perm1 = vec_mergeh (p, p1);
/* two variants of GET_TMP2: unaligned load + permute on big-endian,
   VSX load + merge on little-endian */
#if HAVE_BIGENDIAN
#define GET_TMP2(dst, p) \
    tmp = vec_ld (0, dest); \
    tmp2 = (vector signed short)vec_perm (tmp, vec_splat_u8(0), p);
#else
#define GET_TMP2(dst, p) \
    tmp = vec_vsx_ld (0, dst); \
    tmp2 = (vector signed short)vec_mergeh (tmp, vec_splat_u8(0));
#endif

#define ADD(dest, src, perm) \
    GET_TMP2(dest, perm); \
    tmp3 = vec_adds (tmp2, src); \
    tmp = vec_packsu (tmp3, tmp3); \
    vec_ste ((vector unsigned int)tmp, 0, (unsigned int *)dest); \
    vec_ste ((vector unsigned int)tmp, 4, (unsigned int *)dest);

    ADD (dest, src0, perm0)      dest += stride;
    ADD (dest, src1, perm1)      dest += stride;
    ADD (dest, src2, perm0)      dest += stride;
    ADD (dest, src3, perm1)
}
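/* The put and avg variants of the no-rounding VC-1 chroma MC function are
   generated by including the shared H.264 chroma template twice: once with
   the plain store operation and once with the vec_avg-based averaging op. */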
#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)

#define OP_U8_ALTIVEC                          PUT_OP_U8_ALTIVEC
#define PREFIX_no_rnd_vc1_chroma_mc8_altivec   put_no_rnd_vc1_chroma_mc8_altivec
#include "h264chroma_template.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec

#define OP_U8_ALTIVEC                          AVG_OP_U8_ALTIVEC
#define PREFIX_no_rnd_vc1_chroma_mc8_altivec   avg_no_rnd_vc1_chroma_mc8_altivec
#include "h264chroma_template.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec
av_cold void ff_vc1dsp_init_ppc(VC1DSPContext *dsp)
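{
    /* Reconstruction sketch: only the signature of this init function was
     * preserved in the listing. The body below assumes the usual pattern of
     * gating on AltiVec via PPC_ALTIVEC(av_get_cpu_flags()) and installing
     * the functions defined above into the VC1DSPContext; the chroma table
     * index 0 is an assumption, not taken from the original source. */
    if (!PPC_ALTIVEC(av_get_cpu_flags()))
        return;

    dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_altivec;
    dsp->vc1_inv_trans_8x4 = vc1_inv_trans_8x4_altivec;
    dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = put_no_rnd_vc1_chroma_mc8_altivec;
    dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = avg_no_rnd_vc1_chroma_mc8_altivec;
}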