#define RV40_LOWPASS(OPNAME, OP) \
static void OPNAME ## rv40_qpel8_h_lowpass(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride,\
                                           const int h, const int C1, const int C2, const int SHIFT){\
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;\
    int i;\
    for(i = 0; i < h; i++)\
    {\
        OP(dst[0], (src[-2] + src[ 3] - 5*(src[-1]+src[2]) + src[0]*C1 + src[1]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[1], (src[-1] + src[ 4] - 5*(src[ 0]+src[3]) + src[1]*C1 + src[2]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[2], (src[ 0] + src[ 5] - 5*(src[ 1]+src[4]) + src[2]*C1 + src[3]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[3], (src[ 1] + src[ 6] - 5*(src[ 2]+src[5]) + src[3]*C1 + src[4]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[4], (src[ 2] + src[ 7] - 5*(src[ 3]+src[6]) + src[4]*C1 + src[5]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[5], (src[ 3] + src[ 8] - 5*(src[ 4]+src[7]) + src[5]*C1 + src[6]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[6], (src[ 4] + src[ 9] - 5*(src[ 5]+src[8]) + src[6]*C1 + src[7]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[7], (src[ 5] + src[10] - 5*(src[ 6]+src[9]) + src[7]*C1 + src[8]*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        dst += dstStride;\
        src += srcStride;\
    }\
}\
\
static void OPNAME ## rv40_qpel8_v_lowpass(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride,\
                                           const int w, const int C1, const int C2, const int SHIFT){\
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;\
    int i;\
    for(i = 0; i < w; i++)\
    {\
        const int srcB  = src[-2*srcStride];\
        const int srcA  = src[-1*srcStride];\
        const int src0  = src[0 *srcStride];\
        const int src1  = src[1 *srcStride];\
        const int src2  = src[2 *srcStride];\
        const int src3  = src[3 *srcStride];\
        const int src4  = src[4 *srcStride];\
        const int src5  = src[5 *srcStride];\
        const int src6  = src[6 *srcStride];\
        const int src7  = src[7 *srcStride];\
        const int src8  = src[8 *srcStride];\
        const int src9  = src[9 *srcStride];\
        const int src10 = src[10*srcStride];\
        OP(dst[0*dstStride], (srcB + src3  - 5*(srcA+src2) + src0*C1 + src1*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[1*dstStride], (srcA + src4  - 5*(src0+src3) + src1*C1 + src2*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[2*dstStride], (src0 + src5  - 5*(src1+src4) + src2*C1 + src3*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[3*dstStride], (src1 + src6  - 5*(src2+src5) + src3*C1 + src4*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[4*dstStride], (src2 + src7  - 5*(src3+src6) + src4*C1 + src5*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[5*dstStride], (src3 + src8  - 5*(src4+src7) + src5*C1 + src6*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[6*dstStride], (src4 + src9  - 5*(src5+src8) + src6*C1 + src7*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        OP(dst[7*dstStride], (src5 + src10 - 5*(src6+src9) + src7*C1 + src8*C2 + (1<<(SHIFT-1))) >> SHIFT);\
        dst++;\
        src++;\
    }\
}\
\
static void OPNAME ## rv40_qpel16_v_lowpass(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride,\
                                            const int w, const int C1, const int C2, const int SHIFT){\
    OPNAME ## rv40_qpel8_v_lowpass(dst  , src  , dstStride, srcStride, 8, C1, C2, SHIFT);\
    OPNAME ## rv40_qpel8_v_lowpass(dst+8, src+8, dstStride, srcStride, 8, C1, C2, SHIFT);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## rv40_qpel8_v_lowpass(dst  , src  , dstStride, srcStride, w-8, C1, C2, SHIFT);\
    OPNAME ## rv40_qpel8_v_lowpass(dst+8, src+8, dstStride, srcStride, w-8, C1, C2, SHIFT);\
}\
\
static void OPNAME ## rv40_qpel16_h_lowpass(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride,\
                                            const int h, const int C1, const int C2, const int SHIFT){\
    OPNAME ## rv40_qpel8_h_lowpass(dst  , src  , dstStride, srcStride, 8, C1, C2, SHIFT);\
    OPNAME ## rv40_qpel8_h_lowpass(dst+8, src+8, dstStride, srcStride, 8, C1, C2, SHIFT);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## rv40_qpel8_h_lowpass(dst  , src  , dstStride, srcStride, h-8, C1, C2, SHIFT);\
    OPNAME ## rv40_qpel8_h_lowpass(dst+8, src+8, dstStride, srcStride, h-8, C1, C2, SHIFT);\
}\

#define RV40_MC(OPNAME, SIZE) \
static void OPNAME ## rv40_qpel ## SIZE ## _mc10_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## rv40_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc30_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## rv40_qpel ## SIZE ## _h_lowpass(dst, src, stride, stride, SIZE, 20, 52, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc01_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, src, stride, stride, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc11_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 52, 20, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc21_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 20, 5);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc31_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 52, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 52, 20, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc12_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 52, 20, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 20, 5);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc22_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 20, 5);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 20, 5);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc32_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 52, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 20, 5);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc03_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, src, stride, stride, SIZE, 20, 52, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc13_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 52, 20, 6);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 52, 6);\
}\
\
static void OPNAME ## rv40_qpel ## SIZE ## _mc23_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)\
{\
    uint8_t full[SIZE*(SIZE+5)];\
    uint8_t * const full_mid = full + SIZE*2;\
    put_rv40_qpel ## SIZE ## _h_lowpass(full, src - 2*stride, SIZE, stride, SIZE+5, 20, 20, 5);\
    OPNAME ## rv40_qpel ## SIZE ## _v_lowpass(dst, full_mid, stride, SIZE, SIZE, 20, 52, 6);\
}\

#define op_avg(a, b) a = (((a)+cm[b]+1)>>1)
#define op_put(a, b) a = cm[b]

RV40_LOWPASS(put_, op_put)
RV40_LOWPASS(avg_, op_avg)

#undef op_avg
#undef op_put

RV40_MC(put_, 8)
RV40_MC(put_, 16)
RV40_MC(avg_, 8)
RV40_MC(avg_, 16)

#define PIXOP2(OPNAME, OP) \
static inline void OPNAME ## _pixels8_xy2_8_c(uint8_t *block,           \
                                              const uint8_t *pixels,    \
                                              ptrdiff_t line_size,      \
                                              int h)                    \
{                                                                       \
    int j;                                                              \
                                                                        \
    for (j = 0; j < 2; j++) {                                           \
        int i;                                                          \
        const uint32_t a = AV_RN32(pixels);                             \
        const uint32_t b = AV_RN32(pixels + 1);                         \
        uint32_t l0 = (a & 0x03030303UL) +                              \
                      (b & 0x03030303UL) +                              \
                           0x02020202UL;                                \
        uint32_t h0 = ((a & 0xFCFCFCFCUL) >> 2) +                       \
                      ((b & 0xFCFCFCFCUL) >> 2);                        \
        uint32_t l1, h1;                                                \
                                                                        \
        pixels += line_size;                                            \
        for (i = 0; i < h; i += 2) {                                    \
            uint32_t a = AV_RN32(pixels);                               \
            uint32_t b = AV_RN32(pixels + 1);                           \
            l1 = (a & 0x03030303UL) +                                   \
                 (b & 0x03030303UL);                                    \
            h1 = ((a & 0xFCFCFCFCUL) >> 2) +                            \
                 ((b & 0xFCFCFCFCUL) >> 2);                             \
            OP(*((uint32_t *) block),                                   \
               h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL));            \
            pixels += line_size;                                        \
            block  += line_size;                                        \
            a = AV_RN32(pixels);                                        \
            b = AV_RN32(pixels + 1);                                    \
            l0 = (a & 0x03030303UL) +                                   \
                 (b & 0x03030303UL) +                                   \
                      0x02020202UL;                                     \
            h0 = ((a & 0xFCFCFCFCUL) >> 2) +                            \
                 ((b & 0xFCFCFCFCUL) >> 2);                             \
            OP(*((uint32_t *) block),                                   \
               h0 + h1 + (((l0 + l1) >> 2) & 0x0F0F0F0FUL));            \
            pixels += line_size;                                        \
            block  += line_size;                                        \
        }                                                               \
        pixels += 4 - line_size * (h + 1);                              \
        block  += 4 - line_size * h;                                    \
    }                                                                   \
}                                                                       \
                                                                        \
CALL_2X_PIXELS(OPNAME ## _pixels16_xy2_8_c,                             \
               OPNAME ## _pixels8_xy2_8_c,                              \
               8)                                                      \

#define op_avg(a, b) a = rnd_avg32(a, b)
#define op_put(a, b) a = b
PIXOP2(avg, op_avg)
PIXOP2(put, op_put)
#undef op_avg
#undef op_put

static void put_rv40_qpel16_mc33_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
{
    put_pixels16_xy2_8_c(dst, src, stride, 16);
}
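/*
 * Note on the (3/4, 3/4) case handled by the mc33 wrappers here and below:
 * instead of chaining the 6-tap filters, they fall back to pixels*_xy2, a
 * 2x2 bilinear average computed four pixels at a time inside one 32-bit
 * word.  Each byte splits into its high six bits (p >> 2) and low two bits
 * (p & 3), so four bytes can be summed without cross-byte carries; the
 * per-byte result is hi-sum + ((lo-sum + 2) >> 2), which equals
 * (A + B + C + D + 2) >> 2 exactly.  Worked check with made-up pixel values
 * 1, 2, 3, 4: high parts 0+0+0+1 = 1, low parts 1+2+3+0 plus the rounding
 * term 2 give 8, and 8 >> 2 = 2, so the total is 3 = (10 + 2) / 4.
 */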
static void avg_rv40_qpel16_mc33_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
{
    avg_pixels16_xy2_8_c(dst, src, stride, 16);
}
static void put_rv40_qpel8_mc33_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
{
    put_pixels8_xy2_8_c(dst, src, stride, 8);
}
static void avg_rv40_qpel8_mc33_c(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
{
    avg_pixels8_xy2_8_c(dst, src, stride, 8);
}
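/*
 * Summary of the quarter-pel kernels generated above: the _mcXY_c suffix is
 * the fractional position (X horizontal, Y vertical, in quarter pels), and
 * (C1, C2, SHIFT) select the matching 6-tap filter: (52, 20, 6) for 1/4,
 * (20, 20, 5) for 1/2 and (20, 52, 6) for 3/4.  In each case the taps
 * 1 - 5 + C1 + C2 - 5 + 1 sum to 1 << SHIFT, so every filter has unity DC
 * gain.  As an illustration (hypothetical expansion, not code that appears
 * in this file), RV40_MC(put_, 8) generates, among others:
 *
 *     static void put_rv40_qpel8_mc10_c(uint8_t *dst, const uint8_t *src,
 *                                       ptrdiff_t stride)
 *     {
 *         put_rv40_qpel8_h_lowpass(dst, src, stride, stride, 8, 52, 20, 6);
 *     }
 *
 * The two-pass positions first run the horizontal filter into full[], where
 * full_mid skips the two rows of top margin required by the vertical taps.
 */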
#define RV40_CHROMA_MC(OPNAME, OP)\
static void OPNAME ## rv40_chroma_mc4_c(uint8_t *dst,\
                                        const uint8_t *src,\
                                        ptrdiff_t stride, int h, int x, int y)\
{\
    const int A = (8-x) * (8-y);\
    const int B = (  x) * (8-y);\
    const int C = (8-x) * (  y);\
    const int D = (  x) * (  y);\
    int i;\
    int bias = rv40_bias[y>>1][x>>1];\
\
    av_assert2(x<8 && y<8 && x>=0 && y>=0);\
\
    if(D){\
        for(i = 0; i < h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + bias));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + bias));\
            OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + bias));\
            OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + bias));\
            dst += stride;\
            src += stride;\
        }\
    }else{\
        const int E = B + C;\
        const ptrdiff_t step = C ? stride : 1;\
        for(i = 0; i < h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0] + bias));\
            OP(dst[1], (A*src[1] + E*src[step+1] + bias));\
            OP(dst[2], (A*src[2] + E*src[step+2] + bias));\
            OP(dst[3], (A*src[3] + E*src[step+3] + bias));\
            dst += stride;\
            src += stride;\
        }\
    }\
}\
\
static void OPNAME ## rv40_chroma_mc8_c(uint8_t *dst,\
                                        const uint8_t *src,\
                                        ptrdiff_t stride, int h, int x, int y)\
{\
    const int A = (8-x) * (8-y);\
    const int B = (  x) * (8-y);\
    const int C = (8-x) * (  y);\
    const int D = (  x) * (  y);\
    int i;\
    int bias = rv40_bias[y>>1][x>>1];\
\
    av_assert2(x<8 && y<8 && x>=0 && y>=0);\
\
    if(D){\
        for(i = 0; i < h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1] + bias));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + bias));\
            OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + bias));\
            OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + bias));\
            OP(dst[4], (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + bias));\
            OP(dst[5], (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + bias));\
            OP(dst[6], (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + bias));\
            OP(dst[7], (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + bias));\
            dst += stride;\
            src += stride;\
        }\
    }else{\
        const int E = B + C;\
        const ptrdiff_t step = C ? stride : 1;\
        for(i = 0; i < h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0] + bias));\
            OP(dst[1], (A*src[1] + E*src[step+1] + bias));\
            OP(dst[2], (A*src[2] + E*src[step+2] + bias));\
            OP(dst[3], (A*src[3] + E*src[step+3] + bias));\
            OP(dst[4], (A*src[4] + E*src[step+4] + bias));\
            OP(dst[5], (A*src[5] + E*src[step+5] + bias));\
            OP(dst[6], (A*src[6] + E*src[step+6] + bias));\
            OP(dst[7], (A*src[7] + E*src[step+7] + bias));\
            dst += stride;\
            src += stride;\
        }\
    }\
}

#define op_avg(a, b) a = (((a)+((b)>>6)+1)>>1)
#define op_put(a, b) a = ((b)>>6)

RV40_CHROMA_MC(put_, op_put)
RV40_CHROMA_MC(avg_, op_avg)

#define RV40_WEIGHT_FUNC(size) \
static void rv40_weight_func_rnd_ ## size (uint8_t *dst, uint8_t *src1, uint8_t *src2, int w1, int w2, ptrdiff_t stride)\
{\
    int i, j;\
\
    for (j = 0; j < size; j++) {\
        for (i = 0; i < size; i++)\
            dst[i] = (((w2 * src1[i]) >> 9) + ((w1 * src2[i]) >> 9) + 0x10) >> 5;\
        src1 += stride;\
        src2 += stride;\
        dst  += stride;\
    }\
}\
static void rv40_weight_func_nornd_ ## size (uint8_t *dst, uint8_t *src1, uint8_t *src2, int w1, int w2, ptrdiff_t stride)\
{\
    int i, j;\
\
    for (j = 0; j < size; j++) {\
        for (i = 0; i < size; i++)\
            dst[i] = (w2 * src1[i] + w1 * src2[i] + 0x10) >> 5;\
        src1 += stride;\
        src2 += stride;\
        dst  += stride;\
    }\
}

RV40_WEIGHT_FUNC(16)
RV40_WEIGHT_FUNC(8)

/**
 * dither values for deblocking filter - left/top values
 */
static const uint8_t rv40_dither_l[16] = {
    0x40, 0x50, 0x20, 0x60, 0x30, 0x50, 0x40, 0x30,
    0x50, 0x40, 0x50, 0x30, 0x60, 0x20, 0x50, 0x40
};

/**
 * dither values for deblocking filter - right/bottom values
 */
static const uint8_t rv40_dither_r[16] = {
    0x40, 0x30, 0x60, 0x20, 0x50, 0x30, 0x30, 0x40,
    0x40, 0x40, 0x50, 0x30, 0x20, 0x60, 0x30, 0x40
};
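/*
 * The chroma weights form a standard eighth-pel bilinear kernel:
 * A + B + C + D = (8-x)(8-y) + x(8-y) + (8-x)y + xy = 64, and op_put shifts
 * the 64-weighted sum down by 6.  For example, x = 4, y = 0 gives
 * A = B = 32 and C = D = 0, so the one-dimensional path computes
 * (32*src[0] + 32*src[1] + bias) >> 6, a horizontal half-sample average.
 * For the biweight functions, a quick sanity check (made-up weights, not
 * values taken from this file): equal weights reduce to a plain average,
 * e.g. w1 = w2 = 16 in the nornd variant yields
 * (16*src1[i] + 16*src2[i] + 0x10) >> 5 == (src1[i] + src2[i] + 1) >> 1,
 * while the rnd variant pre-shifts each product by 9, so the same average
 * needs w1 = w2 = 8192, presumably to keep large prescaled weights within
 * integer range.
 */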
#define CLIP_SYMM(a, b) av_clip(a, -(b), b)
/**
 * weaker deblocking very similar to the one described in 4.4.2 of JVT-A003r1
 */
static av_always_inline void rv40_weak_loop_filter(uint8_t *src,
                                                   const int step,
                                                   const ptrdiff_t stride,
                                                   const int filter_p1,
                                                   const int filter_q1,
                                                   const int alpha,
                                                   const int beta,
                                                   const int lim_p0q0,
                                                   const int lim_q1,
                                                   const int lim_p1)
{
    const uint8_t *cm = ff_crop_tab + MAX_NEG_CROP;
    int i, t, u, diff;

    for (i = 0; i < 4; i++, src += stride) {
        int diff_p1p0 = src[-2*step] - src[-1*step];
        int diff_q1q0 = src[ 1*step] - src[ 0*step];
        int diff_p1p2 = src[-2*step] - src[-3*step];
        int diff_q1q2 = src[ 1*step] - src[ 2*step];

        t = src[0*step] - src[-1*step];
        if (!t)
            continue;

        u = (alpha * FFABS(t)) >> 7;
        if (u > 3 - (filter_p1 && filter_q1))
            continue;

        t <<= 2;
        if (filter_p1 && filter_q1)
            t += src[-2*step] - src[1*step];

        diff = CLIP_SYMM((t + 4) >> 3, lim_p0q0);
        src[-1*step] = cm[src[-1*step] + diff];
        src[ 0*step] = cm[src[ 0*step] - diff];

        if (filter_p1 && FFABS(diff_p1p2) <= beta) {
            t = (diff_p1p0 + diff_p1p2 - diff) >> 1;
            src[-2*step] = cm[src[-2*step] - CLIP_SYMM(t, lim_p1)];
        }

        if (filter_q1 && FFABS(diff_q1q2) <= beta) {
            t = (diff_q1q0 + diff_q1q2 + diff) >> 1;
            src[ 1*step] = cm[src[ 1*step] - CLIP_SYMM(t, lim_q1)];
        }
    }
}
static void rv40_h_weak_loop_filter(uint8_t *src, const ptrdiff_t stride,
                                    const int filter_p1, const int filter_q1,
                                    const int alpha, const int beta,
                                    const int lim_p0q0, const int lim_q1,
                                    const int lim_p1)
{
    rv40_weak_loop_filter(src, stride, 1, filter_p1, filter_q1,
                          alpha, beta, lim_p0q0, lim_q1, lim_p1);
}

static void rv40_v_weak_loop_filter(uint8_t *src, const ptrdiff_t stride,
                                    const int filter_p1, const int filter_q1,
                                    const int alpha, const int beta,
                                    const int lim_p0q0, const int lim_q1,
                                    const int lim_p1)
{
    rv40_weak_loop_filter(src, 1, stride, filter_p1, filter_q1,
                          alpha, beta, lim_p0q0, lim_q1, lim_p1);
}
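/*
 * Worked example of the weak filter arithmetic (made-up sample values):
 * with p1 = 70, p0 = 70, q0 = 80, q1 = 80 and both side flags set,
 * t = 4*(q0 - p0) + (p1 - q1) = 40 - 10 = 30, so
 * diff = CLIP_SYMM((30 + 4) >> 3, lim_p0q0) = 4 (assuming lim_p0q0 >= 4),
 * and p0 becomes 74 while q0 becomes 76: the 10-level step across the edge
 * is softened to 2.  Filtering is skipped when the step t is zero or when
 * (alpha * |t|) >> 7 exceeds the threshold, i.e. for steps large enough to
 * be real edges rather than blocking artifacts.
 */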
static av_always_inline void rv40_strong_loop_filter(uint8_t *src,
                                                     const int step,
                                                     const ptrdiff_t stride,
                                                     const int alpha,
                                                     const int lims,
                                                     const int dmode,
                                                     const int chroma)
{
    int i;

    for(i = 0; i < 4; i++, src += stride){
        int sflag, p0, q0, p1, q1;
        int t = src[0*step] - src[-1*step];

        if (!t)
            continue;

        sflag = (alpha * FFABS(t)) >> 7;
        if (sflag > 1)
            continue;

        p0 = (25*src[-3*step] + 26*src[-2*step] + 26*src[-1*step] +
              26*src[ 0*step] + 25*src[ 1*step] +
              rv40_dither_l[dmode + i]) >> 7;

        q0 = (25*src[-2*step] + 26*src[-1*step] + 26*src[ 0*step] +
              26*src[ 1*step] + 25*src[ 2*step] +
              rv40_dither_r[dmode + i]) >> 7;

        if (sflag) {
            p0 = av_clip(p0, src[-1*step] - lims, src[-1*step] + lims);
            q0 = av_clip(q0, src[ 0*step] - lims, src[ 0*step] + lims);
        }

        p1 = (25*src[-4*step] + 26*src[-3*step] + 26*src[-2*step] + 26*p0 +
              25*src[ 0*step] + rv40_dither_l[dmode + i]) >> 7;
        q1 = (25*src[-1*step] + 26*q0 + 26*src[ 1*step] + 26*src[ 2*step] +
              25*src[ 3*step] + rv40_dither_r[dmode + i]) >> 7;

        if (sflag) {
            p1 = av_clip(p1, src[-2*step] - lims, src[-2*step] + lims);
            q1 = av_clip(q1, src[ 1*step] - lims, src[ 1*step] + lims);
        }

        src[-2*step] = p1;
        src[-1*step] = p0;
        src[ 0*step] = q0;
        src[ 1*step] = q1;

        if(!chroma){
            src[-3*step] = (25*src[-1*step] + 26*src[-2*step] +
                            51*src[-3*step] + 26*src[-4*step] + 64) >> 7;
            src[ 2*step] = (25*src[ 0*step] + 26*src[ 1*step] +
                            51*src[ 2*step] + 26*src[ 3*step] + 64) >> 7;
        }
    }
}
static void rv40_h_strong_loop_filter(uint8_t *src, const ptrdiff_t stride,
                                      const int alpha, const int lims,
                                      const int dmode, const int chroma)
{
    rv40_strong_loop_filter(src, stride, 1, alpha, lims, dmode, chroma);
}

static void rv40_v_strong_loop_filter(uint8_t *src, const ptrdiff_t stride,
                                      const int alpha, const int lims,
                                      const int dmode, const int chroma)
{
    rv40_strong_loop_filter(src, 1, stride, alpha, lims, dmode, chroma);
}
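/*
 * The strong filter's tap sets 25+26+26+26+25 and 25+26+51+26 both sum to
 * 128, so the >> 7 keeps unity gain, and the rv40_dither_l/r entries
 * (0x20..0x60, i.e. 32..96, centred on the usual rounding term 64) stand in
 * for a fixed rounding constant.  On a flat area of value v this is exact:
 * (128*v + d) >> 7 == v for any dither d in that range, so the dithering
 * only perturbs rounding decisions and never shifts flat regions.
 */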
static av_always_inline int rv40_loop_filter_strength(uint8_t *src,
                                                      int step, ptrdiff_t stride,
                                                      int beta, int beta2,
                                                      int edge,
                                                      int *p1, int *q1)
{
    int sum_p1p0 = 0, sum_q1q0 = 0, sum_p1p2 = 0, sum_q1q2 = 0;
    int strong0 = 0, strong1 = 0;
    uint8_t *ptr;
    int i;

    for (i = 0, ptr = src; i < 4; i++, ptr += stride) {
        sum_p1p0 += ptr[-2*step] - ptr[-1*step];
        sum_q1q0 += ptr[ 1*step] - ptr[ 0*step];
    }

    *p1 = FFABS(sum_p1p0) < (beta << 2);
    *q1 = FFABS(sum_q1q0) < (beta << 2);

    if (!*p1 && !*q1)
        return 0;

    if (!edge)
        return 0;

    for (i = 0, ptr = src; i < 4; i++, ptr += stride) {
        sum_p1p2 += ptr[-2*step] - ptr[-3*step];
        sum_q1q2 += ptr[ 1*step] - ptr[ 2*step];
    }

    strong0 = *p1 && (FFABS(sum_p1p2) < beta2);
    strong1 = *q1 && (FFABS(sum_q1q2) < beta2);

    return strong0 && strong1;
}
static int rv40_h_loop_filter_strength(uint8_t *src, ptrdiff_t stride,
                                       int beta, int beta2, int edge,
                                       int *p1, int *q1)
{
    return rv40_loop_filter_strength(src, stride, 1, beta, beta2, edge, p1, q1);
}

static int rv40_v_loop_filter_strength(uint8_t *src, ptrdiff_t stride,
                                       int beta, int beta2, int edge,
                                       int *p1, int *q1)
{
    return rv40_loop_filter_strength(src, 1, stride, beta, beta2, edge, p1, q1);
}
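/*
 * How these pieces fit together (hypothetical caller sketch; the actual
 * driver lives in the RV40 decoder, not in this file): the strength check
 * selects strong vs. weak filtering for a 4-pixel edge segment, and its
 * p1/q1 outputs tell the weak filter whether the second pixel row on each
 * side may be touched.  All parameter names below are illustrative.
 *
 *     int p1, q1;
 *     if (rv40_v_loop_filter_strength(src, stride, beta, beta2, edge,
 *                                     &p1, &q1))
 *         rv40_v_strong_loop_filter(src, stride, alpha, lims, dmode, chroma);
 *     else
 *         rv40_v_weak_loop_filter(src, stride, p1, q1, alpha, beta,
 *                                 lim_p0q0, lim_q1, lim_p1);
 */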