#define LOAD_TOP_RIGHT_EDGE\
    const unsigned av_unused t4 = topright[0];\
    const unsigned av_unused t5 = topright[1];\
    const unsigned av_unused t6 = topright[2];\
    const unsigned av_unused t7 = topright[3];\

#define LOAD_DOWN_LEFT_EDGE\
    const unsigned av_unused l4 = src[-1+4*stride];\
    const unsigned av_unused l5 = src[-1+5*stride];\
    const unsigned av_unused l6 = src[-1+6*stride];\
    const unsigned av_unused l7 = src[-1+7*stride];\

#define LOAD_LEFT_EDGE\
    const unsigned av_unused l0 = src[-1+0*stride];\
    const unsigned av_unused l1 = src[-1+1*stride];\
    const unsigned av_unused l2 = src[-1+2*stride];\
    const unsigned av_unused l3 = src[-1+3*stride];\

#define LOAD_TOP_EDGE\
    const unsigned av_unused t0 = src[ 0-1*stride];\
    const unsigned av_unused t1 = src[ 1-1*stride];\
    const unsigned av_unused t2 = src[ 2-1*stride];\
    const unsigned av_unused t3 = src[ 3-1*stride];\

    const int lt= src[-1-1*stride];
    src[0+3*stride]=(l3 + 2*l2 + l1 + 2)>>2;
    src[1+3*stride]=(l2 + 2*l1 + l0 + 2)>>2;
    src[2+3*stride]=(l1 + 2*l0 + lt + 2)>>2;
    src[3+3*stride]=(l0 + 2*lt + t0 + 2)>>2;
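/* The 4x4 diagonal predictors build their samples with the 3-tap rounding
 * filter (a + 2*b + c + 2) >> 2: a weighted average of three neighbouring
 * edge samples, with +2 added so the shift rounds to nearest.  For example,
 * (10 + 2*20 + 30 + 2) >> 2 = 82 >> 2 = 20. */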
    const pixel *topright = (const pixel*)_topright;
    const int lt= src[-1-1*stride];
    src[1+3*stride]=(l0 + 2*lt + t0 + 2)>>2;
    src[0+2*stride]=(lt + 2*l0 + l1 + 2)>>2;
    src[0+3*stride]=(l0 + 2*l1 + l2 + 2)>>2;
    const pixel *topright = (const pixel*)_topright;
    src[0+0*stride]=(l0 + l1 + 1)>>1;
    src[1+0*stride]=(l0 + 2*l1 + l2 + 2)>>2;
    src[0+1*stride]=(l1 + l2 + 1)>>1;
    src[1+1*stride]=(l1 + 2*l2 + l3 + 2)>>2;
    src[0+2*stride]=(l2 + l3 + 1)>>1;
    src[1+2*stride]=(l2 + 2*l3 + l3 + 2)>>2;
    const int lt= src[-1-1*stride];
    src[2+1*stride]=(lt + l0 + 1)>>1;
    src[3+1*stride]=(l0 + 2*lt + t0 + 2)>>2;
    src[2+2*stride]=(l0 + l1 + 1)>>1;
    src[3+2*stride]=(lt + 2*l0 + l1 + 2)>>2;
    src[2+3*stride]=(l1 + l2 + 1)>>1;
    src[3+3*stride]=(l0 + 2*l1 + l2 + 2)>>2;
    src[0+3*stride]=(l2 + l3 + 1)>>1;
    src[1+3*stride]=(l1 + 2*l2 + l3 + 2)>>2;
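/* The horizontal-up/-down variants alternate that 3-tap filter with the
 * 2-tap rounded average (a + b + 1) >> 1, which halves the sum of two
 * adjacent edge samples with rounding; e.g. (10 + 30 + 1) >> 1 = 20. */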
#define PREDICT_16x16_DC(v)\
    for(i=0; i<16; i++){\
        AV_WN4PA(src+ 0, v);\
        AV_WN4PA(src+ 4, v);\
        AV_WN4PA(src+ 8, v);\
        AV_WN4PA(src+12, v);\

#define PRED16x16_X(n, v) \
static void FUNCC(pred16x16_##n##_dc)(uint8_t *_src, ptrdiff_t stride)\
    pixel *src = (pixel*)_src;\
    stride >>= sizeof(pixel)-1;\
    PREDICT_16x16_DC(PIXEL_SPLAT_X4(v));\

static inline void FUNCC(pred16x16_plane_compat)(uint8_t *_src,
    int H = src0[1] - src0[-1];
    int V = src1[0] - src2[ 0];
    for(k=2; k<=8; ++k) {
        H += k*(src0[k] - src0[-k]);
        V += k*(src1[0] - src2[ 0]);
        H = ( 5*(H/4) ) / 16;
        V = ( 5*(V/4) ) / 16;
        H = ( H + (H>>2) ) >> 4;
        V = ( V + (V>>2) ) >> 4;
    a = 16*(src1[0] + src2[16] + 1) - 7*(V+H);
    for(j=16; j>0; --j) {
        for(i=-16; i<0; i+=4) {
            src[16+i] = CLIP((b    ) >> 5);
            src[17+i] = CLIP((b+  H) >> 5);
            src[18+i] = CLIP((b+2*H) >> 5);
            src[19+i] = CLIP((b+3*H) >> 5);
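/* Plane prediction: H and V accumulate distance-weighted gradients along the
 * top and left edges (the k loop adds k*(src0[k] - src0[-k]) on top of the
 * k = 1 difference), and every output row is an affine ramp of the form
 * CLIP((b + x*H) >> 5) seeded from the corner samples through a.  The two
 * scalings above are the compatibility variants of this function:
 * (5*(H/4))/16 for the SVQ3 path and (H + (H>>2)) >> 4 for the RV40 path. */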
    int stride = _stride>>(sizeof(pixel)-1);

    int stride = _stride>>(sizeof(pixel)-1);

    stride >>= sizeof(pixel)-1;

    stride >>= sizeof(pixel)-1;

#define PRED8x8_X(n, v)\
static void FUNCC(pred8x8_##n##_dc)(uint8_t *_src, ptrdiff_t stride)\
    const pixel4 a = PIXEL_SPLAT_X4(v);\
    pixel *src = (pixel*)_src;\
    stride >>= sizeof(pixel)-1;\
    AV_WN4PA(((pixel4*)(src+i*stride))+0, a);\
    AV_WN4PA(((pixel4*)(src+i*stride))+1, a);\

static void FUNCC(pred8x16_128_dc)(uint8_t *_src, ptrdiff_t stride)
    stride >>= sizeof(pixel)-1;
        dc2+= src[-1+(i+4)*stride];

    pixel4 dc0splat, dc1splat;
    stride >>= sizeof(pixel)-1;

    pixel4 dc0splat, dc1splat;
    stride >>= sizeof(pixel)-1;

    stride >>= sizeof(pixel)-1;
        dc2+= src[-1+(i+4)*stride];

    int dc0, dc1, dc2, dc3, dc4;
    pixel4 dc0splat, dc1splat, dc2splat, dc3splat, dc4splat, dc5splat, dc6splat, dc7splat;
    stride >>= sizeof(pixel)-1;
    dc0=dc1=dc2=dc3=dc4=0;
        dc2+= src[-1+(i+4)*stride];
        dc3+= src[-1+(i+8)*stride];
        dc4+= src[-1+(i+12)*stride];
    for(i=12; i<16; i++){
    int stride = _stride>>(sizeof(pixel)-1);
    const pixel * src1 = src +4*stride-1;
    int H = src0[1] - src0[-1];
    int V = src1[0] - src2[ 0];
    for(k=2; k<=4; ++k) {
        H += k*(src0[k] - src0[-k]);
        V += k*(src1[0] - src2[ 0]);
    H = ( 17*H+16 ) >> 5;
    V = ( 17*V+16 ) >> 5;
    a = 16*(src1[0] + src2[8]+1) - 3*(V+H);
        src[0] = CLIP((b    ) >> 5);
        src[1] = CLIP((b+  H) >> 5);
        src[2] = CLIP((b+2*H) >> 5);
        src[3] = CLIP((b+3*H) >> 5);
        src[4] = CLIP((b+4*H) >> 5);
        src[5] = CLIP((b+5*H) >> 5);
        src[6] = CLIP((b+6*H) >> 5);
        src[7] = CLIP((b+7*H) >> 5);
    int stride = _stride>>(sizeof(pixel)-1);
    const pixel * src1 = src +8*stride-1;
    int H = src0[1] - src0[-1];
    int V = src1[0] - src2[ 0];
    for (k = 2; k <= 4; ++k) {
        H += k*(src0[k] - src0[-k]);
        V += k*(src1[0] - src2[ 0]);
    for (; k <= 8; ++k) {
        V += k*(src1[0] - src2[0]);
    a = 16*(src1[0] + src2[8] + 1) - 7*V - 3*H;
    for(j=16; j>0; --j) {
        src[0] = CLIP((b    ) >> 5);
        src[1] = CLIP((b+  H) >> 5);
        src[2] = CLIP((b+2*H) >> 5);
        src[3] = CLIP((b+3*H) >> 5);
        src[4] = CLIP((b+4*H) >> 5);
        src[5] = CLIP((b+5*H) >> 5);
        src[6] = CLIP((b+6*H) >> 5);
        src[7] = CLIP((b+7*H) >> 5);
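/* The PREDICT_8x8_LOAD_* macros below prefilter the neighbouring edge
 * samples with the same 3-tap filter before the 8x8 luma predictors use
 * them.  Unavailable neighbours are replaced by the nearest available
 * sample: l0/t0 fall back to SRC(-1,0)/SRC(0,-1) when has_topleft is 0,
 * t7 reuses SRC(7,-1) when has_topright is 0, and without a top-right
 * block the whole t8..t15 row is filled with SRC(7,-1). */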
#define SRC(x,y) src[(x)+(y)*stride]

#define PL(y) \
    const int l##y = (SRC(-1,y-1) + 2*SRC(-1,y) + SRC(-1,y+1) + 2) >> 2;
#define PREDICT_8x8_LOAD_LEFT \
    const int l0 = ((has_topleft ? SRC(-1,-1) : SRC(-1,0)) \
                     + 2*SRC(-1,0) + SRC(-1,1) + 2) >> 2; \
    PL(1) PL(2) PL(3) PL(4) PL(5) PL(6) \
    const int l7 av_unused = (SRC(-1,6) + 3*SRC(-1,7) + 2) >> 2

#define PT(x) \
    const int t##x = (SRC(x-1,-1) + 2*SRC(x,-1) + SRC(x+1,-1) + 2) >> 2;
#define PREDICT_8x8_LOAD_TOP \
    const int t0 = ((has_topleft ? SRC(-1,-1) : SRC(0,-1)) \
                     + 2*SRC(0,-1) + SRC(1,-1) + 2) >> 2; \
    PT(1) PT(2) PT(3) PT(4) PT(5) PT(6) \
    const int t7 av_unused = ((has_topright ? SRC(8,-1) : SRC(7,-1)) \
                     + 2*SRC(7,-1) + SRC(6,-1) + 2) >> 2

#define PTR(x) \
    t##x = (SRC(x-1,-1) + 2*SRC(x,-1) + SRC(x+1,-1) + 2) >> 2;
#define PREDICT_8x8_LOAD_TOPRIGHT \
    int t8, t9, t10, t11, t12, t13, t14, t15; \
    if (has_topright) { \
        PTR(8) PTR(9) PTR(10) PTR(11) PTR(12) PTR(13) PTR(14) \
        t15 = (SRC(14,-1) + 3*SRC(15,-1) + 2) >> 2; \
    } else t8=t9=t10=t11=t12=t13=t14=t15= SRC(7,-1);

#define PREDICT_8x8_LOAD_TOPLEFT \
    const int lt = (SRC(-1,0) + 2*SRC(-1,-1) + SRC(0,-1) + 2) >> 2

#define PREDICT_8x8_DC(v) \
    for( y = 0; y < 8; y++ ) { \
        AV_WN4PA(((pixel4*)src)+0, v); \
        AV_WN4PA(((pixel4*)src)+1, v); \

                                int has_topright, ptrdiff_t _stride)
    int stride = _stride>>(sizeof(pixel)-1);
                                int has_topright, ptrdiff_t _stride)
    int stride = _stride>>(sizeof(pixel)-1);

                                int has_topright, ptrdiff_t _stride)
    int stride = _stride>>(sizeof(pixel)-1);

                                int has_topright, ptrdiff_t _stride)
    int stride = _stride>>(sizeof(pixel)-1);

                                int has_topright, ptrdiff_t _stride)
    int stride = _stride>>(sizeof(pixel)-1);

#define ROW(y) a = PIXEL_SPLAT_X4(l##y); \
               AV_WN4PA(src+y*stride, a); \
               AV_WN4PA(src+y*stride+4, a);

                                int has_topright, ptrdiff_t _stride)
    int stride = _stride>>(sizeof(pixel)-1);

    for( y = 1; y < 8; y++ ) {

                                int has_topright, ptrdiff_t _stride)
    int stride = _stride>>(sizeof(pixel)-1);
    SRC(5,7)=SRC(6,6)=SRC(7,5)= (t12 + 2*t13 + t14 + 2) >> 2;
    SRC(6,7)=SRC(7,6)= (t13 + 2*t14 + t15 + 2) >> 2;
    SRC(7,7)= (t14 + 3*t15 + 2) >> 2;
                                int has_topright, ptrdiff_t _stride)
    int stride = _stride>>(sizeof(pixel)-1);
    SRC(0,7)= (l7 + 2*l6 + l5 + 2) >> 2;
    SRC(0,6)=SRC(1,7)= (l6 + 2*l5 + l4 + 2) >> 2;
    SRC(0,5)=SRC(1,6)=SRC(2,7)= (l5 + 2*l4 + l3 + 2) >> 2;
    SRC(0,4)=SRC(1,5)=SRC(2,6)=SRC(3,7)= (l4 + 2*l3 + l2 + 2) >> 2;
    SRC(0,3)=SRC(1,4)=SRC(2,5)=SRC(3,6)=SRC(4,7)= (l3 + 2*l2 + l1 + 2) >> 2;
                                int has_topright, ptrdiff_t _stride)
    int stride = _stride>>(sizeof(pixel)-1);
    SRC(0,6)= (l5 + 2*l4 + l3 + 2) >> 2;
    SRC(0,7)= (l6 + 2*l5 + l4 + 2) >> 2;
    SRC(0,4)=SRC(1,6)= (l3 + 2*l2 + l1 + 2) >> 2;
    SRC(0,5)=SRC(1,7)= (l4 + 2*l3 + l2 + 2) >> 2;
    SRC(0,2)=SRC(1,4)=SRC(2,6)= (l1 + 2*l0 + lt + 2) >> 2;
    SRC(0,3)=SRC(1,5)=SRC(2,7)= (l2 + 2*l1 + l0 + 2) >> 2;
                                int has_topright, ptrdiff_t _stride)
    int stride = _stride>>(sizeof(pixel)-1);
    SRC(0,7)= (l6 + l7 + 1) >> 1;
    SRC(1,7)= (l5 + 2*l6 + l7 + 2) >> 2;
    SRC(0,6)=SRC(2,7)= (l5 + l6 + 1) >> 1;
    SRC(1,6)=SRC(3,7)= (l4 + 2*l5 + l6 + 2) >> 2;
    SRC(0,5)=SRC(2,6)=SRC(4,7)= (l4 + l5 + 1) >> 1;
    SRC(1,5)=SRC(3,6)=SRC(5,7)= (l3 + 2*l4 + l5 + 2) >> 2;
    SRC(0,4)=SRC(2,5)=SRC(4,6)=SRC(6,7)= (l3 + l4 + 1) >> 1;
    SRC(1,4)=SRC(3,5)=SRC(5,6)=SRC(7,7)= (l2 + 2*l3 + l4 + 2) >> 2;
    SRC(0,3)=SRC(2,4)=SRC(4,5)=SRC(6,6)= (l2 + l3 + 1) >> 1;
    SRC(1,3)=SRC(3,4)=SRC(5,5)=SRC(7,6)= (l1 + 2*l2 + l3 + 2) >> 2;
    SRC(0,2)=SRC(2,3)=SRC(4,4)=SRC(6,5)= (l1 + l2 + 1) >> 1;
    SRC(1,2)=SRC(3,3)=SRC(5,4)=SRC(7,5)= (l0 + 2*l1 + l2 + 2) >> 2;
    SRC(0,1)=SRC(2,2)=SRC(4,3)=SRC(6,4)= (l0 + l1 + 1) >> 1;
    SRC(1,1)=SRC(3,2)=SRC(5,3)=SRC(7,4)= (lt + 2*l0 + l1 + 2) >> 2;
    SRC(0,0)=SRC(2,1)=SRC(4,2)=SRC(6,3)= (lt + l0 + 1) >> 1;
                                int has_topright, ptrdiff_t _stride)
    int stride = _stride>>(sizeof(pixel)-1);

                                int has_topright, ptrdiff_t _stride)
    int stride = _stride>>(sizeof(pixel)-1);
    SRC(0,0)= (l0 + l1 + 1) >> 1;
    SRC(1,0)= (l0 + 2*l1 + l2 + 2) >> 2;
    SRC(0,1)=SRC(2,0)= (l1 + l2 + 1) >> 1;
    SRC(1,1)=SRC(3,0)= (l1 + 2*l2 + l3 + 2) >> 2;
    SRC(0,2)=SRC(2,1)=SRC(4,0)= (l2 + l3 + 1) >> 1;
    SRC(1,2)=SRC(3,1)=SRC(5,0)= (l2 + 2*l3 + l4 + 2) >> 2;
    SRC(0,3)=SRC(2,2)=SRC(4,1)=SRC(6,0)= (l3 + l4 + 1) >> 1;
    SRC(1,3)=SRC(3,2)=SRC(5,1)=SRC(7,0)= (l3 + 2*l4 + l5 + 2) >> 2;
    SRC(0,4)=SRC(2,3)=SRC(4,2)=SRC(6,1)= (l4 + l5 + 1) >> 1;
    SRC(1,4)=SRC(3,3)=SRC(5,2)=SRC(7,1)= (l4 + 2*l5 + l6 + 2) >> 2;
    SRC(0,5)=SRC(2,4)=SRC(4,3)=SRC(6,2)= (l5 + l6 + 1) >> 1;
    SRC(1,5)=SRC(3,4)=SRC(5,3)=SRC(7,2)= (l5 + 2*l6 + l7 + 2) >> 2;
    SRC(0,6)=SRC(2,5)=SRC(4,4)=SRC(6,3)= (l6 + l7 + 1) >> 1;
    SRC(1,6)=SRC(3,5)=SRC(5,4)=SRC(7,3)= (l6 + 3*l7 + 2) >> 2;
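/* As in the other diagonal 8x8 predictors, each filtered value is written to
 * several positions along a diagonal at once through chained assignments
 * (e.g. SRC(1,6)=SRC(3,5)=SRC(5,4)=SRC(7,3)= ...), so every filter
 * expression is evaluated only once per diagonal. */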
                                int has_topright, ptrdiff_t _stride)
    int stride = _stride>>(sizeof(pixel)-1);
        src[0*stride]= v += block[0];
        src[1*stride]= v += block[8];
        src[2*stride]= v += block[16];
        src[3*stride]= v += block[24];
        src[4*stride]= v += block[32];
        src[5*stride]= v += block[40];
        src[6*stride]= v += block[48];
        src[7*stride]= v + block[56];
    memset(_block, 0, sizeof(dctcoef) * 64);
                                int has_topright, ptrdiff_t _stride)
    int stride = _stride>>(sizeof(pixel)-1);
        src[0]= v += block[0];
        src[1]= v += block[1];
        src[2]= v += block[2];
        src[3]= v += block[3];
        src[4]= v += block[4];
        src[5]= v += block[5];
        src[6]= v += block[6];
        src[7]= v + block[7];
    memset(_block, 0, sizeof(dctcoef) * 64);
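/* These *_add helpers accumulate the decoded residual coefficients into the
 * predicted pixels (note the running v += block[...] sums) and finish by
 * clearing the coefficient block with memset, presumably so the caller can
 * reuse the block buffer without having to zero it again. */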
#undef PREDICT_8x8_LOAD_LEFT
#undef PREDICT_8x8_LOAD_TOP
#undef PREDICT_8x8_LOAD_TOPLEFT
#undef PREDICT_8x8_LOAD_TOPRIGHT
#undef PREDICT_8x8_DC

    stride >>= sizeof(pixel)-1;
        pix[1*stride]= v += block[0];
        pix[2*stride]= v += block[4];
        pix[3*stride]= v += block[8];
        pix[4*stride]= v + block[12];
    memset(_block, 0, sizeof(dctcoef) * 16);
    stride >>= sizeof(pixel)-1;
        pix[0]= v += block[0];
        pix[1]= v += block[1];
        pix[2]= v += block[2];
        pix[3]= v + block[3];
    memset(_block, 0, sizeof(dctcoef) * 16);
    stride >>= sizeof(pixel)-1;
        pix[1*stride]= v += block[0];
        pix[2*stride]= v += block[8];
        pix[3*stride]= v += block[16];
        pix[4*stride]= v += block[24];
        pix[5*stride]= v += block[32];
        pix[6*stride]= v += block[40];
        pix[7*stride]= v += block[48];
        pix[8*stride]= v + block[56];
    memset(_block, 0, sizeof(dctcoef) * 64);
    stride >>= sizeof(pixel)-1;
        pix[0]= v += block[0];
        pix[1]= v += block[1];
        pix[2]= v += block[2];
        pix[3]= v += block[3];
        pix[4]= v += block[4];
        pix[5]= v += block[5];
        pix[6]= v += block[6];
        pix[7]= v + block[7];
    memset(_block, 0, sizeof(dctcoef) * 64);
                                          const int *block_offset,

                                          const int *block_offset,
static void FUNCC(pred8x16_plane)(uint8_t *_src, ptrdiff_t _stride)
static void FUNCC(pred4x4_dc)(uint8_t *_src, const uint8_t *topright, ptrdiff_t _stride)
static void FUNCC(pred8x8l_dc)(uint8_t *_src, int has_topleft, int has_topright, ptrdiff_t _stride)
static void FUNCC(pred4x4_vertical_right)(uint8_t *_src, const uint8_t *topright, ptrdiff_t _stride)
static void FUNC(pred8x8_mad_cow_dc_l0t)(uint8_t *src, ptrdiff_t stride)
#define PREDICT_8x8_DC(v)
static void FUNCC(pred4x4_horizontal_add)(uint8_t *_pix, int16_t *_block, ptrdiff_t stride)
static void FUNC(pred8x8_mad_cow_dc_l00)(uint8_t *src, ptrdiff_t stride)
static void FUNCC(pred8x8l_down_left)(uint8_t *_src, int has_topleft, int has_topright, ptrdiff_t _stride)
static void FUNCC(pred8x8l_down_right)(uint8_t *_src, int has_topleft, int has_topright, ptrdiff_t _stride)
static void FUNCC(pred8x8_horizontal)(uint8_t *_src, ptrdiff_t stride)
static void FUNCC(pred8x8l_horizontal)(uint8_t *_src, int has_topleft, int has_topright, ptrdiff_t _stride)
static void FUNCC(pred8x8l_vertical_right)(uint8_t *_src, int has_topleft, int has_topright, ptrdiff_t _stride)
static void FUNCC(pred8x16_top_dc)(uint8_t *_src, ptrdiff_t stride)
static void FUNCC(pred8x8l_128_dc)(uint8_t *_src, int has_topleft, int has_topright, ptrdiff_t _stride)
static void FUNCC(pred4x4_down_left)(uint8_t *_src, const uint8_t *_topright, ptrdiff_t _stride)
static void FUNCC(pred4x4_129_dc)(uint8_t *_src, const uint8_t *topright, ptrdiff_t _stride)
static void FUNCC(pred16x16_vertical)(uint8_t *_src, ptrdiff_t _stride)
static void FUNCC(pred8x8l_horizontal_down)(uint8_t *_src, int has_topleft, int has_topright, ptrdiff_t _stride)
#define PRED16x16_X(n, v)
static void FUNCC(pred8x8l_vertical_add)(uint8_t *_pix, int16_t *_block, ptrdiff_t stride)
static void FUNCC(pred8x16_horizontal)(uint8_t *_src, ptrdiff_t stride)
static void FUNCC(pred4x4_vertical_add)(uint8_t *_pix, int16_t *_block, ptrdiff_t stride)
static void FUNC(pred8x16_mad_cow_dc_l0t)(uint8_t *src, ptrdiff_t stride)
static void FUNCC(pred8x16_left_dc)(uint8_t *_src, ptrdiff_t stride)
static void FUNCC(pred4x4_vertical)(uint8_t *_src, const uint8_t *topright, ptrdiff_t _stride)
static void FUNCC(pred16x16_dc)(uint8_t *_src, ptrdiff_t stride)
static void FUNCC(pred8x8l_vertical_left)(uint8_t *_src, int has_topleft, int has_topright, ptrdiff_t _stride)
static void FUNCC(pred4x4_128_dc)(uint8_t *_src, const uint8_t *topright, ptrdiff_t _stride)
#define PREDICT_8x8_LOAD_LEFT
static void FUNC(pred8x8_mad_cow_dc_0l0)(uint8_t *src, ptrdiff_t stride)
static void FUNCC(pred8x8l_horizontal_filter_add)(uint8_t *_src, int16_t *_block, int has_topleft, int has_topright, ptrdiff_t _stride)
static void FUNCC(pred4x4_127_dc)(uint8_t *_src, const uint8_t *topright, ptrdiff_t _stride)
static void FUNCC(pred4x4_vertical_left)(uint8_t *_src, const uint8_t *_topright, ptrdiff_t _stride)
static void FUNCC(pred8x8_plane)(uint8_t *_src, ptrdiff_t _stride)
#define PREDICT_8x8_LOAD_TOPLEFT
static void FUNCC(pred8x16_dc)(uint8_t *_src, ptrdiff_t stride)
static void FUNCC(pred8x8l_top_dc)(uint8_t *_src, int has_topleft, int has_topright, ptrdiff_t _stride)
static void FUNCC(pred8x8_vertical_add)(uint8_t *pix, const int *block_offset, int16_t *block, ptrdiff_t stride)
static void FUNCC(pred8x16_vertical)(uint8_t *_src, ptrdiff_t _stride)
static void FUNCC(pred8x8l_vertical)(uint8_t *_src, int has_topleft, int has_topright, ptrdiff_t _stride)
static void FUNCC(pred16x16_horizontal_add)(uint8_t *pix, const int *block_offset, int16_t *block, ptrdiff_t stride)
static void FUNC(pred8x8_mad_cow_dc_0lt)(uint8_t *src, ptrdiff_t stride)
static void FUNCC(pred16x16_plane)(uint8_t *src, ptrdiff_t stride)
static void FUNC(pred8x16_mad_cow_dc_0l0)(uint8_t *src, ptrdiff_t stride)
#define PREDICT_16x16_DC(v)
static void FUNCC(pred8x8_horizontal_add)(uint8_t *pix, const int *block_offset, int16_t *block, ptrdiff_t stride)
#define PREDICT_8x8_LOAD_TOPRIGHT
static void FUNCC(pred4x4_left_dc)(uint8_t *_src, const uint8_t *topright, ptrdiff_t _stride)
static void FUNCC(pred8x8l_vertical_filter_add)(uint8_t *_src, int16_t *_block, int has_topleft, int has_topright, ptrdiff_t _stride)
static void FUNCC(pred8x8_top_dc)(uint8_t *_src, ptrdiff_t stride)
static void FUNCC(pred4x4_horizontal)(uint8_t *_src, const uint8_t *topright, ptrdiff_t _stride)
static void FUNCC(pred8x16_horizontal_add)(uint8_t *pix, const int *block_offset, int16_t *block, ptrdiff_t stride)
static void FUNCC(pred8x16_vertical_add)(uint8_t *pix, const int *block_offset, int16_t *block, ptrdiff_t stride)
static void FUNC(pred8x16_mad_cow_dc_l00)(uint8_t *src, ptrdiff_t stride)
static void FUNCC(pred16x16_left_dc)(uint8_t *_src, ptrdiff_t stride)
static void FUNCC(pred4x4_horizontal_up)(uint8_t *_src, const uint8_t *topright, ptrdiff_t _stride)
static void FUNCC(pred8x8_dc)(uint8_t *_src, ptrdiff_t stride)
static void FUNCC(pred8x8l_horizontal_up)(uint8_t *_src, int has_topleft, int has_topright, ptrdiff_t _stride)
static void FUNCC(pred8x8_vertical)(uint8_t *_src, ptrdiff_t _stride)
static void FUNCC(pred16x16_top_dc)(uint8_t *_src, ptrdiff_t stride)
#define LOAD_TOP_RIGHT_EDGE
#define PREDICT_8x8_LOAD_TOP
static void FUNCC(pred16x16_horizontal)(uint8_t *_src, ptrdiff_t stride)
static void FUNCC(pred4x4_top_dc)(uint8_t *_src, const uint8_t *topright, ptrdiff_t _stride)
static void FUNCC(pred4x4_down_right)(uint8_t *_src, const uint8_t *topright, ptrdiff_t _stride)
static void FUNC(pred8x16_mad_cow_dc_0lt)(uint8_t *src, ptrdiff_t stride)
static void FUNCC(pred8x8l_left_dc)(uint8_t *_src, int has_topleft, int has_topright, ptrdiff_t _stride)
static void FUNCC(pred8x8l_horizontal_add)(uint8_t *_pix, int16_t *_block, ptrdiff_t stride)
#define PIXEL_SPLAT_X4(x)
static void FUNCC(pred4x4_horizontal_down)(uint8_t *_src, const uint8_t *topright, ptrdiff_t _stride)
static void FUNCC(pred16x16_vertical_add)(uint8_t *pix, const int *block_offset, int16_t *block, ptrdiff_t stride)