#undef PROFILE_THE_BEAST

typedef unsigned char ubyte;
typedef signed char sbyte;
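/*
 * Permutation vectors for the RGB interleaver below: together with
 * vec_merge3() they shuffle three 16-byte planes of R, G and B samples
 * into the byte-packed 3-bytes-per-pixel layout stored by the 24 bpp
 * writers.
 */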
static const vector unsigned char
    perm_rgb_0 = { 0x00, 0x01, 0x10, 0x02, 0x03, 0x11, 0x04, 0x05,
                   0x12, 0x06, 0x07, 0x13, 0x08, 0x09, 0x14, 0x0a },
    perm_rgb_1 = { 0x0b, 0x15, 0x0c, 0x0d, 0x16, 0x0e, 0x0f, 0x17,
                   0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
    perm_rgb_2 = { 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
                   0x00, 0x01, 0x18, 0x02, 0x03, 0x19, 0x04, 0x05 },
    perm_rgb_3 = { 0x1a, 0x06, 0x07, 0x1b, 0x08, 0x09, 0x1c, 0x0a,
                   0x0b, 0x1d, 0x0c, 0x0d, 0x1e, 0x0e, 0x0f, 0x1f };
#define vec_merge3(x2, x1, x0, y0, y1, y2) \
    do { \
        __typeof__(x0) o0, o2, o3; \
        o0 = vec_mergeh(x0, x1); \
        y0 = vec_perm(o0, x2, perm_rgb_0); \
        o2 = vec_perm(o0, x2, perm_rgb_1); \
        o3 = vec_mergel(x0, x1); \
        y1 = vec_perm(o3, o2, perm_rgb_2); \
        y2 = vec_perm(o3, o2, perm_rgb_3); \
    } while (0)

#define vec_mstbgr24(x0, x1, x2, ptr) \
    do { \
        __typeof__(x0) _0, _1, _2; \
        vec_merge3(x0, x1, x2, _0, _1, _2); \
        vec_st(_0, 0, ptr++); \
        vec_st(_1, 0, ptr++); \
        vec_st(_2, 0, ptr++); \
    } while (0)

#define vec_mstrgb24(x0, x1, x2, ptr) \
    do { \
        __typeof__(x0) _0, _1, _2; \
        vec_merge3(x2, x1, x0, _0, _1, _2); \
        vec_st(_0, 0, ptr++); \
        vec_st(_1, 0, ptr++); \
        vec_st(_2, 0, ptr++); \
    } while (0)

/* Store 16 pixels as four 16-byte vectors of x0/x1/x2/x3 interleaved
 * byte-wise (32 bpp), advancing ptr past the 64 bytes written. */
#define vec_mstrgb32(T, x0, x1, x2, x3, ptr) \
    do { \
        T _0, _1, _2, _3; \
        _0 = vec_mergeh(x0, x1); \
        _1 = vec_mergeh(x2, x3); \
        _2 = (T) vec_mergeh((vector unsigned short) _0, \
                            (vector unsigned short) _1); \
        _3 = (T) vec_mergel((vector unsigned short) _0, \
                            (vector unsigned short) _1); \
        vec_st(_2, 0 * 16, (T *) ptr); \
        vec_st(_3, 1 * 16, (T *) ptr); \
        _0 = vec_mergel(x0, x1); \
        _1 = vec_mergel(x2, x3); \
        _2 = (T) vec_mergeh((vector unsigned short) _0, \
                            (vector unsigned short) _1); \
        _3 = (T) vec_mergel((vector unsigned short) _0, \
                            (vector unsigned short) _1); \
        vec_st(_2, 2 * 16, (T *) ptr); \
        vec_st(_3, 3 * 16, (T *) ptr); \
        ptr += 4; \
    } while (0)

/* Zero-extend the first/last 8 bytes of a vector to 16-bit values,
 * reinterpreted as signed shorts. */
#if HAVE_BIGENDIAN
#define vec_unh(x) \
    (vector signed short) \
        vec_perm(x, (__typeof__(x)) { 0 }, \
                 ((vector unsigned char) { \
                     0x10, 0x00, 0x10, 0x01, 0x10, 0x02, 0x10, 0x03, \
                     0x10, 0x04, 0x10, 0x05, 0x10, 0x06, 0x10, 0x07 }))

#define vec_unl(x) \
    (vector signed short) \
        vec_perm(x, (__typeof__(x)) { 0 }, \
                 ((vector unsigned char) { \
                     0x10, 0x08, 0x10, 0x09, 0x10, 0x0A, 0x10, 0x0B, \
                     0x10, 0x0C, 0x10, 0x0D, 0x10, 0x0E, 0x10, 0x0F }))
#else
#define vec_unh(x) (vector signed short) vec_mergeh(x, (__typeof__(x)) { 0 })
#define vec_unl(x) (vector signed short) vec_mergel(x, (__typeof__(x)) { 0 })
#endif

/* Clip to the 16..235 video range. */
#define vec_clip_s16(x) \
    vec_max(vec_min(x, ((vector signed short) { \
                            235, 235, 235, 235, 235, 235, 235, 235 })), \
            ((vector signed short) { 16, 16, 16, 16, 16, 16, 16, 16 }))

/* Saturate two vectors of signed shorts to unsigned bytes and pack
 * them into a single vector. */
#define vec_packclp(x, y) \
    (vector unsigned char) \
        vec_packs((vector unsigned short) \
                      vec_max(x, ((vector signed short) { 0 })), \
                  (vector unsigned short) \
                      vec_max(y, ((vector signed short) { 0 })))
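/*
 * cvtyuvtoRGB() applies the per-context coefficients set up in
 * ff_yuv2rgb_init_tables_ppc(): Y is scaled by CY and offset by OY, U
 * and V are recentred around 128 (and pre-shifted left by CSHIFT for
 * the R and B terms), then
 *     R = Y + CRV * V,  G = Y + CGU * U + CGV * V,  B = Y + CBU * U
 * are evaluated with vec_mradds(), i.e. fixed-point multiplies that
 * round and keep the high 16 bits before adding the third operand.
 */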
static inline void cvtyuvtoRGB(SwsContext *c, vector signed short Y,
                               vector signed short U, vector signed short V,
                               vector signed short *R, vector signed short *G,
                               vector signed short *B)
{
    vector signed short vx, ux, uvx;

    Y = vec_mradds(Y, c->CY, c->OY);
    U = vec_sub(U, (vector signed short)
                       vec_splat((vector signed short) { 128 }, 0));
    V = vec_sub(V, (vector signed short)
                       vec_splat((vector signed short) { 128 }, 0));

    /* ux = (CBU * (u << CSHIFT) + 0x4000) >> 15 */
    ux = vec_sl(U, c->CSHIFT);
    *B = vec_mradds(ux, c->CBU, Y);

    /* vx = (CRV * (v << CSHIFT) + 0x4000) >> 15 */
    vx = vec_sl(V, c->CSHIFT);
    *R = vec_mradds(vx, c->CRV, Y);

    /* uvx = ((CGU * u) + (CGV * v)) >> 15 */
    uvx = vec_mradds(U, c->CGU, Y);
    *G = vec_mradds(V, c->CGV, uvx);
}
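/*
 * DEFCSP420_CVT instantiates one SwsFunc per packed output format.
 * Each generated function converts planar YUV 4:2:0 to packed RGB: the
 * outer loop walks two source/destination rows at a time (the two luma
 * rows share one row of chroma), the inner loop converts 16 pixels of
 * each of the two rows per iteration, and out_pixels() is one of the
 * out_* writers defined after the macro.
 */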
#define DEFCSP420_CVT(name, out_pixels) \
static int altivec_ ## name(SwsContext *c, const unsigned char **in, \
                            int *instrides, int srcSliceY, int srcSliceH, \
                            unsigned char **oplanes, int *outstrides) \
{ \
    int w = c->srcW; \
    int h = srcSliceH; \
    int i, j; \
    int instrides_scl[3]; \
    vector unsigned char y0, y1; \
 \
    vector signed char u, v; \
 \
    vector signed short Y0, Y1, Y2, Y3; \
    vector signed short U, V; \
    vector signed short vx, ux, uvx; \
    vector signed short vx0, ux0, uvx0; \
    vector signed short vx1, ux1, uvx1; \
    vector signed short R0, G0, B0; \
    vector signed short R1, G1, B1; \
    vector unsigned char R, G, B; \
 \
    const vector unsigned char *y1ivP, *y2ivP, *uivP, *vivP; \
    vector unsigned char align_perm; \
 \
    vector signed short lCY       = c->CY; \
    vector signed short lOY       = c->OY; \
    vector signed short lCRV      = c->CRV; \
    vector signed short lCBU      = c->CBU; \
    vector signed short lCGU      = c->CGU; \
    vector signed short lCGV      = c->CGV; \
    vector unsigned short lCSHIFT = c->CSHIFT; \
 \
    const ubyte *y1i = in[0]; \
    const ubyte *y2i = in[0] + instrides[0]; \
    const ubyte *ui  = in[1]; \
    const ubyte *vi  = in[2]; \
 \
    vector unsigned char *oute, *outo; \
 \
    /* loop moves y{1, 2}i by w */ \
    instrides_scl[0] = instrides[0] * 2 - w; \
    /* loop moves ui by w / 2 */ \
    instrides_scl[1] = instrides[1] - w / 2; \
    /* loop moves vi by w / 2 */ \
    instrides_scl[2] = instrides[2] - w / 2; \
 \
    for (i = 0; i < h / 2; i++) { \
        oute = (vector unsigned char *)(oplanes[0] + outstrides[0] * \
                                        (srcSliceY + i * 2)); \
        outo = oute + (outstrides[0] >> 4); \
        vec_dstst(outo, (0x02000002 | (((w * 3 + 32) / 32) << 16)), 0); \
        vec_dstst(oute, (0x02000002 | (((w * 3 + 32) / 32) << 16)), 1); \
 \
        for (j = 0; j < w / 16; j++) { \
            y1ivP = (const vector unsigned char *) y1i; \
            y2ivP = (const vector unsigned char *) y2i; \
            uivP  = (const vector unsigned char *) ui; \
            vivP  = (const vector unsigned char *) vi; \
 \
            align_perm = vec_lvsl(0, y1i); \
            y0 = (vector unsigned char) \
                     vec_perm(y1ivP[0], y1ivP[1], align_perm); \
 \
            align_perm = vec_lvsl(0, y2i); \
            y1 = (vector unsigned char) \
                     vec_perm(y2ivP[0], y2ivP[1], align_perm); \
 \
            align_perm = vec_lvsl(0, ui); \
            u = (vector signed char) \
                    vec_perm(uivP[0], uivP[1], align_perm); \
 \
            align_perm = vec_lvsl(0, vi); \
            v = (vector signed char) \
                    vec_perm(vivP[0], vivP[1], align_perm); \
 \
            u = (vector signed char) \
                    vec_sub(u, \
                            (vector signed char) \
                                vec_splat((vector signed char) { 128 }, 0)); \
            v = (vector signed char) \
                    vec_sub(v, \
                            (vector signed char) \
                                vec_splat((vector signed char) { 128 }, 0)); \
 \
            U = vec_unpackh(u); \
            V = vec_unpackh(v); \
 \
            Y0 = vec_unh(y0); \
            Y1 = vec_unl(y0); \
            Y2 = vec_unh(y1); \
            Y3 = vec_unl(y1); \
 \
            Y0 = vec_mradds(Y0, lCY, lOY); \
            Y1 = vec_mradds(Y1, lCY, lOY); \
            Y2 = vec_mradds(Y2, lCY, lOY); \
            Y3 = vec_mradds(Y3, lCY, lOY); \
 \
            /* ux = (CBU * (u << CSHIFT) + 0x4000) >> 15 */ \
            ux  = vec_sl(U, lCSHIFT); \
            ux  = vec_mradds(ux, lCBU, (vector signed short) { 0 }); \
            ux0 = vec_mergeh(ux, ux); \
            ux1 = vec_mergel(ux, ux); \
 \
            /* vx = (CRV * (v << CSHIFT) + 0x4000) >> 15 */ \
            vx  = vec_sl(V, lCSHIFT); \
            vx  = vec_mradds(vx, lCRV, (vector signed short) { 0 }); \
            vx0 = vec_mergeh(vx, vx); \
            vx1 = vec_mergel(vx, vx); \
 \
            /* uvx = ((CGU * u) + (CGV * v)) >> 15 */ \
            uvx  = vec_mradds(U, lCGU, (vector signed short) { 0 }); \
            uvx  = vec_mradds(V, lCGV, uvx); \
            uvx0 = vec_mergeh(uvx, uvx); \
            uvx1 = vec_mergel(uvx, uvx); \
 \
            R0 = vec_add(Y0, vx0); \
            G0 = vec_add(Y0, uvx0); \
            B0 = vec_add(Y0, ux0); \
            R1 = vec_add(Y1, vx1); \
            G1 = vec_add(Y1, uvx1); \
            B1 = vec_add(Y1, ux1); \
 \
            R = vec_packclp(R0, R1); \
            G = vec_packclp(G0, G1); \
            B = vec_packclp(B0, B1); \
 \
            /* first (even) output row */ \
            out_pixels(R, G, B, oute); \
 \
            R0 = vec_add(Y2, vx0); \
            G0 = vec_add(Y2, uvx0); \
            B0 = vec_add(Y2, ux0); \
            R1 = vec_add(Y3, vx1); \
            G1 = vec_add(Y3, uvx1); \
            B1 = vec_add(Y3, ux1); \
            R  = vec_packclp(R0, R1); \
            G  = vec_packclp(G0, G1); \
            B  = vec_packclp(B0, B1); \
 \
            /* second (odd) output row, same chroma */ \
            out_pixels(R, G, B, outo); \
 \
            y1i += 16; \
            y2i += 16; \
            ui  += 8; \
            vi  += 8; \
        } \
 \
        ui  += instrides_scl[1]; \
        vi  += instrides_scl[2]; \
        y1i += instrides_scl[0]; \
        y2i += instrides_scl[0]; \
    } \
 \
    return srcSliceH; \
}

#define out_abgr(a, b, c, ptr) \
    vec_mstrgb32(__typeof__(a), ((__typeof__(a)) { 255 }), c, b, a, ptr)
#define out_bgra(a, b, c, ptr) \
    vec_mstrgb32(__typeof__(a), c, b, a, ((__typeof__(a)) { 255 }), ptr)
#define out_rgba(a, b, c, ptr) \
    vec_mstrgb32(__typeof__(a), a, b, c, ((__typeof__(a)) { 255 }), ptr)
#define out_argb(a, b, c, ptr) \
    vec_mstrgb32(__typeof__(a), ((__typeof__(a)) { 255 }), a, b, c, ptr)
#define out_rgb24(a, b, c, ptr) vec_mstrgb24(a, b, c, ptr)
#define out_bgr24(a, b, c, ptr) vec_mstbgr24(a, b, c, ptr)

DEFCSP420_CVT(yuv2_abgr, out_abgr)
DEFCSP420_CVT(yuv2_bgra, out_bgra)
DEFCSP420_CVT(yuv2_rgba, out_rgba)
DEFCSP420_CVT(yuv2_argb, out_argb)
DEFCSP420_CVT(yuv2_rgb24, out_rgb24)
DEFCSP420_CVT(yuv2_bgr24, out_bgr24)
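/*
 * Permutation selectors for packed UYVY input: each selector pairs a
 * sample byte with a zero byte (index 0x10 reads from the all-zero
 * second vec_perm operand), widening it to 16 bits. demux_y picks the
 * eight luma bytes of a 16-byte group, while demux_u and demux_v
 * duplicate each of the four chroma samples for its two pixels.
 */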
static const vector unsigned char
    demux_u = { 0x10, 0x00, 0x10, 0x00,
                0x10, 0x04, 0x10, 0x04,
                0x10, 0x08, 0x10, 0x08,
                0x10, 0x0c, 0x10, 0x0c },
    demux_v = { 0x10, 0x02, 0x10, 0x02,
                0x10, 0x06, 0x10, 0x06,
                0x10, 0x0A, 0x10, 0x0A,
                0x10, 0x0E, 0x10, 0x0E },
    demux_y = { 0x10, 0x01, 0x10, 0x03,
                0x10, 0x05, 0x10, 0x07,
                0x10, 0x09, 0x10, 0x0B,
                0x10, 0x0D, 0x10, 0x0F };
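/*
 * UYVY422 -> 32 bpp: each inner iteration loads two 16-byte UYVY
 * vectors (8 pixels each), demultiplexes them with the selectors
 * above, converts through cvtyuvtoRGB() and stores 16 packed 32-bit
 * pixels via out_rgba().
 */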
static int altivec_uyvy_rgb32(SwsContext *c, const unsigned char **in,
                              int *instrides, int srcSliceY, int srcSliceH,
                              unsigned char **oplanes, int *outstrides)
{
    int w = c->srcW;
    int h = srcSliceH;
    int i, j;
    vector unsigned char uyvy;
    vector signed short Y, U, V;
    vector signed short R0, G0, B0, R1, G1, B1;
    vector unsigned char R, G, B;
    vector unsigned char *out;
    const ubyte *img;

    img = in[0];
    out = (vector unsigned char *) (oplanes[0] + srcSliceY * outstrides[0]);
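    /*
     * vec_ld ignores the low four address bits, and unlike the 4:2:0
     * path there is no vec_lvsl/vec_perm realignment here, so the UYVY
     * input is effectively assumed to be 16-byte aligned.
     */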
    for (i = 0; i < h; i++)
        for (j = 0; j < w / 16; j++) {
            uyvy = vec_ld(0, img);

            U = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_u);
            V = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_v);
            Y = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_y);

            cvtyuvtoRGB(c, Y, U, V, &R0, &G0, &B0);

            uyvy = vec_ld(16, img);

            U = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_u);
            V = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_v);
            Y = (vector signed short)
                    vec_perm(uyvy, (vector unsigned char) { 0 }, demux_y);

            cvtyuvtoRGB(c, Y, U, V, &R1, &G1, &B1);

            R = vec_packclp(R0, R1);
            G = vec_packclp(G0, G1);
            B = vec_packclp(B0, B1);

            out_rgba(R, G, B, out);

            img += 32;
        }
    return srcSliceH;
}
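/*
 * Routine selection: the AltiVec paths only handle source widths that
 * are a multiple of 16 (and, for the planar 4:2:0 inputs, heights that
 * are a multiple of 2). Anything else returns NULL so that the generic
 * C code is used instead.
 */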
av_cold SwsFunc ff_yuv2rgb_init_ppc(SwsContext *c)
{
    if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
        return NULL;

    if ((c->srcW & 0xf) != 0)
        return NULL;

    switch (c->srcFormat) {
    case AV_PIX_FMT_YUV410P:
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_NV12:
    case AV_PIX_FMT_NV21:
        if ((c->srcH & 0x1) != 0)
            return NULL;

        switch (c->dstFormat) {
        case AV_PIX_FMT_RGB24:
            return altivec_yuv2_rgb24;
        case AV_PIX_FMT_BGR24:
            return altivec_yuv2_bgr24;
        case AV_PIX_FMT_ARGB:
            return altivec_yuv2_argb;
        case AV_PIX_FMT_ABGR:
            return altivec_yuv2_abgr;
        case AV_PIX_FMT_RGBA:
            return altivec_yuv2_rgba;
        case AV_PIX_FMT_BGRA:
            return altivec_yuv2_bgra;
        default: return NULL;
        }
        break;

    case AV_PIX_FMT_UYVY422:
        switch (c->dstFormat) {
        case AV_PIX_FMT_RGB32:
            return altivec_uyvy_rgb32;
        default: return NULL;
        }
        break;
    }
    return NULL;
}
av_cold void ff_yuv2rgb_init_tables_ppc(SwsContext *c,
                                        const int inv_table[4],
                                        int brightness, int contrast,
                                        int saturation)
{
    union {
        DECLARE_ALIGNED(16, signed short, tmp)[8];
        vector signed short vec;
    } buf;

    buf.tmp[0] = ((0xffffLL) * contrast >> 8) >> 9;                              /* cy  */
    buf.tmp[1] = -256 * brightness;                                              /* oy  */
    buf.tmp[2] = (inv_table[0] >> 3) * (contrast >> 16) * (saturation >> 16);    /* crv */
    buf.tmp[3] = (inv_table[1] >> 3) * (contrast >> 16) * (saturation >> 16);    /* cbu */
    buf.tmp[4] = -((inv_table[2] >> 1) * (contrast >> 16) * (saturation >> 16)); /* cgu */
    buf.tmp[5] = -((inv_table[3] >> 1) * (contrast >> 16) * (saturation >> 16)); /* cgv */

    c->CSHIFT = (vector unsigned short) vec_splat_u16(2);
    c->CY     = vec_splat((vector signed short) buf.vec, 0);
    c->OY     = vec_splat((vector signed short) buf.vec, 1);
    c->CRV    = vec_splat((vector signed short) buf.vec, 2);
    c->CBU    = vec_splat((vector signed short) buf.vec, 3);
    c->CGU    = vec_splat((vector signed short) buf.vec, 4);
    c->CGV    = vec_splat((vector signed short) buf.vec, 5);
}
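/*
 * Vertical scaling + packed output: for each group of 16 output pixels
 * the luma and chroma filter taps are accumulated with vec_mradds()
 * against the per-line coefficient banks, shifted down by 4, clipped
 * to the 16..235 video range, converted with cvtyuvtoRGB() and written
 * in the requested packed format.
 */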
static void yuv2packedX_altivec(SwsContext *c,
                                const int16_t *lumFilter,
                                const int16_t **lumSrc,
                                int lumFilterSize,
                                const int16_t *chrFilter,
                                const int16_t **chrUSrc,
                                const int16_t **chrVSrc,
                                int chrFilterSize,
                                const int16_t **alpSrc,
                                uint8_t *dest,
                                int dstW, int dstY,
                                enum AVPixelFormat target)
{
    int i, j;
    vector signed short X, X0, X1, Y0, U0, V0, Y1, U1, V1, U, V;
    vector signed short R0, G0, B0, R1, G1, B1;

    vector unsigned char R, G, B;
    vector unsigned char *out, *nout;

    vector signed short RND   = vec_splat_s16(1 << 3);
    vector unsigned short SCL = vec_splat_u16(4);
    DECLARE_ALIGNED(16, unsigned int, scratch)[16];

    vector signed short *YCoeffs, *CCoeffs;

    YCoeffs = c->vYCoeffsBank + dstY * lumFilterSize;
    CCoeffs = c->vCCoeffsBank + dstY * chrFilterSize;

    out = (vector unsigned char *) dest;
    for (i = 0; i < dstW; i += 16) {
        Y0 = RND;
        Y1 = RND;
        /* extract 16 coeffs from lumSrc */
        for (j = 0; j < lumFilterSize; j++) {
            X0 = vec_ld(0, &lumSrc[j][i]);
            X1 = vec_ld(16, &lumSrc[j][i]);
            Y0 = vec_mradds(X0, YCoeffs[j], Y0);
            Y1 = vec_mradds(X1, YCoeffs[j], Y1);
        }

        U = RND;
        V = RND;
        /* extract 8 coeffs from U, V */
        for (j = 0; j < chrFilterSize; j++) {
            X = vec_ld(0, &chrUSrc[j][i / 2]);
            U = vec_mradds(X, CCoeffs[j], U);
            X = vec_ld(0, &chrVSrc[j][i / 2]);
            V = vec_mradds(X, CCoeffs[j], V);
        }

        /* scale and clip signals */
        Y0 = vec_sra(Y0, SCL);
        Y1 = vec_sra(Y1, SCL);
        U  = vec_sra(U, SCL);
        V  = vec_sra(V, SCL);

        Y0 = vec_clip_s16(Y0);
        Y1 = vec_clip_s16(Y1);
        U  = vec_clip_s16(U);
        V  = vec_clip_s16(V);

        /* duplicate each chroma sample for its two luma samples:
         * U0 = u0 u0 u1 u1 u2 u2 u3 u3   U1 = u4 u4 u5 u5 u6 u6 u7 u7 */
        U0 = vec_mergeh(U, U);
        V0 = vec_mergeh(V, V);

        U1 = vec_mergel(U, U);
        V1 = vec_mergel(V, V);

        cvtyuvtoRGB(c, Y0, U0, V0, &R0, &G0, &B0);
        cvtyuvtoRGB(c, Y1, U1, V1, &R1, &G1, &B1);

        R = vec_packclp(R0, R1);
        G = vec_packclp(G0, G1);
        B = vec_packclp(B0, B1);

        switch (target) {
        case AV_PIX_FMT_ABGR:
            out_abgr(R, G, B, out);
            break;
        case AV_PIX_FMT_BGRA:
            out_bgra(R, G, B, out);
            break;
        case AV_PIX_FMT_RGBA:
            out_rgba(R, G, B, out);
            break;
        case AV_PIX_FMT_ARGB:
            out_argb(R, G, B, out);
            break;
        case AV_PIX_FMT_RGB24:
            out_rgb24(R, G, B, out);
            break;
        case AV_PIX_FMT_BGR24:
            out_bgr24(R, G, B, out);
            break;
        default:
        {
            /* unsupported target format; should not be reached */
            static int printed_error_message;
            if (!printed_error_message) {
                av_log(c, AV_LOG_ERROR,
                       "altivec_yuv2packedX doesn't support %s output\n",
                       av_get_pix_fmt_name(c->dstFormat));
                printed_error_message = 1;
            }
            return;
        }
        }
    }
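    /*
     * Leftover pixels (dstW not a multiple of 16): step back to the
     * last full vector, redo its work into the aligned scratch buffer
     * and copy only the bytes that are still needed into dest.
     */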
    if (i < dstW) {
        i -= 16;

        Y0 = RND;
        Y1 = RND;
        /* extract 16 coeffs from lumSrc */
        for (j = 0; j < lumFilterSize; j++) {
            X0 = vec_ld(0, &lumSrc[j][i]);
            X1 = vec_ld(16, &lumSrc[j][i]);
            Y0 = vec_mradds(X0, YCoeffs[j], Y0);
            Y1 = vec_mradds(X1, YCoeffs[j], Y1);
        }

        U = RND;
        V = RND;
        /* extract 8 coeffs from U, V */
        for (j = 0; j < chrFilterSize; j++) {
            X = vec_ld(0, &chrUSrc[j][i / 2]);
            U = vec_mradds(X, CCoeffs[j], U);
            X = vec_ld(0, &chrVSrc[j][i / 2]);
            V = vec_mradds(X, CCoeffs[j], V);
        }

        /* scale and clip signals */
        Y0 = vec_sra(Y0, SCL);
        Y1 = vec_sra(Y1, SCL);
        U  = vec_sra(U, SCL);
        V  = vec_sra(V, SCL);

        Y0 = vec_clip_s16(Y0);
        Y1 = vec_clip_s16(Y1);
        U  = vec_clip_s16(U);
        V  = vec_clip_s16(V);

        U0 = vec_mergeh(U, U);
        V0 = vec_mergeh(V, V);

        U1 = vec_mergel(U, U);
        V1 = vec_mergel(V, V);

        cvtyuvtoRGB(c, Y0, U0, V0, &R0, &G0, &B0);
        cvtyuvtoRGB(c, Y1, U1, V1, &R1, &G1, &B1);

        R = vec_packclp(R0, R1);
        G = vec_packclp(G0, G1);
        B = vec_packclp(B0, B1);

        nout = (vector unsigned char *) scratch;
        switch (target) {
        case AV_PIX_FMT_ABGR:
            out_abgr(R, G, B, nout);
            break;
        case AV_PIX_FMT_BGRA:
            out_bgra(R, G, B, nout);
            break;
        case AV_PIX_FMT_RGBA:
            out_rgba(R, G, B, nout);
            break;
        case AV_PIX_FMT_ARGB:
            out_argb(R, G, B, nout);
            break;
        case AV_PIX_FMT_RGB24:
            out_rgb24(R, G, B, nout);
            break;
        case AV_PIX_FMT_BGR24:
            out_bgr24(R, G, B, nout);
            break;
        default:
            /* unsupported target format; should not be reached */
            av_log(c, AV_LOG_ERROR,
                   "altivec_yuv2packedX doesn't support %s output\n",
                   av_get_pix_fmt_name(c->dstFormat));
            return;
        }

        memcpy(&((uint32_t *) dest)[i], scratch, (dstW - i) / 4);
    }
}
#define YUV2PACKEDX_WRAPPER(suffix, pixfmt) \
void ff_yuv2 ## suffix ## _X_altivec(SwsContext *c, \
                                     const int16_t *lumFilter, \
                                     const int16_t **lumSrc, \
                                     int lumFilterSize, \
                                     const int16_t *chrFilter, \
                                     const int16_t **chrUSrc, \
                                     const int16_t **chrVSrc, \
                                     int chrFilterSize, \
                                     const int16_t **alpSrc, \
                                     uint8_t *dest, int dstW, int dstY) \
{ \
    yuv2packedX_altivec(c, lumFilter, lumSrc, lumFilterSize, \
                        chrFilter, chrUSrc, chrVSrc, \
                        chrFilterSize, alpSrc, \
                        dest, dstW, dstY, pixfmt); \
}