FFmpeg  4.0
Macros | Functions
postprocess_altivec_template.c File Reference
#include "libavutil/avutil.h"

Go to the source code of this file.

Macros

#define ALTIVEC_TRANSPOSE_8x8_SHORT(src_a, src_b, src_c, src_d, src_e, src_f, src_g, src_h)
 
#define LOAD_LINE(i)
 
#define LOAD_LINE_ALIGNED(i)
 
#define ITER(i, j)
 
#define LOAD_LINE(i)
 
#define LOAD_LINE_ALIGNED(i)
 
#define COMPUTE_VR(i, j, k)
 
#define PACK_AND_STORE(i)
 
#define PACK_AND_STORE_ALIGNED(i)
 
#define LOAD_LINE(i)
 
#define STORE(i)
 
#define LOAD_LINE(i)
 
#define EXTRACT(op)
 
#define COMPARE(i)
 
#define F_INIT()
 
#define F2(i, j, k, l)
 
#define ITER(i, j, k)
 
#define STORE_LINE(i)
 
#define doHorizLowPass_altivec(a...)   doHorizLowPass_C(a)
 
#define doHorizDefFilter_altivec(a...)   doHorizDefFilter_C(a)
 
#define do_a_deblock_altivec(a...)   do_a_deblock_C(a)
 
#define LOAD_LINE(src, i)
 
#define ACCUMULATE_DIFFS(i)
 
#define OP(i)   v_tempBlurredAss##i = vec_avg(v_tempBlurredAss##i, v_srcAss##i);
 
#define OP(i)   v_tempBlurredAss##i = v_srcAss##i;
 
#define OP(i)
 
#define OP(i)
 
#define PACK_AND_STORE(src, i)
 
#define LOAD_DOUBLE_LINE(i, j)
 
#define LOAD_DOUBLE_LINE(i, j)
 
#define STORE_DOUBLE_LINE(i, j)
 

Functions

static int vertClassify_altivec (uint8_t src[], int stride, PPContext *c)
 
static void doVertLowPass_altivec (uint8_t *src, int stride, PPContext *c)
 
static void doVertDefFilter_altivec (uint8_t src[], int stride, PPContext *c)
 
static void dering_altivec (uint8_t src[], int stride, PPContext *c)
 
static void tempNoiseReducer_altivec (uint8_t *src, int stride, uint8_t *tempBlurred, uint32_t *tempBlurredPast, int *maxNoise)
 
static void transpose_16x8_char_toPackedAlign_altivec (unsigned char *dst, unsigned char *src, int stride)
 
static void transpose_8x16_char_fromPackedAlign_altivec (unsigned char *dst, unsigned char *src, int stride)
 

Macro Definition Documentation

◆ ALTIVEC_TRANSPOSE_8x8_SHORT

#define ALTIVEC_TRANSPOSE_8x8_SHORT (   src_a,
  src_b,
  src_c,
  src_d,
  src_e,
  src_f,
  src_g,
  src_h 
)
Value:
do { \
__typeof__(src_a) tempA1, tempB1, tempC1, tempD1; \
__typeof__(src_a) tempE1, tempF1, tempG1, tempH1; \
__typeof__(src_a) tempA2, tempB2, tempC2, tempD2; \
__typeof__(src_a) tempE2, tempF2, tempG2, tempH2; \
tempA1 = vec_mergeh (src_a, src_e); \
tempB1 = vec_mergel (src_a, src_e); \
tempC1 = vec_mergeh (src_b, src_f); \
tempD1 = vec_mergel (src_b, src_f); \
tempE1 = vec_mergeh (src_c, src_g); \
tempF1 = vec_mergel (src_c, src_g); \
tempG1 = vec_mergeh (src_d, src_h); \
tempH1 = vec_mergel (src_d, src_h); \
tempA2 = vec_mergeh (tempA1, tempE1); \
tempB2 = vec_mergel (tempA1, tempE1); \
tempC2 = vec_mergeh (tempB1, tempF1); \
tempD2 = vec_mergel (tempB1, tempF1); \
tempE2 = vec_mergeh (tempC1, tempG1); \
tempF2 = vec_mergel (tempC1, tempG1); \
tempG2 = vec_mergeh (tempD1, tempH1); \
tempH2 = vec_mergel (tempD1, tempH1); \
src_a = vec_mergeh (tempA2, tempE2); \
src_b = vec_mergel (tempA2, tempE2); \
src_c = vec_mergeh (tempB2, tempF2); \
src_d = vec_mergel (tempB2, tempF2); \
src_e = vec_mergeh (tempC2, tempG2); \
src_f = vec_mergel (tempC2, tempG2); \
src_g = vec_mergeh (tempD2, tempH2); \
src_h = vec_mergel (tempD2, tempH2); \
} while (0)

Definition at line 25 of file postprocess_altivec_template.c.

◆ LOAD_LINE [1/5]

#define LOAD_LINE (   i)
Value:
{ \
vector unsigned char perm##i = vec_lvsl(j##i, src2); \
vector unsigned char v_srcA2##i; \
vector unsigned char v_srcA1##i = vec_ld(j##i, src2); \
if (two_vectors) \
v_srcA2##i = vec_ld(j##i + 16, src2); \
v_srcA##i = \
vec_perm(v_srcA1##i, v_srcA2##i, perm##i); \
v_srcAss##i = \
(vector signed short)vec_mergeh((vector signed char)zero, \
(vector signed char)v_srcA##i); }
#define zero
Definition: regdef.h:64
perm
Definition: f_perms.c:74

Referenced by dering_altivec(), doVertDefFilter_altivec(), doVertLowPass_altivec(), tempNoiseReducer_altivec(), and vertClassify_altivec().

◆ LOAD_LINE_ALIGNED [1/2]

#define LOAD_LINE_ALIGNED (   i)
Value:
v_srcA##i = vec_ld(j##i, src2); \
v_srcAss##i = \
(vector signed short)vec_mergeh((vector signed char)zero, \
(vector signed char)v_srcA##i)
#define zero
Definition: regdef.h:64

Referenced by doVertLowPass_altivec(), and vertClassify_altivec().

◆ ITER [1/2]

#define ITER (   i,
  j 
)
Value:
const vector signed short v_diff##i = \
vec_sub(v_srcAss##i, v_srcAss##j); \
const vector signed short v_sum##i = \
vec_add(v_diff##i, v_dcOffset); \
const vector signed short v_comp##i = \
(vector signed short)vec_cmplt((vector unsigned short)v_sum##i, \
v_dcThreshold); \
const vector signed short v_part##i = vec_and(mask, v_comp##i);
static const uint16_t mask[17]
Definition: lzw.c:38

Referenced by dering_altivec(), and vertClassify_altivec().

◆ LOAD_LINE [2/5]

#define LOAD_LINE (   i)
Value:
perml##i = vec_lvsl(i * stride, src2); \
vbA##i = vec_ld(i * stride, src2); \
vbB##i = vec_ld(i * stride + 16, src2); \
vbT##i = vec_perm(vbA##i, vbB##i, perml##i); \
vb##i = \
(vector signed short)vec_mergeh((vector unsigned char)zero, \
(vector unsigned char)vbT##i)
int stride
Definition: mace.c:144
#define zero
Definition: regdef.h:64

◆ LOAD_LINE_ALIGNED [2/2]

#define LOAD_LINE_ALIGNED (   i)
Value:
vbT##i = vec_ld(j##i, src2); \
vb##i = \
(vector signed short)vec_mergeh((vector signed char)zero, \
(vector signed char)vbT##i)
#define zero
Definition: regdef.h:64

◆ COMPUTE_VR

#define COMPUTE_VR (   i,
  j,
  k 
)
Value:
const vector signed short temps1##i = \
vec_add(v_sumsB##i, v_sumsB##k); \
const vector signed short temps2##i = \
vec_mladd(vb##j, (vector signed short)v_2, temps1##i); \
const vector signed short vr##j = vec_sra(temps2##i, v_4)

Referenced by doVertLowPass_altivec().

◆ PACK_AND_STORE [1/2]

#define PACK_AND_STORE (   i)
Value:
{ const vector unsigned char perms##i = \
vec_lvsr(i * stride, src2); \
const vector unsigned char vf##i = \
vec_packsu(vr##i, (vector signed short)zero); \
const vector unsigned char vg##i = \
vec_perm(vf##i, vbT##i, permHH); \
const vector unsigned char mask##i = \
vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
const vector unsigned char vg2##i = \
vec_perm(vg##i, vg##i, perms##i); \
const vector unsigned char svA##i = \
vec_sel(vbA##i, vg2##i, mask##i); \
const vector unsigned char svB##i = \
vec_sel(vg2##i, vbB##i, mask##i); \
vec_st(svA##i, i * stride, src2); \
vec_st(svB##i, i * stride + 16, src2);}
int stride
Definition: mace.c:144
static const uint16_t mask[17]
Definition: lzw.c:38
#define zero
Definition: regdef.h:64

Referenced by doVertLowPass_altivec(), and tempNoiseReducer_altivec().

◆ PACK_AND_STORE_ALIGNED

#define PACK_AND_STORE_ALIGNED (   i)
Value:
{ const vector unsigned char vf##i = \
vec_packsu(vr##i, (vector signed short)zero); \
const vector unsigned char vg##i = \
vec_perm(vf##i, vbT##i, permHH); \
vec_st(vg##i, i * stride, src2);}
int stride
Definition: mace.c:144
#define zero
Definition: regdef.h:64

Referenced by doVertLowPass_altivec().

◆ LOAD_LINE [3/5]

#define LOAD_LINE (   i)
Value:
const vector unsigned char perm##i = \
vec_lvsl(i * stride, src2); \
const vector unsigned char vbA##i = \
vec_ld(i * stride, src2); \
const vector unsigned char vbB##i = \
vec_ld(i * stride + 16, src2); \
const vector unsigned char vbT##i = \
vec_perm(vbA##i, vbB##i, perm##i); \
const vector signed short vb##i = \
(vector signed short)vec_mergeh((vector unsigned char)zero, \
(vector unsigned char)vbT##i)
int stride
Definition: mace.c:144
#define zero
Definition: regdef.h:64
perm
Definition: f_perms.c:74

◆ STORE

#define STORE (   i)
Value:
{ const vector unsigned char perms##i = \
vec_lvsr(i * stride, src2); \
const vector unsigned char vg##i = \
vec_perm(st##i, vbT##i, permHH); \
const vector unsigned char mask##i = \
vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
const vector unsigned char vg2##i = \
vec_perm(vg##i, vg##i, perms##i); \
const vector unsigned char svA##i = \
vec_sel(vbA##i, vg2##i, mask##i); \
const vector unsigned char svB##i = \
vec_sel(vg2##i, vbB##i, mask##i); \
vec_st(svA##i, i * stride, src2); \
vec_st(svB##i, i * stride + 16, src2);}
int stride
Definition: mace.c:144
static const uint16_t mask[17]
Definition: lzw.c:38
#define zero
Definition: regdef.h:64

Referenced by doVertDefFilter_altivec().

◆ LOAD_LINE [4/5]

#define LOAD_LINE (   i)
Value:
const vector unsigned char perm##i = \
vec_lvsl(i * stride, srcCopy); \
vector unsigned char sA##i = vec_ld(i * stride, srcCopy); \
vector unsigned char sB##i = vec_ld(i * stride + 16, srcCopy); \
vector unsigned char src##i = vec_perm(sA##i, sB##i, perm##i)
#define src
Definition: vp8dsp.c:254
int stride
Definition: mace.c:144
perm
Definition: f_perms.c:74

◆ EXTRACT

#define EXTRACT (   op)
Value:
do { \
const vector unsigned char s_1 = vec_##op(trunc_src12, trunc_src34); \
const vector unsigned char s_2 = vec_##op(trunc_src56, trunc_src78); \
const vector unsigned char s_6 = vec_##op(s_1, s_2); \
const vector unsigned char s_8h = vec_mergeh(s_6, s_6); \
const vector unsigned char s_8l = vec_mergel(s_6, s_6); \
const vector unsigned char s_9 = vec_##op(s_8h, s_8l); \
const vector unsigned char s_9h = vec_mergeh(s_9, s_9); \
const vector unsigned char s_9l = vec_mergel(s_9, s_9); \
const vector unsigned char s_10 = vec_##op(s_9h, s_9l); \
const vector unsigned char s_10h = vec_mergeh(s_10, s_10); \
const vector unsigned char s_10l = vec_mergel(s_10, s_10); \
const vector unsigned char s_11 = vec_##op(s_10h, s_10l); \
const vector unsigned char s_11h = vec_mergeh(s_11, s_11); \
const vector unsigned char s_11l = vec_mergel(s_11, s_11); \
v_##op = vec_##op(s_11h, s_11l); \
} while (0)
static int op(uint8_t **dst, const uint8_t *dst_end, GetByteContext *gb, int pixel, int count, int *x, int width, int linesize)
Perform decode operation.
Definition: anm.c:78

Referenced by dering_altivec().

◆ COMPARE

#define COMPARE (   i)
Value:
do { \
const vector unsigned char cmp = \
(vector unsigned char)vec_cmpgt(src##i, v_avg); \
const vector unsigned short cmpHi = \
(vector unsigned short)vec_mergeh(cmp, cmp); \
const vector unsigned short cmpLi = \
(vector unsigned short)vec_mergel(cmp, cmp); \
const vector signed short cmpHf = \
(vector signed short)vec_and(cmpHi, mask1); \
const vector signed short cmpLf = \
(vector signed short)vec_and(cmpLi, mask2); \
const vector signed int sump = vec_sum4s(cmpHf, zero); \
const vector signed int sumq = vec_sum4s(cmpLf, sump); \
sum##i = vec_sums(sumq, zero); \
} while (0)
#define src
Definition: vp8dsp.c:254
#define zero
Definition: regdef.h:64
static av_always_inline int cmp(MpegEncContext *s, const int x, const int y, const int subx, const int suby, const int size, const int h, int ref_index, int src_index, me_cmp_func cmp_func, me_cmp_func chroma_cmp_func, const int flags)
compares a block (either a full macroblock or a partition thereof) against a proposed motion-compensa...
Definition: motion_est.c:260

Referenced by dering_altivec().

◆ F_INIT

#define F_INIT ( )
Value:
vector unsigned char tenRightM = tenRight; \
vector unsigned char permA1M = permA1; \
vector unsigned char permA2M = permA2; \
vector unsigned char extractPermM = extractPerm

◆ F2

#define F2 (   i,
  j,
  k,
  l 
)
Value:
if (S[i] & (1 << (l+1))) { \
const vector unsigned char a_A = vec_perm(src##i, src##j, permA1M); \
const vector unsigned char a_B = vec_perm(a_A, src##k, permA2M); \
const vector signed int a_sump = \
(vector signed int)vec_msum(a_B, magic, (vector unsigned int)zero);\
vector signed int F = vec_sr(vec_sums(a_sump, vsint32_8), vuint32_4); \
const vector signed int p = \
(vector signed int)vec_perm(src##j, (vector unsigned char)zero, \
extractPermM); \
const vector signed int sum = vec_add(p, vQP2); \
const vector signed int diff = vec_sub(p, vQP2); \
vector signed int newpm; \
vector unsigned char newpm2, mask; \
F = vec_splat(F, 3); \
if (vec_all_lt(sum, F)) \
newpm = sum; \
else if (vec_all_gt(diff, F)) \
newpm = diff; \
else newpm = F; \
newpm2 = vec_splat((vector unsigned char)newpm, 15); \
mask = vec_add(identity, tenRightM); \
src##j = vec_perm(src##j, newpm2, mask); \
} \
permA1M = vec_add(permA1M, permA1inc); \
permA2M = vec_add(permA2M, permA2inc); \
tenRightM = vec_sro(tenRightM, eightLeft); \
extractPermM = vec_add(extractPermM, extractPermInc)
#define src
Definition: vp8dsp.c:254
static const uint16_t mask[17]
Definition: lzw.c:38
#define S(s, c, i)
#define zero
Definition: regdef.h:64
int
static av_always_inline int diff(const uint32_t a, const uint32_t b)
#define F(x)

◆ ITER [2/2]

#define ITER (   i,
  j,
  k 
)
Value:
do { \
F_INIT(); \
F2(i, j, k, 0); \
F2(i, j, k, 1); \
F2(i, j, k, 2); \
F2(i, j, k, 3); \
F2(i, j, k, 4); \
F2(i, j, k, 5); \
F2(i, j, k, 6); \
F2(i, j, k, 7); \
} while (0)

◆ STORE_LINE

#define STORE_LINE (   i)
Value:
do { \
const vector unsigned char permST = \
vec_lvsr(i * stride, srcCopy); \
const vector unsigned char maskST = \
vec_perm((vector unsigned char)zero, \
(vector unsigned char)neg1, permST); \
src##i = vec_perm(src##i ,src##i, permST); \
sA##i= vec_sel(sA##i, src##i, maskST); \
sB##i= vec_sel(src##i, sB##i, maskST); \
vec_st(sA##i, i * stride, srcCopy); \
vec_st(sB##i, i * stride + 16, srcCopy); \
} while (0)
#define src
Definition: vp8dsp.c:254
int stride
Definition: mace.c:144
#define zero
Definition: regdef.h:64

Referenced by dering_altivec().

◆ doHorizLowPass_altivec

#define doHorizLowPass_altivec (   a...)    doHorizLowPass_C(a)

Definition at line 824 of file postprocess_altivec_template.c.

◆ doHorizDefFilter_altivec

#define doHorizDefFilter_altivec (   a...)    doHorizDefFilter_C(a)

Definition at line 825 of file postprocess_altivec_template.c.

◆ do_a_deblock_altivec

#define do_a_deblock_altivec (   a...)    do_a_deblock_C(a)

Definition at line 826 of file postprocess_altivec_template.c.

◆ LOAD_LINE [5/5]

#define LOAD_LINE (   src,
  i 
)
Value:
register int j##src##i = i * stride; \
vector unsigned char perm##src##i = vec_lvsl(j##src##i, src); \
const vector unsigned char v_##src##A1##i = vec_ld(j##src##i, src); \
const vector unsigned char v_##src##A2##i = vec_ld(j##src##i + 16, src); \
const vector unsigned char v_##src##A##i = \
vec_perm(v_##src##A1##i, v_##src##A2##i, perm##src##i); \
vector signed short v_##src##Ass##i = \
(vector signed short)vec_mergeh((vector signed char)zero, \
(vector signed char)v_##src##A##i)
#define A1
Definition: binkdsp.c:31
#define src
Definition: vp8dsp.c:254
int stride
Definition: mace.c:144
#define A(x)
Definition: vp56_arith.h:28
#define A2
Definition: binkdsp.c:32
#define zero
Definition: regdef.h:64
perm
Definition: f_perms.c:74

◆ ACCUMULATE_DIFFS

#define ACCUMULATE_DIFFS (   i)
Value:
do { \
vector signed short v_d = vec_sub(v_tempBlurredAss##i, \
v_srcAss##i); \
v_dp = vec_msums(v_d, v_d, v_dp); \
v_sysdp = vec_msums(v_d, vsint16_1, v_sysdp); \
} while (0)

Referenced by tempNoiseReducer_altivec().

◆ OP [1/4]

#define OP (   i)    v_tempBlurredAss##i = vec_avg(v_tempBlurredAss##i, v_srcAss##i);

◆ OP [2/4]

#define OP (   i)    v_tempBlurredAss##i = v_srcAss##i;

◆ OP [3/4]

#define OP (   i)
Value:
do { \
const vector signed short v_temp = \
vec_mladd(v_tempBlurredAss##i, vsint16_7, v_srcAss##i); \
const vector signed short v_temp2 = vec_add(v_temp, vsint16_4); \
v_tempBlurredAss##i = vec_sr(v_temp2, vuint16_3); \
} while (0)

◆ OP [4/4]

#define OP (   i)
Value:
do { \
const vector signed short v_temp = \
vec_mladd(v_tempBlurredAss##i, vsint16_3, v_srcAss##i); \
const vector signed short v_temp2 = vec_add(v_temp, vsint16_2); \
v_tempBlurredAss##i = \
vec_sr(v_temp2, (vector unsigned short)vsint16_2); \
} while (0)

◆ PACK_AND_STORE [2/2]

#define PACK_AND_STORE (   src,
  i 
)
Value:
do { \
const vector unsigned char perms = vec_lvsr(i * stride, src); \
const vector unsigned char vf = \
vec_packsu(v_tempBlurredAss##1, (vector signed short)zero); \
const vector unsigned char vg = vec_perm(vf, v_##src##A##i, permHH); \
const vector unsigned char mask = \
vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms); \
const vector unsigned char vg2 = vec_perm(vg, vg, perms); \
const vector unsigned char svA = vec_sel(v_##src##A1##i, vg2, mask); \
const vector unsigned char svB = vec_sel(vg2, v_##src##A2##i, mask); \
vec_st(svA, i * stride, src); \
vec_st(svB, i * stride + 16, src); \
} while (0)
#define A1
Definition: binkdsp.c:31
#define src
Definition: vp8dsp.c:254
int stride
Definition: mace.c:144
#define A(x)
Definition: vp56_arith.h:28
#define A2
Definition: binkdsp.c:32
static const uint16_t mask[17]
Definition: lzw.c:38
#define zero
Definition: regdef.h:64

◆ LOAD_DOUBLE_LINE [1/2]

#define LOAD_DOUBLE_LINE (   i,
  j 
)
Value:
vector unsigned char perm1##i = vec_lvsl(i * stride, src); \
vector unsigned char perm2##i = vec_lvsl(j * stride, src); \
vector unsigned char srcA##i = vec_ld(i * stride, src); \
vector unsigned char srcB##i = vec_ld(i * stride + 16, src); \
vector unsigned char srcC##i = vec_ld(j * stride, src); \
vector unsigned char srcD##i = vec_ld(j * stride+ 16, src); \
vector unsigned char src##i = vec_perm(srcA##i, srcB##i, perm1##i); \
vector unsigned char src##j = vec_perm(srcC##i, srcD##i, perm2##i)
#define src
Definition: vp8dsp.c:254
int stride
Definition: mace.c:144

Referenced by transpose_16x8_char_toPackedAlign_altivec(), and transpose_8x16_char_fromPackedAlign_altivec().

◆ LOAD_DOUBLE_LINE [2/2]

#define LOAD_DOUBLE_LINE (   i,
  j 
)
Value:
vector unsigned char src##i = vec_ld(i * 16, src); \
vector unsigned char src##j = vec_ld(j * 16, src)
#define src
Definition: vp8dsp.c:254

◆ STORE_DOUBLE_LINE

#define STORE_DOUBLE_LINE (   i,
  j 
)
Value:
do { \
vector unsigned char dstAi = vec_ld(i * stride, dst); \
vector unsigned char dstBi = vec_ld(i * stride + 16, dst); \
vector unsigned char dstAj = vec_ld(j * stride, dst); \
vector unsigned char dstBj = vec_ld(j * stride+ 16, dst); \
vector unsigned char aligni = vec_lvsr(i * stride, dst); \
vector unsigned char alignj = vec_lvsr(j * stride, dst); \
vector unsigned char maski = \
vec_perm(zero, (vector unsigned char)neg1, aligni); \
vector unsigned char maskj = \
vec_perm(zero, (vector unsigned char)neg1, alignj); \
vector unsigned char dstRi = vec_perm(temp##i, temp##i, aligni); \
vector unsigned char dstRj = vec_perm(temp##j, temp##j, alignj); \
vector unsigned char dstAFi = vec_sel(dstAi, dstRi, maski); \
vector unsigned char dstBFi = vec_sel(dstRi, dstBi, maski); \
vector unsigned char dstAFj = vec_sel(dstAj, dstRj, maskj); \
vector unsigned char dstBFj = vec_sel(dstRj, dstBj, maskj); \
vec_st(dstAFi, i * stride, dst); \
vec_st(dstBFi, i * stride + 16, dst); \
vec_st(dstAFj, j * stride, dst); \
vec_st(dstBFj, j * stride + 16, dst); \
} while (0)
else temp
Definition: vf_mcdeint.c:256
int stride
Definition: mace.c:144
#define zero
Definition: regdef.h:64

Referenced by transpose_8x16_char_fromPackedAlign_altivec().

Function Documentation

◆ vertClassify_altivec()

static int vertClassify_altivec ( uint8_t  src[],
int  stride,
PPContext *  c 
)
inlinestatic

Definition at line 58 of file postprocess_altivec_template.c.

Referenced by postProcess().

◆ doVertLowPass_altivec()

static void doVertLowPass_altivec ( uint8_t *  src,
int  stride,
PPContext *  c 
)
inlinestatic

Definition at line 213 of file postprocess_altivec_template.c.

Referenced by postProcess().

◆ doVertDefFilter_altivec()

static void doVertDefFilter_altivec ( uint8_t  src[],
int  stride,
PPContext *  c 
)
inlinestatic

Definition at line 411 of file postprocess_altivec_template.c.

Referenced by postProcess().

◆ dering_altivec()

static void dering_altivec ( uint8_t  src[],
int  stride,
PPContext *  c 
)
inlinestatic

Definition at line 532 of file postprocess_altivec_template.c.

◆ tempNoiseReducer_altivec()

static void tempNoiseReducer_altivec ( uint8_t *  src,
int  stride,
uint8_t *  tempBlurred,
uint32_t *  tempBlurredPast,
int *  maxNoise 
)
inlinestatic

Definition at line 828 of file postprocess_altivec_template.c.

◆ transpose_16x8_char_toPackedAlign_altivec()

static void transpose_16x8_char_toPackedAlign_altivec ( unsigned char *  dst,
unsigned char *  src,
int  stride 
)
inlinestatic

Definition at line 1015 of file postprocess_altivec_template.c.

Referenced by postProcess().

◆ transpose_8x16_char_fromPackedAlign_altivec()

static void transpose_8x16_char_fromPackedAlign_altivec ( unsigned char *  dst,
unsigned char *  src,
int  stride 
)
inlinestatic

Definition at line 1120 of file postprocess_altivec_template.c.

Referenced by postProcess().