#define pb_7f (~0UL / 255 * 0x7f)
#define pb_80 (~0UL / 255 * 0x80)

static void diff_bytes_c(uint8_t *dst, const uint8_t *src1, const uint8_t *src2,
                         intptr_t w)
{
    long i;

#if !HAVE_FAST_UNALIGNED
    if (((long)src1 | (long)src2) & (sizeof(long) - 1)) {
        for (i = 0; i + 7 < w; i += 8) {
            dst[i + 0] = src1[i + 0] - src2[i + 0];
            dst[i + 1] = src1[i + 1] - src2[i + 1];
            dst[i + 2] = src1[i + 2] - src2[i + 2];
            dst[i + 3] = src1[i + 3] - src2[i + 3];
            dst[i + 4] = src1[i + 4] - src2[i + 4];
            dst[i + 5] = src1[i + 5] - src2[i + 5];
            dst[i + 6] = src1[i + 6] - src2[i + 6];
            dst[i + 7] = src1[i + 7] - src2[i + 7];
        }
    } else
#endif
    {
        for (i = 0; i <= w - (int) sizeof(long); i += sizeof(long)) {
            long a = *(long *) (src1 + i);
            long b = *(long *) (src2 + i);
            /* subtract all bytes of one machine word in parallel (SWAR) */
            *(long *) (dst + i) = ((a | pb_80) - (b & pb_7f)) ^
                                  ((a ^ b ^ pb_80) & pb_80);
        }
    }
    /* handle any remaining tail bytes one at a time */
    for (; i < w; i++)
        dst[i + 0] = src1[i + 0] - src2[i + 0];
}
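pb_7f and pb_80 replicate 0x7f and 0x80 into every byte of a long, so the word-wide expression computes the byte-wise difference without letting borrows cross byte boundaries. A minimal standalone check of that identity against the plain per-byte loop (a hypothetical test program, not part of FFmpeg) could look like:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define pb_7f (~0UL / 255 * 0x7f)
    #define pb_80 (~0UL / 255 * 0x80)

    int main(void)
    {
        uint8_t src1[sizeof(long)], src2[sizeof(long)], ref[sizeof(long)], out[sizeof(long)];
        long a, b, r;

        /* arbitrary test bytes, including values that force per-byte borrows */
        for (size_t i = 0; i < sizeof(long); i++) {
            src1[i] = (uint8_t)(17 * i + 3);
            src2[i] = (uint8_t)(251 * i + 200);
            ref[i]  = src1[i] - src2[i];      /* per-byte reference, wraps mod 256 */
        }

        memcpy(&a, src1, sizeof(long));       /* memcpy avoids aliasing issues in the test */
        memcpy(&b, src2, sizeof(long));
        r = ((a | pb_80) - (b & pb_7f)) ^ ((a ^ b ^ pb_80) & pb_80);
        memcpy(out, &r, sizeof(long));

        printf("%s\n", memcmp(ref, out, sizeof(long)) ? "mismatch" : "match");
        return 0;
    }

The result holds on either endianness, because source and destination bytes occupy the same positions within the word.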
static void sub_median_pred_c(uint8_t *dst, const uint8_t *src1,
                              const uint8_t *src2, intptr_t w,
                              int *left, int *left_top)
{
    int i;
    uint8_t l  = *left;
    uint8_t lt = *left_top;

    for (i = 0; i < w; i++) {
        const int pred = mid_pred(l, src1[i], (l + src1[i] - lt) & 0xFF);
        lt     = src1[i];
        l      = src2[i];
        dst[i] = l - pred;   /* store the prediction residual */
    }

    *left     = l;
    *left_top = lt;
}
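mid_pred() (from FFmpeg's mathops.h) returns the median of its three arguments, so the predictor is the HuffYUV median of left, top, and left + top - topleft. As a sketch, the decoder-side inverse would rebuild a row from its residuals and the row above; the helper names below are illustrative, not FFmpeg API:

    #include <stdint.h>

    /* Median of three values; a portable equivalent of mid_pred() from mathops.h. */
    static int median3(int a, int b, int c)
    {
        if (a > b) { int t = a; a = b; b = t; }   /* now a = min, b = max of the first two */
        if (b > c) b = c;                         /* b = min(b, c) */
        if (a > b) b = a;                         /* b = max(a, b): the median of the three */
        return b;
    }

    /* Sketch of the inverse of sub_median_pred_c (illustrative helper). */
    static void add_median_pred_sketch(uint8_t *cur, const uint8_t *top,
                                       const uint8_t *diff, int w,
                                       int *left, int *left_top)
    {
        int i;
        uint8_t l  = *left;
        uint8_t lt = *left_top;

        for (i = 0; i < w; i++) {
            const int pred = median3(l, top[i], (l + top[i] - lt) & 0xFF);
            lt     = top[i];
            l      = (uint8_t)(diff[i] + pred);   /* undoes dst[i] = l - pred */
            cur[i] = l;
        }

        *left     = l;
        *left_top = lt;
    }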
static void sub_left_predict_c(uint8_t *dst, uint8_t *src,
                               ptrdiff_t stride, ptrdiff_t width, int height)
{
    int i, j;
    uint8_t prev = 0x80;   /* initial predictor value */

    for (j = 0; j < height; j++) {
        for (i = 0; i < width; i++) {
            *dst++ = src[i] - prev;   /* residual against the previous sample */
            prev   = src[i];
        }
        src += stride;
    }
}
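sub_left_predict_c() replaces each sample with its difference from the previous sample in raster order, seeding the predictor with 0x80 and carrying it across row boundaries; it reads the source with a stride but packs the residuals tightly. A sketch of the inverse, a running sum over the packed residuals (illustrative helper, not an FFmpeg API):

    #include <stdint.h>
    #include <stddef.h>

    /* Undo sub_left_predict_c: restore samples into a strided destination. */
    static void add_left_predict_sketch(uint8_t *dst, const uint8_t *diff,
                                        ptrdiff_t stride, ptrdiff_t width, int height)
    {
        int i, j;
        uint8_t prev = 0x80;                      /* must match the encoder's seed value */

        for (j = 0; j < height; j++) {
            for (i = 0; i < width; i++) {
                prev   = (uint8_t)(prev + *diff++);   /* undoes src[i] - prev */
                dst[i] = prev;
            }
            dst += stride;
        }
    }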
Functions defined in this file:
    static void diff_bytes_c(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, intptr_t w)
    static void sub_median_pred_c(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, intptr_t w, int *left, int *left_top)
    static void sub_left_predict_c(uint8_t *dst, uint8_t *src, ptrdiff_t stride, ptrdiff_t width, int height)
    av_cold void ff_llvidencdsp_init(LLVidEncDSPContext *c)
    void ff_llvidencdsp_init_x86(LLVidEncDSPContext *c)

LLVidEncDSPContext function pointers filled in by ff_llvidencdsp_init():
    void (*diff_bytes)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, intptr_t w)
    void (*sub_median_pred)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, intptr_t w, int *left, int *left_top)
        Subtract HuffYUV's variant of median prediction.
    void (*sub_left_predict)(uint8_t *dst, uint8_t *src, ptrdiff_t stride, ptrdiff_t width, int height)

Included header: libavutil/attributes.h (macro definitions for various function/variable attributes; provides av_cold).
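These entries describe the dispatch layer: encoders do not call the *_c functions directly but fill an LLVidEncDSPContext with ff_llvidencdsp_init(), which installs the C versions and then lets ff_llvidencdsp_init_x86() override them with SIMD implementations where available. A hedged usage sketch; the function name, buffer names, and include path are illustrative:

    #include <stdint.h>
    #include "lossless_videoencdsp.h"   /* LLVidEncDSPContext, ff_llvidencdsp_init() */

    /* Illustrative only: produce one row of residuals through the function pointers. */
    static void encode_row_example(uint8_t *residual, const uint8_t *cur,
                                   const uint8_t *prev, int width)
    {
        LLVidEncDSPContext c;
        ff_llvidencdsp_init(&c);   /* normally done once at encoder init time */

        /* plain inter-row difference: residual[i] = cur[i] - prev[i] */
        c.diff_bytes(residual, cur, prev, width);

        /* alternatively, median-predicted residuals for the same row
         * (left/left_top seeding shown here is simplified) */
        int left = 0, left_top = 0;
        c.sub_median_pred(residual, prev, cur, width, &left, &left_top);
    }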