h264chroma_msa.c
/*
 * Copyright (c) 2015 - 2017 Shivraj Patil (Shivraj.Patil@imgtec.com)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/mips/generic_macros_msa.h"
#include "h264chroma_mips.h"

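/* H.264 chroma interpolation (spec clause 8.4.2.2.2): for fractional
 * offsets x, y in [0, 7] and the four neighbouring samples A, B, C, D,
 *
 *     pred = ((8 - x) * (8 - y) * A + x * (8 - y) * B +
 *             (8 - x) * y * C + x * y * D + 32) >> 6
 *
 * The routines below split this into horizontal-only (hz), vertical-only
 * (vt) and 2-D (hv) paths for block widths 2, 4 and 8, plus
 * *_and_aver_dst_* variants that average the result into the destination.
 *
 * chroma_mask_arr holds VSHF.B shuffle controls; as used below, offset 0
 * pairs adjacent bytes across two 4-wide rows, offset 32 pairs adjacent
 * bytes within one 8-wide row, and offsets 48 and 64 pack 2-wide row
 * pairs for the hv and hz paths respectively.
 */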
static const uint8_t chroma_mask_arr[16 * 5] = {
    0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20,
    0, 2, 2, 4, 4, 6, 6, 8, 16, 18, 18, 20, 20, 22, 22, 24,
    0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
    0, 1, 1, 2, 16, 17, 17, 18, 4, 5, 5, 6, 6, 7, 7, 8,
    0, 1, 1, 2, 16, 17, 17, 18, 16, 17, 17, 18, 18, 19, 19, 20
};

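/* Horizontal (hz) filtering: coeff_vec interleaves the two filter taps so
 * the byte dot product over shuffled pixel pairs produces one 16-bit
 * weighted sum per lane.  The sum is scaled by 8 (<< 3) and rounded with
 * SRARI_H(..., 6); since (8 * sum + 32) >> 6 == (sum + 4) >> 3 this equals
 * the 1-D chroma filter while reusing the >> 6 rounding of the 2-D path.
 * SAT_UH(..., 7) clamps to the 8-bit range before the even-byte pack.
 */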
static void avc_chroma_hz_2x2_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                  uint32_t coeff0, uint32_t coeff1)
{
    uint16_t out0, out1;
    v16i8 src0, src1;
    v8u16 res_r;
    v8i16 res;
    v16i8 mask;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    mask = LD_SB(&chroma_mask_arr[0]);

    LD_SB2(src, stride, src0, src1);

    src0 = __msa_vshf_b(mask, src1, src0);
    res_r = __msa_dotp_u_h((v16u8) src0, coeff_vec);
    res_r <<= 3;
    res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
    res_r = __msa_sat_u_h(res_r, 7);
    res = (v8i16) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);

    out0 = __msa_copy_u_h(res, 0);
    out1 = __msa_copy_u_h(res, 2);

    SH(out0, dst);
    dst += stride;
    SH(out1, dst);
}

static void avc_chroma_hz_2x4_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                  uint32_t coeff0, uint32_t coeff1)
{
    v16u8 src0, src1, src2, src3;
    v8u16 res_r;
    v8i16 res;
    v16i8 mask;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    mask = LD_SB(&chroma_mask_arr[64]);

    LD_UB4(src, stride, src0, src1, src2, src3);

    VSHF_B2_UB(src0, src1, src2, src3, mask, mask, src0, src2);

    src0 = (v16u8) __msa_ilvr_d((v2i64) src2, (v2i64) src0);

    res_r = __msa_dotp_u_h(src0, coeff_vec);
    res_r <<= 3;
    res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
    res_r = __msa_sat_u_h(res_r, 7);
    res = (v8i16) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);

    ST2x4_UB(res, 0, dst, stride);
}

static void avc_chroma_hz_2w_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                 uint32_t coeff0, uint32_t coeff1,
                                 int32_t height)
{
    if (2 == height) {
        avc_chroma_hz_2x2_msa(src, dst, stride, coeff0, coeff1);
    } else if (4 == height) {
        avc_chroma_hz_2x4_msa(src, dst, stride, coeff0, coeff1);
    }
}

static void avc_chroma_hz_4x2_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                  uint32_t coeff0, uint32_t coeff1)
{
    v16i8 src0, src1;
    v8u16 res_r;
    v4i32 res;
    v16i8 mask;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    mask = LD_SB(&chroma_mask_arr[0]);

    LD_SB2(src, stride, src0, src1);

    src0 = __msa_vshf_b(mask, src1, src0);
    res_r = __msa_dotp_u_h((v16u8) src0, coeff_vec);
    res_r <<= 3;
    res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
    res_r = __msa_sat_u_h(res_r, 7);
    res = (v4i32) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);

    ST4x2_UB(res, dst, stride);
}

static void avc_chroma_hz_4x4_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                  uint32_t coeff0, uint32_t coeff1)
{
    v16u8 src0, src1, src2, src3, out;
    v8u16 res0_r, res1_r;
    v16i8 mask;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    mask = LD_SB(&chroma_mask_arr[0]);

    LD_UB4(src, stride, src0, src1, src2, src3);
    VSHF_B2_UB(src0, src1, src2, src3, mask, mask, src0, src2);
    DOTP_UB2_UH(src0, src2, coeff_vec, coeff_vec, res0_r, res1_r);
    res0_r <<= 3;
    res1_r <<= 3;
    SRARI_H2_UH(res0_r, res1_r, 6);
    SAT_UH2_UH(res0_r, res1_r, 7);
    out = (v16u8) __msa_pckev_b((v16i8) res1_r, (v16i8) res0_r);
    ST4x4_UB(out, out, 0, 1, 2, 3, dst, stride);
}

static void avc_chroma_hz_4x8_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                  uint32_t coeff0, uint32_t coeff1)
{
    v16u8 src0, src1, src2, src3, src4, src5, src6, src7, out0, out1;
    v16i8 mask;
    v8u16 res0, res1, res2, res3;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    mask = LD_SB(&chroma_mask_arr[0]);

    LD_UB8(src, stride, src0, src1, src2, src3, src4, src5, src6, src7);
    VSHF_B2_UB(src0, src1, src2, src3, mask, mask, src0, src2);
    VSHF_B2_UB(src4, src5, src6, src7, mask, mask, src4, src6);
    DOTP_UB2_UH(src0, src2, coeff_vec, coeff_vec, res0, res1);
    DOTP_UB2_UH(src4, src6, coeff_vec, coeff_vec, res2, res3);
    SLLI_4V(res0, res1, res2, res3, 3);
    SRARI_H4_UH(res0, res1, res2, res3, 6);
    SAT_UH4_UH(res0, res1, res2, res3, 7);
    PCKEV_B2_UB(res1, res0, res3, res2, out0, out1);
    ST4x8_UB(out0, out1, dst, stride);
}

static void avc_chroma_hz_4w_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                 uint32_t coeff0, uint32_t coeff1,
                                 int32_t height)
{
    if (2 == height) {
        avc_chroma_hz_4x2_msa(src, dst, stride, coeff0, coeff1);
    } else if (4 == height) {
        avc_chroma_hz_4x4_msa(src, dst, stride, coeff0, coeff1);
    } else if (8 == height) {
        avc_chroma_hz_4x8_msa(src, dst, stride, coeff0, coeff1);
    }
}

static void avc_chroma_hz_8x4_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                  uint32_t coeff0, uint32_t coeff1)
{
    v16u8 src0, src1, src2, src3, out0, out1;
    v8u16 res0, res1, res2, res3;
    v16i8 mask;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    mask = LD_SB(&chroma_mask_arr[32]);
    LD_UB4(src, stride, src0, src1, src2, src3);
    VSHF_B2_UB(src0, src0, src1, src1, mask, mask, src0, src1);
    VSHF_B2_UB(src2, src2, src3, src3, mask, mask, src2, src3);
    DOTP_UB4_UH(src0, src1, src2, src3, coeff_vec, coeff_vec, coeff_vec,
                coeff_vec, res0, res1, res2, res3);
    SLLI_4V(res0, res1, res2, res3, 3);
    SRARI_H4_UH(res0, res1, res2, res3, 6);
    SAT_UH4_UH(res0, res1, res2, res3, 7);
    PCKEV_B2_UB(res1, res0, res3, res2, out0, out1);
    ST8x4_UB(out0, out1, dst, stride);
}

static void avc_chroma_hz_8x8_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                  uint32_t coeff0, uint32_t coeff1)
{
    v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
    v16u8 out0, out1, out2, out3;
    v8u16 res0, res1, res2, res3, res4, res5, res6, res7;
    v16i8 mask;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    mask = LD_SB(&chroma_mask_arr[32]);

    LD_UB8(src, stride, src0, src1, src2, src3, src4, src5, src6, src7);
    VSHF_B2_UB(src0, src0, src1, src1, mask, mask, src0, src1);
    VSHF_B2_UB(src2, src2, src3, src3, mask, mask, src2, src3);
    VSHF_B2_UB(src4, src4, src5, src5, mask, mask, src4, src5);
    VSHF_B2_UB(src6, src6, src7, src7, mask, mask, src6, src7);
    DOTP_UB4_UH(src0, src1, src2, src3, coeff_vec, coeff_vec, coeff_vec,
                coeff_vec, res0, res1, res2, res3);
    DOTP_UB4_UH(src4, src5, src6, src7, coeff_vec, coeff_vec, coeff_vec,
                coeff_vec, res4, res5, res6, res7);
    SLLI_4V(res0, res1, res2, res3, 3);
    SLLI_4V(res4, res5, res6, res7, 3);
    SRARI_H4_UH(res0, res1, res2, res3, 6);
    SRARI_H4_UH(res4, res5, res6, res7, 6);
    SAT_UH4_UH(res0, res1, res2, res3, 7);
    SAT_UH4_UH(res4, res5, res6, res7, 7);
    PCKEV_B2_UB(res1, res0, res3, res2, out0, out1);
    PCKEV_B2_UB(res5, res4, res7, res6, out2, out3);
    ST8x8_UB(out0, out1, out2, out3, dst, stride);
}

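/* Width-8 horizontal filter for heights not covered by the fixed-size
 * versions: four rows per vector iteration, then a single-row loop for
 * any height % 4 remainder.
 */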
static void avc_chroma_hz_nonmult_msa(uint8_t *src, uint8_t *dst,
                                      int32_t stride, uint32_t coeff0,
                                      uint32_t coeff1, int32_t height)
{
    uint32_t row;
    v16u8 src0, src1, src2, src3, out0, out1;
    v8u16 res0, res1, res2, res3;
    v16i8 mask;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    mask = LD_SB(&chroma_mask_arr[32]);

    for (row = height >> 2; row--;) {
        LD_UB4(src, stride, src0, src1, src2, src3);
        src += (4 * stride);

        VSHF_B2_UB(src0, src0, src1, src1, mask, mask, src0, src1);
        VSHF_B2_UB(src2, src2, src3, src3, mask, mask, src2, src3);
        DOTP_UB4_UH(src0, src1, src2, src3, coeff_vec, coeff_vec, coeff_vec,
                    coeff_vec, res0, res1, res2, res3);
        SLLI_4V(res0, res1, res2, res3, 3);
        SRARI_H4_UH(res0, res1, res2, res3, 6);
        SAT_UH4_UH(res0, res1, res2, res3, 7);
        PCKEV_B2_UB(res1, res0, res3, res2, out0, out1);
        ST8x4_UB(out0, out1, dst, stride);
        dst += (4 * stride);
    }

    if (0 != (height % 4)) {
        for (row = (height % 4); row--;) {
            src0 = LD_UB(src);
            src += stride;

            src0 = (v16u8) __msa_vshf_b(mask, (v16i8) src0, (v16i8) src0);

            res0 = __msa_dotp_u_h(src0, coeff_vec);
            res0 <<= 3;
            res0 = (v8u16) __msa_srari_h((v8i16) res0, 6);
            res0 = __msa_sat_u_h(res0, 7);
            res0 = (v8u16) __msa_pckev_b((v16i8) res0, (v16i8) res0);

            ST8x1_UB(res0, dst);
            dst += stride;
        }
    }
}

static void avc_chroma_hz_8w_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                 uint32_t coeff0, uint32_t coeff1,
                                 int32_t height)
{
    if (4 == height) {
        avc_chroma_hz_8x4_msa(src, dst, stride, coeff0, coeff1);
    } else if (8 == height) {
        avc_chroma_hz_8x8_msa(src, dst, stride, coeff0, coeff1);
    } else {
        avc_chroma_hz_nonmult_msa(src, dst, stride, coeff0, coeff1, height);
    }
}

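/* Vertical (vt) path: the same two-tap filter applied across rows.  The
 * pixel pairs are built by interleaving vertically adjacent rows (ILVR_B)
 * instead of shuffling within a row; dot product, scaling and rounding
 * match the horizontal path.
 */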
static void avc_chroma_vt_2x2_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                  uint32_t coeff0, uint32_t coeff1)
{
    uint16_t out0, out1;
    v16i8 src0, src1, src2;
    v16u8 tmp0, tmp1;
    v8i16 res;
    v8u16 res_r;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    LD_SB3(src, stride, src0, src1, src2);

    ILVR_B2_UB(src1, src0, src2, src1, tmp0, tmp1);

    tmp0 = (v16u8) __msa_ilvr_d((v2i64) tmp1, (v2i64) tmp0);

    res_r = __msa_dotp_u_h(tmp0, coeff_vec);
    res_r <<= 3;
    res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
    res_r = __msa_sat_u_h(res_r, 7);
    res = (v8i16) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);

    out0 = __msa_copy_u_h(res, 0);
    out1 = __msa_copy_u_h(res, 2);

    SH(out0, dst);
    dst += stride;
    SH(out1, dst);
}

static void avc_chroma_vt_2x4_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                  uint32_t coeff0, uint32_t coeff1)
{
    v16u8 src0, src1, src2, src3, src4;
    v16u8 tmp0, tmp1, tmp2, tmp3;
    v8i16 res;
    v8u16 res_r;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    LD_UB5(src, stride, src0, src1, src2, src3, src4);
    ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3,
               tmp0, tmp1, tmp2, tmp3);
    ILVR_W2_UB(tmp1, tmp0, tmp3, tmp2, tmp0, tmp2);

    tmp0 = (v16u8) __msa_ilvr_d((v2i64) tmp2, (v2i64) tmp0);

    res_r = __msa_dotp_u_h(tmp0, coeff_vec);
    res_r <<= 3;
    res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
    res_r = __msa_sat_u_h(res_r, 7);

    res = (v8i16) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);

    ST2x4_UB(res, 0, dst, stride);
}

static void avc_chroma_vt_2w_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                 uint32_t coeff0, uint32_t coeff1,
                                 int32_t height)
{
    if (2 == height) {
        avc_chroma_vt_2x2_msa(src, dst, stride, coeff0, coeff1);
    } else if (4 == height) {
        avc_chroma_vt_2x4_msa(src, dst, stride, coeff0, coeff1);
    }
}

static void avc_chroma_vt_4x2_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                  uint32_t coeff0, uint32_t coeff1)
{
    v16u8 src0, src1, src2;
    v16u8 tmp0, tmp1;
    v4i32 res;
    v8u16 res_r;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    LD_UB3(src, stride, src0, src1, src2);
    ILVR_B2_UB(src1, src0, src2, src1, tmp0, tmp1);

    tmp0 = (v16u8) __msa_ilvr_d((v2i64) tmp1, (v2i64) tmp0);
    res_r = __msa_dotp_u_h(tmp0, coeff_vec);
    res_r <<= 3;
    res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
    res_r = __msa_sat_u_h(res_r, 7);
    res = (v4i32) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);

    ST4x2_UB(res, dst, stride);
}

static void avc_chroma_vt_4x4_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                  uint32_t coeff0, uint32_t coeff1)
{
    v16u8 src0, src1, src2, src3, src4;
    v16u8 tmp0, tmp1, tmp2, tmp3;
    v16u8 out;
    v8u16 res0_r, res1_r;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    LD_UB5(src, stride, src0, src1, src2, src3, src4);
    ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, tmp0, tmp1, tmp2,
               tmp3);
    ILVR_D2_UB(tmp1, tmp0, tmp3, tmp2, tmp0, tmp2);
    DOTP_UB2_UH(tmp0, tmp2, coeff_vec, coeff_vec, res0_r, res1_r);
    res0_r <<= 3;
    res1_r <<= 3;
    SRARI_H2_UH(res0_r, res1_r, 6);
    SAT_UH2_UH(res0_r, res1_r, 7);
    out = (v16u8) __msa_pckev_b((v16i8) res1_r, (v16i8) res0_r);
    ST4x4_UB(out, out, 0, 1, 2, 3, dst, stride);
}

static void avc_chroma_vt_4x8_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                  uint32_t coeff0, uint32_t coeff1)
{
    v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
    v16u8 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, out0, out1;
    v8u16 res0, res1, res2, res3;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    LD_UB5(src, stride, src0, src1, src2, src3, src4);
    src += (5 * stride);
    LD_UB4(src, stride, src5, src6, src7, src8);
    ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, tmp0, tmp1, tmp2,
               tmp3);
    ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7, tmp4, tmp5, tmp6,
               tmp7);
    ILVR_D2_UB(tmp1, tmp0, tmp3, tmp2, tmp0, tmp2);
    ILVR_D2_UB(tmp5, tmp4, tmp7, tmp6, tmp4, tmp6);
    DOTP_UB2_UH(tmp0, tmp2, coeff_vec, coeff_vec, res0, res1);
    DOTP_UB2_UH(tmp4, tmp6, coeff_vec, coeff_vec, res2, res3);
    SLLI_4V(res0, res1, res2, res3, 3);
    SRARI_H4_UH(res0, res1, res2, res3, 6);
    SAT_UH4_UH(res0, res1, res2, res3, 7);
    PCKEV_B2_UB(res1, res0, res3, res2, out0, out1);
    ST4x8_UB(out0, out1, dst, stride);
}

static void avc_chroma_vt_4w_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                 uint32_t coeff0, uint32_t coeff1,
                                 int32_t height)
{
    if (2 == height) {
        avc_chroma_vt_4x2_msa(src, dst, stride, coeff0, coeff1);
    } else if (4 == height) {
        avc_chroma_vt_4x4_msa(src, dst, stride, coeff0, coeff1);
    } else if (8 == height) {
        avc_chroma_vt_4x8_msa(src, dst, stride, coeff0, coeff1);
    }
}

static void avc_chroma_vt_8x4_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                  uint32_t coeff0, uint32_t coeff1)
{
    v16u8 src0, src1, src2, src3, src4, out0, out1;
    v8u16 res0, res1, res2, res3;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    LD_UB5(src, stride, src0, src1, src2, src3, src4);
    ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, src0, src1, src2,
               src3);
    DOTP_UB4_UH(src0, src1, src2, src3, coeff_vec, coeff_vec, coeff_vec,
                coeff_vec, res0, res1, res2, res3);
    SLLI_4V(res0, res1, res2, res3, 3);
    SRARI_H4_UH(res0, res1, res2, res3, 6);
    SAT_UH4_UH(res0, res1, res2, res3, 7);
    PCKEV_B2_UB(res1, res0, res3, res2, out0, out1);
    ST8x4_UB(out0, out1, dst, stride);
}

static void avc_chroma_vt_8x8_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                  uint32_t coeff0, uint32_t coeff1)
{
    v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
    v16u8 out0, out1, out2, out3;
    v8u16 res0, res1, res2, res3, res4, res5, res6, res7;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    LD_UB5(src, stride, src0, src1, src2, src3, src4);
    src += (5 * stride);
    LD_UB4(src, stride, src5, src6, src7, src8);
    ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, src0, src1, src2,
               src3);
    ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7, src4, src5, src6,
               src7);
    DOTP_UB4_UH(src0, src1, src2, src3, coeff_vec, coeff_vec, coeff_vec,
                coeff_vec, res0, res1, res2, res3);
    DOTP_UB4_UH(src4, src5, src6, src7, coeff_vec, coeff_vec, coeff_vec,
                coeff_vec, res4, res5, res6, res7);
    SLLI_4V(res0, res1, res2, res3, 3);
    SLLI_4V(res4, res5, res6, res7, 3);
    SRARI_H4_UH(res0, res1, res2, res3, 6);
    SRARI_H4_UH(res4, res5, res6, res7, 6);
    SAT_UH4_UH(res0, res1, res2, res3, 7);
    SAT_UH4_UH(res4, res5, res6, res7, 7);
    PCKEV_B2_UB(res1, res0, res3, res2, out0, out1);
    PCKEV_B2_UB(res5, res4, res7, res6, out2, out3);
    ST8x8_UB(out0, out1, out2, out3, dst, stride);
}

static void avc_chroma_vt_8w_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                 uint32_t coeff0, uint32_t coeff1,
                                 int32_t height)
{
    if (4 == height) {
        avc_chroma_vt_8x4_msa(src, dst, stride, coeff0, coeff1);
    } else if (8 == height) {
        avc_chroma_vt_8x8_msa(src, dst, stride, coeff0, coeff1);
    }
}

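/* 2-D (hv) path: each row is first filtered horizontally with the byte
 * dot product (res_hz*); the two vertical taps are then applied as 16-bit
 * multiplies (coeff_vt_vec0/1) on the row results and summed, so the full
 * bilinear sum is rounded only once, with SRARI_H(..., 6), i.e.
 * (sum + 32) >> 6.
 */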
static void avc_chroma_hv_2x2_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                  uint32_t coef_hor0, uint32_t coef_hor1,
                                  uint32_t coef_ver0, uint32_t coef_ver1)
{
    uint16_t out0, out1;
    v16u8 src0, src1, src2;
    v8u16 res_hz0, res_hz1, res_vt0, res_vt1;
    v8i16 res_vert;
    v16i8 mask;
    v16i8 coeff_hz_vec0 = __msa_fill_b(coef_hor0);
    v16i8 coeff_hz_vec1 = __msa_fill_b(coef_hor1);
    v16u8 coeff_hz_vec = (v16u8) __msa_ilvr_b(coeff_hz_vec0, coeff_hz_vec1);
    v8u16 coeff_vt_vec0 = (v8u16) __msa_fill_h(coef_ver0);
    v8u16 coeff_vt_vec1 = (v8u16) __msa_fill_h(coef_ver1);

    mask = LD_SB(&chroma_mask_arr[48]);

    LD_UB3(src, stride, src0, src1, src2);
    VSHF_B2_UB(src0, src1, src1, src2, mask, mask, src0, src1);
    DOTP_UB2_UH(src0, src1, coeff_hz_vec, coeff_hz_vec, res_hz0, res_hz1);
    MUL2(res_hz0, coeff_vt_vec1, res_hz1, coeff_vt_vec0, res_vt0, res_vt1);

    res_vt0 += res_vt1;
    res_vt0 = (v8u16) __msa_srari_h((v8i16) res_vt0, 6);
    res_vt0 = __msa_sat_u_h(res_vt0, 7);
    res_vert = (v8i16) __msa_pckev_b((v16i8) res_vt0, (v16i8) res_vt0);

    out0 = __msa_copy_u_h(res_vert, 0);
    out1 = __msa_copy_u_h(res_vert, 1);

    SH(out0, dst);
    dst += stride;
    SH(out1, dst);
}

static void avc_chroma_hv_2x4_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                  uint32_t coef_hor0, uint32_t coef_hor1,
                                  uint32_t coef_ver0, uint32_t coef_ver1)
{
    v16u8 src0, src1, src2, src3, src4;
    v16u8 tmp0, tmp1, tmp2, tmp3;
    v8u16 res_hz0, res_hz1, res_vt0, res_vt1;
    v8i16 res;
    v16i8 mask;
    v16i8 coeff_hz_vec0 = __msa_fill_b(coef_hor0);
    v16i8 coeff_hz_vec1 = __msa_fill_b(coef_hor1);
    v16u8 coeff_hz_vec = (v16u8) __msa_ilvr_b(coeff_hz_vec0, coeff_hz_vec1);
    v8u16 coeff_vt_vec0 = (v8u16) __msa_fill_h(coef_ver0);
    v8u16 coeff_vt_vec1 = (v8u16) __msa_fill_h(coef_ver1);

    mask = LD_SB(&chroma_mask_arr[48]);

    LD_UB5(src, stride, src0, src1, src2, src3, src4);

    VSHF_B2_UB(src0, src1, src2, src3, mask, mask, tmp0, tmp1);
    VSHF_B2_UB(src1, src2, src3, src4, mask, mask, tmp2, tmp3);
    ILVR_D2_UB(tmp1, tmp0, tmp3, tmp2, src0, src1);
    DOTP_UB2_UH(src0, src1, coeff_hz_vec, coeff_hz_vec, res_hz0, res_hz1);
    MUL2(res_hz0, coeff_vt_vec1, res_hz1, coeff_vt_vec0, res_vt0, res_vt1);

    res_vt0 += res_vt1;
    res_vt0 = (v8u16) __msa_srari_h((v8i16) res_vt0, 6);
    res_vt0 = __msa_sat_u_h(res_vt0, 7);

    res = (v8i16) __msa_pckev_b((v16i8) res_vt0, (v16i8) res_vt0);

    ST2x4_UB(res, 0, dst, stride);
}

static void avc_chroma_hv_2w_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                 uint32_t coef_hor0, uint32_t coef_hor1,
                                 uint32_t coef_ver0, uint32_t coef_ver1,
                                 int32_t height)
{
    if (2 == height) {
        avc_chroma_hv_2x2_msa(src, dst, stride, coef_hor0, coef_hor1, coef_ver0,
                              coef_ver1);
    } else if (4 == height) {
        avc_chroma_hv_2x4_msa(src, dst, stride, coef_hor0, coef_hor1, coef_ver0,
                              coef_ver1);
    }
}

static void avc_chroma_hv_4x2_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                  uint32_t coef_hor0, uint32_t coef_hor1,
                                  uint32_t coef_ver0, uint32_t coef_ver1)
{
    v16u8 src0, src1, src2;
    v8u16 res_hz0, res_hz1, res_vt0, res_vt1;
    v16i8 mask;
    v4i32 res;
    v16i8 coeff_hz_vec0 = __msa_fill_b(coef_hor0);
    v16i8 coeff_hz_vec1 = __msa_fill_b(coef_hor1);
    v16u8 coeff_hz_vec = (v16u8) __msa_ilvr_b(coeff_hz_vec0, coeff_hz_vec1);
    v8u16 coeff_vt_vec0 = (v8u16) __msa_fill_h(coef_ver0);
    v8u16 coeff_vt_vec1 = (v8u16) __msa_fill_h(coef_ver1);

    mask = LD_SB(&chroma_mask_arr[0]);
    LD_UB3(src, stride, src0, src1, src2);
    VSHF_B2_UB(src0, src1, src1, src2, mask, mask, src0, src1);
    DOTP_UB2_UH(src0, src1, coeff_hz_vec, coeff_hz_vec, res_hz0, res_hz1);
    MUL2(res_hz0, coeff_vt_vec1, res_hz1, coeff_vt_vec0, res_vt0, res_vt1);

    res_vt0 += res_vt1;
    res_vt0 = (v8u16) __msa_srari_h((v8i16) res_vt0, 6);
    res_vt0 = __msa_sat_u_h(res_vt0, 7);
    res = (v4i32) __msa_pckev_b((v16i8) res_vt0, (v16i8) res_vt0);

    ST4x2_UB(res, dst, stride);
}

static void avc_chroma_hv_4x4_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                  uint32_t coef_hor0, uint32_t coef_hor1,
                                  uint32_t coef_ver0, uint32_t coef_ver1)
{
    v16u8 src0, src1, src2, src3, src4;
    v8u16 res_hz0, res_hz1, res_hz2, res_hz3;
    v8u16 res_vt0, res_vt1, res_vt2, res_vt3;
    v16i8 mask;
    v16i8 coeff_hz_vec0 = __msa_fill_b(coef_hor0);
    v16i8 coeff_hz_vec1 = __msa_fill_b(coef_hor1);
    v16u8 coeff_hz_vec = (v16u8) __msa_ilvr_b(coeff_hz_vec0, coeff_hz_vec1);
    v8u16 coeff_vt_vec0 = (v8u16) __msa_fill_h(coef_ver0);
    v8u16 coeff_vt_vec1 = (v8u16) __msa_fill_h(coef_ver1);
    v4i32 res0, res1;

    mask = LD_SB(&chroma_mask_arr[0]);

    LD_UB5(src, stride, src0, src1, src2, src3, src4);
    VSHF_B2_UB(src0, src1, src1, src2, mask, mask, src0, src1);
    VSHF_B2_UB(src2, src3, src3, src4, mask, mask, src2, src3);
    DOTP_UB4_UH(src0, src1, src2, src3, coeff_hz_vec, coeff_hz_vec,
                coeff_hz_vec, coeff_hz_vec, res_hz0, res_hz1, res_hz2,
                res_hz3);
    MUL4(res_hz0, coeff_vt_vec1, res_hz1, coeff_vt_vec0, res_hz2, coeff_vt_vec1,
         res_hz3, coeff_vt_vec0, res_vt0, res_vt1, res_vt2, res_vt3);
    ADD2(res_vt0, res_vt1, res_vt2, res_vt3, res_vt0, res_vt1);
    SRARI_H2_UH(res_vt0, res_vt1, 6);
    SAT_UH2_UH(res_vt0, res_vt1, 7);
    PCKEV_B2_SW(res_vt0, res_vt0, res_vt1, res_vt1, res0, res1);
    ST4x4_UB(res0, res1, 0, 1, 0, 1, dst, stride);
}

static void avc_chroma_hv_4x8_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                  uint32_t coef_hor0, uint32_t coef_hor1,
                                  uint32_t coef_ver0, uint32_t coef_ver1)
{
    v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8, res0, res1;
    v8u16 res_hz0, res_hz1, res_hz2, res_hz3, res_hz4, res_hz5, res_hz6, res_hz7;
    v8u16 res_vt0, res_vt1, res_vt2, res_vt3, res_vt4, res_vt5, res_vt6, res_vt7;
    v16i8 mask;
    v16i8 coeff_hz_vec0 = __msa_fill_b(coef_hor0);
    v16i8 coeff_hz_vec1 = __msa_fill_b(coef_hor1);
    v16u8 coeff_hz_vec = (v16u8) __msa_ilvr_b(coeff_hz_vec0, coeff_hz_vec1);
    v8u16 coeff_vt_vec0 = (v8u16) __msa_fill_h(coef_ver0);
    v8u16 coeff_vt_vec1 = (v8u16) __msa_fill_h(coef_ver1);

    mask = LD_SB(&chroma_mask_arr[0]);

    LD_UB5(src, stride, src0, src1, src2, src3, src4);
    src += (5 * stride);
    LD_UB4(src, stride, src5, src6, src7, src8);

    VSHF_B2_UB(src0, src1, src1, src2, mask, mask, src0, src1);
    VSHF_B2_UB(src2, src3, src3, src4, mask, mask, src2, src3);
    VSHF_B2_UB(src4, src5, src5, src6, mask, mask, src4, src5);
    VSHF_B2_UB(src6, src7, src7, src8, mask, mask, src6, src7);
    DOTP_UB4_UH(src0, src1, src2, src3, coeff_hz_vec, coeff_hz_vec,
                coeff_hz_vec, coeff_hz_vec, res_hz0, res_hz1, res_hz2, res_hz3);
    DOTP_UB4_UH(src4, src5, src6, src7, coeff_hz_vec, coeff_hz_vec,
                coeff_hz_vec, coeff_hz_vec, res_hz4, res_hz5, res_hz6, res_hz7);
    MUL4(res_hz0, coeff_vt_vec1, res_hz1, coeff_vt_vec0, res_hz2, coeff_vt_vec1,
         res_hz3, coeff_vt_vec0, res_vt0, res_vt1, res_vt2, res_vt3);
    MUL4(res_hz4, coeff_vt_vec1, res_hz5, coeff_vt_vec0, res_hz6, coeff_vt_vec1,
         res_hz7, coeff_vt_vec0, res_vt4, res_vt5, res_vt6, res_vt7);
    ADD2(res_vt0, res_vt1, res_vt2, res_vt3, res_vt0, res_vt1);
    ADD2(res_vt4, res_vt5, res_vt6, res_vt7, res_vt2, res_vt3);
    SRARI_H4_UH(res_vt0, res_vt1, res_vt2, res_vt3, 6);
    SAT_UH4_UH(res_vt0, res_vt1, res_vt2, res_vt3, 7);
    PCKEV_B2_UB(res_vt1, res_vt0, res_vt3, res_vt2, res0, res1);
    ST4x8_UB(res0, res1, dst, stride);
}

static void avc_chroma_hv_4w_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                 uint32_t coef_hor0, uint32_t coef_hor1,
                                 uint32_t coef_ver0, uint32_t coef_ver1,
                                 int32_t height)
{
    if (2 == height) {
        avc_chroma_hv_4x2_msa(src, dst, stride, coef_hor0, coef_hor1, coef_ver0,
                              coef_ver1);
    } else if (4 == height) {
        avc_chroma_hv_4x4_msa(src, dst, stride, coef_hor0, coef_hor1, coef_ver0,
                              coef_ver1);
    } else if (8 == height) {
        avc_chroma_hv_4x8_msa(src, dst, stride, coef_hor0, coef_hor1, coef_ver0,
                              coef_ver1);
    }
}

static void avc_chroma_hv_8x4_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                  uint32_t coef_hor0, uint32_t coef_hor1,
                                  uint32_t coef_ver0, uint32_t coef_ver1)
{
    v16u8 src0, src1, src2, src3, src4, out0, out1;
    v8u16 res_hz0, res_hz1, res_hz2, res_hz3, res_hz4;
    v8u16 res_vt0, res_vt1, res_vt2, res_vt3;
    v16i8 mask;
    v16i8 coeff_hz_vec0 = __msa_fill_b(coef_hor0);
    v16i8 coeff_hz_vec1 = __msa_fill_b(coef_hor1);
    v16u8 coeff_hz_vec = (v16u8) __msa_ilvr_b(coeff_hz_vec0, coeff_hz_vec1);
    v8u16 coeff_vt_vec0 = (v8u16) __msa_fill_h(coef_ver0);
    v8u16 coeff_vt_vec1 = (v8u16) __msa_fill_h(coef_ver1);

    mask = LD_SB(&chroma_mask_arr[32]);

    src0 = LD_UB(src);
    src += stride;

    src0 = (v16u8) __msa_vshf_b(mask, (v16i8) src0, (v16i8) src0);
    res_hz0 = __msa_dotp_u_h(src0, coeff_hz_vec);

    LD_UB4(src, stride, src1, src2, src3, src4);
    src += (4 * stride);

    VSHF_B2_UB(src1, src1, src2, src2, mask, mask, src1, src2);
    VSHF_B2_UB(src3, src3, src4, src4, mask, mask, src3, src4);
    DOTP_UB4_UH(src1, src2, src3, src4, coeff_hz_vec, coeff_hz_vec,
                coeff_hz_vec, coeff_hz_vec, res_hz1, res_hz2, res_hz3, res_hz4);
    MUL4(res_hz1, coeff_vt_vec0, res_hz2, coeff_vt_vec0, res_hz3, coeff_vt_vec0,
         res_hz4, coeff_vt_vec0, res_vt0, res_vt1, res_vt2, res_vt3);

    res_vt0 += (res_hz0 * coeff_vt_vec1);
    res_vt1 += (res_hz1 * coeff_vt_vec1);
    res_vt2 += (res_hz2 * coeff_vt_vec1);
    res_vt3 += (res_hz3 * coeff_vt_vec1);

    SRARI_H4_UH(res_vt0, res_vt1, res_vt2, res_vt3, 6);
    SAT_UH4_UH(res_vt0, res_vt1, res_vt2, res_vt3, 7);
    PCKEV_B2_UB(res_vt1, res_vt0, res_vt3, res_vt2, out0, out1);
    ST8x4_UB(out0, out1, dst, stride);
}

static void avc_chroma_hv_8x8_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                  uint32_t coef_hor0, uint32_t coef_hor1,
                                  uint32_t coef_ver0, uint32_t coef_ver1)
{
    v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
    v16u8 out0, out1, out2, out3;
    v8u16 res_hz0, res_hz1, res_hz2, res_hz3, res_hz4;
    v8u16 res_hz5, res_hz6, res_hz7, res_hz8;
    v8u16 res_vt0, res_vt1, res_vt2, res_vt3;
    v8u16 res_vt4, res_vt5, res_vt6, res_vt7;
    v16i8 mask;
    v16i8 coeff_hz_vec0 = __msa_fill_b(coef_hor0);
    v16i8 coeff_hz_vec1 = __msa_fill_b(coef_hor1);
    v16u8 coeff_hz_vec = (v16u8) __msa_ilvr_b(coeff_hz_vec0, coeff_hz_vec1);
    v8u16 coeff_vt_vec0 = (v8u16) __msa_fill_h(coef_ver0);
    v8u16 coeff_vt_vec1 = (v8u16) __msa_fill_h(coef_ver1);

    mask = LD_SB(&chroma_mask_arr[32]);

    LD_UB5(src, stride, src0, src1, src2, src3, src4);
    src += (5 * stride);
    LD_UB4(src, stride, src5, src6, src7, src8);
    src0 = (v16u8) __msa_vshf_b(mask, (v16i8) src0, (v16i8) src0);
    VSHF_B2_UB(src1, src1, src2, src2, mask, mask, src1, src2);
    VSHF_B2_UB(src3, src3, src4, src4, mask, mask, src3, src4);
    VSHF_B2_UB(src5, src5, src6, src6, mask, mask, src5, src6);
    VSHF_B2_UB(src7, src7, src8, src8, mask, mask, src7, src8);
    res_hz0 = __msa_dotp_u_h(src0, coeff_hz_vec);
    DOTP_UB4_UH(src1, src2, src3, src4, coeff_hz_vec, coeff_hz_vec,
                coeff_hz_vec, coeff_hz_vec, res_hz1, res_hz2, res_hz3,
                res_hz4);
    DOTP_UB4_UH(src5, src6, src7, src8, coeff_hz_vec, coeff_hz_vec,
                coeff_hz_vec, coeff_hz_vec, res_hz5, res_hz6, res_hz7, res_hz8);
    MUL4(res_hz1, coeff_vt_vec0, res_hz2, coeff_vt_vec0, res_hz3,
         coeff_vt_vec0, res_hz4, coeff_vt_vec0, res_vt0, res_vt1, res_vt2,
         res_vt3);
    MUL4(res_hz5, coeff_vt_vec0, res_hz6, coeff_vt_vec0, res_hz7,
         coeff_vt_vec0, res_hz8, coeff_vt_vec0, res_vt4, res_vt5, res_vt6,
         res_vt7);
    res_vt0 += (res_hz0 * coeff_vt_vec1);
    res_vt1 += (res_hz1 * coeff_vt_vec1);
    res_vt2 += (res_hz2 * coeff_vt_vec1);
    res_vt3 += (res_hz3 * coeff_vt_vec1);
    res_vt4 += (res_hz4 * coeff_vt_vec1);
    res_vt5 += (res_hz5 * coeff_vt_vec1);
    res_vt6 += (res_hz6 * coeff_vt_vec1);
    res_vt7 += (res_hz7 * coeff_vt_vec1);
    SRARI_H4_UH(res_vt0, res_vt1, res_vt2, res_vt3, 6);
    SRARI_H4_UH(res_vt4, res_vt5, res_vt6, res_vt7, 6);
    SAT_UH4_UH(res_vt0, res_vt1, res_vt2, res_vt3, 7);
    SAT_UH4_UH(res_vt4, res_vt5, res_vt6, res_vt7, 7);
    PCKEV_B2_UB(res_vt1, res_vt0, res_vt3, res_vt2, out0, out1);
    PCKEV_B2_UB(res_vt5, res_vt4, res_vt7, res_vt6, out2, out3);
    ST8x8_UB(out0, out1, out2, out3, dst, stride);
}

static void avc_chroma_hv_8w_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                                 uint32_t coef_hor0, uint32_t coef_hor1,
                                 uint32_t coef_ver0, uint32_t coef_ver1,
                                 int32_t height)
{
    if (4 == height) {
        avc_chroma_hv_8x4_msa(src, dst, stride, coef_hor0, coef_hor1, coef_ver0,
                              coef_ver1);
    } else if (8 == height) {
        avc_chroma_hv_8x8_msa(src, dst, stride, coef_hor0, coef_hor1, coef_ver0,
                              coef_ver1);
    }
}

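/* *_and_aver_dst_* variants: same filtering as above, but the packed
 * result is combined with the existing destination pixels using the
 * rounded average (a + b + 1) >> 1 (__msa_aver_u_b / AVER_UB2_UB), i.e.
 * the "avg" flavour of the chroma MC functions.
 */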
static void avc_chroma_hz_and_aver_dst_2x2_msa(uint8_t *src, uint8_t *dst,
                                               int32_t stride, uint32_t coeff0,
                                               uint32_t coeff1)
{
    uint16_t out0, out1;
    v16i8 src0, src1;
    v16u8 dst_data = { 0 };
    v8u16 res_r;
    v16u8 res;
    v16i8 mask;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    mask = LD_SB(&chroma_mask_arr[0]);

    LD_SB2(src, stride, src0, src1);

    out0 = LH(dst);
    out1 = LH(dst + stride);

    dst_data = (v16u8) __msa_insert_h((v8i16) dst_data, 0, out0);
    dst_data = (v16u8) __msa_insert_h((v8i16) dst_data, 2, out1);

    src0 = __msa_vshf_b(mask, src1, src0);

    res_r = __msa_dotp_u_h((v16u8) src0, coeff_vec);
    res_r <<= 3;
    res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
    res_r = __msa_sat_u_h(res_r, 7);

    res = (v16u8) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
    dst_data = __msa_aver_u_b(res, dst_data);

    out0 = __msa_copy_u_h((v8i16) dst_data, 0);
    out1 = __msa_copy_u_h((v8i16) dst_data, 2);

    SH(out0, dst);
    dst += stride;
    SH(out1, dst);
}

static void avc_chroma_hz_and_aver_dst_2x4_msa(uint8_t *src, uint8_t *dst,
                                               int32_t stride, uint32_t coeff0,
                                               uint32_t coeff1)
{
    uint16_t tp0, tp1, tp2, tp3;
    v16u8 src0, src1, src2, src3;
    v16u8 dst0, dst_data = { 0 };
    v8u16 res_r;
    v16i8 mask;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    mask = LD_SB(&chroma_mask_arr[64]);

    LD_UB4(src, stride, src0, src1, src2, src3);
    tp0 = LH(dst);
    tp1 = LH(dst + stride);
    tp2 = LH(dst + 2 * stride);
    tp3 = LH(dst + 3 * stride);
    dst_data = (v16u8) __msa_insert_h((v8i16) dst_data, 0, tp0);
    dst_data = (v16u8) __msa_insert_h((v8i16) dst_data, 1, tp1);
    dst_data = (v16u8) __msa_insert_h((v8i16) dst_data, 2, tp2);
    dst_data = (v16u8) __msa_insert_h((v8i16) dst_data, 3, tp3);

    VSHF_B2_UB(src0, src1, src2, src3, mask, mask, src0, src2);

    src0 = (v16u8) __msa_ilvr_d((v2i64) src2, (v2i64) src0);

    res_r = __msa_dotp_u_h(src0, coeff_vec);
    res_r <<= 3;
    res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
    res_r = __msa_sat_u_h(res_r, 7);

    dst0 = (v16u8) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
    dst0 = __msa_aver_u_b(dst0, dst_data);

    ST2x4_UB(dst0, 0, dst, stride);
}

static void avc_chroma_hz_and_aver_dst_2w_msa(uint8_t *src, uint8_t *dst,
                                              int32_t stride, uint32_t coeff0,
                                              uint32_t coeff1, int32_t height)
{
    if (2 == height) {
        avc_chroma_hz_and_aver_dst_2x2_msa(src, dst, stride, coeff0, coeff1);
    } else if (4 == height) {
        avc_chroma_hz_and_aver_dst_2x4_msa(src, dst, stride, coeff0, coeff1);
    }
}

static void avc_chroma_hz_and_aver_dst_4x2_msa(uint8_t *src, uint8_t *dst,
                                               int32_t stride, uint32_t coeff0,
                                               uint32_t coeff1)
{
    uint32_t load0, load1;
    v16i8 src0, src1;
    v16u8 dst_data = { 0 };
    v8u16 res_r;
    v16i8 res, mask;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    mask = LD_SB(&chroma_mask_arr[0]);

    LD_SB2(src, stride, src0, src1);

    LW2(dst, stride, load0, load1);

    INSERT_W2_UB(load0, load1, dst_data);

    src0 = __msa_vshf_b(mask, src1, src0);

    res_r = __msa_dotp_u_h((v16u8) src0, coeff_vec);
    res_r <<= 3;
    res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
    res_r = __msa_sat_u_h(res_r, 7);
    res = __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
    dst_data = __msa_aver_u_b((v16u8) res, dst_data);

    ST4x2_UB(dst_data, dst, stride);
}

static void avc_chroma_hz_and_aver_dst_4x4_msa(uint8_t *src, uint8_t *dst,
                                               int32_t stride, uint32_t coeff0,
                                               uint32_t coeff1)
{
    uint32_t tp0, tp1, tp2, tp3;
    v16u8 src0, src1, src2, src3;
    v16u8 out, dst_data = { 0 };
    v16i8 mask;
    v8u16 res0_r, res1_r;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    mask = LD_SB(&chroma_mask_arr[0]);

    LD_UB4(src, stride, src0, src1, src2, src3);
    LW4(dst, stride, tp0, tp1, tp2, tp3);
    INSERT_W4_UB(tp0, tp1, tp2, tp3, dst_data);
    VSHF_B2_UB(src0, src1, src2, src3, mask, mask, src0, src2);
    DOTP_UB2_UH(src0, src2, coeff_vec, coeff_vec, res0_r, res1_r);
    res0_r <<= 3;
    res1_r <<= 3;
    SRARI_H2_UH(res0_r, res1_r, 6);
    SAT_UH2_UH(res0_r, res1_r, 7);
    out = (v16u8) __msa_pckev_b((v16i8) res1_r, (v16i8) res0_r);
    out = __msa_aver_u_b(out, dst_data);
    ST4x4_UB(out, out, 0, 1, 2, 3, dst, stride);
}

static void avc_chroma_hz_and_aver_dst_4x8_msa(uint8_t *src, uint8_t *dst,
                                               int32_t stride, uint32_t coeff0,
                                               uint32_t coeff1)
{
    uint32_t tp0, tp1, tp2, tp3;
    v16u8 src0, src1, src2, src3, src4, src5, src6, src7, out0, out1;
    v16u8 dst0 = { 0 }, dst1 = { 0 };
    v16i8 mask;
    v8u16 res0, res1, res2, res3;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    mask = LD_SB(&chroma_mask_arr[0]);

    LD_UB8(src, stride, src0, src1, src2, src3, src4, src5, src6, src7);
    LW4(dst, stride, tp0, tp1, tp2, tp3);
    INSERT_W4_UB(tp0, tp1, tp2, tp3, dst0);
    LW4(dst + 4 * stride, stride, tp0, tp1, tp2, tp3);
    INSERT_W4_UB(tp0, tp1, tp2, tp3, dst1);
    VSHF_B2_UB(src0, src1, src2, src3, mask, mask, src0, src2);
    VSHF_B2_UB(src4, src5, src6, src7, mask, mask, src4, src6);
    DOTP_UB2_UH(src0, src2, coeff_vec, coeff_vec, res0, res1);
    DOTP_UB2_UH(src4, src6, coeff_vec, coeff_vec, res2, res3);
    SLLI_4V(res0, res1, res2, res3, 3);
    SRARI_H4_UH(res0, res1, res2, res3, 6);
    SAT_UH4_UH(res0, res1, res2, res3, 7);
    PCKEV_B2_UB(res1, res0, res3, res2, out0, out1);
    AVER_UB2_UB(out0, dst0, out1, dst1, out0, out1);
    ST4x8_UB(out0, out1, dst, stride);
}

static void avc_chroma_hz_and_aver_dst_4w_msa(uint8_t *src, uint8_t *dst,
                                              int32_t stride, uint32_t coeff0,
                                              uint32_t coeff1, int32_t height)
{
    if (2 == height) {
        avc_chroma_hz_and_aver_dst_4x2_msa(src, dst, stride, coeff0, coeff1);
    } else if (4 == height) {
        avc_chroma_hz_and_aver_dst_4x4_msa(src, dst, stride, coeff0, coeff1);
    } else if (8 == height) {
        avc_chroma_hz_and_aver_dst_4x8_msa(src, dst, stride, coeff0, coeff1);
    }
}

static void avc_chroma_hz_and_aver_dst_8x4_msa(uint8_t *src, uint8_t *dst,
                                               int32_t stride, uint32_t coeff0,
                                               uint32_t coeff1)
{
    uint64_t tp0, tp1, tp2, tp3;
    v16u8 src0, src1, src2, src3, out0, out1;
    v16u8 dst0 = { 0 }, dst1 = { 0 };
    v8u16 res0, res1, res2, res3;
    v16i8 mask;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    mask = LD_SB(&chroma_mask_arr[32]);
    LD_UB4(src, stride, src0, src1, src2, src3);
    LD4(dst, stride, tp0, tp1, tp2, tp3);
    INSERT_D2_UB(tp0, tp1, dst0);
    INSERT_D2_UB(tp2, tp3, dst1);
    VSHF_B2_UB(src0, src0, src1, src1, mask, mask, src0, src1);
    VSHF_B2_UB(src2, src2, src3, src3, mask, mask, src2, src3);
    DOTP_UB4_UH(src0, src1, src2, src3, coeff_vec, coeff_vec, coeff_vec,
                coeff_vec, res0, res1, res2, res3);
    SLLI_4V(res0, res1, res2, res3, 3);
    SRARI_H4_UH(res0, res1, res2, res3, 6);
    SAT_UH4_UH(res0, res1, res2, res3, 7);
    PCKEV_B2_UB(res1, res0, res3, res2, out0, out1);
    AVER_UB2_UB(out0, dst0, out1, dst1, dst0, dst1);
    ST8x4_UB(dst0, dst1, dst, stride);
}

static void avc_chroma_hz_and_aver_dst_8x8_msa(uint8_t *src, uint8_t *dst,
                                               int32_t stride, uint32_t coeff0,
                                               uint32_t coeff1)
{
    uint64_t tp0, tp1, tp2, tp3;
    v16u8 src0, src1, src2, src3, src4, src5, src6, src7;
    v16u8 out0, out1, out2, out3;
    v16u8 dst0 = { 0 }, dst1 = { 0 }, dst2 = { 0 }, dst3 = { 0 };
    v8u16 res0, res1, res2, res3, res4, res5, res6, res7;
    v16i8 mask;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    mask = LD_SB(&chroma_mask_arr[32]);

    LD_UB8(src, stride, src0, src1, src2, src3, src4, src5, src6, src7);
    LD4(dst, stride, tp0, tp1, tp2, tp3);
    INSERT_D2_UB(tp0, tp1, dst0);
    INSERT_D2_UB(tp2, tp3, dst1);
    LD4(dst + 4 * stride, stride, tp0, tp1, tp2, tp3);
    INSERT_D2_UB(tp0, tp1, dst2);
    INSERT_D2_UB(tp2, tp3, dst3);
    VSHF_B2_UB(src0, src0, src1, src1, mask, mask, src0, src1);
    VSHF_B2_UB(src2, src2, src3, src3, mask, mask, src2, src3);
    VSHF_B2_UB(src4, src4, src5, src5, mask, mask, src4, src5);
    VSHF_B2_UB(src6, src6, src7, src7, mask, mask, src6, src7);
    DOTP_UB4_UH(src0, src1, src2, src3, coeff_vec, coeff_vec, coeff_vec,
                coeff_vec, res0, res1, res2, res3);
    DOTP_UB4_UH(src4, src5, src6, src7, coeff_vec, coeff_vec, coeff_vec,
                coeff_vec, res4, res5, res6, res7);
    SLLI_4V(res0, res1, res2, res3, 3);
    SLLI_4V(res4, res5, res6, res7, 3);
    SRARI_H4_UH(res0, res1, res2, res3, 6);
    SRARI_H4_UH(res4, res5, res6, res7, 6);
    SAT_UH4_UH(res0, res1, res2, res3, 7);
    SAT_UH4_UH(res4, res5, res6, res7, 7);
    PCKEV_B2_UB(res1, res0, res3, res2, out0, out1);
    PCKEV_B2_UB(res5, res4, res7, res6, out2, out3);
    AVER_UB2_UB(out0, dst0, out1, dst1, out0, out1);
    AVER_UB2_UB(out2, dst2, out3, dst3, out2, out3);
    ST8x8_UB(out0, out1, out2, out3, dst, stride);
}

static void avc_chroma_hz_and_aver_dst_8w_msa(uint8_t *src, uint8_t *dst,
                                              int32_t stride, uint32_t coeff0,
                                              uint32_t coeff1, int32_t height)
{
    if (4 == height) {
        avc_chroma_hz_and_aver_dst_8x4_msa(src, dst, stride, coeff0, coeff1);
    } else if (8 == height) {
        avc_chroma_hz_and_aver_dst_8x8_msa(src, dst, stride, coeff0, coeff1);
    }
}

static void avc_chroma_vt_and_aver_dst_2x2_msa(uint8_t *src, uint8_t *dst,
                                               int32_t stride, uint32_t coeff0,
                                               uint32_t coeff1)
{
    uint16_t out0, out1;
    v16i8 src0, src1, src2, tmp0, tmp1, res;
    v16u8 dst_data = { 0 };
    v8i16 out;
    v8u16 res_r;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    LD_SB3(src, stride, src0, src1, src2);
    out0 = LH(dst);
    out1 = LH(dst + stride);

    dst_data = (v16u8) __msa_insert_h((v8i16) dst_data, 0, out0);
    dst_data = (v16u8) __msa_insert_h((v8i16) dst_data, 2, out1);

    ILVR_B2_SB(src1, src0, src2, src1, tmp0, tmp1);

    tmp0 = (v16i8) __msa_ilvr_d((v2i64) tmp1, (v2i64) tmp0);
    res_r = __msa_dotp_u_h((v16u8) tmp0, coeff_vec);
    res_r <<= 3;
    res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
    res_r = __msa_sat_u_h(res_r, 7);
    res = __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
    out = (v8i16) __msa_aver_u_b((v16u8) res, dst_data);
    out0 = __msa_copy_u_h(out, 0);
    out1 = __msa_copy_u_h(out, 2);

    SH(out0, dst);
    dst += stride;
    SH(out1, dst);
}

static void avc_chroma_vt_and_aver_dst_2x4_msa(uint8_t *src, uint8_t *dst,
                                               int32_t stride, uint32_t coeff0,
                                               uint32_t coeff1)
{
    uint16_t tp0, tp1, tp2, tp3;
    v16i8 src0, src1, src2, src3, src4;
    v16u8 tmp0, tmp1, tmp2, tmp3;
    v8u16 res_r;
    v8i16 res;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);
    v16u8 dst_data = { 0 };

    LD_SB5(src, stride, src0, src1, src2, src3, src4);

    tp0 = LH(dst);
    tp1 = LH(dst + stride);
    tp2 = LH(dst + 2 * stride);
    tp3 = LH(dst + 3 * stride);
    dst_data = (v16u8) __msa_insert_h((v8i16) dst_data, 0, tp0);
    dst_data = (v16u8) __msa_insert_h((v8i16) dst_data, 1, tp1);
    dst_data = (v16u8) __msa_insert_h((v8i16) dst_data, 2, tp2);
    dst_data = (v16u8) __msa_insert_h((v8i16) dst_data, 3, tp3);

    ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3,
               tmp0, tmp1, tmp2, tmp3);
    ILVR_W2_UB(tmp1, tmp0, tmp3, tmp2, tmp0, tmp2);

    tmp0 = (v16u8) __msa_ilvr_d((v2i64) tmp2, (v2i64) tmp0);

    res_r = __msa_dotp_u_h(tmp0, coeff_vec);
    res_r <<= 3;
    res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
    res_r = __msa_sat_u_h(res_r, 7);

    res = (v8i16) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
    res = (v8i16) __msa_aver_u_b((v16u8) res, dst_data);

    ST2x4_UB(res, 0, dst, stride);
}

static void avc_chroma_vt_and_aver_dst_2w_msa(uint8_t *src, uint8_t *dst,
                                              int32_t stride, uint32_t coeff0,
                                              uint32_t coeff1, int32_t height)
{
    if (2 == height) {
        avc_chroma_vt_and_aver_dst_2x2_msa(src, dst, stride, coeff0, coeff1);
    } else if (4 == height) {
        avc_chroma_vt_and_aver_dst_2x4_msa(src, dst, stride, coeff0, coeff1);
    }
}

static void avc_chroma_vt_and_aver_dst_4x2_msa(uint8_t *src, uint8_t *dst,
                                               int32_t stride, uint32_t coeff0,
                                               uint32_t coeff1)
{
    uint32_t load0, load1;
    v16u8 src0, src1, src2, tmp0, tmp1;
    v16u8 dst_data = { 0 };
    v8u16 res_r;
    v16u8 res;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    LD_UB3(src, stride, src0, src1, src2);

    LW2(dst, stride, load0, load1);

    INSERT_W2_UB(load0, load1, dst_data);
    ILVR_B2_UB(src1, src0, src2, src1, tmp0, tmp1);

    tmp0 = (v16u8) __msa_ilvr_d((v2i64) tmp1, (v2i64) tmp0);

    res_r = __msa_dotp_u_h(tmp0, coeff_vec);
    res_r <<= 3;
    res_r = (v8u16) __msa_srari_h((v8i16) res_r, 6);
    res_r = __msa_sat_u_h(res_r, 7);
    res = (v16u8) __msa_pckev_b((v16i8) res_r, (v16i8) res_r);
    res = __msa_aver_u_b(res, dst_data);

    ST4x2_UB(res, dst, stride);
}

static void avc_chroma_vt_and_aver_dst_4x4_msa(uint8_t *src, uint8_t *dst,
                                               int32_t stride, uint32_t coeff0,
                                               uint32_t coeff1)
{
    uint32_t tp0, tp1, tp2, tp3;
    v16u8 src0, src1, src2, src3, src4;
    v16u8 tmp0, tmp1, tmp2, tmp3;
    v16u8 dst0 = { 0 };
    v8u16 res0_r, res1_r;
    v16u8 out;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    LD_UB5(src, stride, src0, src1, src2, src3, src4);
    LW4(dst, stride, tp0, tp1, tp2, tp3);
    INSERT_W4_UB(tp0, tp1, tp2, tp3, dst0);
    ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, tmp0, tmp1, tmp2,
               tmp3);
    ILVR_D2_UB(tmp1, tmp0, tmp3, tmp2, tmp0, tmp2);
    DOTP_UB2_UH(tmp0, tmp2, coeff_vec, coeff_vec, res0_r, res1_r);
    res0_r <<= 3;
    res1_r <<= 3;
    SRARI_H2_UH(res0_r, res1_r, 6);
    SAT_UH2_UH(res0_r, res1_r, 7);
    out = (v16u8) __msa_pckev_b((v16i8) res1_r, (v16i8) res0_r);
    out = __msa_aver_u_b(out, dst0);
    ST4x4_UB(out, out, 0, 1, 2, 3, dst, stride);
}

static void avc_chroma_vt_and_aver_dst_4x8_msa(uint8_t *src, uint8_t *dst,
                                               int32_t stride, uint32_t coeff0,
                                               uint32_t coeff1)
{
    uint32_t tp0, tp1, tp2, tp3;
    v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
    v16u8 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, out0, out1;
    v16u8 dst0 = { 0 }, dst1 = { 0 };
    v8u16 res0, res1, res2, res3;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    LD_UB5(src, stride, src0, src1, src2, src3, src4);
    src += (5 * stride);
    LD_UB4(src, stride, src5, src6, src7, src8);
    LW4(dst, stride, tp0, tp1, tp2, tp3);
    INSERT_W4_UB(tp0, tp1, tp2, tp3, dst0);
    LW4(dst + 4 * stride, stride, tp0, tp1, tp2, tp3);
    INSERT_W4_UB(tp0, tp1, tp2, tp3, dst1);
    ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3, tmp0, tmp1, tmp2,
               tmp3);
    ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7, tmp4, tmp5, tmp6,
               tmp7);
    ILVR_D2_UB(tmp1, tmp0, tmp3, tmp2, tmp0, tmp2);
    ILVR_D2_UB(tmp5, tmp4, tmp7, tmp6, tmp4, tmp6);
    DOTP_UB2_UH(tmp0, tmp2, coeff_vec, coeff_vec, res0, res1);
    DOTP_UB2_UH(tmp4, tmp6, coeff_vec, coeff_vec, res2, res3);
    SLLI_4V(res0, res1, res2, res3, 3);
    SRARI_H4_UH(res0, res1, res2, res3, 6);
    SAT_UH4_UH(res0, res1, res2, res3, 7);
    PCKEV_B2_UB(res1, res0, res3, res2, out0, out1);
    AVER_UB2_UB(out0, dst0, out1, dst1, out0, out1);
    ST4x8_UB(out0, out1, dst, stride);
}

static void avc_chroma_vt_and_aver_dst_4w_msa(uint8_t *src, uint8_t *dst,
                                              int32_t stride, uint32_t coeff0,
                                              uint32_t coeff1, int32_t height)
{
    if (2 == height) {
        avc_chroma_vt_and_aver_dst_4x2_msa(src, dst, stride, coeff0, coeff1);
    } else if (4 == height) {
        avc_chroma_vt_and_aver_dst_4x4_msa(src, dst, stride, coeff0, coeff1);
    } else if (8 == height) {
        avc_chroma_vt_and_aver_dst_4x8_msa(src, dst, stride, coeff0, coeff1);
    }
}

static void avc_chroma_vt_and_aver_dst_8x4_msa(uint8_t *src, uint8_t *dst,
                                               int32_t stride, uint32_t coeff0,
                                               uint32_t coeff1)
{
    uint64_t tp0, tp1, tp2, tp3;
    v16u8 src0, src1, src2, src3, src4;
    v16u8 out0, out1;
    v8u16 res0, res1, res2, res3;
    v16u8 dst0 = { 0 }, dst1 = { 0 };
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    LD_UB5(src, stride, src0, src1, src2, src3, src4);
    LD4(dst, stride, tp0, tp1, tp2, tp3);
    INSERT_D2_UB(tp0, tp1, dst0);
    INSERT_D2_UB(tp2, tp3, dst1);
    ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3,
               src0, src1, src2, src3);
    DOTP_UB4_UH(src0, src1, src2, src3, coeff_vec, coeff_vec, coeff_vec,
                coeff_vec, res0, res1, res2, res3);
    SLLI_4V(res0, res1, res2, res3, 3);
    SRARI_H4_UH(res0, res1, res2, res3, 6);
    SAT_UH4_UH(res0, res1, res2, res3, 7);
    PCKEV_B2_UB(res1, res0, res3, res2, out0, out1);
    AVER_UB2_UB(out0, dst0, out1, dst1, out0, out1);
    ST8x4_UB(out0, out1, dst, stride);
}

static void avc_chroma_vt_and_aver_dst_8x8_msa(uint8_t *src, uint8_t *dst,
                                               int32_t stride, uint32_t coeff0,
                                               uint32_t coeff1)
{
    uint64_t tp0, tp1, tp2, tp3;
    v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
    v16u8 out0, out1, out2, out3;
    v16u8 dst0 = { 0 }, dst1 = { 0 }, dst2 = { 0 }, dst3 = { 0 };
    v8u16 res0, res1, res2, res3, res4, res5, res6, res7;
    v16i8 coeff_vec0 = __msa_fill_b(coeff0);
    v16i8 coeff_vec1 = __msa_fill_b(coeff1);
    v16u8 coeff_vec = (v16u8) __msa_ilvr_b(coeff_vec0, coeff_vec1);

    LD_UB5(src, stride, src0, src1, src2, src3, src4);
    src += (5 * stride);
    LD_UB4(src, stride, src5, src6, src7, src8);
    LD4(dst, stride, tp0, tp1, tp2, tp3);
    INSERT_D2_UB(tp0, tp1, dst0);
    INSERT_D2_UB(tp2, tp3, dst1);
    LD4(dst + 4 * stride, stride, tp0, tp1, tp2, tp3);
    INSERT_D2_UB(tp0, tp1, dst2);
    INSERT_D2_UB(tp2, tp3, dst3);
    ILVR_B4_UB(src1, src0, src2, src1, src3, src2, src4, src3,
               src0, src1, src2, src3);
    ILVR_B4_UB(src5, src4, src6, src5, src7, src6, src8, src7,
               src4, src5, src6, src7);
    DOTP_UB4_UH(src0, src1, src2, src3, coeff_vec, coeff_vec, coeff_vec,
                coeff_vec, res0, res1, res2, res3);
    DOTP_UB4_UH(src4, src5, src6, src7, coeff_vec, coeff_vec, coeff_vec,
                coeff_vec, res4, res5, res6, res7);
    SLLI_4V(res0, res1, res2, res3, 3);
    SLLI_4V(res4, res5, res6, res7, 3);
    SRARI_H4_UH(res0, res1, res2, res3, 6);
    SRARI_H4_UH(res4, res5, res6, res7, 6);
    SAT_UH4_UH(res0, res1, res2, res3, 7);
    SAT_UH4_UH(res4, res5, res6, res7, 7);
    PCKEV_B2_UB(res1, res0, res3, res2, out0, out1);
    PCKEV_B2_UB(res5, res4, res7, res6, out2, out3);
    AVER_UB2_UB(out0, dst0, out1, dst1, out0, out1);
    AVER_UB2_UB(out2, dst2, out3, dst3, out2, out3);
    ST8x8_UB(out0, out1, out2, out3, dst, stride);
}

static void avc_chroma_vt_and_aver_dst_8w_msa(uint8_t *src, uint8_t *dst,
                                              int32_t stride, uint32_t coeff0,
                                              uint32_t coeff1, int32_t height)
{
    if (4 == height) {
        avc_chroma_vt_and_aver_dst_8x4_msa(src, dst, stride, coeff0, coeff1);
    } else if (8 == height) {
        avc_chroma_vt_and_aver_dst_8x8_msa(src, dst, stride, coeff0, coeff1);
    }
}

static void avc_chroma_hv_and_aver_dst_2x2_msa(uint8_t *src, uint8_t *dst,
                                               int32_t stride,
                                               uint32_t coef_hor0,
                                               uint32_t coef_hor1,
                                               uint32_t coef_ver0,
                                               uint32_t coef_ver1)
{
    uint16_t out0, out1;
    v16u8 dst0 = { 0 };
    v16u8 src0, src1, src2;
    v8u16 res_hz0, res_hz1, res_vt0, res_vt1;
    v16i8 res, mask;
    v16i8 coeff_hz_vec0 = __msa_fill_b(coef_hor0);
    v16i8 coeff_hz_vec1 = __msa_fill_b(coef_hor1);
    v16u8 coeff_hz_vec = (v16u8) __msa_ilvr_b(coeff_hz_vec0, coeff_hz_vec1);
    v8u16 coeff_vt_vec0 = (v8u16) __msa_fill_h(coef_ver0);
    v8u16 coeff_vt_vec1 = (v8u16) __msa_fill_h(coef_ver1);

    mask = LD_SB(&chroma_mask_arr[48]);

    LD_UB3(src, stride, src0, src1, src2);
    out0 = LH(dst);
    out1 = LH(dst + stride);
    dst0 = (v16u8) __msa_insert_h((v8i16) dst0, 0, out0);
    dst0 = (v16u8) __msa_insert_h((v8i16) dst0, 1, out1);
    VSHF_B2_UB(src0, src1, src1, src2, mask, mask, src0, src1);
    DOTP_UB2_UH(src0, src1, coeff_hz_vec, coeff_hz_vec, res_hz0, res_hz1);
    MUL2(res_hz0, coeff_vt_vec1, res_hz1, coeff_vt_vec0, res_vt0, res_vt1);

    res_vt0 += res_vt1;
    res_vt0 = (v8u16) __msa_srari_h((v8i16) res_vt0, 6);
    res_vt0 = __msa_sat_u_h(res_vt0, 7);
    res = __msa_pckev_b((v16i8) res_vt0, (v16i8) res_vt0);
    dst0 = __msa_aver_u_b((v16u8) res, dst0);
    out0 = __msa_copy_u_h((v8i16) dst0, 0);
    out1 = __msa_copy_u_h((v8i16) dst0, 1);

    SH(out0, dst);
    dst += stride;
    SH(out1, dst);
}

static void avc_chroma_hv_and_aver_dst_2x4_msa(uint8_t *src, uint8_t *dst,
                                               int32_t stride,
                                               uint32_t coef_hor0,
                                               uint32_t coef_hor1,
                                               uint32_t coef_ver0,
                                               uint32_t coef_ver1)
{
    uint16_t tp0, tp1, tp2, tp3;
    v16u8 src0, src1, src2, src3, src4;
    v16u8 tmp0, tmp1, tmp2, tmp3;
    v16u8 dst0 = { 0 };
    v8u16 res_hz0, res_hz1, res_vt0, res_vt1;
    v16i8 res, mask;
    v16i8 coeff_hz_vec0 = __msa_fill_b(coef_hor0);
    v16i8 coeff_hz_vec1 = __msa_fill_b(coef_hor1);
    v16u8 coeff_hz_vec = (v16u8) __msa_ilvr_b(coeff_hz_vec0, coeff_hz_vec1);
    v8u16 coeff_vt_vec0 = (v8u16) __msa_fill_h(coef_ver0);
    v8u16 coeff_vt_vec1 = (v8u16) __msa_fill_h(coef_ver1);

    mask = LD_SB(&chroma_mask_arr[48]);

    LD_UB5(src, stride, src0, src1, src2, src3, src4);
    tp0 = LH(dst);
    tp1 = LH(dst + stride);
    tp2 = LH(dst + 2 * stride);
    tp3 = LH(dst + 3 * stride);
    dst0 = (v16u8) __msa_insert_h((v8i16) dst0, 0, tp0);
    dst0 = (v16u8) __msa_insert_h((v8i16) dst0, 1, tp1);
    dst0 = (v16u8) __msa_insert_h((v8i16) dst0, 2, tp2);
    dst0 = (v16u8) __msa_insert_h((v8i16) dst0, 3, tp3);
    VSHF_B2_UB(src0, src1, src2, src3, mask, mask, tmp0, tmp1);
    VSHF_B2_UB(src1, src2, src3, src4, mask, mask, tmp2, tmp3);
    ILVR_D2_UB(tmp1, tmp0, tmp3, tmp2, src0, src1);
    DOTP_UB2_UH(src0, src1, coeff_hz_vec, coeff_hz_vec, res_hz0, res_hz1);
    MUL2(res_hz0, coeff_vt_vec1, res_hz1, coeff_vt_vec0, res_vt0, res_vt1);

    res_vt0 += res_vt1;
    res_vt0 = (v8u16) __msa_srari_h((v8i16) res_vt0, 6);
    res_vt0 = __msa_sat_u_h(res_vt0, 7);
    res = __msa_pckev_b((v16i8) res_vt0, (v16i8) res_vt0);
    dst0 = __msa_aver_u_b((v16u8) res, dst0);

    ST2x4_UB(dst0, 0, dst, stride);
}

static void avc_chroma_hv_and_aver_dst_2w_msa(uint8_t *src, uint8_t *dst,
                                              int32_t stride,
                                              uint32_t coef_hor0,
                                              uint32_t coef_hor1,
                                              uint32_t coef_ver0,
                                              uint32_t coef_ver1,
                                              int32_t height)
{
    if (2 == height) {
        avc_chroma_hv_and_aver_dst_2x2_msa(src, dst, stride, coef_hor0,
                                           coef_hor1, coef_ver0, coef_ver1);
    } else if (4 == height) {
        avc_chroma_hv_and_aver_dst_2x4_msa(src, dst, stride, coef_hor0,
                                           coef_hor1, coef_ver0, coef_ver1);
    }
}

static void avc_chroma_hv_and_aver_dst_4x2_msa(uint8_t *src, uint8_t *dst,
                                               int32_t stride,
                                               uint32_t coef_hor0,
                                               uint32_t coef_hor1,
                                               uint32_t coef_ver0,
                                               uint32_t coef_ver1)
{
    uint32_t tp0, tp1;
    v16u8 src0, src1, src2;
    v16u8 dst0, dst_data = { 0 };
    v8u16 res_hz0, res_hz1, res_vt0, res_vt1;
    v16i8 mask;
    v16i8 coeff_hz_vec0 = __msa_fill_b(coef_hor0);
    v16i8 coeff_hz_vec1 = __msa_fill_b(coef_hor1);
    v16u8 coeff_hz_vec = (v16u8) __msa_ilvr_b(coeff_hz_vec0, coeff_hz_vec1);
    v8u16 coeff_vt_vec0 = (v8u16) __msa_fill_h(coef_ver0);
    v8u16 coeff_vt_vec1 = (v8u16) __msa_fill_h(coef_ver1);

    mask = LD_SB(&chroma_mask_arr[0]);

    LD_UB3(src, stride, src0, src1, src2);
    LW2(dst, stride, tp0, tp1);
    INSERT_W2_UB(tp0, tp1, dst_data);
    VSHF_B2_UB(src0, src1, src1, src2, mask, mask, src0, src1);
    DOTP_UB2_UH(src0, src1, coeff_hz_vec, coeff_hz_vec, res_hz0, res_hz1);
    MUL2(res_hz0, coeff_vt_vec1, res_hz1, coeff_vt_vec0, res_vt0, res_vt1);

    res_vt0 += res_vt1;
    res_vt0 = (v8u16) __msa_srari_h((v8i16) res_vt0, 6);
    res_vt0 = __msa_sat_u_h(res_vt0, 7);
    dst0 = (v16u8) __msa_pckev_b((v16i8) res_vt0, (v16i8) res_vt0);
    dst0 = __msa_aver_u_b(dst0, dst_data);

    ST4x2_UB(dst0, dst, stride);
}

static void avc_chroma_hv_and_aver_dst_4x4_msa(uint8_t *src, uint8_t *dst,
                                               int32_t stride,
                                               uint32_t coef_hor0,
                                               uint32_t coef_hor1,
                                               uint32_t coef_ver0,
                                               uint32_t coef_ver1)
{
    uint32_t tp0, tp1, tp2, tp3;
    v16u8 src0, src1, src2, src3, src4;
    v16u8 out, dst_data = { 0 };
    v8u16 res_hz0, res_hz1, res_hz2, res_hz3;
    v8u16 res_vt0, res_vt1, res_vt2, res_vt3;
    v16i8 mask;
    v16i8 coeff_hz_vec0 = __msa_fill_b(coef_hor0);
    v16i8 coeff_hz_vec1 = __msa_fill_b(coef_hor1);
    v16u8 coeff_hz_vec = (v16u8) __msa_ilvr_b(coeff_hz_vec0, coeff_hz_vec1);
    v8u16 coeff_vt_vec0 = (v8u16) __msa_fill_h(coef_ver0);
    v8u16 coeff_vt_vec1 = (v8u16) __msa_fill_h(coef_ver1);

    mask = LD_SB(&chroma_mask_arr[0]);

    LD_UB5(src, stride, src0, src1, src2, src3, src4);
    LW4(dst, stride, tp0, tp1, tp2, tp3);
    INSERT_W4_UB(tp0, tp1, tp2, tp3, dst_data);
    VSHF_B2_UB(src0, src1, src1, src2, mask, mask, src0, src1);
    VSHF_B2_UB(src2, src3, src3, src4, mask, mask, src2, src3);
    DOTP_UB4_UH(src0, src1, src2, src3, coeff_hz_vec, coeff_hz_vec,
                coeff_hz_vec, coeff_hz_vec, res_hz0, res_hz1, res_hz2,
                res_hz3);
    MUL4(res_hz0, coeff_vt_vec1, res_hz1, coeff_vt_vec0, res_hz2, coeff_vt_vec1,
         res_hz3, coeff_vt_vec0, res_vt0, res_vt1, res_vt2, res_vt3);
    ADD2(res_vt0, res_vt1, res_vt2, res_vt3, res_vt0, res_vt1);
    SRARI_H2_UH(res_vt0, res_vt1, 6);
    SAT_UH2_UH(res_vt0, res_vt1, 7);
    out = (v16u8) __msa_pckev_b((v16i8) res_vt1, (v16i8) res_vt0);
    out = __msa_aver_u_b(out, dst_data);
    ST4x4_UB(out, out, 0, 1, 2, 3, dst, stride);
}

static void avc_chroma_hv_and_aver_dst_4x8_msa(uint8_t *src, uint8_t *dst,
                                               int32_t stride,
                                               uint32_t coef_hor0,
                                               uint32_t coef_hor1,
                                               uint32_t coef_ver0,
                                               uint32_t coef_ver1)
{
    uint32_t tp0, tp1, tp2, tp3;
    v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8, res0, res1;
    v16u8 dst0 = { 0 }, dst1 = { 0 };
    v8u16 res_hz0, res_hz1, res_hz2, res_hz3, res_hz4, res_hz5, res_hz6, res_hz7;
    v8u16 res_vt0, res_vt1, res_vt2, res_vt3, res_vt4, res_vt5, res_vt6, res_vt7;
    v16i8 mask;
    v16i8 coeff_hz_vec0 = __msa_fill_b(coef_hor0);
    v16i8 coeff_hz_vec1 = __msa_fill_b(coef_hor1);
    v16u8 coeff_hz_vec = (v16u8) __msa_ilvr_b(coeff_hz_vec0, coeff_hz_vec1);
    v8u16 coeff_vt_vec0 = (v8u16) __msa_fill_h(coef_ver0);
    v8u16 coeff_vt_vec1 = (v8u16) __msa_fill_h(coef_ver1);

    mask = LD_SB(&chroma_mask_arr[0]);

    LD_UB5(src, stride, src0, src1, src2, src3, src4);
    src += (5 * stride);
    LD_UB4(src, stride, src5, src6, src7, src8);
    LW4(dst, stride, tp0, tp1, tp2, tp3);
    INSERT_W4_UB(tp0, tp1, tp2, tp3, dst0);
    LW4(dst + 4 * stride, stride, tp0, tp1, tp2, tp3);
    INSERT_W4_UB(tp0, tp1, tp2, tp3, dst1);
    VSHF_B2_UB(src0, src1, src1, src2, mask, mask, src0, src1);
    VSHF_B2_UB(src2, src3, src3, src4, mask, mask, src2, src3);
    VSHF_B2_UB(src4, src5, src5, src6, mask, mask, src4, src5);
    VSHF_B2_UB(src6, src7, src7, src8, mask, mask, src6, src7);
    DOTP_UB4_UH(src0, src1, src2, src3, coeff_hz_vec, coeff_hz_vec,
                coeff_hz_vec, coeff_hz_vec, res_hz0, res_hz1, res_hz2, res_hz3);
    DOTP_UB4_UH(src4, src5, src6, src7, coeff_hz_vec, coeff_hz_vec,
                coeff_hz_vec, coeff_hz_vec, res_hz4, res_hz5, res_hz6, res_hz7);
    MUL4(res_hz0, coeff_vt_vec1, res_hz1, coeff_vt_vec0, res_hz2, coeff_vt_vec1,
         res_hz3, coeff_vt_vec0, res_vt0, res_vt1, res_vt2, res_vt3);
    MUL4(res_hz4, coeff_vt_vec1, res_hz5, coeff_vt_vec0, res_hz6, coeff_vt_vec1,
         res_hz7, coeff_vt_vec0, res_vt4, res_vt5, res_vt6, res_vt7);
    ADD2(res_vt0, res_vt1, res_vt2, res_vt3, res_vt0, res_vt1);
    ADD2(res_vt4, res_vt5, res_vt6, res_vt7, res_vt2, res_vt3);
    SRARI_H4_UH(res_vt0, res_vt1, res_vt2, res_vt3, 6);
    SAT_UH4_UH(res_vt0, res_vt1, res_vt2, res_vt3, 7);
    PCKEV_B2_UB(res_vt1, res_vt0, res_vt3, res_vt2, res0, res1);
    AVER_UB2_UB(res0, dst0, res1, dst1, res0, res1);
    ST4x8_UB(res0, res1, dst, stride);
}

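/* Dispatch 4-pixel-wide H+V chroma interpolation with destination
 * averaging by block height (4x2, 4x4 or 4x8). */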
static void avc_chroma_hv_and_aver_dst_4w_msa(uint8_t *src, uint8_t *dst,
                                              int32_t stride,
                                              uint32_t coef_hor0,
                                              uint32_t coef_hor1,
                                              uint32_t coef_ver0,
                                              uint32_t coef_ver1,
                                              int32_t height)
{
    if (2 == height) {
        avc_chroma_hv_and_aver_dst_4x2_msa(src, dst, stride, coef_hor0,
                                           coef_hor1, coef_ver0, coef_ver1);
    } else if (4 == height) {
        avc_chroma_hv_and_aver_dst_4x4_msa(src, dst, stride, coef_hor0,
                                           coef_hor1, coef_ver0, coef_ver1);
    } else if (8 == height) {
        avc_chroma_hv_and_aver_dst_4x8_msa(src, dst, stride, coef_hor0,
                                           coef_hor1, coef_ver0, coef_ver1);
    }
}

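/* 8x4 variant: full-width rows, so each row is shuffled against itself
 * using the pairing mask at chroma_mask_arr[32]. */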
static void avc_chroma_hv_and_aver_dst_8x4_msa(uint8_t *src, uint8_t *dst,
                                               int32_t stride,
                                               uint32_t coef_hor0,
                                               uint32_t coef_hor1,
                                               uint32_t coef_ver0,
                                               uint32_t coef_ver1)
{
    uint64_t tp0, tp1, tp2, tp3;
    v16u8 src0, src1, src2, src3, src4, out0, out1;
    v8u16 res_hz0, res_hz1, res_hz2;
    v8u16 res_hz3, res_hz4;
    v8u16 res_vt0, res_vt1, res_vt2, res_vt3;
    v16u8 dst0 = { 0 }, dst1 = { 0 };
    v16i8 mask;
    v16i8 coeff_hz_vec0 = __msa_fill_b(coef_hor0);
    v16i8 coeff_hz_vec1 = __msa_fill_b(coef_hor1);
    v16u8 coeff_hz_vec = (v16u8) __msa_ilvr_b(coeff_hz_vec0, coeff_hz_vec1);
    v8u16 coeff_vt_vec0 = (v8u16) __msa_fill_h(coef_ver0);
    v8u16 coeff_vt_vec1 = (v8u16) __msa_fill_h(coef_ver1);

    mask = LD_SB(&chroma_mask_arr[32]);

    src0 = LD_UB(src);
    src += stride;
    src0 = (v16u8) __msa_vshf_b(mask, (v16i8) src0, (v16i8) src0);
    res_hz0 = __msa_dotp_u_h(src0, coeff_hz_vec);
    LD_UB4(src, stride, src1, src2, src3, src4);
    src += (4 * stride);
    LD4(dst, stride, tp0, tp1, tp2, tp3);
    INSERT_D2_UB(tp0, tp1, dst0);
    INSERT_D2_UB(tp2, tp3, dst1);
    VSHF_B2_UB(src1, src1, src2, src2, mask, mask, src1, src2);
    VSHF_B2_UB(src3, src3, src4, src4, mask, mask, src3, src4);
    DOTP_UB4_UH(src1, src2, src3, src4, coeff_hz_vec, coeff_hz_vec,
                coeff_hz_vec, coeff_hz_vec, res_hz1, res_hz2, res_hz3, res_hz4);
    MUL4(res_hz1, coeff_vt_vec0, res_hz2, coeff_vt_vec0, res_hz3, coeff_vt_vec0,
         res_hz4, coeff_vt_vec0, res_vt0, res_vt1, res_vt2, res_vt3);
    res_vt0 += (res_hz0 * coeff_vt_vec1);
    res_vt1 += (res_hz1 * coeff_vt_vec1);
    res_vt2 += (res_hz2 * coeff_vt_vec1);
    res_vt3 += (res_hz3 * coeff_vt_vec1);
    SRARI_H4_UH(res_vt0, res_vt1, res_vt2, res_vt3, 6);
    SAT_UH4_UH(res_vt0, res_vt1, res_vt2, res_vt3, 7);
    PCKEV_B2_UB(res_vt1, res_vt0, res_vt3, res_vt2, out0, out1);
    AVER_UB2_UB(out0, dst0, out1, dst1, out0, out1);
    ST8x4_UB(out0, out1, dst, stride);
}

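/* 8x8 variant: nine source rows, eight filtered rows, averaged with
 * eight destination doublewords. */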
static void avc_chroma_hv_and_aver_dst_8x8_msa(uint8_t *src, uint8_t *dst,
                                               int32_t stride,
                                               uint32_t coef_hor0,
                                               uint32_t coef_hor1,
                                               uint32_t coef_ver0,
                                               uint32_t coef_ver1)
{
    uint64_t tp0, tp1, tp2, tp3;
    v16u8 src0, src1, src2, src3, src4, src5, src6, src7, src8;
    v16u8 out0, out1, out2, out3;
    v16u8 dst0 = { 0 }, dst1 = { 0 }, dst2 = { 0 }, dst3 = { 0 };
    v8u16 res_hz0, res_hz1, res_hz2, res_hz3, res_hz4;
    v8u16 res_hz5, res_hz6, res_hz7, res_hz8;
    v8u16 res_vt0, res_vt1, res_vt2, res_vt3;
    v8u16 res_vt4, res_vt5, res_vt6, res_vt7;
    v16i8 mask;
    v16i8 coeff_hz_vec0 = __msa_fill_b(coef_hor0);
    v16i8 coeff_hz_vec1 = __msa_fill_b(coef_hor1);
    v16u8 coeff_hz_vec = (v16u8) __msa_ilvr_b(coeff_hz_vec0, coeff_hz_vec1);
    v8u16 coeff_vt_vec0 = (v8u16) __msa_fill_h(coef_ver0);
    v8u16 coeff_vt_vec1 = (v8u16) __msa_fill_h(coef_ver1);

    mask = LD_SB(&chroma_mask_arr[32]);

    LD_UB5(src, stride, src0, src1, src2, src3, src4);
    src += (5 * stride);
    LD_UB4(src, stride, src5, src6, src7, src8);
    src0 = (v16u8) __msa_vshf_b(mask, (v16i8) src0, (v16i8) src0);
    VSHF_B2_UB(src1, src1, src2, src2, mask, mask, src1, src2);
    VSHF_B2_UB(src3, src3, src4, src4, mask, mask, src3, src4);
    VSHF_B2_UB(src5, src5, src6, src6, mask, mask, src5, src6);
    VSHF_B2_UB(src7, src7, src8, src8, mask, mask, src7, src8);
    res_hz0 = __msa_dotp_u_h(src0, coeff_hz_vec);
    DOTP_UB4_UH(src1, src2, src3, src4, coeff_hz_vec, coeff_hz_vec,
                coeff_hz_vec, coeff_hz_vec, res_hz1, res_hz2, res_hz3,
                res_hz4);
    DOTP_UB4_UH(src5, src6, src7, src8, coeff_hz_vec, coeff_hz_vec,
                coeff_hz_vec, coeff_hz_vec, res_hz5, res_hz6, res_hz7, res_hz8);
    MUL4(res_hz1, coeff_vt_vec0, res_hz2, coeff_vt_vec0, res_hz3,
         coeff_vt_vec0, res_hz4, coeff_vt_vec0, res_vt0, res_vt1, res_vt2,
         res_vt3);
    MUL4(res_hz5, coeff_vt_vec0, res_hz6, coeff_vt_vec0, res_hz7,
         coeff_vt_vec0, res_hz8, coeff_vt_vec0, res_vt4, res_vt5, res_vt6,
         res_vt7);
    LD4(dst, stride, tp0, tp1, tp2, tp3);
    INSERT_D2_UB(tp0, tp1, dst0);
    INSERT_D2_UB(tp2, tp3, dst1);
    LD4(dst + 4 * stride, stride, tp0, tp1, tp2, tp3);
    INSERT_D2_UB(tp0, tp1, dst2);
    INSERT_D2_UB(tp2, tp3, dst3);
    res_vt0 += (res_hz0 * coeff_vt_vec1);
    res_vt1 += (res_hz1 * coeff_vt_vec1);
    res_vt2 += (res_hz2 * coeff_vt_vec1);
    res_vt3 += (res_hz3 * coeff_vt_vec1);
    res_vt4 += (res_hz4 * coeff_vt_vec1);
    res_vt5 += (res_hz5 * coeff_vt_vec1);
    res_vt6 += (res_hz6 * coeff_vt_vec1);
    res_vt7 += (res_hz7 * coeff_vt_vec1);
    SRARI_H4_UH(res_vt0, res_vt1, res_vt2, res_vt3, 6);
    SRARI_H4_UH(res_vt4, res_vt5, res_vt6, res_vt7, 6);
    SAT_UH4_UH(res_vt0, res_vt1, res_vt2, res_vt3, 7);
    SAT_UH4_UH(res_vt4, res_vt5, res_vt6, res_vt7, 7);
    PCKEV_B2_UB(res_vt1, res_vt0, res_vt3, res_vt2, out0, out1);
    PCKEV_B2_UB(res_vt5, res_vt4, res_vt7, res_vt6, out2, out3);
    AVER_UB2_UB(out0, dst0, out1, dst1, out0, out1);
    AVER_UB2_UB(out2, dst2, out3, dst3, out2, out3);
    ST8x8_UB(out0, out1, out2, out3, dst, stride);
}

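/* Dispatch 8-pixel-wide H+V chroma interpolation with destination
 * averaging by block height (8x4 or 8x8). */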
static void avc_chroma_hv_and_aver_dst_8w_msa(uint8_t *src, uint8_t *dst,
                                              int32_t stride,
                                              uint32_t coef_hor0,
                                              uint32_t coef_hor1,
                                              uint32_t coef_ver0,
                                              uint32_t coef_ver1,
                                              int32_t height)
{
    if (4 == height) {
        avc_chroma_hv_and_aver_dst_8x4_msa(src, dst, stride, coef_hor0,
                                           coef_hor1, coef_ver0, coef_ver1);
    } else if (8 == height) {
        avc_chroma_hv_and_aver_dst_8x8_msa(src, dst, stride, coef_hor0,
                                           coef_hor1, coef_ver0, coef_ver1);
    }
}

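/* Straight copy of a 4-byte-wide block, used when x == 0 && y == 0. */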
static void copy_width4_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                            int32_t height)
{
    uint32_t tp0, tp1, tp2, tp3, tp4, tp5, tp6, tp7;

    if (8 == height) {
        LW4(src, stride, tp0, tp1, tp2, tp3);
        src += 4 * stride;
        LW4(src, stride, tp4, tp5, tp6, tp7);
        SW4(tp0, tp1, tp2, tp3, dst, stride);
        dst += 4 * stride;
        SW4(tp4, tp5, tp6, tp7, dst, stride);
    } else if (4 == height) {
        LW4(src, stride, tp0, tp1, tp2, tp3);
        SW4(tp0, tp1, tp2, tp3, dst, stride);
    } else if (2 == height) {
        LW2(src, stride, tp0, tp1);
        SW(tp0, dst);
        dst += stride;
        SW(tp1, dst);
    }
}

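/* Straight copy of an 8-byte-wide block, used when x == 0 && y == 0. */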
static void copy_width8_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                            int32_t height)
{
    uint64_t src0, src1, src2, src3, src4, src5, src6, src7;

    if (8 == height) {
        LD4(src, stride, src0, src1, src2, src3);
        src += 4 * stride;
        LD4(src, stride, src4, src5, src6, src7);
        SD4(src0, src1, src2, src3, dst, stride);
        dst += 4 * stride;
        SD4(src4, src5, src6, src7, dst, stride);
    } else if (4 == height) {
        LD4(src, stride, src0, src1, src2, src3);
        SD4(src0, src1, src2, src3, dst, stride);
    }
}

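/* Rounded byte-wise average of a 4-byte-wide block into dst, used by the
 * avg entry point when x == 0 && y == 0. */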
static void avg_width4_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                           int32_t height)
{
    uint32_t tp0, tp1, tp2, tp3;
    v16u8 src0 = { 0 }, src1 = { 0 }, dst0 = { 0 }, dst1 = { 0 };

    if (8 == height) {
        LW4(src, stride, tp0, tp1, tp2, tp3);
        src += 4 * stride;
        INSERT_W4_UB(tp0, tp1, tp2, tp3, src0);
        LW4(src, stride, tp0, tp1, tp2, tp3);
        INSERT_W4_UB(tp0, tp1, tp2, tp3, src1);
        LW4(dst, stride, tp0, tp1, tp2, tp3);
        INSERT_W4_UB(tp0, tp1, tp2, tp3, dst0);
        LW4(dst + 4 * stride, stride, tp0, tp1, tp2, tp3);
        INSERT_W4_UB(tp0, tp1, tp2, tp3, dst1);
        AVER_UB2_UB(src0, dst0, src1, dst1, dst0, dst1);
        ST4x8_UB(dst0, dst1, dst, stride);
    } else if (4 == height) {
        LW4(src, stride, tp0, tp1, tp2, tp3);
        INSERT_W4_UB(tp0, tp1, tp2, tp3, src0);
        LW4(dst, stride, tp0, tp1, tp2, tp3);
        INSERT_W4_UB(tp0, tp1, tp2, tp3, dst0);
        dst0 = __msa_aver_u_b(src0, dst0);
        ST4x4_UB(dst0, dst0, 0, 1, 2, 3, dst, stride);
    } else if (2 == height) {
        LW2(src, stride, tp0, tp1);
        INSERT_W2_UB(tp0, tp1, src0);
        LW2(dst, stride, tp0, tp1);
        INSERT_W2_UB(tp0, tp1, dst0);
        dst0 = __msa_aver_u_b(src0, dst0);
        ST4x2_UB(dst0, dst, stride);
    }
}

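/* Rounded byte-wise average of an 8-byte-wide block into dst. */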
static void avg_width8_msa(uint8_t *src, uint8_t *dst, int32_t stride,
                           int32_t height)
{
    uint64_t tp0, tp1, tp2, tp3, tp4, tp5, tp6, tp7;
    v16u8 src0 = { 0 }, src1 = { 0 }, src2 = { 0 }, src3 = { 0 };
    v16u8 dst0 = { 0 }, dst1 = { 0 }, dst2 = { 0 }, dst3 = { 0 };

    if (8 == height) {
        LD4(src, stride, tp0, tp1, tp2, tp3);
        src += 4 * stride;
        LD4(src, stride, tp4, tp5, tp6, tp7);
        INSERT_D2_UB(tp0, tp1, src0);
        INSERT_D2_UB(tp2, tp3, src1);
        INSERT_D2_UB(tp4, tp5, src2);
        INSERT_D2_UB(tp6, tp7, src3);
        LD4(dst, stride, tp0, tp1, tp2, tp3);
        LD4(dst + 4 * stride, stride, tp4, tp5, tp6, tp7);
        INSERT_D2_UB(tp0, tp1, dst0);
        INSERT_D2_UB(tp2, tp3, dst1);
        INSERT_D2_UB(tp4, tp5, dst2);
        INSERT_D2_UB(tp6, tp7, dst3);
        AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3, dst0, dst1,
                    dst2, dst3);
        ST8x8_UB(dst0, dst1, dst2, dst3, dst, stride);
    } else if (4 == height) {
        LD4(src, stride, tp0, tp1, tp2, tp3);
        INSERT_D2_UB(tp0, tp1, src0);
        INSERT_D2_UB(tp2, tp3, src1);
        LD4(dst, stride, tp0, tp1, tp2, tp3);
        INSERT_D2_UB(tp0, tp1, dst0);
        INSERT_D2_UB(tp2, tp3, dst1);
        AVER_UB2_UB(src0, dst0, src1, dst1, dst0, dst1);
        ST8x4_UB(dst0, dst1, dst, stride);
    }
}

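/* Public entry points.  x and y are the chroma sub-pel offsets (0..7); the
 * H.264 chroma prediction for a pixel with neighbours A, B, C, D is
 *
 *     ((8 - x) * (8 - y) * A + x * (8 - y) * B
 *      + (8 - x) * y * C + x * y * D + 32) >> 6
 *
 * which degenerates to a one-dimensional filter when x or y is zero, and
 * to a plain copy (or an average with dst, for the avg variants) when
 * both are zero. */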
void ff_put_h264_chroma_mc8_msa(uint8_t *dst, uint8_t *src,
                                ptrdiff_t stride, int height, int x, int y)
{
    av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);

    if (x && y) {
        avc_chroma_hv_8w_msa(src, dst, stride, x, (8 - x), y, (8 - y), height);
    } else if (x) {
        avc_chroma_hz_8w_msa(src, dst, stride, x, (8 - x), height);
    } else if (y) {
        avc_chroma_vt_8w_msa(src, dst, stride, y, (8 - y), height);
    } else {
        copy_width8_msa(src, dst, stride, height);
    }
}

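/* 4-pixel-wide put, same dispatch pattern as the mc8 version above. */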
void ff_put_h264_chroma_mc4_msa(uint8_t *dst, uint8_t *src,
                                ptrdiff_t stride, int height, int x, int y)
{
    av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);

    if (x && y) {
        avc_chroma_hv_4w_msa(src, dst, stride, x, (8 - x), y, (8 - y), height);
    } else if (x) {
        avc_chroma_hz_4w_msa(src, dst, stride, x, (8 - x), height);
    } else if (y) {
        avc_chroma_vt_4w_msa(src, dst, stride, y, (8 - y), height);
    } else {
        copy_width4_msa(src, dst, stride, height);
    }
}

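/* 2-pixel-wide put; the x == y == 0 case reduces to a scalar 16-bit copy
 * per row. */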
void ff_put_h264_chroma_mc2_msa(uint8_t *dst, uint8_t *src,
                                ptrdiff_t stride, int height, int x, int y)
{
    int32_t cnt;

    av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);

    if (x && y) {
        avc_chroma_hv_2w_msa(src, dst, stride, x, (8 - x), y, (8 - y), height);
    } else if (x) {
        avc_chroma_hz_2w_msa(src, dst, stride, x, (8 - x), height);
    } else if (y) {
        avc_chroma_vt_2w_msa(src, dst, stride, y, (8 - y), height);
    } else {
        for (cnt = height; cnt--;) {
            *((uint16_t *) dst) = *((uint16_t *) src);

            src += stride;
            dst += stride;
        }
    }
}

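/* 8-pixel-wide variant that averages the prediction with dst. */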
void ff_avg_h264_chroma_mc8_msa(uint8_t *dst, uint8_t *src,
                                ptrdiff_t stride, int height, int x, int y)
{
    av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);

    if (x && y) {
        avc_chroma_hv_and_aver_dst_8w_msa(src, dst, stride, x, (8 - x), y,
                                          (8 - y), height);
    } else if (x) {
        avc_chroma_hz_and_aver_dst_8w_msa(src, dst, stride, x, (8 - x), height);
    } else if (y) {
        avc_chroma_vt_and_aver_dst_8w_msa(src, dst, stride, y, (8 - y), height);
    } else {
        avg_width8_msa(src, dst, stride, height);
    }
}

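/* 4-pixel-wide variant that averages the prediction with dst. */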
void ff_avg_h264_chroma_mc4_msa(uint8_t *dst, uint8_t *src,
                                ptrdiff_t stride, int height, int x, int y)
{
    av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);

    if (x && y) {
        avc_chroma_hv_and_aver_dst_4w_msa(src, dst, stride, x, (8 - x), y,
                                          (8 - y), height);
    } else if (x) {
        avc_chroma_hz_and_aver_dst_4w_msa(src, dst, stride, x, (8 - x), height);
    } else if (y) {
        avc_chroma_vt_and_aver_dst_4w_msa(src, dst, stride, y, (8 - y), height);
    } else {
        avg_width4_msa(src, dst, stride, height);
    }
}

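/* 2-pixel-wide variant; the x == y == 0 case reduces to a scalar rounded
 * average per row. */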
void ff_avg_h264_chroma_mc2_msa(uint8_t *dst, uint8_t *src,
                                ptrdiff_t stride, int height, int x, int y)
{
    int32_t cnt;

    av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);

    if (x && y) {
        avc_chroma_hv_and_aver_dst_2w_msa(src, dst, stride, x, (8 - x), y,
                                          (8 - y), height);
    } else if (x) {
        avc_chroma_hz_and_aver_dst_2w_msa(src, dst, stride, x, (8 - x), height);
    } else if (y) {
        avc_chroma_vt_and_aver_dst_2w_msa(src, dst, stride, y, (8 - y), height);
    } else {
        for (cnt = height; cnt--;) {
            dst[0] = (dst[0] + src[0] + 1) >> 1;
            dst[1] = (dst[1] + src[1] + 1) >> 1;

            src += stride;
            dst += stride;
        }
    }
}