/*
 * Copyright (c) 2015 - 2017 Shivraj Patil (Shivraj.Patil@imgtec.com)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <string.h>
#include "libavcodec/vp9dsp.h"
#include "libavutil/mips/generic_macros_msa.h"
#include "vp9dsp_mips.h"

#define VP9_DCT_CONST_BITS 14
#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))

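/* cospi_k_64 = round(16384 * cos(k * pi / 64)): the DCT twiddle factors
 * in Q14 fixed point, which is why VP9_DCT_CONST_BITS is 14. */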
static const int32_t cospi_1_64 = 16364;
static const int32_t cospi_2_64 = 16305;
static const int32_t cospi_3_64 = 16207;
static const int32_t cospi_4_64 = 16069;
static const int32_t cospi_5_64 = 15893;
static const int32_t cospi_6_64 = 15679;
static const int32_t cospi_7_64 = 15426;
static const int32_t cospi_8_64 = 15137;
static const int32_t cospi_9_64 = 14811;
static const int32_t cospi_10_64 = 14449;
static const int32_t cospi_11_64 = 14053;
static const int32_t cospi_12_64 = 13623;
static const int32_t cospi_13_64 = 13160;
static const int32_t cospi_14_64 = 12665;
static const int32_t cospi_15_64 = 12140;
static const int32_t cospi_16_64 = 11585;
static const int32_t cospi_17_64 = 11003;
static const int32_t cospi_18_64 = 10394;
static const int32_t cospi_19_64 = 9760;
static const int32_t cospi_20_64 = 9102;
static const int32_t cospi_21_64 = 8423;
static const int32_t cospi_22_64 = 7723;
static const int32_t cospi_23_64 = 7005;
static const int32_t cospi_24_64 = 6270;
static const int32_t cospi_25_64 = 5520;
static const int32_t cospi_26_64 = 4756;
static const int32_t cospi_27_64 = 3981;
static const int32_t cospi_28_64 = 3196;
static const int32_t cospi_29_64 = 2404;
static const int32_t cospi_30_64 = 1606;
static const int32_t cospi_31_64 = 804;

// 16384 * sqrt(2) * sin(kPi/9) * 2 / 3
static const int32_t sinpi_1_9 = 5283;
static const int32_t sinpi_2_9 = 9929;
static const int32_t sinpi_3_9 = 13377;
static const int32_t sinpi_4_9 = 15212;

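/* Rotate two v8i16 vectors by the constant pair (cnst0, cnst1):
 *   out0[i] = ROUND_POWER_OF_TWO(reg0[i] * cnst0 - reg1[i] * cnst1, 14)
 *   out1[i] = ROUND_POWER_OF_TWO(reg0[i] * cnst1 + reg1[i] * cnst0, 14) */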
#define VP9_DOTP_CONST_PAIR(reg0, reg1, cnst0, cnst1, out0, out1) \
{ \
    v8i16 k0_m = __msa_fill_h(cnst0); \
    v4i32 s0_m, s1_m, s2_m, s3_m; \
    \
    s0_m = (v4i32) __msa_fill_h(cnst1); \
    k0_m = __msa_ilvev_h((v8i16) s0_m, k0_m); \
    \
    ILVRL_H2_SW((-reg1), reg0, s1_m, s0_m); \
    ILVRL_H2_SW(reg0, reg1, s3_m, s2_m); \
    DOTP_SH2_SW(s1_m, s0_m, k0_m, k0_m, s1_m, s0_m); \
    SRARI_W2_SW(s1_m, s0_m, VP9_DCT_CONST_BITS); \
    out0 = __msa_pckev_h((v8i16) s0_m, (v8i16) s1_m); \
    \
    DOTP_SH2_SW(s3_m, s2_m, k0_m, k0_m, s1_m, s0_m); \
    SRARI_W2_SW(s1_m, s0_m, VP9_DCT_CONST_BITS); \
    out1 = __msa_pckev_h((v8i16) s0_m, (v8i16) s1_m); \
}

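/* Four widening dot products, an add/sub butterfly on the 32 bit results,
 * a rounding shift by VP9_DCT_CONST_BITS and a pack back to four v8i16
 * outputs; the workhorse of the paired ADST rotations below. */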
#define VP9_DOT_ADD_SUB_SRARI_PCK(in0, in1, in2, in3, in4, in5, in6, in7, \
                                  dst0, dst1, dst2, dst3) \
{ \
    v4i32 tp0_m, tp1_m, tp2_m, tp3_m, tp4_m; \
    v4i32 tp5_m, tp6_m, tp7_m, tp8_m, tp9_m; \
    \
    DOTP_SH4_SW(in0, in1, in0, in1, in4, in4, in5, in5, \
                tp0_m, tp2_m, tp3_m, tp4_m); \
    DOTP_SH4_SW(in2, in3, in2, in3, in6, in6, in7, in7, \
                tp5_m, tp6_m, tp7_m, tp8_m); \
    BUTTERFLY_4(tp0_m, tp3_m, tp7_m, tp5_m, tp1_m, tp9_m, tp7_m, tp5_m); \
    BUTTERFLY_4(tp2_m, tp4_m, tp8_m, tp6_m, tp3_m, tp0_m, tp4_m, tp2_m); \
    SRARI_W4_SW(tp1_m, tp9_m, tp7_m, tp5_m, VP9_DCT_CONST_BITS); \
    SRARI_W4_SW(tp3_m, tp0_m, tp4_m, tp2_m, VP9_DCT_CONST_BITS); \
    PCKEV_H4_SH(tp1_m, tp3_m, tp9_m, tp0_m, tp7_m, tp4_m, tp5_m, tp2_m, \
                dst0, dst1, dst2, dst3); \
}

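/* Dot products of (in0, in1) with the constant pair vector in2, rounded
 * by VP9_DCT_CONST_BITS and packed back into a single v8i16. */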
#define VP9_DOT_SHIFT_RIGHT_PCK_H(in0, in1, in2) \
( { \
    v8i16 dst_m; \
    v4i32 tp0_m, tp1_m; \
    \
    DOTP_SH2_SW(in0, in1, in2, in2, tp1_m, tp0_m); \
    SRARI_W2_SW(tp1_m, tp0_m, VP9_DCT_CONST_BITS); \
    dst_m = __msa_pckev_h((v8i16) tp1_m, (v8i16) tp0_m); \
    \
    dst_m; \
} )

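/* One pass of the 8 point inverse ADST over eight v8i16 vectors; the odd
 * indexed outputs are produced negated, as the scalar iadst8 flow requires. */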
#define VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7, \
                  out0, out1, out2, out3, out4, out5, out6, out7) \
{ \
    v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m; \
    v8i16 vec0_m, vec1_m, vec2_m, vec3_m, s0_m, s1_m; \
    v8i16 coeff0_m = { cospi_2_64, cospi_6_64, cospi_10_64, cospi_14_64, \
                       cospi_18_64, cospi_22_64, cospi_26_64, cospi_30_64 }; \
    v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64, \
                       -cospi_16_64, cospi_24_64, -cospi_24_64, 0, 0 }; \
    \
    SPLATI_H2_SH(coeff0_m, 0, 7, cnst0_m, cnst1_m); \
    cnst2_m = -cnst0_m; \
    ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m); \
    SPLATI_H2_SH(coeff0_m, 4, 3, cnst2_m, cnst3_m); \
    cnst4_m = -cnst2_m; \
    ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m); \
    \
    ILVRL_H2_SH(in0, in7, vec1_m, vec0_m); \
    ILVRL_H2_SH(in4, in3, vec3_m, vec2_m); \
    VP9_DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \
                              cnst1_m, cnst2_m, cnst3_m, in7, in0, \
                              in4, in3); \
    \
    SPLATI_H2_SH(coeff0_m, 2, 5, cnst0_m, cnst1_m); \
    cnst2_m = -cnst0_m; \
    ILVEV_H2_SH(cnst0_m, cnst1_m, cnst1_m, cnst2_m, cnst0_m, cnst1_m); \
    SPLATI_H2_SH(coeff0_m, 6, 1, cnst2_m, cnst3_m); \
    cnst4_m = -cnst2_m; \
    ILVEV_H2_SH(cnst2_m, cnst3_m, cnst3_m, cnst4_m, cnst2_m, cnst3_m); \
    \
    ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \
    ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \
    \
    VP9_DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \
                              cnst1_m, cnst2_m, cnst3_m, in5, in2, \
                              in6, in1); \
    BUTTERFLY_4(in7, in0, in2, in5, s1_m, s0_m, in2, in5); \
    out7 = -s0_m; \
    out0 = s1_m; \
    \
    SPLATI_H4_SH(coeff1_m, 0, 4, 1, 5, \
                 cnst0_m, cnst1_m, cnst2_m, cnst3_m); \
    \
    ILVEV_H2_SH(cnst3_m, cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst2_m); \
    cnst0_m = __msa_ilvev_h(cnst1_m, cnst0_m); \
    cnst1_m = cnst0_m; \
    \
    ILVRL_H2_SH(in4, in3, vec1_m, vec0_m); \
    ILVRL_H2_SH(in6, in1, vec3_m, vec2_m); \
    VP9_DOT_ADD_SUB_SRARI_PCK(vec0_m, vec1_m, vec2_m, vec3_m, cnst0_m, \
                              cnst2_m, cnst3_m, cnst1_m, out1, out6, \
                              s0_m, s1_m); \
    \
    SPLATI_H2_SH(coeff1_m, 2, 3, cnst0_m, cnst1_m); \
    cnst1_m = __msa_ilvev_h(cnst1_m, cnst0_m); \
    \
    ILVRL_H2_SH(in2, in5, vec1_m, vec0_m); \
    ILVRL_H2_SH(s0_m, s1_m, vec3_m, vec2_m); \
    out3 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst0_m); \
    out4 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec0_m, vec1_m, cnst1_m); \
    out2 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst0_m); \
    out5 = VP9_DOT_SHIFT_RIGHT_PCK_H(vec2_m, vec3_m, cnst1_m); \
    \
    out1 = -out1; \
    out3 = -out3; \
    out5 = -out5; \
}

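/* Two rotations of the same input pair (m0, m1) by the constant pairs c0
 * and c1, each rounded by VP9_DCT_CONST_BITS. */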
#define VP9_MADD_SHORT(m0, m1, c0, c1, res0, res1) \
{ \
    v4i32 madd0_m, madd1_m, madd2_m, madd3_m; \
    v8i16 madd_s0_m, madd_s1_m; \
    \
    ILVRL_H2_SH(m1, m0, madd_s0_m, madd_s1_m); \
    DOTP_SH4_SW(madd_s0_m, madd_s1_m, madd_s0_m, madd_s1_m, \
                c0, c0, c1, c1, madd0_m, madd1_m, madd2_m, madd3_m); \
    SRARI_W4_SW(madd0_m, madd1_m, madd2_m, madd3_m, VP9_DCT_CONST_BITS); \
    PCKEV_H2_SH(madd1_m, madd0_m, madd3_m, madd2_m, res0, res1); \
}

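/* Rotate (inp0, inp1) by (cst0, cst1) and (inp2, inp3) by (cst2, cst3),
 * butterflying the 32 bit products before the rounding shift so the
 * cross-pair additions keep full intermediate precision. */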
#define VP9_MADD_BF(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3, \
                    out0, out1, out2, out3) \
{ \
    v8i16 madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m; \
    v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m, m4_m, m5_m; \
    \
    ILVRL_H2_SH(inp1, inp0, madd_s0_m, madd_s1_m); \
    ILVRL_H2_SH(inp3, inp2, madd_s2_m, madd_s3_m); \
    DOTP_SH4_SW(madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m, \
                cst0, cst0, cst2, cst2, tmp0_m, tmp1_m, tmp2_m, tmp3_m); \
    BUTTERFLY_4(tmp0_m, tmp1_m, tmp3_m, tmp2_m, \
                m4_m, m5_m, tmp3_m, tmp2_m); \
    SRARI_W4_SW(m4_m, m5_m, tmp2_m, tmp3_m, VP9_DCT_CONST_BITS); \
    PCKEV_H2_SH(m5_m, m4_m, tmp3_m, tmp2_m, out0, out1); \
    DOTP_SH4_SW(madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m, \
                cst1, cst1, cst3, cst3, tmp0_m, tmp1_m, tmp2_m, tmp3_m); \
    BUTTERFLY_4(tmp0_m, tmp1_m, tmp3_m, tmp2_m, \
                m4_m, m5_m, tmp3_m, tmp2_m); \
    SRARI_W4_SW(m4_m, m5_m, tmp2_m, tmp3_m, VP9_DCT_CONST_BITS); \
    PCKEV_H2_SH(m5_m, m4_m, tmp3_m, tmp2_m, out2, out3); \
}

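/* Build a v8i16 whose even/odd lanes alternate c0_h and c1_h, the layout
 * expected by the interleaved dot products above. */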
#define VP9_SET_COSPI_PAIR(c0_h, c1_h) \
( { \
    v8i16 out0_m, r0_m, r1_m; \
    \
    r0_m = __msa_fill_h(c0_h); \
    r1_m = __msa_fill_h(c1_h); \
    out0_m = __msa_ilvev_h(r1_m, r0_m); \
    \
    out0_m; \
} )

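/* Add four v8i16 residual rows to an 8x4 block of dst pixels, clip the
 * sums to [0, 255] and store them back. */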
#define VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3) \
{ \
    uint8_t *dst_m = (uint8_t *) (dst); \
    v16u8 dst0_m, dst1_m, dst2_m, dst3_m; \
    v16i8 tmp0_m, tmp1_m; \
    v16i8 zero_m = { 0 }; \
    v8i16 res0_m, res1_m, res2_m, res3_m; \
    \
    LD_UB4(dst_m, dst_stride, dst0_m, dst1_m, dst2_m, dst3_m); \
    ILVR_B4_SH(zero_m, dst0_m, zero_m, dst1_m, zero_m, dst2_m, \
               zero_m, dst3_m, res0_m, res1_m, res2_m, res3_m); \
    ADD4(res0_m, in0, res1_m, in1, res2_m, in2, res3_m, in3, \
         res0_m, res1_m, res2_m, res3_m); \
    CLIP_SH4_0_255(res0_m, res1_m, res2_m, res3_m); \
    PCKEV_B2_SB(res1_m, res0_m, res3_m, res2_m, tmp0_m, tmp1_m); \
    ST8x4_UB(tmp0_m, tmp1_m, dst_m, dst_stride); \
}

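/* One dimension of the 4 point inverse DCT over four v8i16 vectors. */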
#define VP9_IDCT4x4(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
    v8i16 c0_m, c1_m, c2_m, c3_m; \
    v8i16 step0_m, step1_m; \
    v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
    \
    c0_m = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64); \
    c1_m = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64); \
    step0_m = __msa_ilvr_h(in2, in0); \
    DOTP_SH2_SW(step0_m, step0_m, c0_m, c1_m, tmp0_m, tmp1_m); \
    \
    c2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \
    c3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \
    step1_m = __msa_ilvr_h(in3, in1); \
    DOTP_SH2_SW(step1_m, step1_m, c2_m, c3_m, tmp2_m, tmp3_m); \
    SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, VP9_DCT_CONST_BITS); \
    \
    PCKEV_H2_SW(tmp1_m, tmp0_m, tmp3_m, tmp2_m, tmp0_m, tmp2_m); \
    SLDI_B2_0_SW(tmp0_m, tmp2_m, tmp1_m, tmp3_m, 8); \
    BUTTERFLY_4((v8i16) tmp0_m, (v8i16) tmp1_m, \
                (v8i16) tmp2_m, (v8i16) tmp3_m, \
                out0, out1, out2, out3); \
}

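/* One dimension of the 4 point inverse ADST, built on the sinpi_x_9
 * constants. */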
#define VP9_IADST4x4(in0, in1, in2, in3, out0, out1, out2, out3) \
{ \
    v8i16 res0_m, res1_m, c0_m, c1_m; \
    v8i16 k1_m, k2_m, k3_m, k4_m; \
    v8i16 zero_m = { 0 }; \
    v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
    v4i32 int0_m, int1_m, int2_m, int3_m; \
    v8i16 mask_m = { sinpi_1_9, sinpi_2_9, sinpi_3_9, \
                     sinpi_4_9, -sinpi_1_9, -sinpi_2_9, -sinpi_3_9, \
                     -sinpi_4_9 }; \
    \
    SPLATI_H4_SH(mask_m, 3, 0, 1, 2, c0_m, c1_m, k1_m, k2_m); \
    ILVEV_H2_SH(c0_m, c1_m, k1_m, k2_m, c0_m, c1_m); \
    ILVR_H2_SH(in0, in2, in1, in3, res0_m, res1_m); \
    DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp2_m, tmp1_m); \
    int0_m = tmp2_m + tmp1_m; \
    \
    SPLATI_H2_SH(mask_m, 4, 7, k4_m, k3_m); \
    ILVEV_H2_SH(k4_m, k1_m, k3_m, k2_m, c0_m, c1_m); \
    DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp0_m, tmp1_m); \
    int1_m = tmp0_m + tmp1_m; \
    \
    c0_m = __msa_splati_h(mask_m, 6); \
    ILVL_H2_SH(k2_m, c0_m, zero_m, k2_m, c0_m, c1_m); \
    ILVR_H2_SH(in0, in2, in1, in3, res0_m, res1_m); \
    DOTP_SH2_SW(res0_m, res1_m, c0_m, c1_m, tmp0_m, tmp1_m); \
    int2_m = tmp0_m + tmp1_m; \
    \
    c0_m = __msa_splati_h(mask_m, 6); \
    c0_m = __msa_ilvev_h(c0_m, k1_m); \
    \
    res0_m = __msa_ilvr_h((in1), (in3)); \
    tmp0_m = __msa_dotp_s_w(res0_m, c0_m); \
    int3_m = tmp2_m + tmp0_m; \
    \
    res0_m = __msa_ilvr_h((in2), (in3)); \
    c1_m = __msa_ilvev_h(k4_m, k3_m); \
    \
    tmp2_m = __msa_dotp_s_w(res0_m, c1_m); \
    res1_m = __msa_ilvr_h((in0), (in2)); \
    c1_m = __msa_ilvev_h(k1_m, zero_m); \
    \
    tmp3_m = __msa_dotp_s_w(res1_m, c1_m); \
    int3_m += tmp2_m; \
    int3_m += tmp3_m; \
    \
    SRARI_W4_SW(int0_m, int1_m, int2_m, int3_m, VP9_DCT_CONST_BITS); \
    PCKEV_H2_SH(int0_m, int0_m, int1_m, int1_m, out0, out1); \
    PCKEV_H2_SH(int2_m, int2_m, int3_m, int3_m, out2, out3); \
}

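/* Transpose the occupied 4x8 part of eight v8i16 vectors; the unused
 * second half of the result (out4..out7) is cleared to zero. */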
#define TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, \
                           out0, out1, out2, out3, out4, out5, out6, out7) \
{ \
    v8i16 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
    v8i16 tmp0_n, tmp1_n, tmp2_n, tmp3_n; \
    v8i16 zero_m = { 0 }; \
    \
    ILVR_H4_SH(in1, in0, in3, in2, in5, in4, in7, in6, \
               tmp0_n, tmp1_n, tmp2_n, tmp3_n); \
    ILVRL_W2_SH(tmp1_n, tmp0_n, tmp0_m, tmp2_m); \
    ILVRL_W2_SH(tmp3_n, tmp2_n, tmp1_m, tmp3_m); \
    \
    out0 = (v8i16) __msa_ilvr_d((v2i64) tmp1_m, (v2i64) tmp0_m); \
    out1 = (v8i16) __msa_ilvl_d((v2i64) tmp1_m, (v2i64) tmp0_m); \
    out2 = (v8i16) __msa_ilvr_d((v2i64) tmp3_m, (v2i64) tmp2_m); \
    out3 = (v8i16) __msa_ilvl_d((v2i64) tmp3_m, (v2i64) tmp2_m); \
    \
    out4 = zero_m; \
    out5 = zero_m; \
    out6 = zero_m; \
    out7 = zero_m; \
}

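/* DC only case: input[0] is the sole nonzero coefficient, so the inverse
 * transform reduces to one constant added to every pixel of the block. */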
static void vp9_idct4x4_1_add_msa(int16_t *input, uint8_t *dst,
                                  int32_t dst_stride)
{
    int16_t out;
    v8i16 vec;

    out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), VP9_DCT_CONST_BITS);
    out = ROUND_POWER_OF_TWO((out * cospi_16_64), VP9_DCT_CONST_BITS);
    out = ROUND_POWER_OF_TWO(out, 4);
    vec = __msa_fill_h(out);
    input[0] = 0;

    ADDBLK_ST4x4_UB(vec, vec, vec, vec, dst, dst_stride);
}

static void vp9_idct4x4_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                          int32_t dst_stride)
{
    v8i16 in0, in1, in2, in3;
    v8i16 zero = { 0 };

    /* load vector elements of 4x4 block */
    LD4x4_SH(input, in0, in1, in2, in3);
    ST_SH2(zero, zero, input, 8);
    /* rows */
    VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    /* columns */
    TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
    VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    /* rounding (add 2^3, divide by 2^4) */
    SRARI_H4_SH(in0, in1, in2, in3, 4);
    ADDBLK_ST4x4_UB(in0, in1, in2, in3, dst, dst_stride);
}

static void vp9_iadst4x4_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                           int32_t dst_stride)
{
    v8i16 in0, in1, in2, in3;
    v8i16 zero = { 0 };

    /* load vector elements of 4x4 block */
    LD4x4_SH(input, in0, in1, in2, in3);
    ST_SH2(zero, zero, input, 8);
    /* rows */
    VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    /* columns */
    TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
    VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    /* rounding (add 2^3, divide by 2^4) */
    SRARI_H4_SH(in0, in1, in2, in3, 4);
    ADDBLK_ST4x4_UB(in0, in1, in2, in3, dst, dst_stride);
}

static void vp9_iadst_idct_4x4_add_msa(int16_t *input, uint8_t *dst,
                                       int32_t dst_stride, int32_t eob)
{
    v8i16 in0, in1, in2, in3;
    v8i16 zero = { 0 };

    /* load vector elements of 4x4 block */
    LD4x4_SH(input, in0, in1, in2, in3);
    ST_SH2(zero, zero, input, 8);
    /* rows */
    VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    /* columns */
    TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
    VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    /* rounding (add 2^3, divide by 2^4) */
    SRARI_H4_SH(in0, in1, in2, in3, 4);
    ADDBLK_ST4x4_UB(in0, in1, in2, in3, dst, dst_stride);
}

static void vp9_idct_iadst_4x4_add_msa(int16_t *input, uint8_t *dst,
                                       int32_t dst_stride, int32_t eob)
{
    v8i16 in0, in1, in2, in3;
    v8i16 zero = { 0 };

    /* load vector elements of 4x4 block */
    LD4x4_SH(input, in0, in1, in2, in3);
    ST_SH2(zero, zero, input, 8);
    /* rows */
    VP9_IDCT4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    /* columns */
    TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
    VP9_IADST4x4(in0, in1, in2, in3, in0, in1, in2, in3);
    /* rounding (add 2^3, divide by 2^4) */
    SRARI_H4_SH(in0, in1, in2, in3, 4);
    ADDBLK_ST4x4_UB(in0, in1, in2, in3, dst, dst_stride);
}

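/* Same lane layout as VP9_SET_COSPI_PAIR, but the two constants are taken
 * from lanes idx1_h and idx2_h of an existing mask vector. */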
#define VP9_SET_CONST_PAIR(mask_h, idx1_h, idx2_h) \
( { \
    v8i16 c0_m, c1_m; \
    \
    SPLATI_H2_SH(mask_h, idx1_h, idx2_h, c0_m, c1_m); \
    c0_m = __msa_ilvev_h(c1_m, c0_m); \
    \
    c0_m; \
} )

/* multiply and add macro */
#define VP9_MADD(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3, \
                 out0, out1, out2, out3) \
{ \
    v8i16 madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m; \
    v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
    \
    ILVRL_H2_SH(inp1, inp0, madd_s1_m, madd_s0_m); \
    ILVRL_H2_SH(inp3, inp2, madd_s3_m, madd_s2_m); \
    DOTP_SH4_SW(madd_s1_m, madd_s0_m, madd_s1_m, madd_s0_m, \
                cst0, cst0, cst1, cst1, tmp0_m, tmp1_m, tmp2_m, tmp3_m); \
    SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, VP9_DCT_CONST_BITS); \
    PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out0, out1); \
    DOTP_SH4_SW(madd_s3_m, madd_s2_m, madd_s3_m, madd_s2_m, \
                cst2, cst2, cst3, cst3, tmp0_m, tmp1_m, tmp2_m, tmp3_m); \
    SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, VP9_DCT_CONST_BITS); \
    PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, out2, out3); \
}

/* idct 8x8 macro */
#define VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, \
                       out0, out1, out2, out3, out4, out5, out6, out7) \
{ \
    v8i16 tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m; \
    v8i16 k0_m, k1_m, k2_m, k3_m, res0_m, res1_m, res2_m, res3_m; \
    v4i32 tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
    v8i16 mask_m = { cospi_28_64, cospi_4_64, cospi_20_64, cospi_12_64, \
                     cospi_16_64, -cospi_4_64, -cospi_20_64, -cospi_16_64 }; \
    \
    k0_m = VP9_SET_CONST_PAIR(mask_m, 0, 5); \
    k1_m = VP9_SET_CONST_PAIR(mask_m, 1, 0); \
    k2_m = VP9_SET_CONST_PAIR(mask_m, 6, 3); \
    k3_m = VP9_SET_CONST_PAIR(mask_m, 3, 2); \
    VP9_MADD(in1, in7, in3, in5, k0_m, k1_m, k2_m, k3_m, in1, in7, in3, in5); \
    SUB2(in1, in3, in7, in5, res0_m, res1_m); \
    k0_m = VP9_SET_CONST_PAIR(mask_m, 4, 7); \
    k1_m = __msa_splati_h(mask_m, 4); \
    \
    ILVRL_H2_SH(res0_m, res1_m, res2_m, res3_m); \
    DOTP_SH4_SW(res2_m, res3_m, res2_m, res3_m, k0_m, k0_m, k1_m, k1_m, \
                tmp0_m, tmp1_m, tmp2_m, tmp3_m); \
    SRARI_W4_SW(tmp0_m, tmp1_m, tmp2_m, tmp3_m, VP9_DCT_CONST_BITS); \
    tp4_m = in1 + in3; \
    PCKEV_H2_SH(tmp1_m, tmp0_m, tmp3_m, tmp2_m, tp5_m, tp6_m); \
    tp7_m = in7 + in5; \
    k2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \
    k3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \
    VP9_MADD(in0, in4, in2, in6, k1_m, k0_m, k2_m, k3_m, \
             in0, in4, in2, in6); \
    BUTTERFLY_4(in0, in4, in2, in6, tp0_m, tp1_m, tp2_m, tp3_m); \
    BUTTERFLY_8(tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m, \
                out0, out1, out2, out3, out4, out5, out6, out7); \
}

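/* One dimension of the 8 point inverse ADST; as in VP9_ADST8, the odd
 * indexed outputs are produced negated. */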
#define VP9_IADST8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, \
                        out0, out1, out2, out3, out4, out5, out6, out7) \
{ \
    v4i32 r0_m, r1_m, r2_m, r3_m, r4_m, r5_m, r6_m, r7_m; \
    v4i32 m0_m, m1_m, m2_m, m3_m, t0_m, t1_m; \
    v8i16 res0_m, res1_m, res2_m, res3_m, k0_m, k1_m, in_s0, in_s1; \
    v8i16 mask1_m = { cospi_2_64, cospi_30_64, -cospi_2_64, cospi_10_64, \
                      cospi_22_64, -cospi_10_64, cospi_18_64, cospi_14_64 }; \
    v8i16 mask2_m = { cospi_14_64, -cospi_18_64, cospi_26_64, cospi_6_64, \
                      -cospi_26_64, cospi_8_64, cospi_24_64, -cospi_8_64 }; \
    v8i16 mask3_m = { -cospi_24_64, cospi_8_64, cospi_16_64, \
                      -cospi_16_64, 0, 0, 0, 0 }; \
    \
    k0_m = VP9_SET_CONST_PAIR(mask1_m, 0, 1); \
    k1_m = VP9_SET_CONST_PAIR(mask1_m, 1, 2); \
    ILVRL_H2_SH(in1, in0, in_s1, in_s0); \
    DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, \
                r0_m, r1_m, r2_m, r3_m); \
    k0_m = VP9_SET_CONST_PAIR(mask1_m, 6, 7); \
    k1_m = VP9_SET_CONST_PAIR(mask2_m, 0, 1); \
    ILVRL_H2_SH(in5, in4, in_s1, in_s0); \
    DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, \
                r4_m, r5_m, r6_m, r7_m); \
    ADD4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m, \
         m0_m, m1_m, m2_m, m3_m); \
    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS); \
    PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, res0_m, res1_m); \
    SUB4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m, \
         m0_m, m1_m, m2_m, m3_m); \
    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS); \
    PCKEV_H2_SW(m1_m, m0_m, m3_m, m2_m, t0_m, t1_m); \
    k0_m = VP9_SET_CONST_PAIR(mask1_m, 3, 4); \
    k1_m = VP9_SET_CONST_PAIR(mask1_m, 4, 5); \
    ILVRL_H2_SH(in3, in2, in_s1, in_s0); \
    DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, \
                r0_m, r1_m, r2_m, r3_m); \
    k0_m = VP9_SET_CONST_PAIR(mask2_m, 2, 3); \
    k1_m = VP9_SET_CONST_PAIR(mask2_m, 3, 4); \
    ILVRL_H2_SH(in7, in6, in_s1, in_s0); \
    DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, \
                r4_m, r5_m, r6_m, r7_m); \
    ADD4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m, \
         m0_m, m1_m, m2_m, m3_m); \
    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS); \
    PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, res2_m, res3_m); \
    SUB4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m, \
         m0_m, m1_m, m2_m, m3_m); \
    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS); \
    PCKEV_H2_SW(m1_m, m0_m, m3_m, m2_m, r2_m, r3_m); \
    ILVRL_H2_SW(r3_m, r2_m, m2_m, m3_m); \
    BUTTERFLY_4(res0_m, res1_m, res3_m, res2_m, out0, in7, in4, in3); \
    k0_m = VP9_SET_CONST_PAIR(mask2_m, 5, 6); \
    k1_m = VP9_SET_CONST_PAIR(mask2_m, 6, 7); \
    ILVRL_H2_SH(t1_m, t0_m, in_s1, in_s0); \
    DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, \
                r0_m, r1_m, r2_m, r3_m); \
    k1_m = VP9_SET_CONST_PAIR(mask3_m, 0, 1); \
    DOTP_SH4_SW(m2_m, m3_m, m2_m, m3_m, k0_m, k0_m, k1_m, k1_m, \
                r4_m, r5_m, r6_m, r7_m); \
    ADD4(r0_m, r6_m, r1_m, r7_m, r2_m, r4_m, r3_m, r5_m, \
         m0_m, m1_m, m2_m, m3_m); \
    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS); \
    PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in1, out6); \
    SUB4(r0_m, r6_m, r1_m, r7_m, r2_m, r4_m, r3_m, r5_m, \
         m0_m, m1_m, m2_m, m3_m); \
    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS); \
    PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in2, in5); \
    k0_m = VP9_SET_CONST_PAIR(mask3_m, 2, 2); \
    k1_m = VP9_SET_CONST_PAIR(mask3_m, 2, 3); \
    ILVRL_H2_SH(in4, in3, in_s1, in_s0); \
    DOTP_SH4_SW(in_s1, in_s0, in_s1, in_s0, k0_m, k0_m, k1_m, k1_m, \
                m0_m, m1_m, m2_m, m3_m); \
    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS); \
    PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, in3, out4); \
    ILVRL_H2_SW(in5, in2, m2_m, m3_m); \
    DOTP_SH4_SW(m2_m, m3_m, m2_m, m3_m, k0_m, k0_m, k1_m, k1_m, \
                m0_m, m1_m, m2_m, m3_m); \
    SRARI_W4_SW(m0_m, m1_m, m2_m, m3_m, VP9_DCT_CONST_BITS); \
    PCKEV_H2_SH(m1_m, m0_m, m3_m, m2_m, out2, in5); \
    \
    out1 = -in1; \
    out3 = -in3; \
    out5 = -in5; \
    out7 = -in7; \
}

static void vp9_idct8x8_1_add_msa(int16_t *input, uint8_t *dst,
                                  int32_t dst_stride)
{
    int16_t out;
    int32_t val;
    v8i16 vec;

    out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), VP9_DCT_CONST_BITS);
    out = ROUND_POWER_OF_TWO((out * cospi_16_64), VP9_DCT_CONST_BITS);
    val = ROUND_POWER_OF_TWO(out, 5);
    vec = __msa_fill_h(val);
    input[0] = 0;

    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
    dst += (4 * dst_stride);
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
}

static void vp9_idct8x8_12_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                             int32_t dst_stride)
{
    v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
    v8i16 s0, s1, s2, s3, s4, s5, s6, s7, k0, k1, k2, k3, m0, m1, m2, m3;
    v4i32 tmp0, tmp1, tmp2, tmp3;
    v8i16 zero = { 0 };

    /* load vector elements of 8x8 block */
    LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 8);
    ILVR_D2_SH(in1, in0, in3, in2, in0, in1);
    ILVR_D2_SH(in5, in4, in7, in6, in2, in3);

    /* stage1 */
    ILVL_H2_SH(in3, in0, in2, in1, s0, s1);
    k0 = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
    k1 = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
    k2 = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
    k3 = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
    DOTP_SH4_SW(s0, s0, s1, s1, k0, k1, k2, k3, tmp0, tmp1, tmp2, tmp3);
    SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, VP9_DCT_CONST_BITS);
    PCKEV_H2_SH(zero, tmp0, zero, tmp1, s0, s1);
    PCKEV_H2_SH(zero, tmp2, zero, tmp3, s2, s3);
    BUTTERFLY_4(s0, s1, s3, s2, s4, s7, s6, s5);

    /* stage2 */
    ILVR_H2_SH(in3, in1, in2, in0, s1, s0);
    k0 = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
    k1 = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
    k2 = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
    k3 = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
    DOTP_SH4_SW(s0, s0, s1, s1, k0, k1, k2, k3, tmp0, tmp1, tmp2, tmp3);
    SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, VP9_DCT_CONST_BITS);
    PCKEV_H2_SH(zero, tmp0, zero, tmp1, s0, s1);
    PCKEV_H2_SH(zero, tmp2, zero, tmp3, s2, s3);
    BUTTERFLY_4(s0, s1, s2, s3, m0, m1, m2, m3);

    /* stage3 */
    s0 = __msa_ilvr_h(s6, s5);

    k1 = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
    DOTP_SH2_SW(s0, s0, k1, k0, tmp0, tmp1);
    SRARI_W2_SW(tmp0, tmp1, VP9_DCT_CONST_BITS);
    PCKEV_H2_SH(zero, tmp0, zero, tmp1, s2, s3);

    /* stage4 */
    BUTTERFLY_8(m0, m1, m2, m3, s4, s2, s3, s7,
                in0, in1, in2, in3, in4, in5, in6, in7);
    TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                       in0, in1, in2, in3, in4, in5, in6, in7);
    VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                   in0, in1, in2, in3, in4, in5, in6, in7);

    /* final rounding (add 2^4, divide by 2^5) and shift */
    SRARI_H4_SH(in0, in1, in2, in3, 5);
    SRARI_H4_SH(in4, in5, in6, in7, 5);

    /* add block and store 8x8 */
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
    dst += (4 * dst_stride);
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
}

static void vp9_idct8x8_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                          int32_t dst_stride)
{
    v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
    v8i16 zero = { 0 };

    /* load vector elements of 8x8 block */
    LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 8);
    /* 1D idct8x8 */
    VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                   in0, in1, in2, in3, in4, in5, in6, in7);
    /* columns transform */
    TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                       in0, in1, in2, in3, in4, in5, in6, in7);
    /* 1D idct8x8 */
    VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                   in0, in1, in2, in3, in4, in5, in6, in7);
    /* final rounding (add 2^4, divide by 2^5) and shift */
    SRARI_H4_SH(in0, in1, in2, in3, 5);
    SRARI_H4_SH(in4, in5, in6, in7, 5);
    /* add block and store 8x8 */
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
    dst += (4 * dst_stride);
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
}

static void vp9_iadst8x8_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                           int32_t dst_stride)
{
    v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
    v8i16 res0, res1, res2, res3, res4, res5, res6, res7;
    v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v8i16 out0, out1, out2, out3, out4, out5, out6, out7;
    v8i16 cnst0, cnst1, cnst2, cnst3, cnst4;
    v8i16 temp0, temp1, temp2, temp3, s0, s1;
    v8i16 zero = { 0 };

    /* load vector elements of 8x8 block */
    LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 8);

    /* 1D adst8x8 */
    VP9_ADST8(in0, in1, in2, in3, in4, in5, in6, in7,
              in0, in1, in2, in3, in4, in5, in6, in7);

    /* columns transform */
    TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                       in0, in1, in2, in3, in4, in5, in6, in7);

    cnst0 = __msa_fill_h(cospi_2_64);
    cnst1 = __msa_fill_h(cospi_30_64);
    cnst2 = -cnst0;
    ILVEV_H2_SH(cnst0, cnst1, cnst1, cnst2, cnst0, cnst1);
    cnst2 = __msa_fill_h(cospi_18_64);
    cnst3 = __msa_fill_h(cospi_14_64);
    cnst4 = -cnst2;
    ILVEV_H2_SH(cnst2, cnst3, cnst3, cnst4, cnst2, cnst3);

    ILVRL_H2_SH(in0, in7, temp1, temp0);
    ILVRL_H2_SH(in4, in3, temp3, temp2);
    VP9_DOT_ADD_SUB_SRARI_PCK(temp0, temp1, temp2, temp3, cnst0, cnst1, cnst2,
                              cnst3, in7, in0, in4, in3);

    cnst0 = __msa_fill_h(cospi_10_64);
    cnst1 = __msa_fill_h(cospi_22_64);
    cnst2 = -cnst0;
    ILVEV_H2_SH(cnst0, cnst1, cnst1, cnst2, cnst0, cnst1);
    cnst2 = __msa_fill_h(cospi_26_64);
    cnst3 = __msa_fill_h(cospi_6_64);
    cnst4 = -cnst2;
    ILVEV_H2_SH(cnst2, cnst3, cnst3, cnst4, cnst2, cnst3);

    ILVRL_H2_SH(in2, in5, temp1, temp0);
    ILVRL_H2_SH(in6, in1, temp3, temp2);
    VP9_DOT_ADD_SUB_SRARI_PCK(temp0, temp1, temp2, temp3, cnst0, cnst1, cnst2,
                              cnst3, in5, in2, in6, in1);
    BUTTERFLY_4(in7, in0, in2, in5, s1, s0, in2, in5);
    out7 = -s0;
    out0 = s1;
    SRARI_H2_SH(out0, out7, 5);
    dst0 = LD_UB(dst + 0 * dst_stride);
    dst7 = LD_UB(dst + 7 * dst_stride);

    res0 = (v8i16) __msa_ilvr_b((v16i8) zero, (v16i8) dst0);
    res0 += out0;
    res0 = CLIP_SH_0_255(res0);
    res0 = (v8i16) __msa_pckev_b((v16i8) res0, (v16i8) res0);
    ST8x1_UB(res0, dst);

    res7 = (v8i16) __msa_ilvr_b((v16i8) zero, (v16i8) dst7);
    res7 += out7;
    res7 = CLIP_SH_0_255(res7);
    res7 = (v8i16) __msa_pckev_b((v16i8) res7, (v16i8) res7);
    ST8x1_UB(res7, dst + 7 * dst_stride);

    cnst1 = __msa_fill_h(cospi_24_64);
    cnst0 = __msa_fill_h(cospi_8_64);
    cnst3 = -cnst1;
    cnst2 = -cnst0;

    ILVEV_H2_SH(cnst3, cnst0, cnst1, cnst2, cnst3, cnst2);
    cnst0 = __msa_ilvev_h(cnst1, cnst0);
    cnst1 = cnst0;

    ILVRL_H2_SH(in4, in3, temp1, temp0);
    ILVRL_H2_SH(in6, in1, temp3, temp2);
    VP9_DOT_ADD_SUB_SRARI_PCK(temp0, temp1, temp2, temp3, cnst0, cnst2, cnst3,
                              cnst1, out1, out6, s0, s1);
    out1 = -out1;
    SRARI_H2_SH(out1, out6, 5);
    dst1 = LD_UB(dst + 1 * dst_stride);
    dst6 = LD_UB(dst + 6 * dst_stride);
    ILVR_B2_SH(zero, dst1, zero, dst6, res1, res6);
    ADD2(res1, out1, res6, out6, res1, res6);
    CLIP_SH2_0_255(res1, res6);
    PCKEV_B2_SH(res1, res1, res6, res6, res1, res6);
    ST8x1_UB(res1, dst + dst_stride);
    ST8x1_UB(res6, dst + 6 * dst_stride);

    cnst0 = __msa_fill_h(cospi_16_64);
    cnst1 = -cnst0;
    cnst1 = __msa_ilvev_h(cnst1, cnst0);

    ILVRL_H2_SH(in2, in5, temp1, temp0);
    ILVRL_H2_SH(s0, s1, temp3, temp2);
    out3 = VP9_DOT_SHIFT_RIGHT_PCK_H(temp0, temp1, cnst0);
    out4 = VP9_DOT_SHIFT_RIGHT_PCK_H(temp0, temp1, cnst1);
    out3 = -out3;
    SRARI_H2_SH(out3, out4, 5);
    dst3 = LD_UB(dst + 3 * dst_stride);
    dst4 = LD_UB(dst + 4 * dst_stride);
    ILVR_B2_SH(zero, dst3, zero, dst4, res3, res4);
    ADD2(res3, out3, res4, out4, res3, res4);
    CLIP_SH2_0_255(res3, res4);
    PCKEV_B2_SH(res3, res3, res4, res4, res3, res4);
    ST8x1_UB(res3, dst + 3 * dst_stride);
    ST8x1_UB(res4, dst + 4 * dst_stride);

    out2 = VP9_DOT_SHIFT_RIGHT_PCK_H(temp2, temp3, cnst0);
    out5 = VP9_DOT_SHIFT_RIGHT_PCK_H(temp2, temp3, cnst1);
    out5 = -out5;
    SRARI_H2_SH(out2, out5, 5);
    dst2 = LD_UB(dst + 2 * dst_stride);
    dst5 = LD_UB(dst + 5 * dst_stride);
    ILVR_B2_SH(zero, dst2, zero, dst5, res2, res5);
    ADD2(res2, out2, res5, out5, res2, res5);
    CLIP_SH2_0_255(res2, res5);
    PCKEV_B2_SH(res2, res2, res5, res5, res2, res5);
    ST8x1_UB(res2, dst + 2 * dst_stride);
    ST8x1_UB(res5, dst + 5 * dst_stride);
}

static void vp9_iadst_idct_8x8_add_msa(int16_t *input, uint8_t *dst,
                                       int32_t dst_stride, int32_t eob)
{
    v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
    v8i16 zero = { 0 };

    /* load vector elements of 8x8 block */
    LD_SH8(input, 8, in1, in6, in3, in4, in5, in2, in7, in0);
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 8);
    /* 1D adst8x8 */
    VP9_IADST8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                    in0, in1, in2, in3, in4, in5, in6, in7);
    /* columns transform */
    TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                       in0, in1, in2, in3, in4, in5, in6, in7);
    /* 1D idct8x8 */
    VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                   in0, in1, in2, in3, in4, in5, in6, in7);
    /* final rounding (add 2^4, divide by 2^5) and shift */
    SRARI_H4_SH(in0, in1, in2, in3, 5);
    SRARI_H4_SH(in4, in5, in6, in7, 5);
    /* add block and store 8x8 */
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
    dst += (4 * dst_stride);
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
}

static void vp9_idct_iadst_8x8_add_msa(int16_t *input, uint8_t *dst,
                                       int32_t dst_stride, int32_t eob)
{
    v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
    v8i16 zero = { 0 };

    /* load vector elements of 8x8 block */
    LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 8);

    /* 1D idct8x8 */
    VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                   in0, in1, in2, in3, in4, in5, in6, in7);
    /* columns transform */
    TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7,
                       in1, in6, in3, in4, in5, in2, in7, in0);
    /* 1D adst8x8 */
    VP9_IADST8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
                    in0, in1, in2, in3, in4, in5, in6, in7);
    /* final rounding (add 2^4, divide by 2^5) and shift */
    SRARI_H4_SH(in0, in1, in2, in3, 5);
    SRARI_H4_SH(in4, in5, in6, in7, 5);
    /* add block and store 8x8 */
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
    dst += (4 * dst_stride);
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
}

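/* Row pass of the 16 point inverse ADST over eight columns held in v8i16
 * registers; the four stages mirror the scalar vp9 iadst16 flow. */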
#define VP9_IADST8x16_1D(r0, r1, r2, r3, r4, r5, r6, r7, r8, \
                         r9, r10, r11, r12, r13, r14, r15, \
                         out0, out1, out2, out3, out4, out5, \
                         out6, out7, out8, out9, out10, out11, \
                         out12, out13, out14, out15) \
{ \
    v8i16 g0_m, g1_m, g2_m, g3_m, g4_m, g5_m, g6_m, g7_m; \
    v8i16 g8_m, g9_m, g10_m, g11_m, g12_m, g13_m, g14_m, g15_m; \
    v8i16 h0_m, h1_m, h2_m, h3_m, h4_m, h5_m, h6_m, h7_m; \
    v8i16 h8_m, h9_m, h10_m, h11_m; \
    v8i16 k0_m, k1_m, k2_m, k3_m; \
    \
    /* stage 1 */ \
    k0_m = VP9_SET_COSPI_PAIR(cospi_1_64, cospi_31_64); \
    k1_m = VP9_SET_COSPI_PAIR(cospi_31_64, -cospi_1_64); \
    k2_m = VP9_SET_COSPI_PAIR(cospi_17_64, cospi_15_64); \
    k3_m = VP9_SET_COSPI_PAIR(cospi_15_64, -cospi_17_64); \
    VP9_MADD_BF(r15, r0, r7, r8, k0_m, k1_m, k2_m, k3_m, \
                g0_m, g1_m, g2_m, g3_m); \
    k0_m = VP9_SET_COSPI_PAIR(cospi_5_64, cospi_27_64); \
    k1_m = VP9_SET_COSPI_PAIR(cospi_27_64, -cospi_5_64); \
    k2_m = VP9_SET_COSPI_PAIR(cospi_21_64, cospi_11_64); \
    k3_m = VP9_SET_COSPI_PAIR(cospi_11_64, -cospi_21_64); \
    VP9_MADD_BF(r13, r2, r5, r10, k0_m, k1_m, k2_m, k3_m, \
                g4_m, g5_m, g6_m, g7_m); \
    k0_m = VP9_SET_COSPI_PAIR(cospi_9_64, cospi_23_64); \
    k1_m = VP9_SET_COSPI_PAIR(cospi_23_64, -cospi_9_64); \
    k2_m = VP9_SET_COSPI_PAIR(cospi_25_64, cospi_7_64); \
    k3_m = VP9_SET_COSPI_PAIR(cospi_7_64, -cospi_25_64); \
    VP9_MADD_BF(r11, r4, r3, r12, k0_m, k1_m, k2_m, k3_m, \
                g8_m, g9_m, g10_m, g11_m); \
    k0_m = VP9_SET_COSPI_PAIR(cospi_13_64, cospi_19_64); \
    k1_m = VP9_SET_COSPI_PAIR(cospi_19_64, -cospi_13_64); \
    k2_m = VP9_SET_COSPI_PAIR(cospi_29_64, cospi_3_64); \
    k3_m = VP9_SET_COSPI_PAIR(cospi_3_64, -cospi_29_64); \
    VP9_MADD_BF(r9, r6, r1, r14, k0_m, k1_m, k2_m, k3_m, \
                g12_m, g13_m, g14_m, g15_m); \
    \
    /* stage 2 */ \
    k0_m = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64); \
    k1_m = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64); \
    k2_m = VP9_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64); \
    VP9_MADD_BF(g1_m, g3_m, g9_m, g11_m, k0_m, k1_m, k2_m, k0_m, \
                h0_m, h1_m, h2_m, h3_m); \
    k0_m = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64); \
    k1_m = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64); \
    k2_m = VP9_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64); \
    VP9_MADD_BF(g7_m, g5_m, g15_m, g13_m, k0_m, k1_m, k2_m, k0_m, \
                h4_m, h5_m, h6_m, h7_m); \
    BUTTERFLY_4(h0_m, h2_m, h6_m, h4_m, out8, out9, out11, out10); \
    BUTTERFLY_8(g0_m, g2_m, g4_m, g6_m, g14_m, g12_m, g10_m, g8_m, \
                h8_m, h9_m, h10_m, h11_m, h6_m, h4_m, h2_m, h0_m); \
    \
    /* stage 3 */ \
    BUTTERFLY_4(h8_m, h9_m, h11_m, h10_m, out0, out1, h11_m, h10_m); \
    k0_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \
    k1_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \
    k2_m = VP9_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64); \
    VP9_MADD_BF(h0_m, h2_m, h4_m, h6_m, k0_m, k1_m, k2_m, k0_m, \
                out4, out6, out5, out7); \
    VP9_MADD_BF(h1_m, h3_m, h5_m, h7_m, k0_m, k1_m, k2_m, k0_m, \
                out12, out14, out13, out15); \
    \
    /* stage 4 */ \
    k0_m = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64); \
    k1_m = VP9_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64); \
    k2_m = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64); \
    k3_m = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64); \
    VP9_MADD_SHORT(h10_m, h11_m, k1_m, k2_m, out2, out3); \
    VP9_MADD_SHORT(out6, out7, k0_m, k3_m, out6, out7); \
    VP9_MADD_SHORT(out10, out11, k0_m, k3_m, out10, out11); \
    VP9_MADD_SHORT(out14, out15, k1_m, k2_m, out14, out15); \
}

static void vp9_idct16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
                                             int32_t dst_stride)
{
    v8i16 loc0, loc1, loc2, loc3;
    v8i16 reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14;
    v8i16 reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15;
    v8i16 tmp5, tmp6, tmp7;
    v8i16 zero = { 0 };

    /* load up 8x16 */
    LD_SH16(input, 16,
            reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7,
            reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15);

    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16);
    input += 8 * 16;
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16);

    VP9_DOTP_CONST_PAIR(reg2, reg14, cospi_28_64, cospi_4_64, reg2, reg14);
    VP9_DOTP_CONST_PAIR(reg10, reg6, cospi_12_64, cospi_20_64, reg10, reg6);
    BUTTERFLY_4(reg2, reg14, reg6, reg10, loc0, loc1, reg14, reg2);
    VP9_DOTP_CONST_PAIR(reg14, reg2, cospi_16_64, cospi_16_64, loc2, loc3);
    VP9_DOTP_CONST_PAIR(reg0, reg8, cospi_16_64, cospi_16_64, reg0, reg8);
    VP9_DOTP_CONST_PAIR(reg4, reg12, cospi_24_64, cospi_8_64, reg4, reg12);
    BUTTERFLY_4(reg8, reg0, reg4, reg12, reg2, reg6, reg10, reg14);

    reg0 = reg2 - loc1;
    reg2 = reg2 + loc1;
    reg12 = reg14 - loc0;
    reg14 = reg14 + loc0;
    reg4 = reg6 - loc3;
    reg6 = reg6 + loc3;
    reg8 = reg10 - loc2;
    reg10 = reg10 + loc2;

    /* stage 2 */
    VP9_DOTP_CONST_PAIR(reg1, reg15, cospi_30_64, cospi_2_64, reg1, reg15);
    VP9_DOTP_CONST_PAIR(reg9, reg7, cospi_14_64, cospi_18_64, loc2, loc3);

    reg9 = reg1 - loc2;
    reg1 = reg1 + loc2;
    reg7 = reg15 - loc3;
    reg15 = reg15 + loc3;

    VP9_DOTP_CONST_PAIR(reg5, reg11, cospi_22_64, cospi_10_64, reg5, reg11);
    VP9_DOTP_CONST_PAIR(reg13, reg3, cospi_6_64, cospi_26_64, loc0, loc1);
    BUTTERFLY_4(loc0, loc1, reg11, reg5, reg13, reg3, reg11, reg5);

    loc1 = reg15 + reg3;
    reg3 = reg15 - reg3;
    loc2 = reg2 + loc1;
    reg15 = reg2 - loc1;

    loc1 = reg1 + reg13;
    reg13 = reg1 - reg13;
    loc0 = reg0 + loc1;
    loc1 = reg0 - loc1;
    tmp6 = loc0;
    tmp7 = loc1;
    reg0 = loc2;

    VP9_DOTP_CONST_PAIR(reg7, reg9, cospi_24_64, cospi_8_64, reg7, reg9);
    VP9_DOTP_CONST_PAIR((-reg5), (-reg11), cospi_8_64, cospi_24_64, reg5,
                        reg11);

    loc0 = reg9 + reg5;
    reg5 = reg9 - reg5;
    reg2 = reg6 + loc0;
    reg1 = reg6 - loc0;

    loc0 = reg7 + reg11;
    reg11 = reg7 - reg11;
    loc1 = reg4 + loc0;
    loc2 = reg4 - loc0;
    tmp5 = loc1;

    VP9_DOTP_CONST_PAIR(reg5, reg11, cospi_16_64, cospi_16_64, reg5, reg11);
    BUTTERFLY_4(reg8, reg10, reg11, reg5, loc0, reg4, reg9, loc1);

    reg10 = loc0;
    reg11 = loc1;

    VP9_DOTP_CONST_PAIR(reg3, reg13, cospi_16_64, cospi_16_64, reg3, reg13);
    BUTTERFLY_4(reg12, reg14, reg13, reg3, reg8, reg6, reg7, reg5);
    reg13 = loc2;

    /* restore the saved results, then round, add block and store */
    reg12 = tmp5;
    reg14 = tmp6;
    reg3 = tmp7;

    SRARI_H4_SH(reg0, reg2, reg4, reg6, 6);
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg0, reg2, reg4, reg6);
    dst += (4 * dst_stride);
    SRARI_H4_SH(reg8, reg10, reg12, reg14, 6);
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg8, reg10, reg12, reg14);
    dst += (4 * dst_stride);
    SRARI_H4_SH(reg3, reg13, reg11, reg5, 6);
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg3, reg13, reg11, reg5);
    dst += (4 * dst_stride);
    SRARI_H4_SH(reg7, reg9, reg1, reg15, 6);
    VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg7, reg9, reg1, reg15);
}

static void vp9_idct16_1d_columns_msa(int16_t *input, int16_t *output)
{
    v8i16 loc0, loc1, loc2, loc3;
    v8i16 reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14;
    v8i16 reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15;
    v8i16 tmp5, tmp6, tmp7;
    v8i16 zero = { 0 };

    /* load up 8x16 */
    LD_SH16(input, 16,
            reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7,
            reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15);

    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16);
    input += 16 * 8;
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16);

    VP9_DOTP_CONST_PAIR(reg2, reg14, cospi_28_64, cospi_4_64, reg2, reg14);
    VP9_DOTP_CONST_PAIR(reg10, reg6, cospi_12_64, cospi_20_64, reg10, reg6);
    BUTTERFLY_4(reg2, reg14, reg6, reg10, loc0, loc1, reg14, reg2);
    VP9_DOTP_CONST_PAIR(reg14, reg2, cospi_16_64, cospi_16_64, loc2, loc3);
    VP9_DOTP_CONST_PAIR(reg0, reg8, cospi_16_64, cospi_16_64, reg0, reg8);
    VP9_DOTP_CONST_PAIR(reg4, reg12, cospi_24_64, cospi_8_64, reg4, reg12);
    BUTTERFLY_4(reg8, reg0, reg4, reg12, reg2, reg6, reg10, reg14);

    reg0 = reg2 - loc1;
    reg2 = reg2 + loc1;
    reg12 = reg14 - loc0;
    reg14 = reg14 + loc0;
    reg4 = reg6 - loc3;
    reg6 = reg6 + loc3;
    reg8 = reg10 - loc2;
    reg10 = reg10 + loc2;

    /* stage 2 */
    VP9_DOTP_CONST_PAIR(reg1, reg15, cospi_30_64, cospi_2_64, reg1, reg15);
    VP9_DOTP_CONST_PAIR(reg9, reg7, cospi_14_64, cospi_18_64, loc2, loc3);

    reg9 = reg1 - loc2;
    reg1 = reg1 + loc2;
    reg7 = reg15 - loc3;
    reg15 = reg15 + loc3;

    VP9_DOTP_CONST_PAIR(reg5, reg11, cospi_22_64, cospi_10_64, reg5, reg11);
    VP9_DOTP_CONST_PAIR(reg13, reg3, cospi_6_64, cospi_26_64, loc0, loc1);
    BUTTERFLY_4(loc0, loc1, reg11, reg5, reg13, reg3, reg11, reg5);

    loc1 = reg15 + reg3;
    reg3 = reg15 - reg3;
    loc2 = reg2 + loc1;
    reg15 = reg2 - loc1;

    loc1 = reg1 + reg13;
    reg13 = reg1 - reg13;
    loc0 = reg0 + loc1;
    loc1 = reg0 - loc1;
    tmp6 = loc0;
    tmp7 = loc1;
    reg0 = loc2;

    VP9_DOTP_CONST_PAIR(reg7, reg9, cospi_24_64, cospi_8_64, reg7, reg9);
    VP9_DOTP_CONST_PAIR((-reg5), (-reg11), cospi_8_64, cospi_24_64, reg5,
                        reg11);

    loc0 = reg9 + reg5;
    reg5 = reg9 - reg5;
    reg2 = reg6 + loc0;
    reg1 = reg6 - loc0;

    loc0 = reg7 + reg11;
    reg11 = reg7 - reg11;
    loc1 = reg4 + loc0;
    loc2 = reg4 - loc0;

    tmp5 = loc1;

    VP9_DOTP_CONST_PAIR(reg5, reg11, cospi_16_64, cospi_16_64, reg5, reg11);
    BUTTERFLY_4(reg8, reg10, reg11, reg5, loc0, reg4, reg9, loc1);

    reg10 = loc0;
    reg11 = loc1;

    VP9_DOTP_CONST_PAIR(reg3, reg13, cospi_16_64, cospi_16_64, reg3, reg13);
    BUTTERFLY_4(reg12, reg14, reg13, reg3, reg8, reg6, reg7, reg5);
    reg13 = loc2;

    /* Transpose and store the output */
    reg12 = tmp5;
    reg14 = tmp6;
    reg3 = tmp7;

    /* transpose block */
    TRANSPOSE8x8_SH_SH(reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14,
                       reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14);
    ST_SH4(reg0, reg2, reg4, reg6, output, 16);
    ST_SH4(reg8, reg10, reg12, reg14, (output + 4 * 16), 16);

    /* transpose block */
    TRANSPOSE8x8_SH_SH(reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15,
                       reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15);
    ST_SH4(reg3, reg13, reg11, reg5, (output + 8), 16);
    ST_SH4(reg7, reg9, reg1, reg15, (output + 8 + 4 * 16), 16);
}

static void vp9_idct16x16_1_add_msa(int16_t *input, uint8_t *dst,
                                    int32_t dst_stride)
{
    uint8_t i;
    int16_t out;
    v8i16 vec, res0, res1, res2, res3, res4, res5, res6, res7;
    v16u8 dst0, dst1, dst2, dst3, tmp0, tmp1, tmp2, tmp3;

    out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), VP9_DCT_CONST_BITS);
    out = ROUND_POWER_OF_TWO((out * cospi_16_64), VP9_DCT_CONST_BITS);
    out = ROUND_POWER_OF_TWO(out, 6);
    input[0] = 0;

    vec = __msa_fill_h(out);

    for (i = 4; i--;) {
        LD_UB4(dst, dst_stride, dst0, dst1, dst2, dst3);
        UNPCK_UB_SH(dst0, res0, res4);
        UNPCK_UB_SH(dst1, res1, res5);
        UNPCK_UB_SH(dst2, res2, res6);
        UNPCK_UB_SH(dst3, res3, res7);
        ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2,
             res3);
        ADD4(res4, vec, res5, vec, res6, vec, res7, vec, res4, res5, res6,
             res7);
        CLIP_SH4_0_255(res0, res1, res2, res3);
        CLIP_SH4_0_255(res4, res5, res6, res7);
        PCKEV_B4_UB(res4, res0, res5, res1, res6, res2, res7, res3,
                    tmp0, tmp1, tmp2, tmp3);
        ST_UB4(tmp0, tmp1, tmp2, tmp3, dst, dst_stride);
        dst += (4 * dst_stride);
    }
}

static void vp9_idct16x16_10_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                               int32_t dst_stride)
{
    int32_t i;
    int16_t out_arr[16 * 16] ALLOC_ALIGNED(ALIGNMENT);
    int16_t *out = out_arr;

    /* transform rows */
    vp9_idct16_1d_columns_msa(input, out);

    /* short case just considers top 4 rows as valid output */
    out += 4 * 16;
    for (i = 12; i--;) {
        __asm__ volatile (
            "sw     $zero,   0(%[out])     \n\t"
            "sw     $zero,   4(%[out])     \n\t"
            "sw     $zero,   8(%[out])     \n\t"
            "sw     $zero,  12(%[out])     \n\t"
            "sw     $zero,  16(%[out])     \n\t"
            "sw     $zero,  20(%[out])     \n\t"
            "sw     $zero,  24(%[out])     \n\t"
            "sw     $zero,  28(%[out])     \n\t"

            :
            : [out] "r" (out)
        );

        out += 16;
    }

    out = out_arr;

    /* transform columns */
    for (i = 0; i < 2; i++) {
        /* process 8 * 16 block */
        vp9_idct16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)),
                                         dst_stride);
    }
}

static void vp9_idct16x16_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                            int32_t dst_stride)
{
    int32_t i;
    int16_t out_arr[16 * 16] ALLOC_ALIGNED(ALIGNMENT);
    int16_t *out = out_arr;

    /* transform rows */
    for (i = 0; i < 2; i++) {
        /* process 8 * 16 block */
        vp9_idct16_1d_columns_msa((input + (i << 3)), (out + (i << 7)));
    }

    /* transform columns */
    for (i = 0; i < 2; i++) {
        /* process 8 * 16 block */
        vp9_idct16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)),
                                         dst_stride);
    }
}

static void vp9_iadst16_1d_columns_msa(int16_t *input, int16_t *output)
{
    v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
    v8i16 l0, l1, l2, l3, l4, l5, l6, l7, l8, l9, l10, l11, l12, l13, l14, l15;
    v8i16 zero = { 0 };

    /* load input data */
    LD_SH16(input, 16,
            l0, l1, l2, l3, l4, l5, l6, l7,
            l8, l9, l10, l11, l12, l13, l14, l15);

    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16);
    input += 16 * 8;
    ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, input, 16);

    /* ADST in horizontal */
    VP9_IADST8x16_1D(l0, l1, l2, l3, l4, l5, l6, l7,
                     l8, l9, l10, l11, l12, l13, l14, l15,
                     r0, r1, r2, r3, r4, r5, r6, r7,
                     r8, r9, r10, r11, r12, r13, r14, r15);

    l1 = -r8;
    l3 = -r4;
    l13 = -r13;
    l15 = -r1;

    TRANSPOSE8x8_SH_SH(r0, l1, r12, l3, r6, r14, r10, r2,
                       l0, l1, l2, l3, l4, l5, l6, l7);
    ST_SH8(l0, l1, l2, l3, l4, l5, l6, l7, output, 16);
    TRANSPOSE8x8_SH_SH(r3, r11, r15, r7, r5, l13, r9, l15,
                       l8, l9, l10, l11, l12, l13, l14, l15);
    ST_SH8(l8, l9, l10, l11, l12, l13, l14, l15, (output + 8), 16);
}

static void vp9_iadst16_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
                                              int32_t dst_stride)
{
    v8i16 v0, v2, v4, v6, k0, k1, k2, k3;
    v8i16 r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15;
    v8i16 out0, out1, out2, out3, out4, out5, out6, out7;
    v8i16 out8, out9, out10, out11, out12, out13, out14, out15;
    v8i16 g0, g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, g11, g12, g13, g14, g15;
    v8i16 h0, h1, h2, h3, h4, h5, h6, h7, h8, h9, h10, h11;
    v8i16 res0, res1, res2, res3, res4, res5, res6, res7;
    v8i16 res8, res9, res10, res11, res12, res13, res14, res15;
    v16u8 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    v16u8 dst8, dst9, dst10, dst11, dst12, dst13, dst14, dst15;
    v16i8 zero = { 0 };

    r0 = LD_SH(input + 0 * 16);
    r3 = LD_SH(input + 3 * 16);
    r4 = LD_SH(input + 4 * 16);
    r7 = LD_SH(input + 7 * 16);
    r8 = LD_SH(input + 8 * 16);
    r11 = LD_SH(input + 11 * 16);
    r12 = LD_SH(input + 12 * 16);
    r15 = LD_SH(input + 15 * 16);

    /* stage 1 */
    k0 = VP9_SET_COSPI_PAIR(cospi_1_64, cospi_31_64);
    k1 = VP9_SET_COSPI_PAIR(cospi_31_64, -cospi_1_64);
    k2 = VP9_SET_COSPI_PAIR(cospi_17_64, cospi_15_64);
    k3 = VP9_SET_COSPI_PAIR(cospi_15_64, -cospi_17_64);
    VP9_MADD_BF(r15, r0, r7, r8, k0, k1, k2, k3, g0, g1, g2, g3);
    k0 = VP9_SET_COSPI_PAIR(cospi_9_64, cospi_23_64);
    k1 = VP9_SET_COSPI_PAIR(cospi_23_64, -cospi_9_64);
    k2 = VP9_SET_COSPI_PAIR(cospi_25_64, cospi_7_64);
    k3 = VP9_SET_COSPI_PAIR(cospi_7_64, -cospi_25_64);
    VP9_MADD_BF(r11, r4, r3, r12, k0, k1, k2, k3, g8, g9, g10, g11);
    BUTTERFLY_4(g0, g2, g10, g8, h8, h9, v2, v0);
    k0 = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
    k1 = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
    k2 = VP9_SET_COSPI_PAIR(-cospi_28_64, cospi_4_64);
    VP9_MADD_BF(g1, g3, g9, g11, k0, k1, k2, k0, h0, h1, h2, h3);

    r1 = LD_SH(input + 1 * 16);
    r2 = LD_SH(input + 2 * 16);
    r5 = LD_SH(input + 5 * 16);
    r6 = LD_SH(input + 6 * 16);
    r9 = LD_SH(input + 9 * 16);
    r10 = LD_SH(input + 10 * 16);
    r13 = LD_SH(input + 13 * 16);
    r14 = LD_SH(input + 14 * 16);

    k0 = VP9_SET_COSPI_PAIR(cospi_5_64, cospi_27_64);
    k1 = VP9_SET_COSPI_PAIR(cospi_27_64, -cospi_5_64);
    k2 = VP9_SET_COSPI_PAIR(cospi_21_64, cospi_11_64);
    k3 = VP9_SET_COSPI_PAIR(cospi_11_64, -cospi_21_64);
    VP9_MADD_BF(r13, r2, r5, r10, k0, k1, k2, k3, g4, g5, g6, g7);
    k0 = VP9_SET_COSPI_PAIR(cospi_13_64, cospi_19_64);
    k1 = VP9_SET_COSPI_PAIR(cospi_19_64, -cospi_13_64);
    k2 = VP9_SET_COSPI_PAIR(cospi_29_64, cospi_3_64);
    k3 = VP9_SET_COSPI_PAIR(cospi_3_64, -cospi_29_64);
    VP9_MADD_BF(r9, r6, r1, r14, k0, k1, k2, k3, g12, g13, g14, g15);
    BUTTERFLY_4(g4, g6, g14, g12, h10, h11, v6, v4);
    BUTTERFLY_4(h8, h9, h11, h10, out0, out1, h11, h10);
    out1 = -out1;
    SRARI_H2_SH(out0, out1, 6);
    dst0 = LD_UB(dst + 0 * dst_stride);
    dst1 = LD_UB(dst + 15 * dst_stride);
    ILVR_B2_SH(zero, dst0, zero, dst1, res0, res1);
    ADD2(res0, out0, res1, out1, res0, res1);
    CLIP_SH2_0_255(res0, res1);
    PCKEV_B2_SH(res0, res0, res1, res1, res0, res1);
    ST8x1_UB(res0, dst);
    ST8x1_UB(res1, dst + 15 * dst_stride);

    k0 = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
    k1 = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
    k2 = VP9_SET_COSPI_PAIR(cospi_20_64, -cospi_12_64);
    VP9_MADD_BF(g7, g5, g15, g13, k0, k1, k2, k0, h4, h5, h6, h7);
    BUTTERFLY_4(h0, h2, h6, h4, out8, out9, out11, out10);
    out8 = -out8;

    SRARI_H2_SH(out8, out9, 6);
    dst8 = LD_UB(dst + 1 * dst_stride);
    dst9 = LD_UB(dst + 14 * dst_stride);
    ILVR_B2_SH(zero, dst8, zero, dst9, res8, res9);
    ADD2(res8, out8, res9, out9, res8, res9);
    CLIP_SH2_0_255(res8, res9);
    PCKEV_B2_SH(res8, res8, res9, res9, res8, res9);
    ST8x1_UB(res8, dst + dst_stride);
    ST8x1_UB(res9, dst + 14 * dst_stride);

    k0 = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
    k1 = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
    k2 = VP9_SET_COSPI_PAIR(-cospi_24_64, cospi_8_64);
    VP9_MADD_BF(v0, v2, v4, v6, k0, k1, k2, k0, out4, out6, out5, out7);
    out4 = -out4;
    SRARI_H2_SH(out4, out5, 6);
    dst4 = LD_UB(dst + 3 * dst_stride);
    dst5 = LD_UB(dst + 12 * dst_stride);
    ILVR_B2_SH(zero, dst4, zero, dst5, res4, res5);
    ADD2(res4, out4, res5, out5, res4, res5);
    CLIP_SH2_0_255(res4, res5);
    PCKEV_B2_SH(res4, res4, res5, res5, res4, res5);
    ST8x1_UB(res4, dst + 3 * dst_stride);
    ST8x1_UB(res5, dst + 12 * dst_stride);

    VP9_MADD_BF(h1, h3, h5, h7, k0, k1, k2, k0, out12, out14, out13, out15);
    out13 = -out13;
    SRARI_H2_SH(out12, out13, 6);
    dst12 = LD_UB(dst + 2 * dst_stride);
    dst13 = LD_UB(dst + 13 * dst_stride);
    ILVR_B2_SH(zero, dst12, zero, dst13, res12, res13);
    ADD2(res12, out12, res13, out13, res12, res13);
    CLIP_SH2_0_255(res12, res13);
    PCKEV_B2_SH(res12, res12, res13, res13, res12, res13);
    ST8x1_UB(res12, dst + 2 * dst_stride);
    ST8x1_UB(res13, dst + 13 * dst_stride);

    k0 = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
    k3 = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
    VP9_MADD_SHORT(out6, out7, k0, k3, out6, out7);
    SRARI_H2_SH(out6, out7, 6);
    dst6 = LD_UB(dst + 4 * dst_stride);
    dst7 = LD_UB(dst + 11 * dst_stride);
    ILVR_B2_SH(zero, dst6, zero, dst7, res6, res7);
    ADD2(res6, out6, res7, out7, res6, res7);
    CLIP_SH2_0_255(res6, res7);
    PCKEV_B2_SH(res6, res6, res7, res7, res6, res7);
    ST8x1_UB(res6, dst + 4 * dst_stride);
    ST8x1_UB(res7, dst + 11 * dst_stride);

    VP9_MADD_SHORT(out10, out11, k0, k3, out10, out11);
    SRARI_H2_SH(out10, out11, 6);
    dst10 = LD_UB(dst + 6 * dst_stride);
    dst11 = LD_UB(dst + 9 * dst_stride);
    ILVR_B2_SH(zero, dst10, zero, dst11, res10, res11);
    ADD2(res10, out10, res11, out11, res10, res11);
    CLIP_SH2_0_255(res10, res11);
    PCKEV_B2_SH(res10, res10, res11, res11, res10, res11);
    ST8x1_UB(res10, dst + 6 * dst_stride);
    ST8x1_UB(res11, dst + 9 * dst_stride);

    k1 = VP9_SET_COSPI_PAIR(-cospi_16_64, -cospi_16_64);
    k2 = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
    VP9_MADD_SHORT(h10, h11, k1, k2, out2, out3);
    SRARI_H2_SH(out2, out3, 6);
    dst2 = LD_UB(dst + 7 * dst_stride);
    dst3 = LD_UB(dst + 8 * dst_stride);
    ILVR_B2_SH(zero, dst2, zero, dst3, res2, res3);
    ADD2(res2, out2, res3, out3, res2, res3);
    CLIP_SH2_0_255(res2, res3);
    PCKEV_B2_SH(res2, res2, res3, res3, res2, res3);
    ST8x1_UB(res2, dst + 7 * dst_stride);
    ST8x1_UB(res3, dst + 8 * dst_stride);

    VP9_MADD_SHORT(out14, out15, k1, k2, out14, out15);
    SRARI_H2_SH(out14, out15, 6);
    dst14 = LD_UB(dst + 5 * dst_stride);
    dst15 = LD_UB(dst + 10 * dst_stride);
    ILVR_B2_SH(zero, dst14, zero, dst15, res14, res15);
    ADD2(res14, out14, res15, out15, res14, res15);
    CLIP_SH2_0_255(res14, res15);
    PCKEV_B2_SH(res14, res14, res15, res15, res14, res15);
    ST8x1_UB(res14, dst + 5 * dst_stride);
    ST8x1_UB(res15, dst + 10 * dst_stride);
}

static void vp9_iadst16x16_colcol_addblk_msa(int16_t *input, uint8_t *dst,
                                             int32_t dst_stride)
{
    int16_t out_arr[16 * 16] ALLOC_ALIGNED(ALIGNMENT);
    int16_t *out = out_arr;
    int32_t i;

    /* transform rows */
    for (i = 0; i < 2; i++) {
        /* process 16 * 8 block */
        vp9_iadst16_1d_columns_msa((input + (i << 3)), (out + (i << 7)));
    }

    /* transform columns */
    for (i = 0; i < 2; i++) {
        /* process 8 * 16 block */
        vp9_iadst16_1d_columns_addblk_msa((out + (i << 3)), (dst + (i << 3)),
                                          dst_stride);
    }
}

static void vp9_iadst_idct_16x16_add_msa(int16_t *input, uint8_t *dst,
                                         int32_t dst_stride, int32_t eob)
{
    int32_t i;
    int16_t out[16 * 16];
    int16_t *out_ptr = &out[0];

    /* transform rows */
    for (i = 0; i < 2; i++) {
        /* process 8 * 16 block */
        vp9_iadst16_1d_columns_msa((input + (i << 3)), (out_ptr + (i << 7)));
    }

    /* transform columns */
    for (i = 0; i < 2; i++) {
        /* process 8 * 16 block */
        vp9_idct16_1d_columns_addblk_msa((out_ptr + (i << 3)),
                                         (dst + (i << 3)), dst_stride);
    }
}

1495 static void vp9_idct_iadst_16x16_add_msa(int16_t *input, uint8_t *dst,
1496  int32_t dst_stride, int32_t eob)
1497 {
1498  int32_t i;
1499  int16_t out[16 * 16];
1500  int16_t *out_ptr = &out[0];
1501 
1502  /* transform rows */
1503  for (i = 0; i < 2; i++) {
1504  /* process 8 * 16 block */
1505  vp9_idct16_1d_columns_msa((input + (i << 3)), (out_ptr + (i << 7)));
1506  }
1507 
1508  /* transform columns */
1509  for (i = 0; i < 2; i++) {
1510  /* process 8 * 16 block */
1511  vp9_iadst16_1d_columns_addblk_msa((out_ptr + (i << 3)),
1512  (dst + (i << 3)), dst_stride);
1513  }
1514 }
1515 
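/* Note: final stage of one 8x32 slice of the 32-point IDCT. The even and
 * odd 16-row halves are recombined with a butterfly: each sum feeds one
 * of output rows 0..15 and the matching difference one of the mirrored
 * rows 16..31 (staged here into tmp_buf in the permuted order left by
 * the earlier stages), after which all 32 rows are transposed out to the
 * 32-column destination buffer in 8x8 tiles. */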
1516 static void vp9_idct_butterfly_transpose_store(int16_t *tmp_buf,
1517  int16_t *tmp_eve_buf,
1518  int16_t *tmp_odd_buf,
1519  int16_t *dst)
1520 {
1521  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
1522  v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;
1523 
1524  /* FINAL BUTTERFLY : Dependency on Even & Odd */
1525  vec0 = LD_SH(tmp_odd_buf);
1526  vec1 = LD_SH(tmp_odd_buf + 9 * 8);
1527  vec2 = LD_SH(tmp_odd_buf + 14 * 8);
1528  vec3 = LD_SH(tmp_odd_buf + 6 * 8);
1529  loc0 = LD_SH(tmp_eve_buf);
1530  loc1 = LD_SH(tmp_eve_buf + 8 * 8);
1531  loc2 = LD_SH(tmp_eve_buf + 4 * 8);
1532  loc3 = LD_SH(tmp_eve_buf + 12 * 8);
1533 
1534  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m0, m4, m2, m6);
1535 
1536  ST_SH((loc0 - vec3), (tmp_buf + 31 * 8));
1537  ST_SH((loc1 - vec2), (tmp_buf + 23 * 8));
1538  ST_SH((loc2 - vec1), (tmp_buf + 27 * 8));
1539  ST_SH((loc3 - vec0), (tmp_buf + 19 * 8));
1540 
1541  /* Load 8 & Store 8 */
1542  vec0 = LD_SH(tmp_odd_buf + 4 * 8);
1543  vec1 = LD_SH(tmp_odd_buf + 13 * 8);
1544  vec2 = LD_SH(tmp_odd_buf + 10 * 8);
1545  vec3 = LD_SH(tmp_odd_buf + 3 * 8);
1546  loc0 = LD_SH(tmp_eve_buf + 2 * 8);
1547  loc1 = LD_SH(tmp_eve_buf + 10 * 8);
1548  loc2 = LD_SH(tmp_eve_buf + 6 * 8);
1549  loc3 = LD_SH(tmp_eve_buf + 14 * 8);
1550 
1551  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7);
1552 
1553  ST_SH((loc0 - vec3), (tmp_buf + 29 * 8));
1554  ST_SH((loc1 - vec2), (tmp_buf + 21 * 8));
1555  ST_SH((loc2 - vec1), (tmp_buf + 25 * 8));
1556  ST_SH((loc3 - vec0), (tmp_buf + 17 * 8));
1557 
1558  /* Load 8 & Store 8 */
1559  vec0 = LD_SH(tmp_odd_buf + 2 * 8);
1560  vec1 = LD_SH(tmp_odd_buf + 11 * 8);
1561  vec2 = LD_SH(tmp_odd_buf + 12 * 8);
1562  vec3 = LD_SH(tmp_odd_buf + 7 * 8);
1563  loc0 = LD_SH(tmp_eve_buf + 1 * 8);
1564  loc1 = LD_SH(tmp_eve_buf + 9 * 8);
1565  loc2 = LD_SH(tmp_eve_buf + 5 * 8);
1566  loc3 = LD_SH(tmp_eve_buf + 13 * 8);
1567 
1568  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6);
1569 
1570  ST_SH((loc0 - vec3), (tmp_buf + 30 * 8));
1571  ST_SH((loc1 - vec2), (tmp_buf + 22 * 8));
1572  ST_SH((loc2 - vec1), (tmp_buf + 26 * 8));
1573  ST_SH((loc3 - vec0), (tmp_buf + 18 * 8));
1574 
1575  /* Load 8 & Store 8 */
1576  vec0 = LD_SH(tmp_odd_buf + 5 * 8);
1577  vec1 = LD_SH(tmp_odd_buf + 15 * 8);
1578  vec2 = LD_SH(tmp_odd_buf + 8 * 8);
1579  vec3 = LD_SH(tmp_odd_buf + 1 * 8);
1580  loc0 = LD_SH(tmp_eve_buf + 3 * 8);
1581  loc1 = LD_SH(tmp_eve_buf + 11 * 8);
1582  loc2 = LD_SH(tmp_eve_buf + 7 * 8);
1583  loc3 = LD_SH(tmp_eve_buf + 15 * 8);
1584 
1585  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7);
1586 
1587  ST_SH((loc0 - vec3), (tmp_buf + 28 * 8));
1588  ST_SH((loc1 - vec2), (tmp_buf + 20 * 8));
1589  ST_SH((loc2 - vec1), (tmp_buf + 24 * 8));
1590  ST_SH((loc3 - vec0), (tmp_buf + 16 * 8));
1591 
1592  /* Transpose : 16 vectors */
1593  /* 1st & 2nd 8x8 */
1594  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3,
1595  m0, n0, m1, n1, m2, n2, m3, n3);
1596  ST_SH4(m0, n0, m1, n1, (dst + 0), 32);
1597  ST_SH4(m2, n2, m3, n3, (dst + 4 * 32), 32);
1598 
1599  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7,
1600  m4, n4, m5, n5, m6, n6, m7, n7);
1601  ST_SH4(m4, n4, m5, n5, (dst + 8), 32);
1602  ST_SH4(m6, n6, m7, n7, (dst + 8 + 4 * 32), 32);
1603 
1604  /* 3rd & 4th 8x8 */
1605  LD_SH8((tmp_buf + 8 * 16), 8, m0, n0, m1, n1, m2, n2, m3, n3);
1606  LD_SH8((tmp_buf + 12 * 16), 8, m4, n4, m5, n5, m6, n6, m7, n7);
1607  TRANSPOSE8x8_SH_SH(m0, n0, m1, n1, m2, n2, m3, n3,
1608  m0, n0, m1, n1, m2, n2, m3, n3);
1609  ST_SH4(m0, n0, m1, n1, (dst + 16), 32);
1610  ST_SH4(m2, n2, m3, n3, (dst + 16 + 4 * 32), 32);
1611 
1612  TRANSPOSE8x8_SH_SH(m4, n4, m5, n5, m6, n6, m7, n7,
1613  m4, n4, m5, n5, m6, n6, m7, n7);
1614  ST_SH4(m4, n4, m5, n5, (dst + 24), 32);
1615  ST_SH4(m6, n6, m7, n7, (dst + 24 + 4 * 32), 32);
1616 }
1617 
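/* Note: even half of the 32-point IDCT for one 8-column slice. The first
 * LD_SH8 (stride 4 * 32 int16_t) consumes rows 0, 4, ..., 28 of tmp_buf
 * and the second, after the += 2 * 32 bump, rows 2, 6, ..., 30; each row
 * is overwritten with zeros as it is read, so the coefficient buffer is
 * left cleared, and the 16 intermediate rows land in tmp_eve_buf. */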
1618 static void vp9_idct8x32_column_even_process_store(int16_t *tmp_buf,
1619  int16_t *tmp_eve_buf)
1620 {
1621  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
1622  v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
1623  v8i16 stp0, stp1, stp2, stp3, stp4, stp5, stp6, stp7;
1624  v8i16 zero = { 0 };
1625 
1626  /* Even stage 1 */
1627  LD_SH8(tmp_buf, (4 * 32), reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
1628  ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, tmp_buf, (4 * 32));
1629  tmp_buf += (2 * 32);
1630 
1631  VP9_DOTP_CONST_PAIR(reg1, reg7, cospi_28_64, cospi_4_64, reg1, reg7);
1632  VP9_DOTP_CONST_PAIR(reg5, reg3, cospi_12_64, cospi_20_64, reg5, reg3);
1633  BUTTERFLY_4(reg1, reg7, reg3, reg5, vec1, vec3, vec2, vec0);
1634  VP9_DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
1635 
1636  loc1 = vec3;
1637  loc0 = vec1;
1638 
1639  VP9_DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4);
1640  VP9_DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6);
1641  BUTTERFLY_4(reg4, reg0, reg2, reg6, vec1, vec3, vec2, vec0);
1642  BUTTERFLY_4(vec0, vec1, loc1, loc0, stp3, stp0, stp7, stp4);
1643  BUTTERFLY_4(vec2, vec3, loc3, loc2, stp2, stp1, stp6, stp5);
1644 
1645  /* Even stage 2 */
1646  /* Load 8 */
1647  LD_SH8(tmp_buf, (4 * 32), reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);
1648  ST_SH8(zero, zero, zero, zero, zero, zero, zero, zero, tmp_buf, (4 * 32));
1649 
1650  VP9_DOTP_CONST_PAIR(reg0, reg7, cospi_30_64, cospi_2_64, reg0, reg7);
1651  VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_14_64, cospi_18_64, reg4, reg3);
1652  VP9_DOTP_CONST_PAIR(reg2, reg5, cospi_22_64, cospi_10_64, reg2, reg5);
1653  VP9_DOTP_CONST_PAIR(reg6, reg1, cospi_6_64, cospi_26_64, reg6, reg1);
1654 
1655  vec0 = reg0 + reg4;
1656  reg0 = reg0 - reg4;
1657  reg4 = reg6 + reg2;
1658  reg6 = reg6 - reg2;
1659  reg2 = reg1 + reg5;
1660  reg1 = reg1 - reg5;
1661  reg5 = reg7 + reg3;
1662  reg7 = reg7 - reg3;
1663  reg3 = vec0;
1664 
1665  vec1 = reg2;
1666  reg2 = reg3 + reg4;
1667  reg3 = reg3 - reg4;
1668  reg4 = reg5 - vec1;
1669  reg5 = reg5 + vec1;
1670 
1671  VP9_DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7);
1672  VP9_DOTP_CONST_PAIR((-reg6), reg1, cospi_24_64, cospi_8_64, reg6, reg1);
1673 
1674  vec0 = reg0 - reg6;
1675  reg0 = reg0 + reg6;
1676  vec1 = reg7 - reg1;
1677  reg7 = reg7 + reg1;
1678 
1679  VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1);
1680  VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_16_64, cospi_16_64, reg3, reg4);
1681 
1682  /* Even stage 3 : Dependency on Even stage 1 & Even stage 2 */
1683  /* Store 8 */
1684  BUTTERFLY_4(stp0, stp1, reg7, reg5, loc1, loc3, loc2, loc0);
1685  ST_SH2(loc1, loc3, tmp_eve_buf, 8);
1686  ST_SH2(loc2, loc0, (tmp_eve_buf + 14 * 8), 8);
1687 
1688  BUTTERFLY_4(stp2, stp3, reg4, reg1, loc1, loc3, loc2, loc0);
1689  ST_SH2(loc1, loc3, (tmp_eve_buf + 2 * 8), 8);
1690  ST_SH2(loc2, loc0, (tmp_eve_buf + 12 * 8), 8);
1691 
1692  /* Store 8 */
1693  BUTTERFLY_4(stp4, stp5, reg6, reg3, loc1, loc3, loc2, loc0);
1694  ST_SH2(loc1, loc3, (tmp_eve_buf + 4 * 8), 8);
1695  ST_SH2(loc2, loc0, (tmp_eve_buf + 10 * 8), 8);
1696 
1697  BUTTERFLY_4(stp6, stp7, reg2, reg0, loc1, loc3, loc2, loc0);
1698  ST_SH2(loc1, loc3, (tmp_eve_buf + 6 * 8), 8);
1699  ST_SH2(loc2, loc0, (tmp_eve_buf + 8 * 8), 8);
1700 }
1701 
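/* Note: odd half of the 32-point IDCT for the same 8-column slice: the
 * odd-indexed rows 1, 3, ..., 31 of tmp_buf are loaded (and likewise
 * zeroed), passed through three odd stages, and left as 16 intermediate
 * rows in tmp_odd_buf for the final butterfly. */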
1702 static void vp9_idct8x32_column_odd_process_store(int16_t *tmp_buf,
1703  int16_t *tmp_odd_buf)
1704 {
1705  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
1706  v8i16 reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
1707  v8i16 zero = { 0 };
1708 
1709  /* Odd stage 1 */
1710  reg0 = LD_SH(tmp_buf + 32);
1711  reg1 = LD_SH(tmp_buf + 7 * 32);
1712  reg2 = LD_SH(tmp_buf + 9 * 32);
1713  reg3 = LD_SH(tmp_buf + 15 * 32);
1714  reg4 = LD_SH(tmp_buf + 17 * 32);
1715  reg5 = LD_SH(tmp_buf + 23 * 32);
1716  reg6 = LD_SH(tmp_buf + 25 * 32);
1717  reg7 = LD_SH(tmp_buf + 31 * 32);
1718 
1719  ST_SH(zero, tmp_buf + 32);
1720  ST_SH(zero, tmp_buf + 7 * 32);
1721  ST_SH(zero, tmp_buf + 9 * 32);
1722  ST_SH(zero, tmp_buf + 15 * 32);
1723  ST_SH(zero, tmp_buf + 17 * 32);
1724  ST_SH(zero, tmp_buf + 23 * 32);
1725  ST_SH(zero, tmp_buf + 25 * 32);
1726  ST_SH(zero, tmp_buf + 31 * 32);
1727 
1728  VP9_DOTP_CONST_PAIR(reg0, reg7, cospi_31_64, cospi_1_64, reg0, reg7);
1729  VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_15_64, cospi_17_64, reg3, reg4);
1730  VP9_DOTP_CONST_PAIR(reg2, reg5, cospi_23_64, cospi_9_64, reg2, reg5);
1731  VP9_DOTP_CONST_PAIR(reg6, reg1, cospi_7_64, cospi_25_64, reg1, reg6);
1732 
1733  vec0 = reg0 + reg3;
1734  reg0 = reg0 - reg3;
1735  reg3 = reg7 + reg4;
1736  reg7 = reg7 - reg4;
1737  reg4 = reg1 + reg2;
1738  reg1 = reg1 - reg2;
1739  reg2 = reg6 + reg5;
1740  reg6 = reg6 - reg5;
1741  reg5 = vec0;
1742 
1743  /* 4 Stores */
1744  ADD2(reg5, reg4, reg3, reg2, vec0, vec1);
1745  ST_SH2(vec0, vec1, (tmp_odd_buf + 4 * 8), 8);
1746  SUB2(reg5, reg4, reg3, reg2, vec0, vec1);
1747  VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_24_64, cospi_8_64, vec0, vec1);
1748  ST_SH2(vec0, vec1, tmp_odd_buf, 8);
1749 
1750  /* 4 Stores */
1751  VP9_DOTP_CONST_PAIR(reg7, reg0, cospi_28_64, cospi_4_64, reg0, reg7);
1752  VP9_DOTP_CONST_PAIR(reg6, reg1, -cospi_4_64, cospi_28_64, reg1, reg6);
1753  BUTTERFLY_4(reg0, reg7, reg6, reg1, vec0, vec1, vec2, vec3);
1754  ST_SH2(vec0, vec1, (tmp_odd_buf + 6 * 8), 8);
1755  VP9_DOTP_CONST_PAIR(vec2, vec3, cospi_24_64, cospi_8_64, vec2, vec3);
1756  ST_SH2(vec2, vec3, (tmp_odd_buf + 2 * 8), 8);
1757 
1758  /* Odd stage 2 */
1759  /* 8 loads */
1760  reg0 = LD_SH(tmp_buf + 3 * 32);
1761  reg1 = LD_SH(tmp_buf + 5 * 32);
1762  reg2 = LD_SH(tmp_buf + 11 * 32);
1763  reg3 = LD_SH(tmp_buf + 13 * 32);
1764  reg4 = LD_SH(tmp_buf + 19 * 32);
1765  reg5 = LD_SH(tmp_buf + 21 * 32);
1766  reg6 = LD_SH(tmp_buf + 27 * 32);
1767  reg7 = LD_SH(tmp_buf + 29 * 32);
1768 
1769  ST_SH(zero, tmp_buf + 3 * 32);
1770  ST_SH(zero, tmp_buf + 5 * 32);
1771  ST_SH(zero, tmp_buf + 11 * 32);
1772  ST_SH(zero, tmp_buf + 13 * 32);
1773  ST_SH(zero, tmp_buf + 19 * 32);
1774  ST_SH(zero, tmp_buf + 21 * 32);
1775  ST_SH(zero, tmp_buf + 27 * 32);
1776  ST_SH(zero, tmp_buf + 29 * 32);
1777 
1778  VP9_DOTP_CONST_PAIR(reg1, reg6, cospi_27_64, cospi_5_64, reg1, reg6);
1779  VP9_DOTP_CONST_PAIR(reg5, reg2, cospi_11_64, cospi_21_64, reg2, reg5);
1780  VP9_DOTP_CONST_PAIR(reg3, reg4, cospi_19_64, cospi_13_64, reg3, reg4);
1781  VP9_DOTP_CONST_PAIR(reg7, reg0, cospi_3_64, cospi_29_64, reg0, reg7);
1782 
1783  /* 4 Stores */
1784  SUB4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4,
1785  vec0, vec1, vec2, vec3);
1786  VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1);
1787  VP9_DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3);
1788  BUTTERFLY_4(loc2, loc3, loc1, loc0, vec0, vec1, vec3, vec2);
1789  ST_SH2(vec0, vec1, (tmp_odd_buf + 12 * 8), 3 * 8);
1790  VP9_DOTP_CONST_PAIR(vec3, vec2, -cospi_8_64, cospi_24_64, vec0, vec1);
1791  ST_SH2(vec0, vec1, (tmp_odd_buf + 10 * 8), 8);
1792 
1793  /* 4 Stores */
1794  ADD4(reg0, reg3, reg1, reg2, reg5, reg6, reg4, reg7,
1795  vec0, vec1, vec2, vec3);
1796  BUTTERFLY_4(vec0, vec3, vec2, vec1, reg0, reg1, reg3, reg2);
1797  ST_SH2(reg0, reg1, (tmp_odd_buf + 13 * 8), 8);
1798  VP9_DOTP_CONST_PAIR(reg3, reg2, -cospi_8_64, cospi_24_64, reg0, reg1);
1799  ST_SH2(reg0, reg1, (tmp_odd_buf + 8 * 8), 8);
1800 
1801  /* Odd stage 3 : Dependency on Odd stage 1 & Odd stage 2 */
1802  /* Load 8 & Store 8 */
1803  LD_SH4(tmp_odd_buf, 8, reg0, reg1, reg2, reg3);
1804  LD_SH4((tmp_odd_buf + 8 * 8), 8, reg4, reg5, reg6, reg7);
1805 
1806  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7,
1807  loc0, loc1, loc2, loc3);
1808  ST_SH4(loc0, loc1, loc2, loc3, tmp_odd_buf, 8);
1809 
1810  SUB2(reg0, reg4, reg1, reg5, vec0, vec1);
1811  VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);
1812 
1813  SUB2(reg2, reg6, reg3, reg7, vec0, vec1);
1814  VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
1815  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 8 * 8), 8);
1816 
1817  /* Load 8 & Store 8 */
1818  LD_SH4((tmp_odd_buf + 4 * 8), 8, reg1, reg2, reg0, reg3);
1819  LD_SH4((tmp_odd_buf + 12 * 8), 8, reg4, reg5, reg6, reg7);
1820 
1821  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7,
1822  loc0, loc1, loc2, loc3);
1823  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 4 * 8), 8);
1824 
1825  SUB2(reg0, reg4, reg3, reg7, vec0, vec1);
1826  VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);
1827 
1828  SUB2(reg1, reg5, reg2, reg6, vec0, vec1);
1829  VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
1830  ST_SH4(loc0, loc1, loc2, loc3, (tmp_odd_buf + 12 * 8), 8);
1831 }
1832 
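/* Note: column-pass counterpart of vp9_idct_butterfly_transpose_store():
 * the same even/odd butterfly, but each result is rounded with the final
 * shift of 6 (SRARI_H4_SH) and accumulated straight into the picture,
 * the sums covering destination rows 0..15 and the differences rows
 * 16..31. */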
1833 static void vp9_idct8x32_column_butterfly_addblk(int16_t *tmp_eve_buf,
1834  int16_t *tmp_odd_buf,
1835  uint8_t *dst,
1836  int32_t dst_stride)
1837 {
1838  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
1839  v8i16 m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;
1840 
1841  /* FINAL BUTTERFLY : Dependency on Even & Odd */
1842  vec0 = LD_SH(tmp_odd_buf);
1843  vec1 = LD_SH(tmp_odd_buf + 9 * 8);
1844  vec2 = LD_SH(tmp_odd_buf + 14 * 8);
1845  vec3 = LD_SH(tmp_odd_buf + 6 * 8);
1846  loc0 = LD_SH(tmp_eve_buf);
1847  loc1 = LD_SH(tmp_eve_buf + 8 * 8);
1848  loc2 = LD_SH(tmp_eve_buf + 4 * 8);
1849  loc3 = LD_SH(tmp_eve_buf + 12 * 8);
1850 
1851  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m0, m4, m2, m6);
1852  SRARI_H4_SH(m0, m2, m4, m6, 6);
1853  VP9_ADDBLK_ST8x4_UB(dst, (4 * dst_stride), m0, m2, m4, m6);
1854 
1855  SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m6, m2, m4, m0);
1856  SRARI_H4_SH(m0, m2, m4, m6, 6);
1857  VP9_ADDBLK_ST8x4_UB((dst + 19 * dst_stride), (4 * dst_stride),
1858  m0, m2, m4, m6);
1859 
1860  /* Load 8 & Store 8 */
1861  vec0 = LD_SH(tmp_odd_buf + 4 * 8);
1862  vec1 = LD_SH(tmp_odd_buf + 13 * 8);
1863  vec2 = LD_SH(tmp_odd_buf + 10 * 8);
1864  vec3 = LD_SH(tmp_odd_buf + 3 * 8);
1865  loc0 = LD_SH(tmp_eve_buf + 2 * 8);
1866  loc1 = LD_SH(tmp_eve_buf + 10 * 8);
1867  loc2 = LD_SH(tmp_eve_buf + 6 * 8);
1868  loc3 = LD_SH(tmp_eve_buf + 14 * 8);
1869 
1870  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7);
1871  SRARI_H4_SH(m1, m3, m5, m7, 6);
1872  VP9_ADDBLK_ST8x4_UB((dst + 2 * dst_stride), (4 * dst_stride),
1873  m1, m3, m5, m7);
1874 
1875  SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m7, m3, m5, m1);
1876  SRARI_H4_SH(m1, m3, m5, m7, 6);
1877  VP9_ADDBLK_ST8x4_UB((dst + 17 * dst_stride), (4 * dst_stride),
1878  m1, m3, m5, m7);
1879 
1880  /* Load 8 & Store 8 */
1881  vec0 = LD_SH(tmp_odd_buf + 2 * 8);
1882  vec1 = LD_SH(tmp_odd_buf + 11 * 8);
1883  vec2 = LD_SH(tmp_odd_buf + 12 * 8);
1884  vec3 = LD_SH(tmp_odd_buf + 7 * 8);
1885  loc0 = LD_SH(tmp_eve_buf + 1 * 8);
1886  loc1 = LD_SH(tmp_eve_buf + 9 * 8);
1887  loc2 = LD_SH(tmp_eve_buf + 5 * 8);
1888  loc3 = LD_SH(tmp_eve_buf + 13 * 8);
1889 
1890  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6);
1891  SRARI_H4_SH(n0, n2, n4, n6, 6);
1892  VP9_ADDBLK_ST8x4_UB((dst + 1 * dst_stride), (4 * dst_stride),
1893  n0, n2, n4, n6);
1894 
1895  SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n6, n2, n4, n0);
1896  SRARI_H4_SH(n0, n2, n4, n6, 6);
1897  VP9_ADDBLK_ST8x4_UB((dst + 18 * dst_stride), (4 * dst_stride),
1898  n0, n2, n4, n6);
1899 
1900  /* Load 8 & Store 8 */
1901  vec0 = LD_SH(tmp_odd_buf + 5 * 8);
1902  vec1 = LD_SH(tmp_odd_buf + 15 * 8);
1903  vec2 = LD_SH(tmp_odd_buf + 8 * 8);
1904  vec3 = LD_SH(tmp_odd_buf + 1 * 8);
1905  loc0 = LD_SH(tmp_eve_buf + 3 * 8);
1906  loc1 = LD_SH(tmp_eve_buf + 11 * 8);
1907  loc2 = LD_SH(tmp_eve_buf + 7 * 8);
1908  loc3 = LD_SH(tmp_eve_buf + 15 * 8);
1909 
1910  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7);
1911  SRARI_H4_SH(n1, n3, n5, n7, 6);
1912  VP9_ADDBLK_ST8x4_UB((dst + 3 * dst_stride), (4 * dst_stride),
1913  n1, n3, n5, n7);
1914 
1915  SUB4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n7, n3, n5, n1);
1916  SRARI_H4_SH(n1, n3, n5, n7, 6);
1917  VP9_ADDBLK_ST8x4_UB((dst + 16 * dst_stride), (4 * dst_stride),
1918  n1, n3, n5, n7);
1919 }
1920 
1921 static void vp9_idct8x32_1d_columns_addblk_msa(int16_t *input, uint8_t *dst,
1922  int32_t dst_stride)
1923 {
1924  int16_t tmp_odd_buf[16 * 8] ALLOC_ALIGNED(ALIGNMENT);
1925  int16_t tmp_eve_buf[16 * 8] ALLOC_ALIGNED(ALIGNMENT);
1926 
1927  vp9_idct8x32_column_even_process_store(input, &tmp_eve_buf[0]);
1928  vp9_idct8x32_column_odd_process_store(input, &tmp_odd_buf[0]);
1929  vp9_idct8x32_column_butterfly_addblk(&tmp_eve_buf[0], &tmp_odd_buf[0],
1930  dst, dst_stride);
1931 }
1932 
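/* Note: same even/odd decomposition as above, but the recombined slice
 * is transposed into a 32-column buffer for a following pass instead of
 * being added to the picture; tmp_buf here is the caller-provided 8x32
 * scratch used to stage the mirrored rows. */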
1933 static void vp9_idct8x32_1d_columns_msa(int16_t *input, int16_t *output,
1934  int16_t *tmp_buf)
1935 {
1936  int16_t tmp_odd_buf[16 * 8] ALLOC_ALIGNED(ALIGNMENT);
1937  int16_t tmp_eve_buf[16 * 8] ALLOC_ALIGNED(ALIGNMENT);
1938 
1939  vp9_idct8x32_column_even_process_store(input, &tmp_eve_buf[0]);
1940  vp9_idct8x32_column_odd_process_store(input, &tmp_odd_buf[0]);
1941  vp9_idct_butterfly_transpose_store(tmp_buf, &tmp_eve_buf[0],
1942  &tmp_odd_buf[0], output);
1943 }
1944 
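/* Note: DC-only 32x32 path. input[0] goes through both 1-D passes, which
 * collapse to one cospi_16_64 multiply with 14-bit rounding each, plus
 * the final shift of 6; the resulting bias is splatted and added to the
 * whole 32x32 block. E.g. input[0] = 100: (100 * 11585 + 8192) >> 14 =
 * 71, (71 * 11585 + 8192) >> 14 = 50, (50 + 32) >> 6 = 1, so every pixel
 * is raised by 1 (with clipping). input[0] is cleared, since the decoder
 * expects the coefficient block to be zeroed after the transform. */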
1945 static void vp9_idct32x32_1_add_msa(int16_t *input, uint8_t *dst,
1946  int32_t dst_stride)
1947 {
1948  int32_t i;
1949  int16_t out;
1950  v16u8 dst0, dst1, dst2, dst3, tmp0, tmp1, tmp2, tmp3;
1951  v8i16 res0, res1, res2, res3, res4, res5, res6, res7, vec;
1952 
1953  out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), VP9_DCT_CONST_BITS);
1954  out = ROUND_POWER_OF_TWO((out * cospi_16_64), VP9_DCT_CONST_BITS);
1955  out = ROUND_POWER_OF_TWO(out, 6);
1956  input[0] = 0;
1957 
1958  vec = __msa_fill_h(out);
1959 
1960  for (i = 16; i--;) {
1961  LD_UB2(dst, 16, dst0, dst1);
1962  LD_UB2(dst + dst_stride, 16, dst2, dst3);
1963 
1964  UNPCK_UB_SH(dst0, res0, res4);
1965  UNPCK_UB_SH(dst1, res1, res5);
1966  UNPCK_UB_SH(dst2, res2, res6);
1967  UNPCK_UB_SH(dst3, res3, res7);
1968  ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2,
1969  res3);
1970  ADD4(res4, vec, res5, vec, res6, vec, res7, vec, res4, res5, res6,
1971  res7);
1972  CLIP_SH4_0_255(res0, res1, res2, res3);
1973  CLIP_SH4_0_255(res4, res5, res6, res7);
1974  PCKEV_B4_UB(res4, res0, res5, res1, res6, res2, res7, res3,
1975  tmp0, tmp1, tmp2, tmp3);
1976 
1977  ST_UB2(tmp0, tmp1, dst, 16);
1978  dst += dst_stride;
1979  ST_UB2(tmp2, tmp3, dst, 16);
1980  dst += dst_stride;
1981  }
1982 }
1983 
1984 static void vp9_idct32x32_34_colcol_addblk_msa(int16_t *input, uint8_t *dst,
1985  int32_t dst_stride)
1986 {
1987  int32_t i;
1988  int16_t out_arr[32 * 32] ALLOC_ALIGNED(ALIGNMENT);
1989  int16_t *out_ptr = out_arr;
1990  int16_t tmp_buf[8 * 32] ALLOC_ALIGNED(ALIGNMENT);
1991 
1992  for (i = 32; i--;) {
1993  __asm__ volatile (
1994  "sw $zero, (%[out_ptr]) \n\t"
1995  "sw $zero, 4(%[out_ptr]) \n\t"
1996  "sw $zero, 8(%[out_ptr]) \n\t"
1997  "sw $zero, 12(%[out_ptr]) \n\t"
1998  "sw $zero, 16(%[out_ptr]) \n\t"
1999  "sw $zero, 20(%[out_ptr]) \n\t"
2000  "sw $zero, 24(%[out_ptr]) \n\t"
2001  "sw $zero, 28(%[out_ptr]) \n\t"
2002  "sw $zero, 32(%[out_ptr]) \n\t"
2003  "sw $zero, 36(%[out_ptr]) \n\t"
2004  "sw $zero, 40(%[out_ptr]) \n\t"
2005  "sw $zero, 44(%[out_ptr]) \n\t"
2006  "sw $zero, 48(%[out_ptr]) \n\t"
2007  "sw $zero, 52(%[out_ptr]) \n\t"
2008  "sw $zero, 56(%[out_ptr]) \n\t"
2009  "sw $zero, 60(%[out_ptr]) \n\t"
2010 
2011  :
2012  : [out_ptr] "r" (out_ptr)
2013  );
2014 
2015  out_ptr += 32;
2016  }
2017 
2018  out_ptr = out_arr;
2019 
2020  /* process 8*32 block */
2021  vp9_idct8x32_1d_columns_msa(input, out_ptr, &tmp_buf[0]);
2022 
2023  /* transform columns */
2024  for (i = 0; i < 4; i++) {
2025  /* process 8*32 block */
2026  vp9_idct8x32_1d_columns_addblk_msa((out_ptr + (i << 3)),
2027  (dst + (i << 3)), dst_stride);
2028  }
2029 }
2030 
2031 static void vp9_idct32x32_colcol_addblk_msa(int16_t *input, uint8_t *dst,
2032  int32_t dst_stride)
2033 {
2034  int32_t i;
2035  int16_t out_arr[32 * 32] ALLOC_ALIGNED(ALIGNMENT);
2036  int16_t *out_ptr = out_arr;
2037  int16_t tmp_buf[8 * 32] ALLOC_ALIGNED(ALIGNMENT);
2038 
2039  /* transform rows */
2040  for (i = 0; i < 4; i++) {
2041  /* process 8*32 block */
2042  vp9_idct8x32_1d_columns_msa((input + (i << 3)), (out_ptr + (i << 8)),
2043  &tmp_buf[0]);
2044  }
2045 
2046  /* transform columns */
2047  for (i = 0; i < 4; i++) {
2048  /* process 8*32 block */
2049  vp9_idct8x32_1d_columns_addblk_msa((out_ptr + (i << 3)),
2050  (dst + (i << 3)), dst_stride);
2051  }
2052 }
2053 
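/* Note: exported entry points, matching the vp9dsp itxfm_add() prototype.
 * Each dispatches on eob (the end-of-block value, roughly the number of
 * decoded coefficients) to a DC-only, partial or full inverse transform. */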
2054 void ff_idct_idct_4x4_add_msa(uint8_t *dst, ptrdiff_t stride,
2055  int16_t *block, int eob)
2056 {
2057  if (eob > 1) {
2058  vp9_idct4x4_colcol_addblk_msa(block, dst, stride);
2059  }
2060  else {
2061  vp9_idct4x4_1_add_msa(block, dst, stride);
2062  }
2063 }
2064 
2065 void ff_idct_idct_8x8_add_msa(uint8_t *dst, ptrdiff_t stride,
2066  int16_t *block, int eob)
2067 {
2068  if (eob == 1) {
2069  vp9_idct8x8_1_add_msa(block, dst, stride);
2070  }
2071  else if (eob <= 12) {
2072  vp9_idct8x8_12_colcol_addblk_msa(block, dst, stride);
2073  }
2074  else {
2075  vp9_idct8x8_colcol_addblk_msa(block, dst, stride);
2076  }
2077 }
2078 
2079 void ff_idct_idct_16x16_add_msa(uint8_t *dst, ptrdiff_t stride,
2080  int16_t *block, int eob)
2081 {
2082  if (eob == 1) {
2083  /* DC only DCT coefficient. */
2084  vp9_idct16x16_1_add_msa(block, dst, stride);
2085  }
2086  else if (eob <= 10) {
2087  vp9_idct16x16_10_colcol_addblk_msa(block, dst, stride);
2088  }
2089  else {
2090  vp9_idct16x16_colcol_addblk_msa(block, dst, stride);
2091  }
2092 }
2093 
2094 void ff_idct_idct_32x32_add_msa(uint8_t *dst, ptrdiff_t stride,
2095  int16_t *block, int eob)
2096 {
2097  if (eob == 1) {
2098  vp9_idct32x32_1_add_msa(block, dst, stride);
2099  }
2100  else if (eob <= 34) {
2101  vp9_idct32x32_34_colcol_addblk_msa(block, dst, stride);
2102  }
2103  else {
2104  vp9_idct32x32_colcol_addblk_msa(block, dst, stride);
2105  }
2106 }
2107 
2108 void ff_iadst_iadst_4x4_add_msa(uint8_t *dst, ptrdiff_t stride,
2109  int16_t *block, int eob)
2110 {
2111  vp9_iadst4x4_colcol_addblk_msa(block, dst, stride);
2112 }
2113 
2114 void ff_iadst_iadst_8x8_add_msa(uint8_t *dst, ptrdiff_t stride,
2115  int16_t *block, int eob)
2116 {
2117  vp9_iadst8x8_colcol_addblk_msa(block, dst, stride);
2118 }
2119 
2120 void ff_iadst_iadst_16x16_add_msa(uint8_t *dst, ptrdiff_t stride,
2121  int16_t *block, int eob)
2122 {
2123  vp9_iadst16x16_colcol_addblk_msa(block, dst, stride);
2124 }
2125 
2126 void ff_idct_iadst_4x4_add_msa(uint8_t *dst, ptrdiff_t stride,
2127  int16_t *block, int eob)
2128 {
2129  vp9_idct_iadst_4x4_add_msa(block, dst, stride, eob);
2130 }
2131 
2132 void ff_idct_iadst_8x8_add_msa(uint8_t *dst, ptrdiff_t stride,
2133  int16_t *block, int eob)
2134 {
2135  vp9_idct_iadst_8x8_add_msa(block, dst, stride, eob);
2136 }
2137 
2138 void ff_idct_iadst_16x16_add_msa(uint8_t *dst, ptrdiff_t stride,
2139  int16_t *block, int eob)
2140 {
2141  vp9_idct_iadst_16x16_add_msa(block, dst, stride, eob);
2142 }
2143 
2144 void ff_iadst_idct_4x4_add_msa(uint8_t *dst, ptrdiff_t stride,
2145  int16_t *block, int eob)
2146 {
2147  vp9_iadst_idct_4x4_add_msa(block, dst, stride, eob);
2148 }
2149 
2150 void ff_iadst_idct_8x8_add_msa(uint8_t *dst, ptrdiff_t stride,
2151  int16_t *block, int eob)
2152 {
2153  vp9_iadst_idct_8x8_add_msa(block, dst, stride, eob);
2154 }
2155 
2156 void ff_iadst_idct_16x16_add_msa(uint8_t *dst, ptrdiff_t stride,
2157  int16_t *block, int eob)
2158 {
2159  vp9_iadst_idct_16x16_add_msa(block, dst, stride, eob);
2160 }