h263dsp_msa.c
/*
 * Copyright (c) 2015 Manojkumar Bhosale (Manojkumar.Bhosale@imgtec.com)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/mips/generic_macros_msa.h"
#include "h263dsp_mips.h"
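
/* Deblocking filter strength, indexed by the quantizer scale. */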
static const uint8_t h263_loop_filter_strength_msa[32] = {
    0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7,
    7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11, 11, 12, 12, 12
};
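
/*
 * Horizontal loop filter: filters one vertical block edge. For each of the
 * 8 rows, the four pixels A, B, C, D straddling the edge (B and C adjacent
 * to it) are updated as in the generic H.263 loop filter, roughly:
 *
 *     d  = (A - D + 4 * (C - B)) / 8
 *     d1 = d limited by the strength ramp (0 once |d| >= 2 * strength)
 *     d2 = clip((A - D) / 4, -|d1| / 2, |d1| / 2)
 *     B += d1;  C -= d1;  A -= d2;  D += d2
 *
 * The rows are loaded and transposed so that each of the four columns
 * becomes one vector, filtered, then written back column by column.
 */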
static void h263_h_loop_filter_msa(uint8_t *src, int32_t stride, int32_t qscale)
{
    int32_t strength = h263_loop_filter_strength_msa[qscale];
    v16u8 in0, in1, in2, in3, in4, in5, in6, in7;
    v8i16 temp0, temp1, temp2;
    v8i16 diff0, diff2, diff4, diff6, diff8;
    v8i16 d0, a_d0, str_x2, str;
    src -= 2;
    LD_UB8(src, stride, in0, in1, in2, in3, in4, in5, in6, in7);
    TRANSPOSE8x4_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7,
                       in0, in3, in2, in1);
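
    /* d = (A - D + 4 * (C - B)) / 8, computed per pixel along the edge. */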
    temp0 = (v8i16) __msa_ilvr_b((v16i8) in0, (v16i8) in1);
    a_d0 = __msa_hsub_u_h((v16u8) temp0, (v16u8) temp0);
    temp2 = (v8i16) __msa_ilvr_b((v16i8) in2, (v16i8) in3);
    temp2 = __msa_hsub_u_h((v16u8) temp2, (v16u8) temp2);
    temp2 <<= 2;
    diff0 = a_d0 + temp2;
    diff2 = -(-diff0 >> 3);
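
    /* d1: equal to d while |d| < strength, ramping down to 0 once |d|
     * reaches 2 * strength (negative and positive cases handled separately,
     * then merged on the sign of d). */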
    str_x2 = __msa_fill_h(-(strength << 1));
    temp0 = (str_x2 <= diff2);
    diff2 = (v8i16) __msa_bmz_v((v16u8) diff2, (v16u8) temp0, (v16u8) temp0);
    temp2 = str_x2 - diff2;
    str = __msa_fill_h(-strength);
    temp0 = (diff2 < str);
    diff2 = (v8i16) __msa_bmnz_v((v16u8) diff2, (v16u8) temp2, (v16u8) temp0);
    diff4 = diff0 >> 3;
    str_x2 = __msa_fill_h(strength << 1);
    temp0 = (diff4 <= str_x2);
    diff4 = (v8i16) __msa_bmz_v((v16u8) diff4, (v16u8) temp0, (v16u8) temp0);
    temp2 = str_x2 - diff4;
    str = __msa_fill_h(strength);
    temp0 = (str < diff4);
    diff4 = (v8i16) __msa_bmnz_v((v16u8) diff4, (v16u8) temp2, (v16u8) temp0);
    temp0 = __msa_clti_s_h(diff0, 0);
    d0 = (v8i16) __msa_bmnz_v((v16u8) diff4, (v16u8) diff2, (v16u8) temp0);
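
    /* diff8 = |d1| / 2, then d2 = clip((A - D) / 4, -|d1| / 2, |d1| / 2). */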
    diff2 = -diff2 >> 1;
    diff4 >>= 1;
    diff8 = (v8i16) __msa_bmnz_v((v16u8) diff4, (v16u8) diff2, (v16u8) temp0);
    diff6 = (-a_d0) >> 2;
    diff6 = -(diff6);
    temp2 = -diff8;
    temp0 = (diff6 < temp2);
    diff6 = (v8i16) __msa_bmnz_v((v16u8) diff6, (v16u8) temp2, (v16u8) temp0);
    diff2 = a_d0 >> 2;
    temp0 = (diff2 <= diff8);
    diff2 = (v8i16) __msa_bmz_v((v16u8) diff2, (v16u8) diff8, (v16u8) temp0);
    temp0 = __msa_clti_s_h(a_d0, 0);
    diff6 = (v8i16) __msa_bmz_v((v16u8) diff6, (v16u8) diff2, (v16u8) temp0);
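
    /* Pack d1 and d2 to bytes and apply the corrections: B += d1 and
     * C -= d1 with saturation, A -= d2 and D += d2. */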
    PCKEV_B2_SH(a_d0, diff6, a_d0, d0, diff6, d0);
    in0 = (v16u8) ((v16i8) in0 - (v16i8) diff6);
    in1 = (v16u8) ((v16i8) in1 + (v16i8) diff6);
    in3 = __msa_xori_b(in3, 128);
    in3 = (v16u8) __msa_adds_s_b((v16i8) in3, (v16i8) d0);
    in3 = __msa_xori_b(in3, 128);
    in2 = __msa_subsus_u_b(in2, (v16i8) d0);
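
    /* Interleave the filtered columns back into row order and store a
     * 4-byte wide strip over each of the original 8 rows. */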
    ILVR_B2_SH(in3, in0, in1, in2, temp0, temp1);
    in0 = (v16u8) __msa_ilvr_h(temp1, temp0);
    in3 = (v16u8) __msa_ilvl_h(temp1, temp0);
    ST4x4_UB(in0, in0, 0, 1, 2, 3, src, stride);
    src += 4 * stride;
    ST4x4_UB(in3, in3, 0, 1, 2, 3, src, stride);
    src += 4 * stride;
}
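
/*
 * Vertical loop filter: the same computation as above, applied across one
 * horizontal block edge to the four rows A, B, C, D around it (B and C
 * adjacent to the edge). The rows are already contiguous, so no transpose
 * is needed; only the low 8 bytes of each row are filtered and stored.
 */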
static void h263_v_loop_filter_msa(uint8_t *src, int32_t stride, int32_t qscale)
{
    int32_t strength = h263_loop_filter_strength_msa[qscale];
    uint64_t res0, res1, res2, res3;
    v16u8 in0, in1, in2, in3;
    v8i16 temp0, temp2, diff0, diff2, diff4, diff6, diff8;
    v8i16 d0, a_d0, str_x2, str;
    src -= 2 * stride;
    LD_UB4(src, stride, in0, in3, in2, in1);
    temp0 = (v8i16) __msa_ilvr_b((v16i8) in0, (v16i8) in1);
    a_d0 = __msa_hsub_u_h((v16u8) temp0, (v16u8) temp0);
    temp2 = (v8i16) __msa_ilvr_b((v16i8) in2, (v16i8) in3);
    temp2 = __msa_hsub_u_h((v16u8) temp2, (v16u8) temp2);
    temp2 <<= 2;
    diff0 = a_d0 + temp2;
    diff2 = -(-diff0 >> 3);
    str_x2 = __msa_fill_h(-(strength << 1));
    temp0 = (str_x2 <= diff2);
    diff2 = (v8i16) __msa_bmz_v((v16u8) diff2, (v16u8) temp0, (v16u8) temp0);
    temp2 = str_x2 - diff2;
    str = __msa_fill_h(-strength);
    temp0 = (diff2 < str);
    diff2 = (v8i16) __msa_bmnz_v((v16u8) diff2, (v16u8) temp2, (v16u8) temp0);
    diff4 = diff0 >> 3;
    str_x2 = __msa_fill_h(strength << 1);
    temp0 = (diff4 <= str_x2);
    diff4 = (v8i16) __msa_bmz_v((v16u8) diff4, (v16u8) temp0, (v16u8) temp0);
    temp2 = str_x2 - diff4;
    str = __msa_fill_h(strength);
    temp0 = (str < diff4);
    diff4 = (v8i16) __msa_bmnz_v((v16u8) diff4, (v16u8) temp2, (v16u8) temp0);
    temp0 = __msa_clti_s_h(diff0, 0);
    d0 = (v8i16) __msa_bmnz_v((v16u8) diff4, (v16u8) diff2, (v16u8) temp0);
    diff2 = -diff2 >> 1;
    diff4 >>= 1;
    diff8 = (v8i16) __msa_bmnz_v((v16u8) diff4, (v16u8) diff2, (v16u8) temp0);
    diff6 = (-a_d0) >> 2;
    diff6 = -(diff6);
    temp2 = -diff8;
    temp0 = (diff6 < temp2);
    diff6 = (v8i16) __msa_bmnz_v((v16u8) diff6, (v16u8) temp2, (v16u8) temp0);
    diff2 = a_d0 >> 2;
    temp0 = (diff2 <= diff8);
    diff2 = (v8i16) __msa_bmz_v((v16u8) diff2, (v16u8) diff8, (v16u8) temp0);
    temp0 = __msa_clti_s_h(a_d0, 0);
    diff6 = (v8i16) __msa_bmz_v((v16u8) diff6, (v16u8) diff2, (v16u8) temp0);
    PCKEV_B2_SH(a_d0, diff6, a_d0, d0, diff6, d0);
    in0 = (v16u8) ((v16i8) in0 - (v16i8) diff6);
    in1 = (v16u8) ((v16i8) in1 + (v16i8) diff6);
    in3 = __msa_xori_b(in3, 128);
    in3 = (v16u8) __msa_adds_s_b((v16i8) in3, (v16i8) d0);
    in3 = __msa_xori_b(in3, 128);
    in2 = __msa_subsus_u_b(in2, (v16i8) d0);
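
    /* Store the low 8 filtered bytes of each of the four rows. */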
    res0 = __msa_copy_u_d((v2i64) in0, 0);
    res1 = __msa_copy_u_d((v2i64) in3, 0);
    res2 = __msa_copy_u_d((v2i64) in2, 0);
    res3 = __msa_copy_u_d((v2i64) in1, 0);
    SD4(res0, res1, res2, res3, src, stride);
}
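
/* Exported wrappers around the static filter implementations above. */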
void ff_h263_h_loop_filter_msa(uint8_t *src, int32_t stride, int32_t q_scale)
{
    h263_h_loop_filter_msa(src, stride, q_scale);
}

void ff_h263_v_loop_filter_msa(uint8_t *src, int32_t stride, int32_t q_scale)
{
    h263_v_loop_filter_msa(src, stride, q_scale);
}