/*
 * Copyright (c) 2022 Loongson Technology Corporation Limited
 * Contributed by Lu Wang <wanglu@loongson.cn>
 *                Hao Chen <chenhao@loongson.cn>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/loongarch/loongson_intrinsics.h"
#include "hevcdsp_lsx.h"

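/* Shuffle-control masks for __lsx_vshuf_b: each mask selects the sliding
 * byte pairs that one tap pair of the 8-tap horizontal filter consumes.
 * The first row serves the 8-pixel-wide cases (one source row per vector);
 * the second row serves the 4-pixel-wide cases, where two rows share one
 * lookup and indices 16..20 select bytes from the second row's register. */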
static const uint8_t ff_hevc_mask_arr[16 * 2] __attribute__((aligned(0x40))) = {
    /* 8 width cases */
    0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
    /* 4 width cases */
    0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20
};

/* hevc_copy: dst = src << 6 (widen 8-bit pixels to HEVC's 14-bit
 * intermediate sample precision) */
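/* 4-pixel-wide copy: eight rows per iteration are paired with vilvl_w,
 * zero-extended and shifted via vsllwil_hu_bu, and written out as 8-byte
 * element stores; a scalar tail loop covers the height % 8 rows. */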
static void hevc_copy_4w_lsx(uint8_t *src, int32_t src_stride,
                             int16_t *dst, int32_t dst_stride,
                             int32_t height)
{
    int32_t src_stride_2x = (src_stride << 1);
    int32_t dst_stride_2x = (dst_stride << 1);
    int32_t src_stride_4x = (src_stride << 2);
    int32_t dst_stride_4x = (dst_stride << 2);
    int32_t src_stride_3x = src_stride_2x + src_stride;
    int32_t dst_stride_3x = dst_stride_2x + dst_stride;
    int32_t loop_cnt = height >> 3;
    int32_t res = height & 0x07;

    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i in0, in1, in2, in3;
    for (; loop_cnt--;) {
        src0 = __lsx_vld(src, 0);
        DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x,
                  src1, src2);
        src3 = __lsx_vldx(src, src_stride_3x);
        src += src_stride_4x;
        src4 = __lsx_vld(src, 0);
        DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x,
                  src5, src6);
        src7 = __lsx_vldx(src, src_stride_3x);
        src += src_stride_4x;

        DUP4_ARG2(__lsx_vilvl_w, src1, src0, src3, src2, src5, src4, src7, src6,
                  src0, src1, src2, src3);
        DUP4_ARG2(__lsx_vsllwil_hu_bu, src0, 6, src1, 6, src2, 6, src3, 6,
                  in0, in1, in2, in3);

        __lsx_vstelm_d(in0, dst, 0, 0);
        __lsx_vstelm_d(in0, dst + dst_stride, 0, 1);
        __lsx_vstelm_d(in1, dst + dst_stride_2x, 0, 0);
        __lsx_vstelm_d(in1, dst + dst_stride_3x, 0, 1);
        dst += dst_stride_4x;
        __lsx_vstelm_d(in2, dst, 0, 0);
        __lsx_vstelm_d(in2, dst + dst_stride, 0, 1);
        __lsx_vstelm_d(in3, dst + dst_stride_2x, 0, 0);
        __lsx_vstelm_d(in3, dst + dst_stride_3x, 0, 1);
        dst += dst_stride_4x;
    }
    for (; res--;) {
        src0 = __lsx_vld(src, 0);
        in0 = __lsx_vsllwil_hu_bu(src0, 6);
        __lsx_vstelm_d(in0, dst, 0, 0);
        src += src_stride;
        dst += dst_stride;
    }
}

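/* 6-pixel-wide copy: each 16-bit output row is written as one 8-byte
 * store plus one 4-byte store (vstelm_d followed by vstelm_w). */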
static void hevc_copy_6w_lsx(uint8_t *src, int32_t src_stride,
                             int16_t *dst, int32_t dst_stride,
                             int32_t height)
{
    int32_t loop_cnt = (height >> 3);
    int32_t res = height & 0x07;
    int32_t src_stride_2x = (src_stride << 1);
    int32_t src_stride_4x = (src_stride << 2);
    int32_t src_stride_3x = src_stride_2x + src_stride;

    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i in0, in1, in2, in3, in4, in5, in6, in7;

    for (; loop_cnt--;) {
        src0 = __lsx_vld(src, 0);
        DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src1, src2);
        src3 = __lsx_vldx(src, src_stride_3x);
        src += src_stride_4x;
        src4 = __lsx_vld(src, 0);
        DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src5, src6);
        src7 = __lsx_vldx(src, src_stride_3x);
        src += src_stride_4x;

        DUP4_ARG2(__lsx_vsllwil_hu_bu, src0, 6, src1, 6, src2, 6, src3, 6,
                  in0, in1, in2, in3);
        DUP4_ARG2(__lsx_vsllwil_hu_bu, src4, 6, src5, 6, src6, 6, src7, 6,
                  in4, in5, in6, in7);

        __lsx_vstelm_d(in0, dst, 0, 0);
        __lsx_vstelm_w(in0, dst, 8, 2);
        dst += dst_stride;
        __lsx_vstelm_d(in1, dst, 0, 0);
        __lsx_vstelm_w(in1, dst, 8, 2);
        dst += dst_stride;
        __lsx_vstelm_d(in2, dst, 0, 0);
        __lsx_vstelm_w(in2, dst, 8, 2);
        dst += dst_stride;
        __lsx_vstelm_d(in3, dst, 0, 0);
        __lsx_vstelm_w(in3, dst, 8, 2);
        dst += dst_stride;
        __lsx_vstelm_d(in4, dst, 0, 0);
        __lsx_vstelm_w(in4, dst, 8, 2);
        dst += dst_stride;
        __lsx_vstelm_d(in5, dst, 0, 0);
        __lsx_vstelm_w(in5, dst, 8, 2);
        dst += dst_stride;
        __lsx_vstelm_d(in6, dst, 0, 0);
        __lsx_vstelm_w(in6, dst, 8, 2);
        dst += dst_stride;
        __lsx_vstelm_d(in7, dst, 0, 0);
        __lsx_vstelm_w(in7, dst, 8, 2);
        dst += dst_stride;
    }
    for (; res--;) {
        src0 = __lsx_vld(src, 0);
        in0 = __lsx_vsllwil_hu_bu(src0, 6);
        src += src_stride;
        __lsx_vstelm_d(in0, dst, 0, 0);
        __lsx_vstelm_w(in0, dst, 8, 2);
        dst += dst_stride;
    }
}

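/* 8-pixel-wide copy: four full rows per vstx group; dst_stride_x is one
 * output row expressed in bytes, since vstx takes byte offsets. */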
static void hevc_copy_8w_lsx(uint8_t *src, int32_t src_stride,
                             int16_t *dst, int32_t dst_stride,
                             int32_t height)
{
    int32_t src_stride_2x = (src_stride << 1);
    int32_t dst_stride_x = (dst_stride << 1);
    int32_t src_stride_4x = (src_stride << 2);
    int32_t dst_stride_2x = (dst_stride_x << 1);
    int32_t src_stride_3x = src_stride_2x + src_stride;
    int32_t dst_stride_3x = dst_stride_2x + dst_stride_x;
    int32_t loop_cnt = height >> 3;
    int32_t res = height & 0x07;
    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i in0, in1, in2, in3, in4, in5, in6, in7;

    for (; loop_cnt--;) {
        src0 = __lsx_vld(src, 0);
        DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x,
                  src1, src2);
        src3 = __lsx_vldx(src, src_stride_3x);
        src += src_stride_4x;
        src4 = __lsx_vld(src, 0);
        DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x,
                  src5, src6);
        src7 = __lsx_vldx(src, src_stride_3x);
        src += src_stride_4x;

        DUP4_ARG2(__lsx_vsllwil_hu_bu, src0, 6, src1, 6, src2, 6, src3, 6,
                  in0, in1, in2, in3);
        DUP4_ARG2(__lsx_vsllwil_hu_bu, src4, 6, src5, 6, src6, 6, src7, 6,
                  in4, in5, in6, in7);
        __lsx_vst(in0, dst, 0);
        __lsx_vstx(in1, dst, dst_stride_x);
        __lsx_vstx(in2, dst, dst_stride_2x);
        __lsx_vstx(in3, dst, dst_stride_3x);
        dst += dst_stride_2x;
        __lsx_vst(in4, dst, 0);
        __lsx_vstx(in5, dst, dst_stride_x);
        __lsx_vstx(in6, dst, dst_stride_2x);
        __lsx_vstx(in7, dst, dst_stride_3x);
        dst += dst_stride_2x;
    }
    for (; res--;) {
        src0 = __lsx_vld(src, 0);
        in0 = __lsx_vsllwil_hu_bu(src0, 6);
        __lsx_vst(in0, dst, 0);
        src += src_stride;
        dst += dst_stride;
    }
}

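/* 12-pixel-wide copy: columns 0..7 go out as full 16-byte stores,
 * columns 8..11 as 8-byte vstelm_d stores built from the high word
 * halves of the source rows. */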
static void hevc_copy_12w_lsx(uint8_t *src, int32_t src_stride,
                              int16_t *dst, int32_t dst_stride,
                              int32_t height)
{
    uint32_t loop_cnt;
    uint32_t res = height & 0x07;
    int32_t src_stride_2x = (src_stride << 1);
    int32_t dst_stride_x = (dst_stride << 1);
    int32_t src_stride_4x = (src_stride << 2);
    int32_t dst_stride_2x = (dst_stride_x << 1);
    int32_t src_stride_3x = src_stride_2x + src_stride;
    int32_t dst_stride_3x = dst_stride_2x + dst_stride_x;
    __m128i zero = __lsx_vldi(0);
    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i in0, in1, in0_r, in1_r, in2_r, in3_r;

    for (loop_cnt = (height >> 3); loop_cnt--;) {
        src0 = __lsx_vld(src, 0);
        DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src1, src2);
        src3 = __lsx_vldx(src, src_stride_3x);
        src += src_stride_4x;
        src4 = __lsx_vld(src, 0);
        DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src5, src6);
        src7 = __lsx_vldx(src, src_stride_3x);
        src += src_stride_4x;

        DUP4_ARG2(__lsx_vsllwil_hu_bu, src0, 6, src1, 6, src2, 6, src3, 6,
                  in0_r, in1_r, in2_r, in3_r);
        DUP2_ARG2(__lsx_vilvh_w, src1, src0, src3, src2, src0, src1);
        DUP2_ARG2(__lsx_vsllwil_hu_bu, src0, 6, src1, 6, in0, in1);
        __lsx_vst(in0_r, dst, 0);
        __lsx_vstx(in1_r, dst, dst_stride_x);
        __lsx_vstx(in2_r, dst, dst_stride_2x);
        __lsx_vstx(in3_r, dst, dst_stride_3x);
        __lsx_vstelm_d(in0, dst, 16, 0);
        dst += dst_stride;
        __lsx_vstelm_d(in0, dst, 16, 1);
        dst += dst_stride;
        __lsx_vstelm_d(in1, dst, 16, 0);
        dst += dst_stride;
        __lsx_vstelm_d(in1, dst, 16, 1);
        dst += dst_stride;

        DUP4_ARG2(__lsx_vsllwil_hu_bu, src4, 6, src5, 6, src6, 6, src7, 6,
                  in0_r, in1_r, in2_r, in3_r);
        DUP2_ARG2(__lsx_vilvh_w, src5, src4, src7, src6, src0, src1);
        DUP2_ARG2(__lsx_vsllwil_hu_bu, src0, 6, src1, 6, in0, in1);
        __lsx_vst(in0_r, dst, 0);
        __lsx_vstx(in1_r, dst, dst_stride_x);
        __lsx_vstx(in2_r, dst, dst_stride_2x);
        __lsx_vstx(in3_r, dst, dst_stride_3x);
        __lsx_vstelm_d(in0, dst, 16, 0);
        dst += dst_stride;
        __lsx_vstelm_d(in0, dst, 16, 1);
        dst += dst_stride;
        __lsx_vstelm_d(in1, dst, 16, 0);
        dst += dst_stride;
        __lsx_vstelm_d(in1, dst, 16, 1);
        dst += dst_stride;
    }
    for (; res--;) {
        src0 = __lsx_vld(src, 0);
        in0 = __lsx_vsllwil_hu_bu(src0, 6);
        src1 = __lsx_vilvh_b(zero, src0);
        in1 = __lsx_vslli_h(src1, 6);
        __lsx_vst(in0, dst, 0);
        __lsx_vstelm_d(in1, dst, 16, 0);
        src += src_stride;
        dst += dst_stride;
    }
}

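/* 16-pixel-wide copy: the low eight pixels widen directly with
 * vsllwil_hu_bu; the high eight are unpacked against zero with vilvh_b
 * and shifted with vslli_h.  dst1 tracks the high-half output column. */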
static void hevc_copy_16w_lsx(uint8_t *src, int32_t src_stride,
                              int16_t *dst, int32_t dst_stride,
                              int32_t height)
{
    __m128i zero = __lsx_vldi(0);
    int32_t src_stride_2x = (src_stride << 1);
    int32_t dst_stride_x = (dst_stride << 1);
    int32_t src_stride_4x = (src_stride << 2);
    int32_t dst_stride_2x = (dst_stride << 2);
    int32_t src_stride_3x = src_stride_2x + src_stride;
    int32_t dst_stride_3x = dst_stride_2x + dst_stride_x;
    int32_t loop_cnt = height >> 3;
    int32_t res = height & 0x07;
    int16_t *dst1 = dst + 8;
    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i in0_r, in1_r, in2_r, in3_r, in0_l, in1_l, in2_l, in3_l;

    for (; loop_cnt--;) {
        src0 = __lsx_vld(src, 0);
        DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x,
                  src1, src2);
        src3 = __lsx_vldx(src, src_stride_3x);
        src += src_stride_4x;
        src4 = __lsx_vld(src, 0);
        DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x,
                  src5, src6);
        src7 = __lsx_vldx(src, src_stride_3x);
        src += src_stride_4x;
        DUP4_ARG2(__lsx_vilvh_b, zero, src0, zero, src1, zero, src2, zero, src3,
                  in0_l, in1_l, in2_l, in3_l);
        DUP4_ARG2(__lsx_vsllwil_hu_bu, src0, 6, src1, 6, src2, 6, src3, 6,
                  in0_r, in1_r, in2_r, in3_r);
        DUP4_ARG2(__lsx_vslli_h, in0_l, 6, in1_l, 6, in2_l, 6, in3_l, 6, in0_l,
                  in1_l, in2_l, in3_l);
        __lsx_vst(in0_r, dst, 0);
        __lsx_vstx(in1_r, dst, dst_stride_x);
        __lsx_vstx(in2_r, dst, dst_stride_2x);
        __lsx_vstx(in3_r, dst, dst_stride_3x);
        __lsx_vst(in0_l, dst1, 0);
        __lsx_vstx(in1_l, dst1, dst_stride_x);
        __lsx_vstx(in2_l, dst1, dst_stride_2x);
        __lsx_vstx(in3_l, dst1, dst_stride_3x);
        dst += dst_stride_2x;
        dst1 += dst_stride_2x;

        DUP4_ARG2(__lsx_vilvh_b, zero, src4, zero, src5, zero, src6, zero, src7,
                  in0_l, in1_l, in2_l, in3_l);
        DUP4_ARG2(__lsx_vsllwil_hu_bu, src4, 6, src5, 6, src6, 6, src7, 6,
                  in0_r, in1_r, in2_r, in3_r);
        DUP4_ARG2(__lsx_vslli_h, in0_l, 6, in1_l, 6, in2_l, 6, in3_l, 6, in0_l,
                  in1_l, in2_l, in3_l);
        __lsx_vst(in0_r, dst, 0);
        __lsx_vstx(in1_r, dst, dst_stride_x);
        __lsx_vstx(in2_r, dst, dst_stride_2x);
        __lsx_vstx(in3_r, dst, dst_stride_3x);
        __lsx_vst(in0_l, dst1, 0);
        __lsx_vstx(in1_l, dst1, dst_stride_x);
        __lsx_vstx(in2_l, dst1, dst_stride_2x);
        __lsx_vstx(in3_l, dst1, dst_stride_3x);
        dst += dst_stride_2x;
        dst1 += dst_stride_2x;
    }
    if (res) {
        src0 = __lsx_vld(src, 0);
        DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src1, src2);
        src3 = __lsx_vldx(src, src_stride_3x);

        DUP4_ARG2(__lsx_vilvh_b, zero, src0, zero, src1, zero, src2, zero, src3,
                  in0_l, in1_l, in2_l, in3_l);
        DUP4_ARG2(__lsx_vsllwil_hu_bu, src0, 6, src1, 6, src2, 6, src3, 6,
                  in0_r, in1_r, in2_r, in3_r);
        DUP4_ARG2(__lsx_vslli_h, in0_l, 6, in1_l, 6, in2_l, 6, in3_l, 6, in0_l,
                  in1_l, in2_l, in3_l);
        __lsx_vst(in0_r, dst, 0);
        __lsx_vstx(in1_r, dst, dst_stride_x);
        __lsx_vstx(in2_r, dst, dst_stride_2x);
        __lsx_vstx(in3_r, dst, dst_stride_3x);
        dst += 8;
        __lsx_vst(in0_l, dst, 0);
        __lsx_vstx(in1_l, dst, dst_stride_x);
        __lsx_vstx(in2_l, dst, dst_stride_2x);
        __lsx_vstx(in3_l, dst, dst_stride_3x);
    }
}

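/* 24-pixel-wide copy: a 16-column block read from src plus an 8-column
 * tail read from _src = src + 16, four rows per iteration. */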
static void hevc_copy_24w_lsx(uint8_t *src, int32_t src_stride,
                              int16_t *dst, int32_t dst_stride,
                              int32_t height)
{
    uint32_t loop_cnt;
    int32_t src_stride_2x = (src_stride << 1);
    int32_t dst_stride_x = (dst_stride << 1);
    int32_t src_stride_4x = (src_stride << 2);
    int32_t dst_stride_2x = (dst_stride << 2);
    int32_t src_stride_3x = src_stride_2x + src_stride;
    int32_t dst_stride_3x = dst_stride_2x + dst_stride_x;
    uint8_t *_src = src + 16;
    int16_t *dst1 = dst;
    __m128i zero = __lsx_vldi(0);
    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i in0_r, in1_r, in2_r, in3_r, in0_l, in1_l, in2_l, in3_l;

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        src0 = __lsx_vld(src, 0);
        DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src1, src2);
        src3 = __lsx_vldx(src, src_stride_3x);
        src += src_stride_4x;
        src4 = __lsx_vld(_src, 0);
        DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride_2x,
                  src5, src6);
        src7 = __lsx_vldx(_src, src_stride_3x);
        _src += src_stride_4x;

        DUP4_ARG2(__lsx_vilvh_b, zero, src0, zero, src1, zero, src2, zero,
                  src3, in0_l, in1_l, in2_l, in3_l);
        DUP4_ARG2(__lsx_vsllwil_hu_bu, src0, 6, src1, 6, src2, 6, src3, 6,
                  in0_r, in1_r, in2_r, in3_r);
        DUP4_ARG2(__lsx_vslli_h, in0_l, 6, in1_l, 6, in2_l, 6, in3_l, 6,
                  in0_l, in1_l, in2_l, in3_l);
        __lsx_vst(in0_r, dst, 0);
        __lsx_vstx(in1_r, dst, dst_stride_x);
        __lsx_vstx(in2_r, dst, dst_stride_2x);
        __lsx_vstx(in3_r, dst, dst_stride_3x);
        dst1 = dst + 8;
        __lsx_vst(in0_l, dst1, 0);
        __lsx_vstx(in1_l, dst1, dst_stride_x);
        __lsx_vstx(in2_l, dst1, dst_stride_2x);
        __lsx_vstx(in3_l, dst1, dst_stride_3x);
        DUP4_ARG2(__lsx_vsllwil_hu_bu, src4, 6, src5, 6, src6, 6, src7, 6,
                  in0_r, in1_r, in2_r, in3_r);
        dst1 = dst1 + 8;
        __lsx_vst(in0_r, dst1, 0);
        __lsx_vstx(in1_r, dst1, dst_stride_x);
        __lsx_vstx(in2_r, dst1, dst_stride_2x);
        __lsx_vstx(in3_r, dst1, dst_stride_3x);
        dst += dst_stride_2x;
    }
}

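/* 32-pixel-wide copy: even-numbered source vectors come from src, odd
 * ones from _src = src + 16; two rows are written per store group. */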
static void hevc_copy_32w_lsx(uint8_t *src, int32_t src_stride,
                              int16_t *dst, int32_t dst_stride,
                              int32_t height)
{
    uint32_t loop_cnt;
    int32_t src_stride_2x = (src_stride << 1);
    int32_t src_stride_4x = (src_stride << 2);
    int32_t src_stride_3x = src_stride_2x + src_stride;
    uint8_t *_src = src + 16;
    __m128i zero = {0};
    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i in0_r, in1_r, in2_r, in3_r, in0_l, in1_l, in2_l, in3_l;

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        src0 = __lsx_vld(src, 0);
        DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src2, src4);
        src6 = __lsx_vldx(src, src_stride_3x);
        src += src_stride_4x;
        src1 = __lsx_vld(_src, 0);
        DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride_2x,
                  src3, src5);
        src7 = __lsx_vldx(_src, src_stride_3x);
        _src += src_stride_4x;

        DUP4_ARG2(__lsx_vilvh_b, zero, src0, zero, src1, zero, src2, zero,
                  src3, in0_l, in1_l, in2_l, in3_l);
        DUP4_ARG2(__lsx_vsllwil_hu_bu, src0, 6, src1, 6, src2, 6, src3, 6,
                  in0_r, in1_r, in2_r, in3_r);
        DUP4_ARG2(__lsx_vslli_h, in0_l, 6, in1_l, 6, in2_l, 6, in3_l, 6,
                  in0_l, in1_l, in2_l, in3_l);
        __lsx_vst(in0_r, dst, 0);
        __lsx_vst(in0_l, dst, 16);
        __lsx_vst(in1_r, dst, 32);
        __lsx_vst(in1_l, dst, 48);
        dst += dst_stride;
        __lsx_vst(in2_r, dst, 0);
        __lsx_vst(in2_l, dst, 16);
        __lsx_vst(in3_r, dst, 32);
        __lsx_vst(in3_l, dst, 48);
        dst += dst_stride;

        DUP4_ARG2(__lsx_vilvh_b, zero, src4, zero, src5, zero, src6, zero, src7,
                  in0_l, in1_l, in2_l, in3_l);
        DUP4_ARG2(__lsx_vsllwil_hu_bu, src4, 6, src5, 6, src6, 6, src7, 6,
                  in0_r, in1_r, in2_r, in3_r);
        DUP4_ARG2(__lsx_vslli_h, in0_l, 6, in1_l, 6, in2_l, 6, in3_l, 6, in0_l,
                  in1_l, in2_l, in3_l);
        __lsx_vst(in0_r, dst, 0);
        __lsx_vst(in0_l, dst, 16);
        __lsx_vst(in1_r, dst, 32);
        __lsx_vst(in1_l, dst, 48);
        dst += dst_stride;
        __lsx_vst(in2_r, dst, 0);
        __lsx_vst(in2_l, dst, 16);
        __lsx_vst(in3_r, dst, 32);
        __lsx_vst(in3_l, dst, 48);
        dst += dst_stride;
    }
}

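/* 48-pixel-wide copy: three 16-byte loads per row expand into six
 * result vectors, written as six 16-byte stores per output row. */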
static void hevc_copy_48w_lsx(uint8_t *src, int32_t src_stride,
                              int16_t *dst, int32_t dst_stride,
                              int32_t height)
{
    uint32_t loop_cnt;
    __m128i zero = {0};
    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i src8, src9, src10, src11;
    __m128i in0_r, in1_r, in2_r, in3_r, in4_r, in5_r;
    __m128i in0_l, in1_l, in2_l, in3_l, in4_l, in5_l;

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src1);
        src2 = __lsx_vld(src, 32);
        src += src_stride;
        DUP2_ARG2(__lsx_vld, src, 0, src, 16, src3, src4);
        src5 = __lsx_vld(src, 32);
        src += src_stride;
        DUP2_ARG2(__lsx_vld, src, 0, src, 16, src6, src7);
        src8 = __lsx_vld(src, 32);
        src += src_stride;
        DUP2_ARG2(__lsx_vld, src, 0, src, 16, src9, src10);
        src11 = __lsx_vld(src, 32);
        src += src_stride;

        DUP4_ARG2(__lsx_vilvh_b, zero, src0, zero, src1, zero, src2, zero,
                  src3, in0_l, in1_l, in2_l, in3_l);
        DUP2_ARG2(__lsx_vilvh_b, zero, src4, zero, src5, in4_l, in5_l);
        DUP4_ARG2(__lsx_vsllwil_hu_bu, src0, 6, src1, 6, src2, 6, src3, 6,
                  in0_r, in1_r, in2_r, in3_r);
        DUP4_ARG2(__lsx_vslli_h, in0_l, 6, in1_l, 6, in2_l, 6, in3_l, 6, in0_l,
                  in1_l, in2_l, in3_l);
        DUP2_ARG2(__lsx_vsllwil_hu_bu, src4, 6, src5, 6, in4_r, in5_r);
        DUP2_ARG2(__lsx_vslli_h, in4_l, 6, in5_l, 6, in4_l, in5_l);
        __lsx_vst(in0_r, dst, 0);
        __lsx_vst(in0_l, dst, 16);
        __lsx_vst(in1_r, dst, 32);
        __lsx_vst(in1_l, dst, 48);
        __lsx_vst(in2_r, dst, 64);
        __lsx_vst(in2_l, dst, 80);
        dst += dst_stride;
        __lsx_vst(in3_r, dst, 0);
        __lsx_vst(in3_l, dst, 16);
        __lsx_vst(in4_r, dst, 32);
        __lsx_vst(in4_l, dst, 48);
        __lsx_vst(in5_r, dst, 64);
        __lsx_vst(in5_l, dst, 80);
        dst += dst_stride;

        DUP4_ARG2(__lsx_vilvh_b, zero, src6, zero, src7, zero, src8, zero, src9,
                  in0_l, in1_l, in2_l, in3_l);
        DUP2_ARG2(__lsx_vilvh_b, zero, src10, zero, src11, in4_l, in5_l);
        DUP4_ARG2(__lsx_vsllwil_hu_bu, src6, 6, src7, 6, src8, 6, src9, 6,
                  in0_r, in1_r, in2_r, in3_r);
        DUP4_ARG2(__lsx_vslli_h, in0_l, 6, in1_l, 6, in2_l, 6, in3_l, 6, in0_l,
                  in1_l, in2_l, in3_l);
        DUP2_ARG2(__lsx_vsllwil_hu_bu, src10, 6, src11, 6, in4_r, in5_r);
        DUP2_ARG2(__lsx_vslli_h, in4_l, 6, in5_l, 6, in4_l, in5_l);
        __lsx_vst(in0_r, dst, 0);
        __lsx_vst(in0_l, dst, 16);
        __lsx_vst(in1_r, dst, 32);
        __lsx_vst(in1_l, dst, 48);
        __lsx_vst(in2_r, dst, 64);
        __lsx_vst(in2_l, dst, 80);
        dst += dst_stride;
        __lsx_vst(in3_r, dst, 0);
        __lsx_vst(in3_l, dst, 16);
        __lsx_vst(in4_r, dst, 32);
        __lsx_vst(in4_l, dst, 48);
        __lsx_vst(in5_r, dst, 64);
        __lsx_vst(in5_l, dst, 80);
        dst += dst_stride;
    }
}

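/* 64-pixel-wide copy: four 16-byte loads per row, two rows per
 * iteration, eight 16-byte stores per output row. */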
static void hevc_copy_64w_lsx(uint8_t *src, int32_t src_stride,
                              int16_t *dst, int32_t dst_stride,
                              int32_t height)
{
    uint32_t loop_cnt;
    __m128i zero = {0};
    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i in0_r, in1_r, in2_r, in3_r, in0_l, in1_l, in2_l, in3_l;

    for (loop_cnt = (height >> 1); loop_cnt--;) {
        DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
                  src0, src1, src2, src3);
        src += src_stride;
        DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
                  src4, src5, src6, src7);
        src += src_stride;

        DUP4_ARG2(__lsx_vilvh_b, zero, src0, zero, src1, zero, src2, zero,
                  src3, in0_l, in1_l, in2_l, in3_l);
        DUP4_ARG2(__lsx_vsllwil_hu_bu, src0, 6, src1, 6, src2, 6, src3, 6,
                  in0_r, in1_r, in2_r, in3_r);
        DUP4_ARG2(__lsx_vslli_h, in0_l, 6, in1_l, 6, in2_l, 6, in3_l, 6,
                  in0_l, in1_l, in2_l, in3_l);
        __lsx_vst(in0_r, dst, 0);
        __lsx_vst(in0_l, dst, 16);
        __lsx_vst(in1_r, dst, 32);
        __lsx_vst(in1_l, dst, 48);
        __lsx_vst(in2_r, dst, 64);
        __lsx_vst(in2_l, dst, 80);
        __lsx_vst(in3_r, dst, 96);
        __lsx_vst(in3_l, dst, 112);
        dst += dst_stride;

        DUP4_ARG2(__lsx_vilvh_b, zero, src4, zero, src5, zero, src6, zero,
                  src7, in0_l, in1_l, in2_l, in3_l);
        DUP4_ARG2(__lsx_vsllwil_hu_bu, src4, 6, src5, 6, src6, 6, src7, 6,
                  in0_r, in1_r, in2_r, in3_r);
        DUP4_ARG2(__lsx_vslli_h, in0_l, 6, in1_l, 6, in2_l, 6, in3_l, 6, in0_l,
                  in1_l, in2_l, in3_l);
        __lsx_vst(in0_r, dst, 0);
        __lsx_vst(in0_l, dst, 16);
        __lsx_vst(in1_r, dst, 32);
        __lsx_vst(in1_l, dst, 48);
        __lsx_vst(in2_r, dst, 64);
        __lsx_vst(in2_l, dst, 80);
        __lsx_vst(in3_r, dst, 96);
        __lsx_vst(in3_l, dst, 112);
        dst += dst_stride;
    }
}

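/* hevc_hz_8t_*: 8-tap horizontal filter, 8-bit src to unrounded 16-bit
 * intermediates.  Sliding tap-pair windows are gathered with vshuf_b and
 * accumulated with vdp2/vdp2add dot products.
 *
 * 4-pixel-wide case: two input rows share one vector, selected by the
 * second half of ff_hevc_mask_arr (offset 16); the tail loop emits two
 * rows per pass. */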
static void hevc_hz_8t_4w_lsx(uint8_t *src, int32_t src_stride,
                              int16_t *dst, int32_t dst_stride,
                              const int8_t *filter, int32_t height)
{
    uint32_t loop_cnt = height >> 3;
    uint32_t res = (height & 0x7) >> 1;
    int32_t src_stride_2x = (src_stride << 1);
    int32_t dst_stride_2x = (dst_stride << 1);
    int32_t src_stride_4x = (src_stride << 2);
    int32_t dst_stride_4x = (dst_stride << 2);
    int32_t src_stride_3x = src_stride_2x + src_stride;
    int32_t dst_stride_3x = dst_stride_2x + dst_stride;
    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i filt0, filt1, filt2, filt3;
    __m128i mask1, mask2, mask3;
    __m128i vec0, vec1, vec2, vec3;
    __m128i dst0, dst1, dst2, dst3;
    __m128i mask0 = __lsx_vld(ff_hevc_mask_arr, 16);

    src -= 3;
    DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
              filt0, filt1, filt2, filt3);

    DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
    mask3 = __lsx_vaddi_bu(mask0, 6);

    for (; loop_cnt--;) {
        src0 = __lsx_vld(src, 0);
        DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src1, src2);
        src3 = __lsx_vldx(src, src_stride_3x);
        src += src_stride_4x;
        src4 = __lsx_vld(src, 0);
        DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src5, src6);
        src7 = __lsx_vldx(src, src_stride_3x);
        src += src_stride_4x;
        DUP4_ARG3(__lsx_vshuf_b, src1, src0, mask0, src1, src0, mask1, src1,
                  src0, mask2, src1, src0, mask3, vec0, vec1, vec2, vec3);
        dst0 = __lsx_vdp2_h_bu_b(vec0, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec1, filt1, dst0, vec2, filt2,
                  dst0, dst0);
        dst0 = __lsx_vdp2add_h_bu_b(dst0, vec3, filt3);
        DUP4_ARG3(__lsx_vshuf_b, src3, src2, mask0, src3, src2, mask1, src3,
                  src2, mask2, src3, src2, mask3, vec0, vec1, vec2, vec3);
        dst1 = __lsx_vdp2_h_bu_b(vec0, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst1, vec1, filt1, dst1, vec2, filt2,
                  dst1, dst1);
        dst1 = __lsx_vdp2add_h_bu_b(dst1, vec3, filt3);
        DUP4_ARG3(__lsx_vshuf_b, src5, src4, mask0, src5, src4, mask1, src5,
                  src4, mask2, src5, src4, mask3, vec0, vec1, vec2, vec3);
        dst2 = __lsx_vdp2_h_bu_b(vec0, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst2, vec1, filt1, dst2, vec2, filt2,
                  dst2, dst2);
        dst2 = __lsx_vdp2add_h_bu_b(dst2, vec3, filt3);
        DUP4_ARG3(__lsx_vshuf_b, src7, src6, mask0, src7, src6, mask1, src7,
                  src6, mask2, src7, src6, mask3, vec0, vec1, vec2, vec3);
        dst3 = __lsx_vdp2_h_bu_b(vec0, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst3, vec1, filt1, dst3, vec2, filt2,
                  dst3, dst3);
        dst3 = __lsx_vdp2add_h_bu_b(dst3, vec3, filt3);

        __lsx_vstelm_d(dst0, dst, 0, 0);
        __lsx_vstelm_d(dst0, dst + dst_stride, 0, 1);
        __lsx_vstelm_d(dst1, dst + dst_stride_2x, 0, 0);
        __lsx_vstelm_d(dst1, dst + dst_stride_3x, 0, 1);
        dst += dst_stride_4x;
        __lsx_vstelm_d(dst2, dst, 0, 0);
        __lsx_vstelm_d(dst2, dst + dst_stride, 0, 1);
        __lsx_vstelm_d(dst3, dst + dst_stride_2x, 0, 0);
        __lsx_vstelm_d(dst3, dst + dst_stride_3x, 0, 1);
        dst += dst_stride_4x;
    }
    for (; res--;) {
        src0 = __lsx_vld(src, 0);
        src1 = __lsx_vldx(src, src_stride);
        DUP4_ARG3(__lsx_vshuf_b, src1, src0, mask0, src1, src0, mask1, src1,
                  src0, mask2, src1, src0, mask3, vec0, vec1, vec2, vec3);
        dst0 = __lsx_vdp2_h_bu_b(vec0, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec1, filt1, dst0, vec2, filt2,
                  dst0, dst0);
        dst0 = __lsx_vdp2add_h_bu_b(dst0, vec3, filt3);
        __lsx_vstelm_d(dst0, dst, 0, 0);
        __lsx_vstelm_d(dst0, dst + dst_stride, 0, 1);
        src += src_stride_2x;
        dst += dst_stride_2x;
    }
}

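/* 8-pixel-wide 8-tap horizontal filter: one row per vector, four rows
 * per iteration; mask0..mask3 select the four sliding tap-pair windows. */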
static void hevc_hz_8t_8w_lsx(uint8_t *src, int32_t src_stride,
                              int16_t *dst, int32_t dst_stride,
                              const int8_t *filter, int32_t height)
{
    uint32_t loop_cnt;
    int32_t src_stride_2x = (src_stride << 1);
    int32_t dst_stride_x = (dst_stride << 1);
    int32_t src_stride_4x = (src_stride << 2);
    int32_t dst_stride_2x = (dst_stride << 2);
    int32_t src_stride_3x = src_stride_2x + src_stride;
    int32_t dst_stride_3x = dst_stride_2x + dst_stride_x;
    __m128i src0, src1, src2, src3;
    __m128i filt0, filt1, filt2, filt3;
    __m128i mask1, mask2, mask3;
    __m128i vec0, vec1, vec2, vec3;
    __m128i dst0, dst1, dst2, dst3;
    __m128i mask0 = __lsx_vld(ff_hevc_mask_arr, 0);

    src -= 3;
    DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
              filt0, filt1, filt2, filt3);

    DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
    mask3 = __lsx_vaddi_bu(mask0, 6);

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        src0 = __lsx_vld(src, 0);
        DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src1, src2);
        src3 = __lsx_vldx(src, src_stride_3x);
        src += src_stride_4x;

        DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask0, src0, src0, mask1, src0,
                  src0, mask2, src0, src0, mask3, vec0, vec1, vec2, vec3);
        dst0 = __lsx_vdp2_h_bu_b(vec0, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec1, filt1, dst0, vec2, filt2,
                  dst0, dst0);
        dst0 = __lsx_vdp2add_h_bu_b(dst0, vec3, filt3);
        DUP4_ARG3(__lsx_vshuf_b, src1, src1, mask0, src1, src1, mask1, src1,
                  src1, mask2, src1, src1, mask3, vec0, vec1, vec2, vec3);
        dst1 = __lsx_vdp2_h_bu_b(vec0, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst1, vec1, filt1, dst1, vec2, filt2,
                  dst1, dst1);
        dst1 = __lsx_vdp2add_h_bu_b(dst1, vec3, filt3);
        DUP4_ARG3(__lsx_vshuf_b, src2, src2, mask0, src2, src2, mask1, src2,
                  src2, mask2, src2, src2, mask3, vec0, vec1, vec2, vec3);
        dst2 = __lsx_vdp2_h_bu_b(vec0, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst2, vec1, filt1, dst2, vec2, filt2,
                  dst2, dst2);
        dst2 = __lsx_vdp2add_h_bu_b(dst2, vec3, filt3);
        DUP4_ARG3(__lsx_vshuf_b, src3, src3, mask0, src3, src3, mask1, src3,
                  src3, mask2, src3, src3, mask3, vec0, vec1, vec2, vec3);
        dst3 = __lsx_vdp2_h_bu_b(vec0, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst3, vec1, filt1, dst3, vec2, filt2,
                  dst3, dst3);
        dst3 = __lsx_vdp2add_h_bu_b(dst3, vec3, filt3);

        __lsx_vst(dst0, dst, 0);
        __lsx_vstx(dst1, dst, dst_stride_x);
        __lsx_vstx(dst2, dst, dst_stride_2x);
        __lsx_vstx(dst3, dst, dst_stride_3x);
        dst += dst_stride_2x;
    }
}

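/* 12-pixel-wide 8-tap horizontal filter: columns 0..7 use mask0..mask3
 * on per-row vectors, columns 8..11 use mask4..mask7 on row pairs loaded
 * from _src = src + 8. */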
static void hevc_hz_8t_12w_lsx(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter, int32_t height)
{
    uint32_t loop_cnt;
    int32_t src_stride_2x = (src_stride << 1);
    int32_t src_stride_4x = (src_stride << 2);
    int32_t src_stride_3x = src_stride_2x + src_stride;
    uint8_t *_src;
    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i mask0, mask1, mask2, mask3, mask4, mask5, mask6, mask7;
    __m128i vec0, vec1, vec2, vec3, vec4, vec5;
    __m128i filt0, filt1, filt2, filt3, dst0, dst1, dst2, dst3, dst4, dst5;

    src -= 3;
    _src = src + 8;
    DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
              filt0, filt1, filt2, filt3);

    mask0 = __lsx_vld(ff_hevc_mask_arr, 0);
    DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
    mask3 = __lsx_vaddi_bu(mask0, 6);
    mask4 = __lsx_vld(ff_hevc_mask_arr, 16);
    DUP2_ARG2(__lsx_vaddi_bu, mask4, 2, mask4, 4, mask5, mask6);
    mask7 = __lsx_vaddi_bu(mask4, 6);

    for (loop_cnt = 4; loop_cnt--;) {
        src0 = __lsx_vld(src, 0);
        DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src1, src2);
        src3 = __lsx_vldx(src, src_stride_3x);
        src4 = __lsx_vld(_src, 0);
        DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride_2x,
                  src5, src6);
        src7 = __lsx_vldx(_src, src_stride_3x);
        src += src_stride_4x;
        _src += src_stride_4x;

        DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask0, src1, src1, mask0,
                  vec0, vec1);
        DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask0, src3, src3, mask0,
                  vec2, vec3);
        DUP2_ARG3(__lsx_vshuf_b, src5, src4, mask4, src7, src6, mask4,
                  vec4, vec5);
        DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec1, filt0, vec2, filt0,
                  vec3, filt0, dst0, dst1, dst2, dst3);
        DUP2_ARG2(__lsx_vdp2_h_bu_b, vec4, filt0, vec5, filt0, dst4, dst5);
        DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask1, src1, src1, mask1,
                  vec0, vec1);
        DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask1, src3, src3, mask1,
                  vec2, vec3);
        DUP2_ARG3(__lsx_vshuf_b, src5, src4, mask5, src7, src6, mask5,
                  vec4, vec5);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec0, filt1, dst1, vec1, filt1,
                  dst2, vec2, filt1, dst3, vec3, filt1, dst0, dst1, dst2, dst3);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst4, vec4, filt1, dst5, vec5, filt1,
                  dst4, dst5);
        DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask2, src1, src1, mask2,
                  vec0, vec1);
        DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask2, src3, src3, mask2,
                  vec2, vec3);
        DUP2_ARG3(__lsx_vshuf_b, src5, src4, mask6, src7, src6, mask6,
                  vec4, vec5);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec0, filt2, dst1, vec1, filt2,
                  dst2, vec2, filt2, dst3, vec3, filt2, dst0, dst1, dst2, dst3);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst4, vec4, filt2, dst5, vec5, filt2,
                  dst4, dst5);
        DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask3, src1, src1, mask3,
                  vec0, vec1);
        DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask3, src3, src3, mask3,
                  vec2, vec3);
        DUP2_ARG3(__lsx_vshuf_b, src5, src4, mask7, src7, src6, mask7,
                  vec4, vec5);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec0, filt3, dst1, vec1, filt3,
                  dst2, vec2, filt3, dst3, vec3, filt3, dst0, dst1, dst2, dst3);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst4, vec4, filt3, dst5, vec5, filt3,
                  dst4, dst5);

        __lsx_vst(dst0, dst, 0);
        __lsx_vstelm_d(dst4, dst, 16, 0);
        dst += dst_stride;
        __lsx_vst(dst1, dst, 0);
        __lsx_vstelm_d(dst4, dst, 16, 1);
        dst += dst_stride;
        __lsx_vst(dst2, dst, 0);
        __lsx_vstelm_d(dst5, dst, 16, 0);
        dst += dst_stride;
        __lsx_vst(dst3, dst, 0);
        __lsx_vstelm_d(dst5, dst, 16, 1);
        dst += dst_stride;
    }
}

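/* 16-pixel-wide 8-tap horizontal filter: two overlapping 16-byte loads
 * per row (offsets 0 and 8) so every output pixel sees all eight taps. */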
static void hevc_hz_8t_16w_lsx(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter, int32_t height)
{
    uint32_t loop_cnt;
    __m128i src0, src1, src2, src3;
    __m128i filt0, filt1, filt2, filt3;
    __m128i mask1, mask2, mask3;
    __m128i vec0, vec1, vec2, vec3;
    __m128i dst0, dst1, dst2, dst3;
    __m128i mask0;

    src -= 3;
    mask0 = __lsx_vld(ff_hevc_mask_arr, 0);
    DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
              filt0, filt1, filt2, filt3);

    DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
    mask3 = __lsx_vaddi_bu(mask0, 6);

    for (loop_cnt = (height >> 1); loop_cnt--;) {
        DUP2_ARG2(__lsx_vld, src, 0, src, 8, src0, src1);
        src += src_stride;
        DUP2_ARG2(__lsx_vld, src, 0, src, 8, src2, src3);
        src += src_stride;

        DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask0, src1, src1, mask0,
                  vec0, vec1);
        DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask0, src3, src3, mask0,
                  vec2, vec3);
        DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec1, filt0, vec2, filt0,
                  vec3, filt0, dst0, dst1, dst2, dst3);
        DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask1, src1, src1, mask1,
                  vec0, vec1);
        DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask1, src3, src3, mask1,
                  vec2, vec3);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec0, filt1, dst1, vec1, filt1,
                  dst2, vec2, filt1, dst3, vec3, filt1, dst0, dst1, dst2, dst3);
        DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask2, src1, src1, mask2,
                  vec0, vec1);
        DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask2, src3, src3, mask2,
                  vec2, vec3);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec0, filt2, dst1, vec1, filt2,
                  dst2, vec2, filt2, dst3, vec3, filt2, dst0, dst1, dst2, dst3);
        DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask3, src1, src1, mask3,
                  vec0, vec1);
        DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask3, src3, src3, mask3,
                  vec2, vec3);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec0, filt3, dst1, vec1, filt3,
                  dst2, vec2, filt3, dst3, vec3, filt3, dst0, dst1, dst2, dst3);

        __lsx_vst(dst0, dst, 0);
        __lsx_vst(dst1, dst, 16);
        dst += dst_stride;
        __lsx_vst(dst2, dst, 0);
        __lsx_vst(dst3, dst, 16);
        dst += dst_stride;
    }
}

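/* 24-pixel-wide 8-tap horizontal filter: mask4..mask7 (mask0 + 8..14)
 * straddle the boundary between the two 16-byte source vectors of a
 * row, so no extra overlapping load is needed. */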
static void hevc_hz_8t_24w_lsx(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter, int32_t height)
{
    uint32_t loop_cnt;
    __m128i src0, src1, src2, src3;
    __m128i filt0, filt1, filt2, filt3;
    __m128i mask1, mask2, mask3, mask4, mask5, mask6, mask7;
    __m128i vec0, vec1, vec2, vec3, vec4, vec5;
    __m128i dst0, dst1, dst2, dst3, dst4, dst5;
    __m128i mask0 = __lsx_vld(ff_hevc_mask_arr, 0);

    src -= 3;
    DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
              filt0, filt1, filt2, filt3);

    DUP4_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask0, 6, mask0, 8, mask1,
              mask2, mask3, mask4);
    DUP2_ARG2(__lsx_vaddi_bu, mask0, 10, mask0, 12, mask5, mask6);
    mask7 = __lsx_vaddi_bu(mask0, 14);

    for (loop_cnt = (height >> 1); loop_cnt--;) {
        DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src1);
        src += src_stride;
        DUP2_ARG2(__lsx_vld, src, 0, src, 16, src2, src3);
        src += src_stride;

        DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask0, src1, src0, mask4, src1,
                  src1, mask0, src2, src2, mask0, vec0, vec1, vec2, vec3);
        DUP2_ARG3(__lsx_vshuf_b, src3, src2, mask4, src3, src3, mask0,
                  vec4, vec5);
        DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec1, filt0, vec2, filt0,
                  vec3, filt0, dst0, dst1, dst2, dst3);
        DUP2_ARG2(__lsx_vdp2_h_bu_b, vec4, filt0, vec5, filt0, dst4, dst5);
        DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask1, src1, src0, mask5, src1,
                  src1, mask1, src2, src2, mask1, vec0, vec1, vec2, vec3);
        DUP2_ARG3(__lsx_vshuf_b, src3, src2, mask5, src3, src3, mask1,
                  vec4, vec5);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec0, filt1, dst1, vec1, filt1,
                  dst2, vec2, filt1, dst3, vec3, filt1, dst0, dst1, dst2, dst3);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst4, vec4, filt1, dst5, vec5, filt1,
                  dst4, dst5);
        DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask2, src1, src0, mask6, src1,
                  src1, mask2, src2, src2, mask2, vec0, vec1, vec2, vec3);
        DUP2_ARG3(__lsx_vshuf_b, src3, src2, mask6, src3, src3, mask2,
                  vec4, vec5);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec0, filt2, dst1, vec1, filt2,
                  dst2, vec2, filt2, dst3, vec3, filt2, dst0, dst1, dst2, dst3);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst4, vec4, filt2, dst5, vec5, filt2,
                  dst4, dst5);
        DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask3, src1, src0, mask7, src1,
                  src1, mask3, src2, src2, mask3, vec0, vec1, vec2, vec3);
        DUP2_ARG3(__lsx_vshuf_b, src3, src2, mask7, src3, src3, mask3,
                  vec4, vec5);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec0, filt3, dst1, vec1, filt3,
                  dst2, vec2, filt3, dst3, vec3, filt3, dst0, dst1, dst2, dst3);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst4, vec4, filt3, dst5, vec5, filt3,
                  dst4, dst5);

        __lsx_vst(dst0, dst, 0);
        __lsx_vst(dst1, dst, 16);
        __lsx_vst(dst2, dst, 32);
        dst += dst_stride;
        __lsx_vst(dst3, dst, 0);
        __lsx_vst(dst4, dst, 16);
        __lsx_vst(dst5, dst, 32);
        dst += dst_stride;
    }
}

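/* 32-pixel-wide 8-tap horizontal filter: one row per iteration built
 * from loads at offsets 0, 16 and 24. */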
static void hevc_hz_8t_32w_lsx(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter, int32_t height)
{
    uint32_t loop_cnt;
    __m128i src0, src1, src2;
    __m128i filt0, filt1, filt2, filt3;
    __m128i mask1, mask2, mask3, mask4, mask5, mask6, mask7;
    __m128i vec0, vec1, vec2, vec3;
    __m128i dst0, dst1, dst2, dst3;
    __m128i mask0 = __lsx_vld(ff_hevc_mask_arr, 0);

    src -= 3;
    DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
              filt0, filt1, filt2, filt3);

    DUP4_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask0, 6, mask0, 8,
              mask1, mask2, mask3, mask4);
    DUP2_ARG2(__lsx_vaddi_bu, mask0, 10, mask0, 12, mask5, mask6);
    mask7 = __lsx_vaddi_bu(mask0, 14);

    for (loop_cnt = height; loop_cnt--;) {
        DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src1);
        src2 = __lsx_vld(src, 24);
        src += src_stride;

        DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask0, src0, src0, mask1, src0,
                  src0, mask2, src0, src0, mask3, vec0, vec1, vec2, vec3);
        dst0 = __lsx_vdp2_h_bu_b(vec0, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec1, filt1, dst0, vec2, filt2,
                  dst0, dst0);
        dst0 = __lsx_vdp2add_h_bu_b(dst0, vec3, filt3);

        DUP4_ARG3(__lsx_vshuf_b, src1, src0, mask4, src1, src0, mask5, src1,
                  src0, mask6, src1, src0, mask7, vec0, vec1, vec2, vec3);
        dst1 = __lsx_vdp2_h_bu_b(vec0, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst1, vec1, filt1, dst1, vec2, filt2,
                  dst1, dst1);
        dst1 = __lsx_vdp2add_h_bu_b(dst1, vec3, filt3);
        DUP4_ARG3(__lsx_vshuf_b, src1, src1, mask0, src1, src1, mask1, src1,
                  src1, mask2, src1, src1, mask3, vec0, vec1, vec2, vec3);
        dst2 = __lsx_vdp2_h_bu_b(vec0, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst2, vec1, filt1, dst2, vec2, filt2,
                  dst2, dst2);
        dst2 = __lsx_vdp2add_h_bu_b(dst2, vec3, filt3);
        DUP4_ARG3(__lsx_vshuf_b, src2, src2, mask0, src2, src2, mask1, src2,
                  src2, mask2, src2, src2, mask3, vec0, vec1, vec2, vec3);
        dst3 = __lsx_vdp2_h_bu_b(vec0, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst3, vec1, filt1, dst3, vec2, filt2,
                  dst3, dst3);
        dst3 = __lsx_vdp2add_h_bu_b(dst3, vec3, filt3);

        __lsx_vst(dst0, dst, 0);
        __lsx_vst(dst1, dst, 16);
        __lsx_vst(dst2, dst, 32);
        __lsx_vst(dst3, dst, 48);
        dst += dst_stride;
    }
}

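/* 48-pixel-wide 8-tap horizontal filter: loads at offsets 0, 16, 32 and
 * 40 yield six result vectors per row. */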
static void hevc_hz_8t_48w_lsx(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter, int32_t height)
{
    uint32_t loop_cnt;
    __m128i src0, src1, src2, src3;
    __m128i filt0, filt1, filt2, filt3;
    __m128i mask1, mask2, mask3, mask4, mask5, mask6, mask7;
    __m128i vec0, vec1, vec2, vec3, vec4, vec5;
    __m128i dst0, dst1, dst2, dst3, dst4, dst5;
    __m128i mask0 = __lsx_vld(ff_hevc_mask_arr, 0);

    src -= 3;
    DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
              filt0, filt1, filt2, filt3);

    DUP4_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask0, 6, mask0, 8, mask1,
              mask2, mask3, mask4);
    DUP2_ARG2(__lsx_vaddi_bu, mask0, 10, mask0, 12, mask5, mask6);
    mask7 = __lsx_vaddi_bu(mask0, 14);

    for (loop_cnt = height; loop_cnt--;) {
        DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src1);
        src2 = __lsx_vld(src, 32);
        src3 = __lsx_vld(src, 40);
        src += src_stride;

        DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask0, src1, src0, mask4, src1,
                  src1, mask0, src2, src1, mask4, vec0, vec1, vec2, vec3);
        DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec1, filt0, vec2, filt0,
                  vec3, filt0, dst0, dst1, dst2, dst3);
        DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask1, src1, src0, mask5, src1,
                  src1, mask1, src2, src1, mask5, vec0, vec1, vec2, vec3);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec0, filt1, dst1, vec1, filt1,
                  dst2, vec2, filt1, dst3, vec3, filt1, dst0, dst1, dst2, dst3);
        DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask2, src1, src0, mask6, src1,
                  src1, mask2, src2, src1, mask6, vec0, vec1, vec2, vec3);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec0, filt2, dst1, vec1, filt2,
                  dst2, vec2, filt2, dst3, vec3, filt2, dst0, dst1, dst2, dst3);
        DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask3, src1, src0, mask7, src1,
                  src1, mask3, src2, src1, mask7, vec0, vec1, vec2, vec3);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec0, filt3, dst1, vec1, filt3,
                  dst2, vec2, filt3, dst3, vec3, filt3, dst0, dst1, dst2, dst3);
        __lsx_vst(dst0, dst, 0);
        __lsx_vst(dst1, dst, 16);
        __lsx_vst(dst2, dst, 32);
        __lsx_vst(dst3, dst, 48);

        DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask0, src3, src3, mask0,
                  vec4, vec5);
        DUP2_ARG2(__lsx_vdp2_h_bu_b, vec4, filt0, vec5, filt0, dst4, dst5);
        DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask1, src3, src3, mask1,
                  vec4, vec5);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst4, vec4, filt1, dst5, vec5, filt1,
                  dst4, dst5);
        DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask2, src3, src3, mask2,
                  vec4, vec5);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst4, vec4, filt2, dst5, vec5, filt2,
                  dst4, dst5);
        DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask3, src3, src3, mask3,
                  vec4, vec5);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst4, vec4, filt3, dst5, vec5, filt3,
                  dst4, dst5);
        __lsx_vst(dst4, dst, 64);
        __lsx_vst(dst5, dst, 80);
        dst += dst_stride;
    }
}

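/* 64-pixel-wide 8-tap horizontal filter: loads at offsets 0, 16, 32, 48
 * and 56 yield eight result vectors per row. */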
static void hevc_hz_8t_64w_lsx(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter, int32_t height)
{
    uint32_t loop_cnt;
    __m128i src0, src1, src2, src3, src4;
    __m128i filt0, filt1, filt2, filt3;
    __m128i mask1, mask2, mask3, mask4, mask5, mask6, mask7;
    __m128i vec0, vec1, vec2, vec3;
    __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    __m128i mask0 = __lsx_vld(ff_hevc_mask_arr, 0);

    src -= 3;
    DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
              filt0, filt1, filt2, filt3);

    DUP4_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask0, 6, mask0, 8, mask1,
              mask2, mask3, mask4);
    DUP2_ARG2(__lsx_vaddi_bu, mask0, 10, mask0, 12, mask5, mask6);
    mask7 = __lsx_vaddi_bu(mask0, 14);

    for (loop_cnt = height; loop_cnt--;) {
        DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
                  src0, src1, src2, src3);
        src4 = __lsx_vld(src, 56);
        src += src_stride;

        DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask0, src0, src0, mask1, src0,
                  src0, mask2, src0, src0, mask3, vec0, vec1, vec2, vec3);
        dst0 = __lsx_vdp2_h_bu_b(vec0, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec1, filt1, dst0, vec2, filt2,
                  dst0, dst0);
        dst0 = __lsx_vdp2add_h_bu_b(dst0, vec3, filt3);
        __lsx_vst(dst0, dst, 0);

        DUP4_ARG3(__lsx_vshuf_b, src1, src0, mask4, src1, src0, mask5, src1,
                  src0, mask6, src1, src0, mask7, vec0, vec1, vec2, vec3);
        dst1 = __lsx_vdp2_h_bu_b(vec0, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst1, vec1, filt1, dst1, vec2, filt2,
                  dst1, dst1);
        dst1 = __lsx_vdp2add_h_bu_b(dst1, vec3, filt3);
        __lsx_vst(dst1, dst, 16);

        DUP4_ARG3(__lsx_vshuf_b, src1, src1, mask0, src1, src1, mask1, src1,
                  src1, mask2, src1, src1, mask3, vec0, vec1, vec2, vec3);
        dst2 = __lsx_vdp2_h_bu_b(vec0, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst2, vec1, filt1, dst2, vec2, filt2,
                  dst2, dst2);
        dst2 = __lsx_vdp2add_h_bu_b(dst2, vec3, filt3);
        __lsx_vst(dst2, dst, 32);

        DUP4_ARG3(__lsx_vshuf_b, src2, src1, mask4, src2, src1, mask5, src2,
                  src1, mask6, src2, src1, mask7, vec0, vec1, vec2, vec3);
        dst3 = __lsx_vdp2_h_bu_b(vec0, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst3, vec1, filt1, dst3, vec2, filt2,
                  dst3, dst3);
        dst3 = __lsx_vdp2add_h_bu_b(dst3, vec3, filt3);
        __lsx_vst(dst3, dst, 48);

        DUP4_ARG3(__lsx_vshuf_b, src2, src2, mask0, src2, src2, mask1, src2,
                  src2, mask2, src2, src2, mask3, vec0, vec1, vec2, vec3);
        dst4 = __lsx_vdp2_h_bu_b(vec0, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst4, vec1, filt1, dst4, vec2, filt2,
                  dst4, dst4);
        dst4 = __lsx_vdp2add_h_bu_b(dst4, vec3, filt3);
        __lsx_vst(dst4, dst, 64);

        DUP4_ARG3(__lsx_vshuf_b, src3, src2, mask4, src3, src2, mask5, src3,
                  src2, mask6, src3, src2, mask7, vec0, vec1, vec2, vec3);
        dst5 = __lsx_vdp2_h_bu_b(vec0, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst5, vec1, filt1, dst5, vec2, filt2,
                  dst5, dst5);
        dst5 = __lsx_vdp2add_h_bu_b(dst5, vec3, filt3);
        __lsx_vst(dst5, dst, 80);

        DUP4_ARG3(__lsx_vshuf_b, src3, src3, mask0, src3, src3, mask1, src3,
                  src3, mask2, src3, src3, mask3, vec0, vec1, vec2, vec3);
        dst6 = __lsx_vdp2_h_bu_b(vec0, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst6, vec1, filt1, dst6, vec2, filt2,
                  dst6, dst6);
        dst6 = __lsx_vdp2add_h_bu_b(dst6, vec3, filt3);
        __lsx_vst(dst6, dst, 96);

        DUP4_ARG3(__lsx_vshuf_b, src4, src4, mask0, src4, src4, mask1, src4,
                  src4, mask2, src4, src4, mask3, vec0, vec1, vec2, vec3);
        dst7 = __lsx_vdp2_h_bu_b(vec0, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst7, vec1, filt1, dst7, vec2, filt2,
                  dst7, dst7);
        dst7 = __lsx_vdp2add_h_bu_b(dst7, vec3, filt3);
        __lsx_vst(dst7, dst, 112);
        dst += dst_stride;
    }
}

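/* hevc_vt_8t_*: 8-tap vertical filter.  Adjacent rows are interleaved
 * with vilvl_b/vilvh_b so each dot-product chain accumulates all eight
 * taps across four 2-row interleaves; the row window then slides between
 * iterations instead of being reloaded.
 *
 * 4-pixel-wide case: two 2-row interleaves are packed per vector with
 * vilvl_d, so each chain produces two output rows. */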
static void hevc_vt_8t_4w_lsx(uint8_t *src, int32_t src_stride,
                              int16_t *dst, int32_t dst_stride,
                              const int8_t *filter, int32_t height)
{
    int32_t loop_cnt;
    int32_t res = (height & 0x07) >> 1;
    int32_t src_stride_2x = (src_stride << 1);
    int32_t src_stride_4x = (src_stride << 2);
    int32_t src_stride_3x = src_stride_2x + src_stride;
    __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8;
    __m128i src9, src10, src11, src12, src13, src14;
    __m128i src10_r, src32_r, src54_r, src76_r, src98_r;
    __m128i src21_r, src43_r, src65_r, src87_r, src109_r;
    __m128i src1110_r, src1211_r, src1312_r, src1413_r;
    __m128i src2110, src4332, src6554, src8776, src10998;
    __m128i src12111110, src14131312;
    __m128i dst10, dst32, dst54, dst76;
    __m128i filt0, filt1, filt2, filt3;

    src -= src_stride_3x;

    DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
              filt0, filt1, filt2, filt3);

    src0 = __lsx_vld(src, 0);
    DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src1, src2);
    src3 = __lsx_vldx(src, src_stride_3x);
    src += src_stride_4x;
    src4 = __lsx_vld(src, 0);
    DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src5, src6);
    src += src_stride_3x;
    DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2, src1,
              src10_r, src32_r, src54_r, src21_r);
    DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, src43_r, src65_r);
    DUP2_ARG2(__lsx_vilvl_d, src21_r, src10_r, src43_r, src32_r,
              src2110, src4332);
    src6554 = __lsx_vilvl_d(src65_r, src54_r);

    for (loop_cnt = (height >> 3); loop_cnt--;) {
        src7 = __lsx_vld(src, 0);
        DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src8, src9);
        src10 = __lsx_vldx(src, src_stride_3x);
        src += src_stride_4x;
        src11 = __lsx_vld(src, 0);
        DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x,
                  src12, src13);
        src14 = __lsx_vldx(src, src_stride_3x);
        src += src_stride_4x;

        DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8, src10, src9,
                  src76_r, src87_r, src98_r, src109_r);
        DUP4_ARG2(__lsx_vilvl_b, src11, src10, src12, src11, src13, src12, src14,
                  src13, src1110_r, src1211_r, src1312_r, src1413_r);
        DUP4_ARG2(__lsx_vilvl_d, src87_r, src76_r, src109_r, src98_r, src1211_r,
                  src1110_r, src1413_r, src1312_r, src8776, src10998,
                  src12111110, src14131312);

        dst10 = __lsx_vdp2_h_bu_b(src2110, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst10, src4332, filt1, dst10, src6554,
                  filt2, dst10, dst10);
        dst10 = __lsx_vdp2add_h_bu_b(dst10, src8776, filt3);
        dst32 = __lsx_vdp2_h_bu_b(src4332, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst32, src6554, filt1, dst32, src8776,
                  filt2, dst32, dst32);
        dst32 = __lsx_vdp2add_h_bu_b(dst32, src10998, filt3);
        dst54 = __lsx_vdp2_h_bu_b(src6554, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst54, src8776, filt1,
                  dst54, src10998, filt2, dst54, dst54);
        dst54 = __lsx_vdp2add_h_bu_b(dst54, src12111110, filt3);
        dst76 = __lsx_vdp2_h_bu_b(src8776, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst76, src10998, filt1, dst76,
                  src12111110, filt2, dst76, dst76);
        dst76 = __lsx_vdp2add_h_bu_b(dst76, src14131312, filt3);

        __lsx_vstelm_d(dst10, dst, 0, 0);
        dst += dst_stride;
        __lsx_vstelm_d(dst10, dst, 0, 1);
        dst += dst_stride;
        __lsx_vstelm_d(dst32, dst, 0, 0);
        dst += dst_stride;
        __lsx_vstelm_d(dst32, dst, 0, 1);
        dst += dst_stride;
        __lsx_vstelm_d(dst54, dst, 0, 0);
        dst += dst_stride;
        __lsx_vstelm_d(dst54, dst, 0, 1);
        dst += dst_stride;
        __lsx_vstelm_d(dst76, dst, 0, 0);
        dst += dst_stride;
        __lsx_vstelm_d(dst76, dst, 0, 1);
        dst += dst_stride;

        src2110 = src10998;
        src4332 = src12111110;
        src6554 = src14131312;
        src6 = src14;
    }
    for (; res--;) {
        src7 = __lsx_vld(src, 0);
        src8 = __lsx_vldx(src, src_stride);
        DUP2_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src76_r, src87_r);
        src += src_stride_2x;
        src8776 = __lsx_vilvl_d(src87_r, src76_r);

        dst10 = __lsx_vdp2_h_bu_b(src2110, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst10, src4332, filt1, dst10, src6554,
                  filt2, dst10, dst10);
        dst10 = __lsx_vdp2add_h_bu_b(dst10, src8776, filt3);

        __lsx_vstelm_d(dst10, dst, 0, 0);
        dst += dst_stride;
        __lsx_vstelm_d(dst10, dst, 0, 1);
        dst += dst_stride;

        src2110 = src4332;
        src4332 = src6554;
        src6554 = src8776;
        src6 = src8;
    }
}

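/* 8-pixel-wide 8-tap vertical filter: four output rows per iteration;
 * the six retained row-pair interleaves advance four rows at the loop
 * tail so only the new rows are loaded. */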
static void hevc_vt_8t_8w_lsx(uint8_t *src, int32_t src_stride,
                              int16_t *dst, int32_t dst_stride,
                              const int8_t *filter, int32_t height)
{
    int32_t loop_cnt;
    int32_t src_stride_2x = (src_stride << 1);
    int32_t dst_stride_x = (dst_stride << 1);
    int32_t src_stride_4x = (src_stride << 2);
    int32_t dst_stride_2x = (dst_stride << 2);
    int32_t src_stride_3x = src_stride_2x + src_stride;
    int32_t dst_stride_3x = dst_stride_2x + dst_stride_x;
    __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    __m128i src10_r, src32_r, src54_r, src76_r, src98_r;
    __m128i src21_r, src43_r, src65_r, src87_r, src109_r;
    __m128i dst0_r, dst1_r, dst2_r, dst3_r;
    __m128i filt0, filt1, filt2, filt3;

    src -= src_stride_3x;

    DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
              filt0, filt1, filt2, filt3);

    src0 = __lsx_vld(src, 0);
    DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src1, src2);
    src3 = __lsx_vldx(src, src_stride_3x);
    src += src_stride_4x;
    src4 = __lsx_vld(src, 0);
    DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src5, src6);
    src += src_stride_3x;
    DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2, src1,
              src10_r, src32_r, src54_r, src21_r);
    DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, src43_r, src65_r);

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        src7 = __lsx_vld(src, 0);
        DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src8, src9);
        src10 = __lsx_vldx(src, src_stride_3x);
        src += src_stride_4x;
        DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8, src10,
                  src9, src76_r, src87_r, src98_r, src109_r);

        dst0_r = __lsx_vdp2_h_bu_b(src10_r, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst0_r, src32_r, filt1, dst0_r,
                  src54_r, filt2, dst0_r, dst0_r);
        dst0_r = __lsx_vdp2add_h_bu_b(dst0_r, src76_r, filt3);
        dst1_r = __lsx_vdp2_h_bu_b(src21_r, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst1_r, src43_r, filt1, dst1_r,
                  src65_r, filt2, dst1_r, dst1_r);
        dst1_r = __lsx_vdp2add_h_bu_b(dst1_r, src87_r, filt3);
        dst2_r = __lsx_vdp2_h_bu_b(src32_r, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst2_r, src54_r, filt1, dst2_r,
                  src76_r, filt2, dst2_r, dst2_r);
        dst2_r = __lsx_vdp2add_h_bu_b(dst2_r, src98_r, filt3);
        dst3_r = __lsx_vdp2_h_bu_b(src43_r, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst3_r, src65_r, filt1, dst3_r,
                  src87_r, filt2, dst3_r, dst3_r);
        dst3_r = __lsx_vdp2add_h_bu_b(dst3_r, src109_r, filt3);

        __lsx_vst(dst0_r, dst, 0);
        __lsx_vstx(dst1_r, dst, dst_stride_x);
        __lsx_vstx(dst2_r, dst, dst_stride_2x);
        __lsx_vstx(dst3_r, dst, dst_stride_3x);
        dst += dst_stride_2x;

        src10_r = src54_r;
        src32_r = src76_r;
        src54_r = src98_r;
        src21_r = src65_r;
        src43_r = src87_r;
        src65_r = src109_r;
        src6 = src10;
    }
}

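/* 12-pixel-wide 8-tap vertical filter: columns 0..7 are handled as in
 * the 8-wide case; columns 8..11 pack two high-half interleaves per
 * vector with vilvl_d, as in the 4-wide case. */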
static void hevc_vt_8t_12w_lsx(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter, int32_t height)
{
    int32_t loop_cnt;
    int32_t src_stride_2x = (src_stride << 1);
    int32_t src_stride_4x = (src_stride << 2);
    int32_t src_stride_3x = src_stride_2x + src_stride;
    __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    __m128i src10_r, src32_r, src54_r, src76_r, src98_r;
    __m128i src21_r, src43_r, src65_r, src87_r, src109_r;
    __m128i dst0_r, dst1_r, dst2_r, dst3_r;
    __m128i src10_l, src32_l, src54_l, src76_l, src98_l;
    __m128i src21_l, src43_l, src65_l, src87_l, src109_l;
    __m128i src2110, src4332, src6554, src8776, src10998;
    __m128i dst0_l, dst1_l;
    __m128i filt0, filt1, filt2, filt3;

    src -= src_stride_3x;

    DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
              filt0, filt1, filt2, filt3);
    src0 = __lsx_vld(src, 0);
    DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src1, src2);
    src3 = __lsx_vldx(src, src_stride_3x);
    src += src_stride_4x;
    src4 = __lsx_vld(src, 0);
    DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src5, src6);
    src += src_stride_3x;
    DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2, src1,
              src10_r, src32_r, src54_r, src21_r);
    DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, src43_r, src65_r);
    DUP4_ARG2(__lsx_vilvh_b, src1, src0, src3, src2, src5, src4, src2, src1,
              src10_l, src32_l, src54_l, src21_l);
    DUP2_ARG2(__lsx_vilvh_b, src4, src3, src6, src5, src43_l, src65_l);
    DUP2_ARG2(__lsx_vilvl_d, src21_l, src10_l, src43_l, src32_l,
              src2110, src4332);
    src6554 = __lsx_vilvl_d(src65_l, src54_l);

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        src7 = __lsx_vld(src, 0);
        DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src8, src9);
        src10 = __lsx_vldx(src, src_stride_3x);
        src += src_stride_4x;
        DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8, src10,
                  src9, src76_r, src87_r, src98_r, src109_r);
        DUP4_ARG2(__lsx_vilvh_b, src7, src6, src8, src7, src9, src8, src10,
                  src9, src76_l, src87_l, src98_l, src109_l);
        DUP2_ARG2(__lsx_vilvl_d, src87_l, src76_l, src109_l, src98_l,
                  src8776, src10998);

        dst0_r = __lsx_vdp2_h_bu_b(src10_r, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst0_r, src32_r, filt1, dst0_r,
                  src54_r, filt2, dst0_r, dst0_r);
        dst0_r = __lsx_vdp2add_h_bu_b(dst0_r, src76_r, filt3);
        dst1_r = __lsx_vdp2_h_bu_b(src21_r, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst1_r, src43_r, filt1, dst1_r,
                  src65_r, filt2, dst1_r, dst1_r);
        dst1_r = __lsx_vdp2add_h_bu_b(dst1_r, src87_r, filt3);
        dst2_r = __lsx_vdp2_h_bu_b(src32_r, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst2_r, src54_r, filt1, dst2_r,
                  src76_r, filt2, dst2_r, dst2_r);
        dst2_r = __lsx_vdp2add_h_bu_b(dst2_r, src98_r, filt3);
        dst3_r = __lsx_vdp2_h_bu_b(src43_r, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst3_r, src65_r, filt1, dst3_r,
                  src87_r, filt2, dst3_r, dst3_r);
        dst3_r = __lsx_vdp2add_h_bu_b(dst3_r, src109_r, filt3);
        dst0_l = __lsx_vdp2_h_bu_b(src2110, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst0_l, src4332, filt1, dst0_l,
                  src6554, filt2, dst0_l, dst0_l);
        dst0_l = __lsx_vdp2add_h_bu_b(dst0_l, src8776, filt3);
        dst1_l = __lsx_vdp2_h_bu_b(src4332, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst1_l, src6554, filt1, dst1_l,
                  src8776, filt2, dst1_l, dst1_l);
        dst1_l = __lsx_vdp2add_h_bu_b(dst1_l, src10998, filt3);

        __lsx_vst(dst0_r, dst, 0);
        __lsx_vstelm_d(dst0_l, dst, 16, 0);
        dst += dst_stride;
        __lsx_vst(dst1_r, dst, 0);
        __lsx_vstelm_d(dst0_l, dst, 16, 1);
        dst += dst_stride;
        __lsx_vst(dst2_r, dst, 0);
        __lsx_vstelm_d(dst1_l, dst, 16, 0);
        dst += dst_stride;
        __lsx_vst(dst3_r, dst, 0);
        __lsx_vstelm_d(dst1_l, dst, 16, 1);
        dst += dst_stride;

        src10_r = src54_r;
        src32_r = src76_r;
        src54_r = src98_r;
        src21_r = src65_r;
        src43_r = src87_r;
        src65_r = src109_r;
        src2110 = src6554;
        src4332 = src8776;
        src6554 = src10998;
        src6 = src10;
    }
}
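/* Vertical 8-tap filter for any width that is a multiple of 16,
 * processing one 16-column strip four rows at a time.  As a scalar
 * reference (a sketch, assuming filter[] holds the 8 signed taps; the
 * sums are deliberately kept as unscaled 16-bit intermediates):
 *
 *     for (y = 0; y < height; y++)
 *         for (x = 0; x < width; x++)
 *             dst[y * dst_stride + x] =
 *                 filter[0] * src[(y - 3) * src_stride + x] + ... +
 *                 filter[7] * src[(y + 4) * src_stride + x];
 */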
static void hevc_vt_8t_16multx4mult_lsx(uint8_t *src,
                                        int32_t src_stride,
                                        int16_t *dst,
                                        int32_t dst_stride,
                                        const int8_t *filter,
                                        int32_t height,
                                        int32_t width)
{
    uint8_t *src_tmp;
    int16_t *dst_tmp;
    int32_t loop_cnt, cnt;
    int32_t src_stride_2x = (src_stride << 1);
    int32_t src_stride_4x = (src_stride << 2);
    int32_t src_stride_3x = src_stride_2x + src_stride;
    __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    __m128i src10_r, src32_r, src54_r, src76_r, src98_r;
    __m128i src21_r, src43_r, src65_r, src87_r, src109_r;
    __m128i dst0_r, dst1_r, dst2_r, dst3_r;
    __m128i src10_l, src32_l, src54_l, src76_l, src98_l;
    __m128i src21_l, src43_l, src65_l, src87_l, src109_l;
    __m128i dst0_l, dst1_l, dst2_l, dst3_l;
    __m128i filt0, filt1, filt2, filt3;

    src -= src_stride_3x;

    DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
              filt0, filt1, filt2, filt3);

    for (cnt = width >> 4; cnt--;) {
        src_tmp = src;
        dst_tmp = dst;

        src0 = __lsx_vld(src_tmp, 0);
        DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
                  src1, src2);
        src3 = __lsx_vldx(src_tmp, src_stride_3x);
        src_tmp += src_stride_4x;
        src4 = __lsx_vld(src_tmp, 0);
        DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
                  src5, src6);
        src_tmp += src_stride_3x;
        DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2, src1,
                  src10_r, src32_r, src54_r, src21_r);
        DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, src43_r, src65_r);
        DUP4_ARG2(__lsx_vilvh_b, src1, src0, src3, src2, src5, src4, src2, src1,
                  src10_l, src32_l, src54_l, src21_l);
        DUP2_ARG2(__lsx_vilvh_b, src4, src3, src6, src5, src43_l, src65_l);

        for (loop_cnt = (height >> 2); loop_cnt--;) {
            src7 = __lsx_vld(src_tmp, 0);
            DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
                      src8, src9);
            src10 = __lsx_vldx(src_tmp, src_stride_3x);
            src_tmp += src_stride_4x;
            DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8,
                      src10, src9, src76_r, src87_r, src98_r, src109_r);
            DUP4_ARG2(__lsx_vilvh_b, src7, src6, src8, src7, src9, src8,
                      src10, src9, src76_l, src87_l, src98_l, src109_l);

            dst0_r = __lsx_vdp2_h_bu_b(src10_r, filt0);
            DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst0_r, src32_r, filt1, dst0_r,
                      src54_r, filt2, dst0_r, dst0_r);
            dst0_r = __lsx_vdp2add_h_bu_b(dst0_r, src76_r, filt3);
            dst1_r = __lsx_vdp2_h_bu_b(src21_r, filt0);
            DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst1_r, src43_r, filt1, dst1_r,
                      src65_r, filt2, dst1_r, dst1_r);
            dst1_r = __lsx_vdp2add_h_bu_b(dst1_r, src87_r, filt3);
            dst2_r = __lsx_vdp2_h_bu_b(src32_r, filt0);
            DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst2_r, src54_r, filt1, dst2_r,
                      src76_r, filt2, dst2_r, dst2_r);
            dst2_r = __lsx_vdp2add_h_bu_b(dst2_r, src98_r, filt3);
            dst3_r = __lsx_vdp2_h_bu_b(src43_r, filt0);
            DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst3_r, src65_r, filt1, dst3_r,
                      src87_r, filt2, dst3_r, dst3_r);
            dst3_r = __lsx_vdp2add_h_bu_b(dst3_r, src109_r, filt3);
            dst0_l = __lsx_vdp2_h_bu_b(src10_l, filt0);
            DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst0_l, src32_l, filt1, dst0_l,
                      src54_l, filt2, dst0_l, dst0_l);
            dst0_l = __lsx_vdp2add_h_bu_b(dst0_l, src76_l, filt3);
            dst1_l = __lsx_vdp2_h_bu_b(src21_l, filt0);
            DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst1_l, src43_l, filt1, dst1_l,
                      src65_l, filt2, dst1_l, dst1_l);
            dst1_l = __lsx_vdp2add_h_bu_b(dst1_l, src87_l, filt3);
            dst2_l = __lsx_vdp2_h_bu_b(src32_l, filt0);
            DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst2_l, src54_l, filt1, dst2_l,
                      src76_l, filt2, dst2_l, dst2_l);
            dst2_l = __lsx_vdp2add_h_bu_b(dst2_l, src98_l, filt3);
            dst3_l = __lsx_vdp2_h_bu_b(src43_l, filt0);
            DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst3_l, src65_l, filt1, dst3_l,
                      src87_l, filt2, dst3_l, dst3_l);
            dst3_l = __lsx_vdp2add_h_bu_b(dst3_l, src109_l, filt3);

            __lsx_vst(dst0_r, dst_tmp, 0);
            __lsx_vst(dst0_l, dst_tmp, 16);
            dst_tmp += dst_stride;
            __lsx_vst(dst1_r, dst_tmp, 0);
            __lsx_vst(dst1_l, dst_tmp, 16);
            dst_tmp += dst_stride;
            __lsx_vst(dst2_r, dst_tmp, 0);
            __lsx_vst(dst2_l, dst_tmp, 16);
            dst_tmp += dst_stride;
            __lsx_vst(dst3_r, dst_tmp, 0);
            __lsx_vst(dst3_l, dst_tmp, 16);
            dst_tmp += dst_stride;

            src10_r = src54_r;
            src32_r = src76_r;
            src54_r = src98_r;
            src21_r = src65_r;
            src43_r = src87_r;
            src65_r = src109_r;
            src10_l = src54_l;
            src32_l = src76_l;
            src54_l = src98_l;
            src21_l = src65_l;
            src43_l = src87_l;
            src65_l = src109_l;
            src6 = src10;
        }
        src += 16;
        dst += 16;
    }
}
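/* Width-specific vertical 8-tap entry points; the 24-wide case is a
 * 16-wide strip plus an 8-wide strip. */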
static void hevc_vt_8t_16w_lsx(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter, int32_t height)
{
    hevc_vt_8t_16multx4mult_lsx(src, src_stride, dst, dst_stride,
                                filter, height, 16);
}

static void hevc_vt_8t_24w_lsx(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter, int32_t height)
{
    hevc_vt_8t_16multx4mult_lsx(src, src_stride, dst, dst_stride,
                                filter, height, 16);
    hevc_vt_8t_8w_lsx(src + 16, src_stride, dst + 16, dst_stride,
                      filter, height);
}

static void hevc_vt_8t_32w_lsx(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter, int32_t height)
{
    hevc_vt_8t_16multx4mult_lsx(src, src_stride, dst, dst_stride,
                                filter, height, 32);
}

static void hevc_vt_8t_48w_lsx(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter, int32_t height)
{
    hevc_vt_8t_16multx4mult_lsx(src, src_stride, dst, dst_stride,
                                filter, height, 48);
}

static void hevc_vt_8t_64w_lsx(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter, int32_t height)
{
    hevc_vt_8t_16multx4mult_lsx(src, src_stride, dst, dst_stride,
                                filter, height, 64);
}
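/* Horizontal-plus-vertical 8-tap filter, 4 columns wide.  The
 * horizontal pass packs two input rows per vector (the masks come from
 * the 4-width half of ff_hevc_mask_arr); the vertical pass runs at
 * 32-bit precision and is brought back to 16 bits with a 6-bit shift. */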
static void hevc_hv_8t_4w_lsx(uint8_t *src, int32_t src_stride,
                              int16_t *dst, int32_t dst_stride,
                              const int8_t *filter_x, const int8_t *filter_y,
                              int32_t height)
{
    uint32_t loop_cnt;
    int32_t src_stride_2x = (src_stride << 1);
    int32_t src_stride_4x = (src_stride << 2);
    int32_t src_stride_3x = src_stride_2x + src_stride;
    __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    __m128i filt0, filt1, filt2, filt3;
    __m128i filt_h0, filt_h1, filt_h2, filt_h3;
    __m128i mask1, mask2, mask3;
    __m128i filter_vec;
    __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    __m128i vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
    __m128i dst30, dst41, dst52, dst63, dst66, dst97, dst108;
    __m128i dst0_r, dst1_r, dst2_r, dst3_r;
    __m128i dst10_r, dst32_r, dst54_r, dst76_r, dst98_r;
    __m128i dst21_r, dst43_r, dst65_r, dst87_r, dst109_r;
    __m128i mask0;

    mask0 = __lsx_vld(ff_hevc_mask_arr, 16);

    src -= src_stride_3x + 3;
    DUP4_ARG2(__lsx_vldrepl_h, filter_x, 0, filter_x, 2, filter_x, 4,
              filter_x, 6, filt0, filt1, filt2, filt3);
    filter_vec = __lsx_vld(filter_y, 0);
    filter_vec = __lsx_vsllwil_h_b(filter_vec, 0);

    DUP4_ARG2(__lsx_vreplvei_w, filter_vec, 0, filter_vec, 1, filter_vec, 2,
              filter_vec, 3, filt_h0, filt_h1, filt_h2, filt_h3);
    DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
    mask3 = __lsx_vaddi_bu(mask0, 6);

    src0 = __lsx_vld(src, 0);
    DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src1, src2);
    src3 = __lsx_vldx(src, src_stride_3x);
    src += src_stride_4x;
    src4 = __lsx_vld(src, 0);
    DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src5, src6);
    src += src_stride_3x;

    DUP4_ARG3(__lsx_vshuf_b, src3, src0, mask0, src3, src0, mask1, src3, src0,
              mask2, src3, src0, mask3, vec0, vec1, vec2, vec3);
    DUP4_ARG3(__lsx_vshuf_b, src4, src1, mask0, src4, src1, mask1, src4, src1,
              mask2, src4, src1, mask3, vec4, vec5, vec6, vec7);
    DUP4_ARG3(__lsx_vshuf_b, src5, src2, mask0, src5, src2, mask1, src5, src2,
              mask2, src5, src2, mask3, vec8, vec9, vec10, vec11);
    DUP4_ARG3(__lsx_vshuf_b, src6, src3, mask0, src6, src3, mask1, src6, src3,
              mask2, src6, src3, mask3, vec12, vec13, vec14, vec15);
    dst30 = __lsx_vdp2_h_bu_b(vec0, filt0);
    DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst30, vec1, filt1, dst30, vec2, filt2,
              dst30, dst30);
    dst30 = __lsx_vdp2add_h_bu_b(dst30, vec3, filt3);
    dst41 = __lsx_vdp2_h_bu_b(vec4, filt0);
    DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst41, vec5, filt1, dst41, vec6, filt2,
              dst41, dst41);
    dst41 = __lsx_vdp2add_h_bu_b(dst41, vec7, filt3);
    dst52 = __lsx_vdp2_h_bu_b(vec8, filt0);
    DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst52, vec9, filt1, dst52, vec10, filt2,
              dst52, dst52);
    dst52 = __lsx_vdp2add_h_bu_b(dst52, vec11, filt3);
    dst63 = __lsx_vdp2_h_bu_b(vec12, filt0);
    DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst63, vec13, filt1, dst63, vec14, filt2,
              dst63, dst63);
    dst63 = __lsx_vdp2add_h_bu_b(dst63, vec15, filt3);

    DUP2_ARG2(__lsx_vilvl_h, dst41, dst30, dst52, dst41, dst10_r, dst21_r);
    DUP2_ARG2(__lsx_vilvh_h, dst41, dst30, dst52, dst41, dst43_r, dst54_r);
    dst32_r = __lsx_vilvl_h(dst63, dst52);
    dst65_r = __lsx_vilvh_h(dst63, dst52);
    dst66 = __lsx_vreplvei_d(dst63, 1);

    for (loop_cnt = height >> 2; loop_cnt--;) {
        src7 = __lsx_vld(src, 0);
        DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src8, src9);
        src10 = __lsx_vldx(src, src_stride_3x);
        src += src_stride_4x;

        DUP4_ARG3(__lsx_vshuf_b, src9, src7, mask0, src9, src7, mask1, src9,
                  src7, mask2, src9, src7, mask3, vec0, vec1, vec2, vec3);
        DUP4_ARG3(__lsx_vshuf_b, src10, src8, mask0, src10, src8, mask1, src10,
                  src8, mask2, src10, src8, mask3, vec4, vec5, vec6, vec7);

        dst97 = __lsx_vdp2_h_bu_b(vec0, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst97, vec1, filt1, dst97, vec2, filt2,
                  dst97, dst97);
        dst97 = __lsx_vdp2add_h_bu_b(dst97, vec3, filt3);
        dst108 = __lsx_vdp2_h_bu_b(vec4, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst108, vec5, filt1, dst108, vec6,
                  filt2, dst108, dst108);
        dst108 = __lsx_vdp2add_h_bu_b(dst108, vec7, filt3);

        DUP2_ARG2(__lsx_vilvl_h, dst97, dst66, dst108, dst97, dst76_r, dst87_r);
        dst109_r = __lsx_vilvh_h(dst108, dst97);
        dst66 = __lsx_vreplvei_d(dst97, 1);
        dst98_r = __lsx_vilvl_h(dst66, dst108);

        DUP4_ARG2(__lsx_vdp2_w_h, dst10_r, filt_h0, dst21_r, filt_h0, dst32_r,
                  filt_h0, dst43_r, filt_h0, dst0_r, dst1_r, dst2_r, dst3_r);
        DUP4_ARG3(__lsx_vdp2add_w_h, dst0_r, dst32_r, filt_h1, dst1_r, dst43_r,
                  filt_h1, dst2_r, dst54_r, filt_h1, dst3_r, dst65_r, filt_h1,
                  dst0_r, dst1_r, dst2_r, dst3_r);
        DUP4_ARG3(__lsx_vdp2add_w_h, dst0_r, dst54_r, filt_h2, dst1_r, dst65_r,
                  filt_h2, dst2_r, dst76_r, filt_h2, dst3_r, dst87_r, filt_h2,
                  dst0_r, dst1_r, dst2_r, dst3_r);
        DUP4_ARG3(__lsx_vdp2add_w_h, dst0_r, dst76_r, filt_h3, dst1_r, dst87_r,
                  filt_h3, dst2_r, dst98_r, filt_h3, dst3_r, dst109_r, filt_h3,
                  dst0_r, dst1_r, dst2_r, dst3_r);
        DUP4_ARG2(__lsx_vsrai_w, dst0_r, 6, dst1_r, 6, dst2_r, 6, dst3_r, 6,
                  dst0_r, dst1_r, dst2_r, dst3_r);
        DUP2_ARG2(__lsx_vpickev_h, dst1_r, dst0_r, dst3_r, dst2_r,
                  dst0_r, dst2_r);
        __lsx_vstelm_d(dst0_r, dst, 0, 0);
        dst += dst_stride;
        __lsx_vstelm_d(dst0_r, dst, 0, 1);
        dst += dst_stride;
        __lsx_vstelm_d(dst2_r, dst, 0, 0);
        dst += dst_stride;
        __lsx_vstelm_d(dst2_r, dst, 0, 1);
        dst += dst_stride;

        dst10_r = dst54_r;
        dst32_r = dst76_r;
        dst54_r = dst98_r;
        dst21_r = dst65_r;
        dst43_r = dst87_r;
        dst65_r = dst109_r;
        dst66 = __lsx_vreplvei_d(dst108, 1);
    }
}
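/* Horizontal-plus-vertical 8-tap filter for widths that are multiples
 * of 8, one output row per inner iteration: seven horizontally
 * filtered rows are carried in dst0..dst6 and a single new row is
 * filtered per pass before the vertical 8-tap dot product. */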
static void hevc_hv_8t_8multx1mult_lsx(uint8_t *src,
                                       int32_t src_stride,
                                       int16_t *dst,
                                       int32_t dst_stride,
                                       const int8_t *filter_x,
                                       const int8_t *filter_y,
                                       int32_t height,
                                       int32_t width)
{
    uint32_t loop_cnt, cnt;
    uint8_t *src_tmp;
    int16_t *dst_tmp;
    int32_t src_stride_2x = (src_stride << 1);
    int32_t src_stride_4x = (src_stride << 2);
    int32_t src_stride_3x = src_stride_2x + src_stride;
    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i filt0, filt1, filt2, filt3;
    __m128i filt_h0, filt_h1, filt_h2, filt_h3;
    __m128i mask1, mask2, mask3;
    __m128i filter_vec;
    __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    __m128i vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
    __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    __m128i dst0_r, dst0_l;
    __m128i dst10_r, dst32_r, dst54_r, dst76_r;
    __m128i dst10_l, dst32_l, dst54_l, dst76_l;
    __m128i mask0 = {0x403030202010100, 0x807070606050504};

    src -= src_stride_3x + 3;
    DUP4_ARG2(__lsx_vldrepl_h, filter_x, 0, filter_x, 2, filter_x, 4,
              filter_x, 6, filt0, filt1, filt2, filt3);

    filter_vec = __lsx_vld(filter_y, 0);
    filter_vec = __lsx_vsllwil_h_b(filter_vec, 0);

    DUP4_ARG2(__lsx_vreplvei_w, filter_vec, 0, filter_vec, 1, filter_vec, 2,
              filter_vec, 3, filt_h0, filt_h1, filt_h2, filt_h3);

    DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
    mask3 = __lsx_vaddi_bu(mask0, 6);

    for (cnt = width >> 3; cnt--;) {
        src_tmp = src;
        dst_tmp = dst;
        src0 = __lsx_vld(src_tmp, 0);
        DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
                  src1, src2);
        src3 = __lsx_vldx(src_tmp, src_stride_3x);
        src_tmp += src_stride_4x;
        src4 = __lsx_vld(src_tmp, 0);
        DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
                  src5, src6);
        src_tmp += src_stride_3x;

        /* row 0 row 1 row 2 row 3 */
        DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask0, src0, src0, mask1, src0,
                  src0, mask2, src0, src0, mask3, vec0, vec1, vec2, vec3);
        DUP4_ARG3(__lsx_vshuf_b, src1, src1, mask0, src1, src1, mask1, src1,
                  src1, mask2, src1, src1, mask3, vec4, vec5, vec6, vec7);
        DUP4_ARG3(__lsx_vshuf_b, src2, src2, mask0, src2, src2, mask1, src2,
                  src2, mask2, src2, src2, mask3, vec8, vec9, vec10, vec11);
        DUP4_ARG3(__lsx_vshuf_b, src3, src3, mask0, src3, src3, mask1, src3,
                  src3, mask2, src3, src3, mask3, vec12, vec13, vec14, vec15);
        dst0 = __lsx_vdp2_h_bu_b(vec0, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec1, filt1, dst0, vec2, filt2,
                  dst0, dst0);
        dst0 = __lsx_vdp2add_h_bu_b(dst0, vec3, filt3);
        dst1 = __lsx_vdp2_h_bu_b(vec4, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst1, vec5, filt1, dst1, vec6, filt2,
                  dst1, dst1);
        dst1 = __lsx_vdp2add_h_bu_b(dst1, vec7, filt3);
        dst2 = __lsx_vdp2_h_bu_b(vec8, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst2, vec9, filt1, dst2, vec10, filt2,
                  dst2, dst2);
        dst2 = __lsx_vdp2add_h_bu_b(dst2, vec11, filt3);
        dst3 = __lsx_vdp2_h_bu_b(vec12, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst3, vec13, filt1, dst3, vec14, filt2,
                  dst3, dst3);
        dst3 = __lsx_vdp2add_h_bu_b(dst3, vec15, filt3);

        /* row 4 row 5 row 6 */
        DUP4_ARG3(__lsx_vshuf_b, src4, src4, mask0, src4, src4, mask1, src4,
                  src4, mask2, src4, src4, mask3, vec0, vec1, vec2, vec3);
        DUP4_ARG3(__lsx_vshuf_b, src5, src5, mask0, src5, src5, mask1, src5,
                  src5, mask2, src5, src5, mask3, vec4, vec5, vec6, vec7);
        DUP4_ARG3(__lsx_vshuf_b, src6, src6, mask0, src6, src6, mask1, src6,
                  src6, mask2, src6, src6, mask3, vec8, vec9, vec10, vec11);
        dst4 = __lsx_vdp2_h_bu_b(vec0, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst4, vec1, filt1, dst4, vec2, filt2,
                  dst4, dst4);
        dst4 = __lsx_vdp2add_h_bu_b(dst4, vec3, filt3);
        dst5 = __lsx_vdp2_h_bu_b(vec4, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst5, vec5, filt1, dst5, vec6, filt2,
                  dst5, dst5);
        dst5 = __lsx_vdp2add_h_bu_b(dst5, vec7, filt3);
        dst6 = __lsx_vdp2_h_bu_b(vec8, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst6, vec9, filt1, dst6, vec10, filt2,
                  dst6, dst6);
        dst6 = __lsx_vdp2add_h_bu_b(dst6, vec11, filt3);

        for (loop_cnt = height; loop_cnt--;) {
            src7 = __lsx_vld(src_tmp, 0);
            src_tmp += src_stride;

            DUP4_ARG3(__lsx_vshuf_b, src7, src7, mask0, src7, src7, mask1, src7,
                      src7, mask2, src7, src7, mask3, vec0, vec1, vec2, vec3);
            dst7 = __lsx_vdp2_h_bu_b(vec0, filt0);
            DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst7, vec1, filt1, dst7, vec2,
                      filt2, dst7, dst7);
            dst7 = __lsx_vdp2add_h_bu_b(dst7, vec3, filt3);

            DUP4_ARG2(__lsx_vilvl_h, dst1, dst0, dst3, dst2, dst5, dst4, dst7,
                      dst6, dst10_r, dst32_r, dst54_r, dst76_r);
            DUP4_ARG2(__lsx_vilvh_h, dst1, dst0, dst3, dst2, dst5, dst4, dst7,
                      dst6, dst10_l, dst32_l, dst54_l, dst76_l);

            DUP2_ARG2(__lsx_vdp2_w_h, dst10_r, filt_h0, dst10_l, filt_h0,
                      dst0_r, dst0_l);
            DUP4_ARG3(__lsx_vdp2add_w_h, dst0_r, dst32_r, filt_h1, dst0_l,
                      dst32_l, filt_h1, dst0_r, dst54_r, filt_h2, dst0_l,
                      dst54_l, filt_h2, dst0_r, dst0_l, dst0_r, dst0_l);
            DUP2_ARG3(__lsx_vdp2add_w_h, dst0_r, dst76_r, filt_h3, dst0_l,
                      dst76_l, filt_h3, dst0_r, dst0_l);
            dst0_r = __lsx_vsrai_w(dst0_r, 6);
            dst0_l = __lsx_vsrai_w(dst0_l, 6);

            dst0_r = __lsx_vpickev_h(dst0_l, dst0_r);
            __lsx_vst(dst0_r, dst_tmp, 0);
            dst_tmp += dst_stride;

            dst0 = dst1;
            dst1 = dst2;
            dst2 = dst3;
            dst3 = dst4;
            dst4 = dst5;
            dst5 = dst6;
            dst6 = dst7;
        }
        src += 8;
        dst += 8;
    }
}
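/* 8-wide case: a single column of the 8-mult kernel above. */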
static void hevc_hv_8t_8w_lsx(uint8_t *src, int32_t src_stride,
                              int16_t *dst, int32_t dst_stride,
                              const int8_t *filter_x, const int8_t *filter_y,
                              int32_t height)
{
    hevc_hv_8t_8multx1mult_lsx(src, src_stride, dst, dst_stride,
                               filter_x, filter_y, height, 8);
}
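/* Horizontal-plus-vertical 8-tap filter, 12 columns wide: the left 8
 * columns reuse the one-row-per-iteration scheme above, then the
 * remaining 4 columns are done four rows at a time with the 4-width
 * shuffle masks (mask4..mask7). */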
static void hevc_hv_8t_12w_lsx(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter_x, const int8_t *filter_y,
                               int32_t height)
{
    uint32_t loop_cnt;
    uint8_t *src_tmp;
    int16_t *dst_tmp;
    int32_t src_stride_2x = (src_stride << 1);
    int32_t src_stride_4x = (src_stride << 2);
    int32_t src_stride_3x = src_stride_2x + src_stride;
    __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    __m128i mask0, mask1, mask2, mask3, mask4, mask5, mask6, mask7;
    __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    __m128i vec8, vec9, vec10, vec11, vec12, vec13, vec14, vec15;
    __m128i filt0, filt1, filt2, filt3, filt_h0, filt_h1, filt_h2, filt_h3;
    __m128i filter_vec;
    __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
    __m128i dst30, dst41, dst52, dst63, dst66, dst97, dst108;
    __m128i dst10_r, dst32_r, dst54_r, dst76_r, dst98_r, dst21_r, dst43_r;
    __m128i dst65_r, dst87_r, dst109_r, dst10_l, dst32_l, dst54_l, dst76_l;
    __m128i dst0_r, dst0_l, dst1_r, dst2_r, dst3_r;

    src -= src_stride_3x + 3;
    DUP4_ARG2(__lsx_vldrepl_h, filter_x, 0, filter_x, 2, filter_x, 4,
              filter_x, 6, filt0, filt1, filt2, filt3);

    filter_vec = __lsx_vld(filter_y, 0);
    filter_vec = __lsx_vsllwil_h_b(filter_vec, 0);

    DUP4_ARG2(__lsx_vreplvei_w, filter_vec, 0, filter_vec, 1, filter_vec, 2,
              filter_vec, 3, filt_h0, filt_h1, filt_h2, filt_h3);

    mask0 = __lsx_vld(ff_hevc_mask_arr, 0);
    DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
    mask3 = __lsx_vaddi_bu(mask0, 6);

    src_tmp = src;
    dst_tmp = dst;

    src0 = __lsx_vld(src_tmp, 0);
    DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
              src1, src2);
    src3 = __lsx_vldx(src_tmp, src_stride_3x);
    src_tmp += src_stride_4x;
    src4 = __lsx_vld(src_tmp, 0);
    DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
              src5, src6);
    src_tmp += src_stride_3x;

    /* row 0 row 1 row 2 row 3 */
    DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask0, src0, src0, mask1, src0, src0,
              mask2, src0, src0, mask3, vec0, vec1, vec2, vec3);
    DUP4_ARG3(__lsx_vshuf_b, src1, src1, mask0, src1, src1, mask1, src1, src1,
              mask2, src1, src1, mask3, vec4, vec5, vec6, vec7);
    DUP4_ARG3(__lsx_vshuf_b, src2, src2, mask0, src2, src2, mask1, src2, src2,
              mask2, src2, src2, mask3, vec8, vec9, vec10, vec11);
    DUP4_ARG3(__lsx_vshuf_b, src3, src3, mask0, src3, src3, mask1, src3, src3,
              mask2, src3, src3, mask3, vec12, vec13, vec14, vec15);
    dst0 = __lsx_vdp2_h_bu_b(vec0, filt0);
    DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec1, filt1, dst0, vec2, filt2,
              dst0, dst0);
    dst0 = __lsx_vdp2add_h_bu_b(dst0, vec3, filt3);
    dst1 = __lsx_vdp2_h_bu_b(vec4, filt0);
    DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst1, vec5, filt1, dst1, vec6, filt2,
              dst1, dst1);
    dst1 = __lsx_vdp2add_h_bu_b(dst1, vec7, filt3);
    dst2 = __lsx_vdp2_h_bu_b(vec8, filt0);
    DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst2, vec9, filt1, dst2, vec10, filt2,
              dst2, dst2);
    dst2 = __lsx_vdp2add_h_bu_b(dst2, vec11, filt3);
    dst3 = __lsx_vdp2_h_bu_b(vec12, filt0);
    DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst3, vec13, filt1, dst3, vec14, filt2,
              dst3, dst3);
    dst3 = __lsx_vdp2add_h_bu_b(dst3, vec15, filt3);

    /* row 4 row 5 row 6 */
    DUP4_ARG3(__lsx_vshuf_b, src4, src4, mask0, src4, src4, mask1, src4, src4,
              mask2, src4, src4, mask3, vec0, vec1, vec2, vec3);
    DUP4_ARG3(__lsx_vshuf_b, src5, src5, mask0, src5, src5, mask1, src5, src5,
              mask2, src5, src5, mask3, vec4, vec5, vec6, vec7);
    DUP4_ARG3(__lsx_vshuf_b, src6, src6, mask0, src6, src6, mask1, src6, src6,
              mask2, src6, src6, mask3, vec8, vec9, vec10, vec11);
    dst4 = __lsx_vdp2_h_bu_b(vec0, filt0);
    DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst4, vec1, filt1, dst4, vec2, filt2,
              dst4, dst4);
    dst4 = __lsx_vdp2add_h_bu_b(dst4, vec3, filt3);
    dst5 = __lsx_vdp2_h_bu_b(vec4, filt0);
    DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst5, vec5, filt1, dst5, vec6, filt2,
              dst5, dst5);
    dst5 = __lsx_vdp2add_h_bu_b(dst5, vec7, filt3);
    dst6 = __lsx_vdp2_h_bu_b(vec8, filt0);
    DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst6, vec9, filt1, dst6, vec10, filt2,
              dst6, dst6);
    dst6 = __lsx_vdp2add_h_bu_b(dst6, vec11, filt3);

    for (loop_cnt = height; loop_cnt--;) {
        src7 = __lsx_vld(src_tmp, 0);
        src_tmp += src_stride;

        DUP4_ARG3(__lsx_vshuf_b, src7, src7, mask0, src7, src7, mask1, src7,
                  src7, mask2, src7, src7, mask3, vec0, vec1, vec2, vec3);
        dst7 = __lsx_vdp2_h_bu_b(vec0, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst7, vec1, filt1, dst7, vec2, filt2,
                  dst7, dst7);
        dst7 = __lsx_vdp2add_h_bu_b(dst7, vec3, filt3);
        DUP4_ARG2(__lsx_vilvl_h, dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6,
                  dst10_r, dst32_r, dst54_r, dst76_r);
        DUP4_ARG2(__lsx_vilvh_h, dst1, dst0, dst3, dst2, dst5, dst4, dst7, dst6,
                  dst10_l, dst32_l, dst54_l, dst76_l);
        DUP2_ARG2(__lsx_vdp2_w_h, dst10_r, filt_h0, dst10_l, filt_h0,
                  dst0_r, dst0_l);
        DUP4_ARG3(__lsx_vdp2add_w_h, dst0_r, dst32_r, filt_h1, dst0_l, dst32_l,
                  filt_h1, dst0_r, dst54_r, filt_h2, dst0_l, dst54_l, filt_h2,
                  dst0_r, dst0_l, dst0_r, dst0_l);
        DUP2_ARG3(__lsx_vdp2add_w_h, dst0_r, dst76_r, filt_h3, dst0_l, dst76_l,
                  filt_h3, dst0_r, dst0_l);
        dst0_r = __lsx_vsrai_w(dst0_r, 6);
        dst0_l = __lsx_vsrai_w(dst0_l, 6);

        dst0_r = __lsx_vpickev_h(dst0_l, dst0_r);
        __lsx_vst(dst0_r, dst_tmp, 0);
        dst_tmp += dst_stride;

        dst0 = dst1;
        dst1 = dst2;
        dst2 = dst3;
        dst3 = dst4;
        dst4 = dst5;
        dst5 = dst6;
        dst6 = dst7;
    }
    src += 8;
    dst += 8;

    mask4 = __lsx_vld(ff_hevc_mask_arr, 16);
    DUP2_ARG2(__lsx_vaddi_bu, mask4, 2, mask4, 4, mask5, mask6);
    mask7 = __lsx_vaddi_bu(mask4, 6);

    src0 = __lsx_vld(src, 0);
    DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src1, src2);
    src3 = __lsx_vldx(src, src_stride_3x);
    src += src_stride_4x;
    src4 = __lsx_vld(src, 0);
    DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src5, src6);
    src += src_stride_3x;

    DUP4_ARG3(__lsx_vshuf_b, src3, src0, mask4, src3, src0, mask5, src3, src0,
              mask6, src3, src0, mask7, vec0, vec1, vec2, vec3);
    DUP4_ARG3(__lsx_vshuf_b, src4, src1, mask4, src4, src1, mask5, src4, src1,
              mask6, src4, src1, mask7, vec4, vec5, vec6, vec7);
    DUP4_ARG3(__lsx_vshuf_b, src5, src2, mask4, src5, src2, mask5, src5, src2,
              mask6, src5, src2, mask7, vec8, vec9, vec10, vec11);
    DUP4_ARG3(__lsx_vshuf_b, src6, src3, mask4, src6, src3, mask5, src6, src3,
              mask6, src6, src3, mask7, vec12, vec13, vec14, vec15);
    dst30 = __lsx_vdp2_h_bu_b(vec0, filt0);
    DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst30, vec1, filt1, dst30, vec2, filt2,
              dst30, dst30);
    dst30 = __lsx_vdp2add_h_bu_b(dst30, vec3, filt3);
    dst41 = __lsx_vdp2_h_bu_b(vec4, filt0);
    DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst41, vec5, filt1, dst41, vec6, filt2,
              dst41, dst41);
    dst41 = __lsx_vdp2add_h_bu_b(dst41, vec7, filt3);
    dst52 = __lsx_vdp2_h_bu_b(vec8, filt0);
    DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst52, vec9, filt1, dst52, vec10, filt2,
              dst52, dst52);
    dst52 = __lsx_vdp2add_h_bu_b(dst52, vec11, filt3);
    dst63 = __lsx_vdp2_h_bu_b(vec12, filt0);
    DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst63, vec13, filt1, dst63, vec14, filt2,
              dst63, dst63);
    dst63 = __lsx_vdp2add_h_bu_b(dst63, vec15, filt3);

    DUP2_ARG2(__lsx_vilvl_h, dst41, dst30, dst52, dst41, dst10_r, dst21_r);
    DUP2_ARG2(__lsx_vilvh_h, dst41, dst30, dst52, dst41, dst43_r, dst54_r);
    dst32_r = __lsx_vilvl_h(dst63, dst52);
    dst65_r = __lsx_vilvh_h(dst63, dst52);

    dst66 = __lsx_vreplvei_d(dst63, 1);

    for (loop_cnt = height >> 2; loop_cnt--;) {
        src7 = __lsx_vld(src, 0);
        DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src8, src9);
        src10 = __lsx_vldx(src, src_stride_3x);
        src += src_stride_4x;

        DUP4_ARG3(__lsx_vshuf_b, src9, src7, mask4, src9, src7, mask5, src9,
                  src7, mask6, src9, src7, mask7, vec0, vec1, vec2, vec3);
        DUP4_ARG3(__lsx_vshuf_b, src10, src8, mask4, src10, src8, mask5, src10,
                  src8, mask6, src10, src8, mask7, vec4, vec5, vec6, vec7);
        dst97 = __lsx_vdp2_h_bu_b(vec0, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst97, vec1, filt1, dst97, vec2, filt2,
                  dst97, dst97);
        dst97 = __lsx_vdp2add_h_bu_b(dst97, vec3, filt3);
        dst108 = __lsx_vdp2_h_bu_b(vec4, filt0);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst108, vec5, filt1, dst108, vec6,
                  filt2, dst108, dst108);
        dst108 = __lsx_vdp2add_h_bu_b(dst108, vec7, filt3);

        DUP2_ARG2(__lsx_vilvl_h, dst97, dst66, dst108, dst97, dst76_r, dst87_r);
        dst109_r = __lsx_vilvh_h(dst108, dst97);
        dst66 = __lsx_vreplvei_d(dst97, 1);
        dst98_r = __lsx_vilvl_h(dst66, dst108);

        DUP4_ARG2(__lsx_vdp2_w_h, dst10_r, filt_h0, dst21_r, filt_h0, dst32_r,
                  filt_h0, dst43_r, filt_h0, dst0_r, dst1_r, dst2_r, dst3_r);
        DUP4_ARG3(__lsx_vdp2add_w_h, dst0_r, dst32_r, filt_h1, dst1_r, dst43_r,
                  filt_h1, dst2_r, dst54_r, filt_h1, dst3_r, dst65_r, filt_h1,
                  dst0_r, dst1_r, dst2_r, dst3_r);
        DUP4_ARG3(__lsx_vdp2add_w_h, dst0_r, dst54_r, filt_h2, dst1_r, dst65_r,
                  filt_h2, dst2_r, dst76_r, filt_h2, dst3_r, dst87_r, filt_h2,
                  dst0_r, dst1_r, dst2_r, dst3_r);
        DUP4_ARG3(__lsx_vdp2add_w_h, dst0_r, dst76_r, filt_h3, dst1_r, dst87_r,
                  filt_h3, dst2_r, dst98_r, filt_h3, dst3_r, dst109_r, filt_h3,
                  dst0_r, dst1_r, dst2_r, dst3_r);
        DUP4_ARG2(__lsx_vsrai_w, dst0_r, 6, dst1_r, 6, dst2_r, 6, dst3_r, 6,
                  dst0_r, dst1_r, dst2_r, dst3_r);
        DUP2_ARG2(__lsx_vpickev_h, dst1_r, dst0_r, dst3_r, dst2_r,
                  dst0_r, dst2_r);
        __lsx_vstelm_d(dst0_r, dst, 0, 0);
        dst += dst_stride;
        __lsx_vstelm_d(dst0_r, dst, 0, 1);
        dst += dst_stride;
        __lsx_vstelm_d(dst2_r, dst, 0, 0);
        dst += dst_stride;
        __lsx_vstelm_d(dst2_r, dst, 0, 1);
        dst += dst_stride;

        dst10_r = dst54_r;
        dst32_r = dst76_r;
        dst54_r = dst98_r;
        dst21_r = dst65_r;
        dst43_r = dst87_r;
        dst65_r = dst109_r;
        dst66 = __lsx_vreplvei_d(dst108, 1);
    }
}
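/* Width-specific horizontal-plus-vertical 8-tap entry points, all
 * multiples of 8 columns. */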
static void hevc_hv_8t_16w_lsx(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter_x, const int8_t *filter_y,
                               int32_t height)
{
    hevc_hv_8t_8multx1mult_lsx(src, src_stride, dst, dst_stride,
                               filter_x, filter_y, height, 16);
}

static void hevc_hv_8t_24w_lsx(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter_x, const int8_t *filter_y,
                               int32_t height)
{
    hevc_hv_8t_8multx1mult_lsx(src, src_stride, dst, dst_stride,
                               filter_x, filter_y, height, 24);
}

static void hevc_hv_8t_32w_lsx(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter_x, const int8_t *filter_y,
                               int32_t height)
{
    hevc_hv_8t_8multx1mult_lsx(src, src_stride, dst, dst_stride,
                               filter_x, filter_y, height, 32);
}

static void hevc_hv_8t_48w_lsx(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter_x, const int8_t *filter_y,
                               int32_t height)
{
    hevc_hv_8t_8multx1mult_lsx(src, src_stride, dst, dst_stride,
                               filter_x, filter_y, height, 48);
}

static void hevc_hv_8t_64w_lsx(uint8_t *src, int32_t src_stride,
                               int16_t *dst, int32_t dst_stride,
                               const int8_t *filter_x, const int8_t *filter_y,
                               int32_t height)
{
    hevc_hv_8t_8multx1mult_lsx(src, src_stride, dst, dst_stride,
                               filter_x, filter_y, height, 64);
}
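/* Horizontal 4-tap filter, 32 columns per row: three overlapping
 * 16-byte loads per row, with mask2/mask3 picking the pixel pairs that
 * straddle the boundary between src0 and src1. */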
static void hevc_hz_4t_32w_lsx(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter,
                               int32_t height)
{
    uint32_t loop_cnt;
    __m128i src0, src1, src2;
    __m128i filt0, filt1;
    __m128i mask0 = __lsx_vld(ff_hevc_mask_arr, 0);
    __m128i mask1, mask2, mask3;
    __m128i dst0, dst1, dst2, dst3;
    __m128i vec0, vec1, vec2, vec3;

    src -= 1;
    DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1);

    DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 8, mask1, mask2);
    mask3 = __lsx_vaddi_bu(mask0, 10);

    for (loop_cnt = height; loop_cnt--;) {
        DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src1);
        src2 = __lsx_vld(src, 24);
        src += src_stride;

        DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask0, src1, src0, mask2,
                  vec0, vec1);
        DUP2_ARG3(__lsx_vshuf_b, src1, src1, mask0, src2, src2, mask0,
                  vec2, vec3);
        DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec1, filt0, vec2, filt0,
                  vec3, filt0, dst0, dst1, dst2, dst3);
        DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask1, src1, src0, mask3,
                  vec0, vec1);
        DUP2_ARG3(__lsx_vshuf_b, src1, src1, mask1, src2, src2, mask1,
                  vec2, vec3);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec0, filt1, dst1, vec1, filt1,
                  dst2, vec2, filt1, dst3, vec3, filt1, dst0, dst1, dst2, dst3);
        __lsx_vst(dst0, dst, 0);
        __lsx_vst(dst1, dst, 16);
        __lsx_vst(dst2, dst, 32);
        __lsx_vst(dst3, dst, 48);
        dst += dst_stride;
    }
}
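/* Vertical 4-tap filter, 16 columns wide, four rows per iteration with
 * the two-row register rotation unrolled twice. */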
static void hevc_vt_4t_16w_lsx(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter,
                               int32_t height)
{
    int32_t loop_cnt;
    int32_t src_stride_2x = (src_stride << 1);
    int32_t src_stride_3x = src_stride_2x + src_stride;
    __m128i src0, src1, src2, src3, src4, src5;
    __m128i src10_r, src32_r, src21_r, src43_r;
    __m128i src10_l, src32_l, src21_l, src43_l;
    __m128i dst0_r, dst1_r, dst0_l, dst1_l;
    __m128i filt0, filt1;

    src -= src_stride;
    DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1);

    src0 = __lsx_vld(src, 0);
    DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src1, src2);
    src += src_stride_3x;
    DUP2_ARG2(__lsx_vilvl_b, src1, src0, src2, src1, src10_r, src21_r);
    DUP2_ARG2(__lsx_vilvh_b, src1, src0, src2, src1, src10_l, src21_l);

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        src3 = __lsx_vld(src, 0);
        src4 = __lsx_vldx(src, src_stride);
        src += src_stride_2x;
        DUP2_ARG2(__lsx_vilvl_b, src3, src2, src4, src3, src32_r, src43_r);
        DUP2_ARG2(__lsx_vilvh_b, src3, src2, src4, src3, src32_l, src43_l);
        DUP4_ARG2(__lsx_vdp2_h_bu_b, src10_r, filt0, src10_l, filt0, src21_r,
                  filt0, src21_l, filt0, dst0_r, dst0_l, dst1_r, dst1_l);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0_r, src32_r, filt1, dst0_l,
                  src32_l, filt1, dst1_r, src43_r, filt1, dst1_l, src43_l,
                  filt1, dst0_r, dst0_l, dst1_r, dst1_l);
        __lsx_vst(dst0_r, dst, 0);
        __lsx_vst(dst0_l, dst, 16);
        dst += dst_stride;
        __lsx_vst(dst1_r, dst, 0);
        __lsx_vst(dst1_l, dst, 16);
        dst += dst_stride;

        src5 = __lsx_vld(src, 0);
        src2 = __lsx_vldx(src, src_stride);
        src += src_stride_2x;
        DUP2_ARG2(__lsx_vilvl_b, src5, src4, src2, src5, src10_r, src21_r);
        DUP2_ARG2(__lsx_vilvh_b, src5, src4, src2, src5, src10_l, src21_l);
        DUP4_ARG2(__lsx_vdp2_h_bu_b, src32_r, filt0, src32_l, filt0, src43_r,
                  filt0, src43_l, filt0, dst0_r, dst0_l, dst1_r, dst1_l);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0_r, src10_r, filt1, dst0_l,
                  src10_l, filt1, dst1_r, src21_r, filt1, dst1_l, src21_l,
                  filt1, dst0_r, dst0_l, dst1_r, dst1_l);
        __lsx_vst(dst0_r, dst, 0);
        __lsx_vst(dst0_l, dst, 16);
        dst += dst_stride;
        __lsx_vst(dst1_r, dst, 0);
        __lsx_vst(dst1_l, dst, 16);
        dst += dst_stride;
    }
}
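/* Vertical 4-tap filter, 24 columns wide: columns 0-15 use full-vector
 * low/high interleaves, while columns 16-23 are tracked through a
 * second pointer (_src) and only need the low interleaves. */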
static void hevc_vt_4t_24w_lsx(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter,
                               int32_t height)
{
    int32_t loop_cnt;
    int32_t src_stride_2x = (src_stride << 1);
    int32_t src_stride_3x = src_stride_2x + src_stride;
    uint8_t *_src;

    __m128i src0, src1, src2, src3, src4, src5;
    __m128i src6, src7, src8, src9, src10, src11;
    __m128i src10_r, src32_r, src76_r, src98_r;
    __m128i src21_r, src43_r, src87_r, src109_r;
    __m128i dst0_r, dst1_r, dst2_r, dst3_r;
    __m128i src10_l, src32_l, src21_l, src43_l;
    __m128i dst0_l, dst1_l;
    __m128i filt0, filt1;

    src -= src_stride;
    _src = src + 16;
    DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1);

    src0 = __lsx_vld(src, 0);
    DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src1, src2);
    DUP2_ARG2(__lsx_vilvl_b, src1, src0, src2, src1, src10_r, src21_r);
    DUP2_ARG2(__lsx_vilvh_b, src1, src0, src2, src1, src10_l, src21_l);

    src6 = __lsx_vld(_src, 0);
    DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride_2x, src7, src8);
    src += src_stride_3x;
    _src += src_stride_3x;
    DUP2_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src76_r, src87_r);

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        DUP2_ARG2(__lsx_vld, src, 0, _src, 0, src3, src9);
        DUP2_ARG2(__lsx_vldx, src, src_stride, _src, src_stride, src4, src10);
        src += src_stride_2x;
        _src += src_stride_2x;
        DUP2_ARG2(__lsx_vilvl_b, src3, src2, src4, src3, src32_r, src43_r);
        DUP2_ARG2(__lsx_vilvh_b, src3, src2, src4, src3, src32_l, src43_l);

        DUP2_ARG2(__lsx_vilvl_b, src9, src8, src10, src9, src98_r, src109_r);
        DUP4_ARG2(__lsx_vdp2_h_bu_b, src10_r, filt0, src10_l, filt0, src21_r,
                  filt0, src21_l, filt0, dst0_r, dst0_l, dst1_r, dst1_l);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0_r, src32_r, filt1, dst0_l,
                  src32_l, filt1, dst1_r, src43_r, filt1, dst1_l, src43_l,
                  filt1, dst0_r, dst0_l, dst1_r, dst1_l);
        DUP2_ARG2(__lsx_vdp2_h_bu_b, src76_r, filt0, src87_r, filt0,
                  dst2_r, dst3_r);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst2_r, src98_r, filt1, dst3_r,
                  src109_r, filt1, dst2_r, dst3_r);

        __lsx_vst(dst0_r, dst, 0);
        __lsx_vst(dst0_l, dst, 16);
        __lsx_vst(dst2_r, dst, 32);
        dst += dst_stride;
        __lsx_vst(dst1_r, dst, 0);
        __lsx_vst(dst1_l, dst, 16);
        __lsx_vst(dst3_r, dst, 32);
        dst += dst_stride;

        DUP2_ARG2(__lsx_vld, src, 0, _src, 0, src5, src11);
        DUP2_ARG2(__lsx_vldx, src, src_stride, _src, src_stride, src2, src8);
        src += src_stride_2x;
        _src += src_stride_2x;
        DUP2_ARG2(__lsx_vilvl_b, src5, src4, src2, src5, src10_r, src21_r);
        DUP2_ARG2(__lsx_vilvh_b, src5, src4, src2, src5, src10_l, src21_l);

        DUP2_ARG2(__lsx_vilvl_b, src11, src10, src8, src11, src76_r, src87_r);

        DUP4_ARG2(__lsx_vdp2_h_bu_b, src32_r, filt0, src32_l, filt0, src43_r,
                  filt0, src43_l, filt0, dst0_r, dst0_l, dst1_r, dst1_l);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0_r, src10_r, filt1, dst0_l, src10_l,
                  filt1, dst1_r, src21_r, filt1, dst1_l, src21_l, filt1,
                  dst0_r, dst0_l, dst1_r, dst1_l);
        DUP2_ARG2(__lsx_vdp2_h_bu_b, src98_r, filt0, src109_r, filt0,
                  dst2_r, dst3_r);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst2_r, src76_r, filt1, dst3_r, src87_r,
                  filt1, dst2_r, dst3_r);

        __lsx_vst(dst0_r, dst, 0);
        __lsx_vst(dst0_l, dst, 16);
        __lsx_vst(dst2_r, dst, 32);
        dst += dst_stride;
        __lsx_vst(dst1_r, dst, 0);
        __lsx_vst(dst1_l, dst, 16);
        __lsx_vst(dst3_r, dst, 32);
        dst += dst_stride;
    }
}
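/* Vertical 4-tap filter, 32 columns wide: same two-pointer scheme as
 * the 24-wide version, with the upper 16 columns kept in full vectors. */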
static void hevc_vt_4t_32w_lsx(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter,
                               int32_t height)
{
    int32_t loop_cnt;
    int32_t src_stride_2x = (src_stride << 1);
    int32_t src_stride_3x = src_stride_2x + src_stride;
    uint8_t *_src;

    __m128i src0, src1, src2, src3, src4, src5;
    __m128i src6, src7, src8, src9, src10, src11;
    __m128i src10_r, src32_r, src76_r, src98_r;
    __m128i src21_r, src43_r, src87_r, src109_r;
    __m128i dst0_r, dst1_r, dst2_r, dst3_r;
    __m128i src10_l, src32_l, src76_l, src98_l;
    __m128i src21_l, src43_l, src87_l, src109_l;
    __m128i dst0_l, dst1_l, dst2_l, dst3_l;
    __m128i filt0, filt1;

    src -= src_stride;
    _src = src + 16;
    DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1);

    src0 = __lsx_vld(src, 0);
    DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src1, src2);
    DUP2_ARG2(__lsx_vilvl_b, src1, src0, src2, src1, src10_r, src21_r);
    DUP2_ARG2(__lsx_vilvh_b, src1, src0, src2, src1, src10_l, src21_l);

    src6 = __lsx_vld(_src, 0);
    DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride_2x, src7, src8);
    src += src_stride_3x;
    _src += src_stride_3x;
    DUP2_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src76_r, src87_r);
    DUP2_ARG2(__lsx_vilvh_b, src7, src6, src8, src7, src76_l, src87_l);

    for (loop_cnt = (height >> 2); loop_cnt--;) {
        DUP2_ARG2(__lsx_vld, src, 0, _src, 0, src3, src9);
        DUP2_ARG2(__lsx_vldx, src, src_stride, _src, src_stride, src4, src10);
        src += src_stride_2x;
        _src += src_stride_2x;
        DUP2_ARG2(__lsx_vilvl_b, src3, src2, src4, src3, src32_r, src43_r);
        DUP2_ARG2(__lsx_vilvh_b, src3, src2, src4, src3, src32_l, src43_l);

        DUP2_ARG2(__lsx_vilvl_b, src9, src8, src10, src9, src98_r, src109_r);
        DUP2_ARG2(__lsx_vilvh_b, src9, src8, src10, src9, src98_l, src109_l);

        DUP4_ARG2(__lsx_vdp2_h_bu_b, src10_r, filt0, src10_l, filt0, src21_r,
                  filt0, src21_l, filt0, dst0_r, dst0_l, dst1_r, dst1_l);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0_r, src32_r, filt1, dst0_l,
                  src32_l, filt1, dst1_r, src43_r, filt1, dst1_l, src43_l,
                  filt1, dst0_r, dst0_l, dst1_r, dst1_l);
        DUP4_ARG2(__lsx_vdp2_h_bu_b, src76_r, filt0, src76_l, filt0, src87_r,
                  filt0, src87_l, filt0, dst2_r, dst2_l, dst3_r, dst3_l);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst2_r, src98_r, filt1, dst2_l, src98_l,
                  filt1, dst3_r, src109_r, filt1, dst3_l, src109_l, filt1,
                  dst2_r, dst2_l, dst3_r, dst3_l);

        __lsx_vst(dst0_r, dst, 0);
        __lsx_vst(dst0_l, dst, 16);
        __lsx_vst(dst2_r, dst, 32);
        __lsx_vst(dst2_l, dst, 48);
        dst += dst_stride;
        __lsx_vst(dst1_r, dst, 0);
        __lsx_vst(dst1_l, dst, 16);
        __lsx_vst(dst3_r, dst, 32);
        __lsx_vst(dst3_l, dst, 48);
        dst += dst_stride;

        DUP2_ARG2(__lsx_vld, src, 0, _src, 0, src5, src11);
        DUP2_ARG2(__lsx_vldx, src, src_stride, _src, src_stride, src2, src8);
        src += src_stride_2x;
        _src += src_stride_2x;
        DUP2_ARG2(__lsx_vilvl_b, src5, src4, src2, src5, src10_r, src21_r);
        DUP2_ARG2(__lsx_vilvh_b, src5, src4, src2, src5, src10_l, src21_l);

        DUP2_ARG2(__lsx_vilvl_b, src11, src10, src8, src11, src76_r, src87_r);
        DUP2_ARG2(__lsx_vilvh_b, src11, src10, src8, src11, src76_l, src87_l);

        DUP4_ARG2(__lsx_vdp2_h_bu_b, src32_r, filt0, src32_l, filt0, src43_r,
                  filt0, src43_l, filt0, dst0_r, dst0_l, dst1_r, dst1_l);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0_r, src10_r, filt1, dst0_l,
                  src10_l, filt1, dst1_r, src21_r, filt1, dst1_l, src21_l,
                  filt1, dst0_r, dst0_l, dst1_r, dst1_l);

        DUP4_ARG2(__lsx_vdp2_h_bu_b, src98_r, filt0, src98_l, filt0, src109_r,
                  filt0, src109_l, filt0, dst2_r, dst2_l, dst3_r, dst3_l);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst2_r, src76_r, filt1, dst2_l, src76_l,
                  filt1, dst3_r, src87_r, filt1, dst3_l, src87_l, filt1,
                  dst2_r, dst2_l, dst3_r, dst3_l);

        __lsx_vst(dst0_r, dst, 0);
        __lsx_vst(dst0_l, dst, 16);
        __lsx_vst(dst2_r, dst, 32);
        __lsx_vst(dst2_l, dst, 48);
        dst += dst_stride;
        __lsx_vst(dst1_r, dst, 0);
        __lsx_vst(dst1_l, dst, 16);
        __lsx_vst(dst3_r, dst, 32);
        __lsx_vst(dst3_l, dst, 48);
        dst += dst_stride;
    }
}
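/* Horizontal-plus-vertical 4-tap filter for one 8x2 block: five input
 * rows are filtered horizontally, then the two output rows come from
 * the vertical 4-tap dot product at 32-bit precision, shifted by 6. */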
static void hevc_hv_4t_8x2_lsx(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter_x,
                               const int8_t *filter_y)
{
    int32_t src_stride_2x = (src_stride << 1);
    int32_t src_stride_4x = (src_stride << 2);
    int32_t src_stride_3x = src_stride_2x + src_stride;

    __m128i src0, src1, src2, src3, src4;
    __m128i filt0, filt1;
    __m128i filt_h0, filt_h1;
    __m128i mask0 = __lsx_vld(ff_hevc_mask_arr, 0);
    __m128i mask1;
    __m128i filter_vec;
    __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9;
    __m128i dst0, dst1, dst2, dst3, dst4;
    __m128i dst0_r, dst0_l, dst1_r, dst1_l;
    __m128i dst10_r, dst32_r, dst21_r, dst43_r;
    __m128i dst10_l, dst32_l, dst21_l, dst43_l;

    src -= (src_stride + 1);
    DUP2_ARG2(__lsx_vldrepl_h, filter_x, 0, filter_x, 2, filt0, filt1);

    filter_vec = __lsx_vld(filter_y, 0);
    filter_vec = __lsx_vsllwil_h_b(filter_vec, 0);
    DUP2_ARG2(__lsx_vreplvei_w, filter_vec, 0, filter_vec, 1, filt_h0, filt_h1);

    mask1 = __lsx_vaddi_bu(mask0, 2);

    src0 = __lsx_vld(src, 0);
    DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src1, src2);
    src3 = __lsx_vldx(src, src_stride_3x);
    src4 = __lsx_vldx(src, src_stride_4x);

    DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask0, src0, src0, mask1, vec0, vec1);
    DUP2_ARG3(__lsx_vshuf_b, src1, src1, mask0, src1, src1, mask1, vec2, vec3);
    DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask0, src2, src2, mask1, vec4, vec5);
    DUP2_ARG3(__lsx_vshuf_b, src3, src3, mask0, src3, src3, mask1, vec6, vec7);
    DUP2_ARG3(__lsx_vshuf_b, src4, src4, mask0, src4, src4, mask1, vec8, vec9);

    DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, vec4, filt0,
              vec6, filt0, dst0, dst1, dst2, dst3);
    DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec1, filt1, dst1, vec3, filt1,
              dst2, vec5, filt1, dst3, vec7, filt1, dst0, dst1, dst2, dst3);
    dst4 = __lsx_vdp2_h_bu_b(vec8, filt0);
    dst4 = __lsx_vdp2add_h_bu_b(dst4, vec9, filt1);

    DUP2_ARG2(__lsx_vilvl_h, dst1, dst0, dst2, dst1, dst10_r, dst21_r);
    DUP2_ARG2(__lsx_vilvh_h, dst1, dst0, dst2, dst1, dst10_l, dst21_l);
    DUP2_ARG2(__lsx_vilvl_h, dst3, dst2, dst4, dst3, dst32_r, dst43_r);
    DUP2_ARG2(__lsx_vilvh_h, dst3, dst2, dst4, dst3, dst32_l, dst43_l);

    DUP4_ARG2(__lsx_vdp2_w_h, dst10_r, filt_h0, dst10_l, filt_h0, dst21_r,
              filt_h0, dst21_l, filt_h0, dst0_r, dst0_l, dst1_r, dst1_l);
    DUP4_ARG3(__lsx_vdp2add_w_h, dst0_r, dst32_r, filt_h1, dst0_l, dst32_l,
              filt_h1, dst1_r, dst43_r, filt_h1, dst1_l, dst43_l, filt_h1,
              dst0_r, dst0_l, dst1_r, dst1_l);
    DUP4_ARG2(__lsx_vsrai_w, dst0_r, 6, dst0_l, 6, dst1_r, 6, dst1_l, 6,
              dst0_r, dst0_l, dst1_r, dst1_l);
    DUP2_ARG2(__lsx_vpickev_h, dst0_l, dst0_r, dst1_l, dst1_r, dst0_r, dst1_r);
    __lsx_vst(dst0_r, dst, 0);
    __lsx_vst(dst1_r, dst + dst_stride, 0);
}
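/* Horizontal-plus-vertical 4-tap filter emitting a 4-row strip per
 * 8-column block; width8mult is the number of such blocks (width / 8). */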
static void hevc_hv_4t_8multx4_lsx(uint8_t *src, int32_t src_stride,
                                   int16_t *dst, int32_t dst_stride,
                                   const int8_t *filter_x,
                                   const int8_t *filter_y, int32_t width8mult)
{
    int32_t cnt;
    int32_t src_stride_2x = (src_stride << 1);
    int32_t dst_stride_x = (dst_stride << 1);
    int32_t src_stride_4x = (src_stride << 2);
    int32_t dst_stride_2x = (dst_stride << 2);
    int32_t src_stride_3x = src_stride_2x + src_stride;
    int32_t dst_stride_3x = dst_stride_2x + dst_stride_x;

    __m128i src0, src1, src2, src3, src4, src5, src6, mask0, mask1;
    __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    __m128i filt0, filt1, filt_h0, filt_h1, filter_vec;
    __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6;
    __m128i dst10_r, dst32_r, dst54_r, dst21_r, dst43_r, dst65_r;
    __m128i dst10_l, dst32_l, dst54_l, dst21_l, dst43_l, dst65_l;
    __m128i dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;

    src -= (src_stride + 1);
    DUP2_ARG2(__lsx_vldrepl_h, filter_x, 0, filter_x, 2, filt0, filt1);

    filter_vec = __lsx_vld(filter_y, 0);
    filter_vec = __lsx_vsllwil_h_b(filter_vec, 0);
    DUP2_ARG2(__lsx_vreplvei_w, filter_vec, 0, filter_vec, 1, filt_h0, filt_h1);

    mask0 = __lsx_vld(ff_hevc_mask_arr, 0);
    mask1 = __lsx_vaddi_bu(mask0, 2);

    for (cnt = width8mult; cnt--;) {
        src0 = __lsx_vld(src, 0);
        DUP4_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src,
                  src_stride_3x, src, src_stride_4x, src1, src2, src3, src4);
        src += src_stride_4x;
        DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src5, src6);
        src += (8 - src_stride_4x);

        DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask0, src0, src0, mask1,
                  vec0, vec1);
        DUP2_ARG3(__lsx_vshuf_b, src1, src1, mask0, src1, src1, mask1,
                  vec2, vec3);
        DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask0, src2, src2, mask1,
                  vec4, vec5);

        DUP2_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, dst0, dst1);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec1, filt1, dst1, vec3, filt1,
                  dst0, dst1);
        dst2 = __lsx_vdp2_h_bu_b(vec4, filt0);
        dst2 = __lsx_vdp2add_h_bu_b(dst2, vec5, filt1);

        DUP2_ARG2(__lsx_vilvl_h, dst1, dst0, dst2, dst1, dst10_r, dst21_r);
        DUP2_ARG2(__lsx_vilvh_h, dst1, dst0, dst2, dst1, dst10_l, dst21_l);

        DUP2_ARG3(__lsx_vshuf_b, src3, src3, mask0, src3, src3, mask1,
                  vec0, vec1);
        DUP2_ARG3(__lsx_vshuf_b, src4, src4, mask0, src4, src4, mask1,
                  vec2, vec3);
        DUP2_ARG3(__lsx_vshuf_b, src5, src5, mask0, src5, src5, mask1,
                  vec4, vec5);
        DUP2_ARG3(__lsx_vshuf_b, src6, src6, mask0, src6, src6, mask1,
                  vec6, vec7);
        DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, vec4, filt0,
                  vec6, filt0, dst3, dst4, dst5, dst6);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst3, vec1, filt1, dst4, vec3, filt1,
                  dst5, vec5, filt1, dst6, vec7, filt1, dst3, dst4, dst5, dst6);
        DUP2_ARG2(__lsx_vilvl_h, dst3, dst2, dst4, dst3, dst32_r, dst43_r);
        DUP2_ARG2(__lsx_vilvh_h, dst3, dst2, dst4, dst3, dst32_l, dst43_l);
        DUP2_ARG2(__lsx_vilvl_h, dst5, dst4, dst6, dst5, dst54_r, dst65_r);
        DUP2_ARG2(__lsx_vilvh_h, dst5, dst4, dst6, dst5, dst54_l, dst65_l);

        DUP4_ARG2(__lsx_vdp2_w_h, dst10_r, filt_h0, dst10_l, filt_h0, dst21_r,
                  filt_h0, dst21_l, filt_h0, dst0_r, dst0_l, dst1_r, dst1_l);
        DUP4_ARG3(__lsx_vdp2add_w_h, dst0_r, dst32_r, filt_h1, dst0_l, dst32_l,
                  filt_h1, dst1_r, dst43_r, filt_h1, dst1_l, dst43_l, filt_h1,
                  dst0_r, dst0_l, dst1_r, dst1_l);

        DUP4_ARG2(__lsx_vdp2_w_h, dst32_r, filt_h0, dst32_l, filt_h0, dst43_r,
                  filt_h0, dst43_l, filt_h0, dst2_r, dst2_l, dst3_r, dst3_l);
        DUP4_ARG3(__lsx_vdp2add_w_h, dst2_r, dst54_r, filt_h1, dst2_l, dst54_l,
                  filt_h1, dst3_r, dst65_r, filt_h1, dst3_l, dst65_l, filt_h1,
                  dst2_r, dst2_l, dst3_r, dst3_l);
        DUP4_ARG2(__lsx_vsrai_w, dst0_r, 6, dst0_l, 6, dst1_r, 6, dst1_l, 6,
                  dst0_r, dst0_l, dst1_r, dst1_l);
        DUP4_ARG2(__lsx_vsrai_w, dst2_r, 6, dst2_l, 6, dst3_r, 6, dst3_l, 6,
                  dst2_r, dst2_l, dst3_r, dst3_l);
        DUP2_ARG2(__lsx_vpickev_h, dst0_l, dst0_r, dst1_l, dst1_r,
                  dst0_r, dst1_r);
        DUP2_ARG2(__lsx_vpickev_h, dst2_l, dst2_r, dst3_l, dst3_r,
                  dst2_r, dst3_r);

        __lsx_vst(dst0_r, dst, 0);
        __lsx_vstx(dst1_r, dst, dst_stride_x);
        __lsx_vstx(dst2_r, dst, dst_stride_2x);
        __lsx_vstx(dst3_r, dst, dst_stride_3x);
        dst += 8;
    }
}
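/* Horizontal-plus-vertical 4-tap filter for one 8x6 block, fully
 * unrolled: nine input rows give six output rows. */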
static void hevc_hv_4t_8x6_lsx(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter_x,
                               const int8_t *filter_y)
{
    int32_t src_stride_2x = (src_stride << 1);
    int32_t dst_stride_2x = (dst_stride << 1);
    int32_t src_stride_4x = (src_stride << 2);
    int32_t src_stride_3x = src_stride_2x + src_stride;
    __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8;
    __m128i filt0, filt1;
    __m128i filt_h0, filt_h1;
    __m128i mask0 = __lsx_vld(ff_hevc_mask_arr, 0);
    __m128i mask1, filter_vec;
    __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9;
    __m128i vec10, vec11, vec12, vec13, vec14, vec15, vec16, vec17;
    __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7, dst8;
    __m128i dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
    __m128i dst4_r, dst4_l, dst5_r, dst5_l;
    __m128i dst10_r, dst32_r, dst10_l, dst32_l;
    __m128i dst21_r, dst43_r, dst21_l, dst43_l;
    __m128i dst54_r, dst54_l, dst65_r, dst65_l;
    __m128i dst76_r, dst76_l, dst87_r, dst87_l;

    src -= (src_stride + 1);
    DUP2_ARG2(__lsx_vldrepl_h, filter_x, 0, filter_x, 2, filt0, filt1);

    filter_vec = __lsx_vld(filter_y, 0);
    filter_vec = __lsx_vsllwil_h_b(filter_vec, 0);
    DUP2_ARG2(__lsx_vreplvei_w, filter_vec, 0, filter_vec, 1, filt_h0, filt_h1);

    mask1 = __lsx_vaddi_bu(mask0, 2);

    src0 = __lsx_vld(src, 0);
    DUP4_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src,
              src_stride_3x, src, src_stride_4x, src1, src2, src3, src4);
    src += src_stride_4x;
    DUP4_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src,
              src_stride_3x, src, src_stride_4x, src5, src6, src7, src8);

    DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask0, src0, src0, mask1, src1, src1,
              mask0, src1, src1, mask1, vec0, vec1, vec2, vec3);
    DUP4_ARG3(__lsx_vshuf_b, src2, src2, mask0, src2, src2, mask1, src3, src3,
              mask0, src3, src3, mask1, vec4, vec5, vec6, vec7);
    DUP4_ARG3(__lsx_vshuf_b, src4, src4, mask0, src4, src4, mask1, src5, src5,
              mask0, src5, src5, mask1, vec8, vec9, vec10, vec11);
    DUP4_ARG3(__lsx_vshuf_b, src6, src6, mask0, src6, src6, mask1, src7, src7,
              mask0, src7, src7, mask1, vec12, vec13, vec14, vec15);
    DUP2_ARG3(__lsx_vshuf_b, src8, src8, mask0, src8, src8, mask1,
              vec16, vec17);

    DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, vec4, filt0, vec6,
              filt0, dst0, dst1, dst2, dst3);
    DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec1, filt1, dst1, vec3, filt1,
              dst2, vec5, filt1, dst3, vec7, filt1, dst0, dst1, dst2, dst3);
    DUP4_ARG2(__lsx_vdp2_h_bu_b, vec8, filt0, vec10, filt0, vec12, filt0,
              vec14, filt0, dst4, dst5, dst6, dst7);
    DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst4, vec9, filt1, dst5, vec11, filt1, dst6,
              vec13, filt1, dst7, vec15, filt1, dst4, dst5, dst6, dst7);
    dst8 = __lsx_vdp2_h_bu_b(vec16, filt0);
    dst8 = __lsx_vdp2add_h_bu_b(dst8, vec17, filt1);

    DUP4_ARG2(__lsx_vilvl_h, dst1, dst0, dst2, dst1, dst3, dst2, dst4, dst3,
              dst10_r, dst21_r, dst32_r, dst43_r);
    DUP4_ARG2(__lsx_vilvh_h, dst1, dst0, dst2, dst1, dst3, dst2, dst4, dst3,
              dst10_l, dst21_l, dst32_l, dst43_l);
    DUP4_ARG2(__lsx_vilvl_h, dst5, dst4, dst6, dst5, dst7, dst6, dst8, dst7,
              dst54_r, dst65_r, dst76_r, dst87_r);
    DUP4_ARG2(__lsx_vilvh_h, dst5, dst4, dst6, dst5, dst7, dst6, dst8, dst7,
              dst54_l, dst65_l, dst76_l, dst87_l);

    DUP4_ARG2(__lsx_vdp2_w_h, dst10_r, filt_h0, dst10_l, filt_h0, dst21_r,
              filt_h0, dst21_l, filt_h0, dst0_r, dst0_l, dst1_r, dst1_l);
    DUP4_ARG2(__lsx_vdp2_w_h, dst32_r, filt_h0, dst32_l, filt_h0, dst43_r,
              filt_h0, dst43_l, filt_h0, dst2_r, dst2_l, dst3_r, dst3_l);
    DUP4_ARG2(__lsx_vdp2_w_h, dst54_r, filt_h0, dst54_l, filt_h0, dst65_r,
              filt_h0, dst65_l, filt_h0, dst4_r, dst4_l, dst5_r, dst5_l);
    DUP4_ARG3(__lsx_vdp2add_w_h, dst0_r, dst32_r, filt_h1, dst0_l, dst32_l,
              filt_h1, dst1_r, dst43_r, filt_h1, dst1_l, dst43_l, filt_h1,
              dst0_r, dst0_l, dst1_r, dst1_l);
    DUP4_ARG3(__lsx_vdp2add_w_h, dst2_r, dst54_r, filt_h1, dst2_l, dst54_l,
              filt_h1, dst3_r, dst65_r, filt_h1, dst3_l, dst65_l, filt_h1,
              dst2_r, dst2_l, dst3_r, dst3_l);
    DUP4_ARG3(__lsx_vdp2add_w_h, dst4_r, dst76_r, filt_h1, dst4_l, dst76_l,
              filt_h1, dst5_r, dst87_r, filt_h1, dst5_l, dst87_l, filt_h1,
              dst4_r, dst4_l, dst5_r, dst5_l);
    DUP4_ARG2(__lsx_vsrai_w, dst0_r, 6, dst0_l, 6, dst1_r, 6, dst1_l, 6, dst0_r,
              dst0_l, dst1_r, dst1_l);
    DUP4_ARG2(__lsx_vsrai_w, dst2_r, 6, dst2_l, 6, dst3_r, 6, dst3_l, 6, dst2_r,
              dst2_l, dst3_r, dst3_l);
    DUP4_ARG2(__lsx_vsrai_w, dst4_r, 6, dst4_l, 6, dst5_r, 6, dst5_l, 6, dst4_r,
              dst4_l, dst5_r, dst5_l);

    DUP4_ARG2(__lsx_vpickev_h, dst0_l, dst0_r, dst1_l, dst1_r, dst2_l, dst2_r,
              dst3_l, dst3_r, dst0_r, dst1_r, dst2_r, dst3_r);
    DUP2_ARG2(__lsx_vpickev_h, dst4_l, dst4_r, dst5_l, dst5_r, dst4_r, dst5_r);

    __lsx_vst(dst0_r, dst, 0);
    __lsx_vstx(dst1_r, dst, dst_stride_2x);
    dst += dst_stride_2x;
    __lsx_vst(dst2_r, dst, 0);
    __lsx_vstx(dst3_r, dst, dst_stride_2x);
    dst += dst_stride_2x;
    __lsx_vst(dst4_r, dst, 0);
    __lsx_vstx(dst5_r, dst, dst_stride_2x);
}

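/* hevc_hv_4t_8multx4mult_lsx: the same two-pass 4-tap filter for blocks
 * that are (width8mult * 8) pixels wide and a multiple of 4 rows high.
 * Each 8-wide column is walked top to bottom four rows at a time, reusing
 * the last three rows of horizontal-filter output across iterations. */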
static void hevc_hv_4t_8multx4mult_lsx(uint8_t *src,
                                       int32_t src_stride,
                                       int16_t *dst,
                                       int32_t dst_stride,
                                       const int8_t *filter_x,
                                       const int8_t *filter_y,
                                       int32_t height,
                                       int32_t width8mult)
{
    uint32_t loop_cnt, cnt;
    uint8_t *src_tmp;
    int16_t *dst_tmp;
    int32_t src_stride_2x = (src_stride << 1);
    /* dst strides are byte offsets for __lsx_vstx (dst is int16_t):
       _x is one output row, _2x two rows, _3x three rows */
    int32_t dst_stride_x = (dst_stride << 1);
    int32_t src_stride_4x = (src_stride << 2);
    int32_t dst_stride_2x = (dst_stride << 2);
    int32_t src_stride_3x = src_stride_2x + src_stride;
    int32_t dst_stride_3x = dst_stride_2x + dst_stride_x;

    __m128i src0, src1, src2, src3, src4, src5, src6;
    __m128i filt0, filt1;
    __m128i filt_h0, filt_h1;
    __m128i mask0 = __lsx_vld(ff_hevc_mask_arr, 0);
    __m128i mask1, filter_vec;
    __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6;
    __m128i dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
    __m128i dst10_r, dst32_r, dst54_r, dst21_r, dst43_r, dst65_r;
    __m128i dst10_l, dst32_l, dst54_l, dst21_l, dst43_l, dst65_l;

    src -= (src_stride + 1);
    DUP2_ARG2(__lsx_vldrepl_h, filter_x, 0, filter_x, 2, filt0, filt1);

    filter_vec = __lsx_vld(filter_y, 0);
    filter_vec = __lsx_vsllwil_h_b(filter_vec, 0);
    DUP2_ARG2(__lsx_vreplvei_w, filter_vec, 0, filter_vec, 1, filt_h0, filt_h1);

    mask1 = __lsx_vaddi_bu(mask0, 2);

    for (cnt = width8mult; cnt--;) {
        src_tmp = src;
        dst_tmp = dst;

        src0 = __lsx_vld(src_tmp, 0);
        DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
                  src1, src2);
        src_tmp += src_stride_3x;

        DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask0, src0, src0, mask1,
                  vec0, vec1);
        DUP2_ARG3(__lsx_vshuf_b, src1, src1, mask0, src1, src1, mask1,
                  vec2, vec3);
        DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask0, src2, src2, mask1,
                  vec4, vec5);

        DUP2_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, dst0, dst1);
        DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec1, filt1, dst1, vec3, filt1,
                  dst0, dst1);
        dst2 = __lsx_vdp2_h_bu_b(vec4, filt0);
        dst2 = __lsx_vdp2add_h_bu_b(dst2, vec5, filt1);

        DUP2_ARG2(__lsx_vilvl_h, dst1, dst0, dst2, dst1, dst10_r, dst21_r);
        DUP2_ARG2(__lsx_vilvh_h, dst1, dst0, dst2, dst1, dst10_l, dst21_l);

        for (loop_cnt = height >> 2; loop_cnt--;) {
            src3 = __lsx_vld(src_tmp, 0);
            DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
                      src4, src5);
            src6 = __lsx_vldx(src_tmp, src_stride_3x);
            src_tmp += src_stride_4x;

            DUP2_ARG3(__lsx_vshuf_b, src3, src3, mask0, src3, src3, mask1,
                      vec0, vec1);
            DUP2_ARG3(__lsx_vshuf_b, src4, src4, mask0, src4, src4, mask1,
                      vec2, vec3);
            DUP2_ARG3(__lsx_vshuf_b, src5, src5, mask0, src5, src5, mask1,
                      vec4, vec5);
            DUP2_ARG3(__lsx_vshuf_b, src6, src6, mask0, src6, src6, mask1,
                      vec6, vec7);

            DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, vec4, filt0,
                      vec6, filt0, dst3, dst4, dst5, dst6);
            DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst3, vec1, filt1, dst4, vec3,
                      filt1, dst5, vec5, filt1, dst6, vec7, filt1, dst3,
                      dst4, dst5, dst6);

            DUP2_ARG2(__lsx_vilvl_h, dst3, dst2, dst4, dst3, dst32_r, dst43_r);
            DUP2_ARG2(__lsx_vilvh_h, dst3, dst2, dst4, dst3, dst32_l, dst43_l);
            DUP2_ARG2(__lsx_vilvl_h, dst5, dst4, dst6, dst5, dst54_r, dst65_r);
            DUP2_ARG2(__lsx_vilvh_h, dst5, dst4, dst6, dst5, dst54_l, dst65_l);

            DUP4_ARG2(__lsx_vdp2_w_h, dst10_r, filt_h0, dst10_l, filt_h0,
                      dst21_r, filt_h0, dst21_l, filt_h0, dst0_r, dst0_l,
                      dst1_r, dst1_l);
            DUP4_ARG2(__lsx_vdp2_w_h, dst32_r, filt_h0, dst32_l, filt_h0,
                      dst43_r, filt_h0, dst43_l, filt_h0, dst2_r, dst2_l,
                      dst3_r, dst3_l);
            DUP4_ARG3(__lsx_vdp2add_w_h, dst0_r, dst32_r, filt_h1, dst0_l,
                      dst32_l, filt_h1, dst1_r, dst43_r, filt_h1, dst1_l,
                      dst43_l, filt_h1, dst0_r, dst0_l, dst1_r, dst1_l);
            DUP4_ARG3(__lsx_vdp2add_w_h, dst2_r, dst54_r, filt_h1, dst2_l,
                      dst54_l, filt_h1, dst3_r, dst65_r, filt_h1, dst3_l,
                      dst65_l, filt_h1, dst2_r, dst2_l, dst3_r, dst3_l);

            DUP4_ARG2(__lsx_vsrai_w, dst0_r, 6, dst0_l, 6, dst1_r, 6, dst1_l,
                      6, dst0_r, dst0_l, dst1_r, dst1_l);
            DUP4_ARG2(__lsx_vsrai_w, dst2_r, 6, dst2_l, 6, dst3_r, 6, dst3_l,
                      6, dst2_r, dst2_l, dst3_r, dst3_l);

            DUP4_ARG2(__lsx_vpickev_h, dst0_l, dst0_r, dst1_l, dst1_r, dst2_l,
                      dst2_r, dst3_l, dst3_r, dst0_r, dst1_r, dst2_r, dst3_r);

            __lsx_vst(dst0_r, dst_tmp, 0);
            __lsx_vstx(dst1_r, dst_tmp, dst_stride_x);
            __lsx_vstx(dst2_r, dst_tmp, dst_stride_2x);
            __lsx_vstx(dst3_r, dst_tmp, dst_stride_3x);
            /* dst_stride_2x int16_t elements == four output rows */
            dst_tmp += dst_stride_2x;

            /* carry the last three first-pass rows into the next iteration */
            dst10_r = dst54_r;
            dst10_l = dst54_l;
            dst21_r = dst65_r;
            dst21_l = dst65_l;
            dst2 = dst6;
        }
        src += 8;
        dst += 8;
    }
}

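/* 8-wide hv entry point: dispatch on block height to a specialised kernel. */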
static void hevc_hv_4t_8w_lsx(uint8_t *src,
                              int32_t src_stride,
                              int16_t *dst,
                              int32_t dst_stride,
                              const int8_t *filter_x,
                              const int8_t *filter_y,
                              int32_t height)
{
    if (2 == height) {
        hevc_hv_4t_8x2_lsx(src, src_stride, dst, dst_stride,
                           filter_x, filter_y);
    } else if (4 == height) {
        hevc_hv_4t_8multx4_lsx(src, src_stride, dst, dst_stride,
                               filter_x, filter_y, 1);
    } else if (6 == height) {
        hevc_hv_4t_8x6_lsx(src, src_stride, dst, dst_stride,
                           filter_x, filter_y);
    } else if (0 == (height & 0x03)) {
        hevc_hv_4t_8multx4mult_lsx(src, src_stride, dst, dst_stride,
                                   filter_x, filter_y, height, 1);
    }
}

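/* hevc_hv_4t_12w_lsx: the left 8 columns are filtered like the 8-wide
 * kernels above; the remaining 4 columns use the 4-width shuffle masks
 * (second half of ff_hevc_mask_arr), packing two source rows per vector. */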
static void hevc_hv_4t_12w_lsx(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter_x,
                               const int8_t *filter_y,
                               int32_t height)
{
    uint32_t loop_cnt;
    uint8_t *src_tmp;
    int16_t *dst_tmp;
    int32_t src_stride_2x = (src_stride << 1);
    /* dst strides are byte offsets for __lsx_vstx, as in
       hevc_hv_4t_8multx4mult_lsx above */
    int32_t dst_stride_x = (dst_stride << 1);
    int32_t src_stride_4x = (src_stride << 2);
    int32_t dst_stride_2x = (dst_stride << 2);
    int32_t src_stride_3x = src_stride_2x + src_stride;
    int32_t dst_stride_3x = dst_stride_2x + dst_stride_x;

    __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
    __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
    __m128i mask0, mask1, mask2, mask3;
    __m128i filt0, filt1, filt_h0, filt_h1, filter_vec, dst0;
    __m128i dst1, dst2, dst3, dst4, dst5, dst6, dst10, dst21, dst22, dst73;
    __m128i dst84, dst95, dst106, dst76_r, dst98_r, dst87_r, dst109_r;
    __m128i dst10_r, dst32_r, dst54_r, dst21_r, dst43_r, dst65_r;
    __m128i dst10_l, dst32_l, dst54_l, dst21_l, dst43_l, dst65_l;
    __m128i dst0_r, dst0_l, dst1_r, dst1_l, dst2_r, dst2_l, dst3_r, dst3_l;
    __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;

    src -= (src_stride + 1);
    DUP2_ARG2(__lsx_vldrepl_h, filter_x, 0, filter_x, 2, filt0, filt1);

    filter_vec = __lsx_vld(filter_y, 0);
    filter_vec = __lsx_vsllwil_h_b(filter_vec, 0);
    DUP2_ARG2(__lsx_vreplvei_w, filter_vec, 0, filter_vec, 1, filt_h0, filt_h1);

    mask0 = __lsx_vld(ff_hevc_mask_arr, 0);
    mask1 = __lsx_vaddi_bu(mask0, 2);

    /* left 8 columns: same scheme as the 8-wide kernels */
    src_tmp = src;
    dst_tmp = dst;

    src0 = __lsx_vld(src_tmp, 0);
    DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
              src1, src2);
    src_tmp += src_stride_3x;

    DUP2_ARG3(__lsx_vshuf_b, src0, src0, mask0, src0, src0, mask1, vec0, vec1);
    DUP2_ARG3(__lsx_vshuf_b, src1, src1, mask0, src1, src1, mask1, vec2, vec3);
    DUP2_ARG3(__lsx_vshuf_b, src2, src2, mask0, src2, src2, mask1, vec4, vec5);

    DUP2_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, dst0, dst1);
    dst2 = __lsx_vdp2_h_bu_b(vec4, filt0);
    DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst0, vec1, filt1, dst1, vec3, filt1,
              dst0, dst1);
    dst2 = __lsx_vdp2add_h_bu_b(dst2, vec5, filt1);

    DUP2_ARG2(__lsx_vilvl_h, dst1, dst0, dst2, dst1, dst10_r, dst21_r);
    DUP2_ARG2(__lsx_vilvh_h, dst1, dst0, dst2, dst1, dst10_l, dst21_l);

    for (loop_cnt = 4; loop_cnt--;) {
        src3 = __lsx_vld(src_tmp, 0);
        DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride_2x,
                  src4, src5);
        src6 = __lsx_vldx(src_tmp, src_stride_3x);
        src_tmp += src_stride_4x;

        DUP2_ARG3(__lsx_vshuf_b, src3, src3, mask0, src3, src3, mask1,
                  vec0, vec1);
        DUP2_ARG3(__lsx_vshuf_b, src4, src4, mask0, src4, src4, mask1,
                  vec2, vec3);
        DUP2_ARG3(__lsx_vshuf_b, src5, src5, mask0, src5, src5, mask1,
                  vec4, vec5);
        DUP2_ARG3(__lsx_vshuf_b, src6, src6, mask0, src6, src6, mask1,
                  vec6, vec7);

        DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, vec4, filt0,
                  vec6, filt0, dst3, dst4, dst5, dst6);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst3, vec1, filt1, dst4, vec3,
                  filt1, dst5, vec5, filt1, dst6, vec7, filt1, dst3,
                  dst4, dst5, dst6);

        DUP2_ARG2(__lsx_vilvl_h, dst3, dst2, dst4, dst3, dst32_r, dst43_r);
        DUP2_ARG2(__lsx_vilvh_h, dst3, dst2, dst4, dst3, dst32_l, dst43_l);
        DUP2_ARG2(__lsx_vilvl_h, dst5, dst4, dst6, dst5, dst54_r, dst65_r);
        DUP2_ARG2(__lsx_vilvh_h, dst5, dst4, dst6, dst5, dst54_l, dst65_l);

        DUP4_ARG2(__lsx_vdp2_w_h, dst10_r, filt_h0, dst10_l, filt_h0, dst21_r,
                  filt_h0, dst21_l, filt_h0, dst0_r, dst0_l, dst1_r, dst1_l);
        DUP4_ARG2(__lsx_vdp2_w_h, dst32_r, filt_h0, dst32_l, filt_h0, dst43_r,
                  filt_h0, dst43_l, filt_h0, dst2_r, dst2_l, dst3_r, dst3_l);
        DUP4_ARG3(__lsx_vdp2add_w_h, dst0_r, dst32_r, filt_h1, dst0_l, dst32_l,
                  filt_h1, dst1_r, dst43_r, filt_h1, dst1_l, dst43_l, filt_h1,
                  dst0_r, dst0_l, dst1_r, dst1_l);
        DUP4_ARG3(__lsx_vdp2add_w_h, dst2_r, dst54_r, filt_h1, dst2_l, dst54_l,
                  filt_h1, dst3_r, dst65_r, filt_h1, dst3_l, dst65_l, filt_h1,
                  dst2_r, dst2_l, dst3_r, dst3_l);
        DUP4_ARG2(__lsx_vsrai_w, dst0_r, 6, dst0_l, 6, dst1_r, 6, dst1_l, 6,
                  dst0_r, dst0_l, dst1_r, dst1_l);
        DUP4_ARG2(__lsx_vsrai_w, dst2_r, 6, dst2_l, 6, dst3_r, 6, dst3_l, 6,
                  dst2_r, dst2_l, dst3_r, dst3_l);
        DUP4_ARG2(__lsx_vpickev_h, dst0_l, dst0_r, dst1_l, dst1_r, dst2_l,
                  dst2_r, dst3_l, dst3_r, dst0_r, dst1_r, dst2_r, dst3_r);
        __lsx_vst(dst0_r, dst_tmp, 0);
        __lsx_vstx(dst1_r, dst_tmp, dst_stride_x);
        __lsx_vstx(dst2_r, dst_tmp, dst_stride_2x);
        __lsx_vstx(dst3_r, dst_tmp, dst_stride_3x);
        dst_tmp += dst_stride_2x;

        /* carry the last three first-pass rows into the next iteration */
        dst10_r = dst54_r;
        dst10_l = dst54_l;
        dst21_r = dst65_r;
        dst21_l = dst65_l;
        dst2 = dst6;
    }

    /* right 4 columns: 4-width masks combine two source rows per vector */
    src += 8;
    dst += 8;

    mask2 = __lsx_vld(ff_hevc_mask_arr, 16);
    mask3 = __lsx_vaddi_bu(mask2, 2);

    src0 = __lsx_vld(src, 0);
    DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src1, src2);
    src += src_stride_3x;
    DUP2_ARG3(__lsx_vshuf_b, src1, src0, mask2, src1, src0, mask3, vec0, vec1);
    DUP2_ARG3(__lsx_vshuf_b, src2, src1, mask2, src2, src1, mask3, vec2, vec3);
    DUP2_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, dst10, dst21);
    DUP2_ARG3(__lsx_vdp2add_h_bu_b, dst10, vec1, filt1, dst21, vec3, filt1,
              dst10, dst21);
    dst10_r = __lsx_vilvl_h(dst21, dst10);
    dst21_r = __lsx_vilvh_h(dst21, dst10);
    dst22 = __lsx_vreplvei_d(dst21, 1);

    for (loop_cnt = 2; loop_cnt--;) {
        src3 = __lsx_vld(src, 0);
        DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src4, src5);
        src6 = __lsx_vldx(src, src_stride_3x);
        src += src_stride_4x;
        src7 = __lsx_vld(src, 0);
        DUP2_ARG2(__lsx_vldx, src, src_stride, src, src_stride_2x, src8, src9);
        src10 = __lsx_vldx(src, src_stride_3x);
        src += src_stride_4x;
        DUP2_ARG3(__lsx_vshuf_b, src7, src3, mask2, src7, src3, mask3,
                  vec0, vec1);
        DUP2_ARG3(__lsx_vshuf_b, src8, src4, mask2, src8, src4, mask3,
                  vec2, vec3);
        DUP2_ARG3(__lsx_vshuf_b, src9, src5, mask2, src9, src5, mask3,
                  vec4, vec5);
        DUP2_ARG3(__lsx_vshuf_b, src10, src6, mask2, src10, src6, mask3,
                  vec6, vec7);

        DUP4_ARG2(__lsx_vdp2_h_bu_b, vec0, filt0, vec2, filt0, vec4, filt0,
                  vec6, filt0, dst73, dst84, dst95, dst106);
        DUP4_ARG3(__lsx_vdp2add_h_bu_b, dst73, vec1, filt1, dst84, vec3,
                  filt1, dst95, vec5, filt1, dst106, vec7, filt1, dst73,
                  dst84, dst95, dst106);

        DUP2_ARG2(__lsx_vilvl_h, dst73, dst22, dst84, dst73, dst32_r, dst43_r);
        DUP2_ARG2(__lsx_vilvh_h, dst84, dst73, dst95, dst84, dst87_r, dst98_r);
        DUP2_ARG2(__lsx_vilvl_h, dst95, dst84, dst106, dst95, dst54_r, dst65_r);
        dst109_r = __lsx_vilvh_h(dst106, dst95);
        dst22 = __lsx_vreplvei_d(dst73, 1);
        dst76_r = __lsx_vilvl_h(dst22, dst106);

        DUP4_ARG2(__lsx_vdp2_w_h, dst10_r, filt_h0, dst21_r, filt_h0, dst32_r,
                  filt_h0, dst43_r, filt_h0, tmp0, tmp1, tmp2, tmp3);
        DUP4_ARG2(__lsx_vdp2_w_h, dst54_r, filt_h0, dst65_r, filt_h0, dst76_r,
                  filt_h0, dst87_r, filt_h0, tmp4, tmp5, tmp6, tmp7);
        DUP4_ARG3(__lsx_vdp2add_w_h, tmp0, dst32_r, filt_h1, tmp1, dst43_r,
                  filt_h1, tmp2, dst54_r, filt_h1, tmp3, dst65_r, filt_h1,
                  tmp0, tmp1, tmp2, tmp3);
        DUP4_ARG3(__lsx_vdp2add_w_h, tmp4, dst76_r, filt_h1, tmp5, dst87_r,
                  filt_h1, tmp6, dst98_r, filt_h1, tmp7, dst109_r, filt_h1,
                  tmp4, tmp5, tmp6, tmp7);
        DUP4_ARG2(__lsx_vsrai_w, tmp0, 6, tmp1, 6, tmp2, 6, tmp3, 6,
                  tmp0, tmp1, tmp2, tmp3);
        DUP4_ARG2(__lsx_vsrai_w, tmp4, 6, tmp5, 6, tmp6, 6, tmp7, 6,
                  tmp4, tmp5, tmp6, tmp7);
        DUP4_ARG2(__lsx_vpickev_h, tmp1, tmp0, tmp3, tmp2, tmp5, tmp4,
                  tmp7, tmp6, tmp0, tmp1, tmp2, tmp3);

        __lsx_vstelm_d(tmp0, dst, 0, 0);
        dst += dst_stride;
        __lsx_vstelm_d(tmp0, dst, 0, 1);
        dst += dst_stride;
        __lsx_vstelm_d(tmp1, dst, 0, 0);
        dst += dst_stride;
        __lsx_vstelm_d(tmp1, dst, 0, 1);
        dst += dst_stride;
        __lsx_vstelm_d(tmp2, dst, 0, 0);
        dst += dst_stride;
        __lsx_vstelm_d(tmp2, dst, 0, 1);
        dst += dst_stride;
        __lsx_vstelm_d(tmp3, dst, 0, 0);
        dst += dst_stride;
        __lsx_vstelm_d(tmp3, dst, 0, 1);
        dst += dst_stride;

        dst10_r = dst98_r;
        dst21_r = dst109_r;
        dst22 = __lsx_vreplvei_d(dst106, 1);
    }
}

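/* 16-wide hv filtering: two 8-wide column passes. */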
static void hevc_hv_4t_16w_lsx(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter_x,
                               const int8_t *filter_y,
                               int32_t height)
{
    if (4 == height) {
        hevc_hv_4t_8multx4_lsx(src, src_stride, dst, dst_stride,
                               filter_x, filter_y, 2);
    } else {
        hevc_hv_4t_8multx4mult_lsx(src, src_stride, dst, dst_stride,
                                   filter_x, filter_y, height, 2);
    }
}

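/* 24-wide hv filtering: three 8-wide column passes. */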
static void hevc_hv_4t_24w_lsx(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter_x,
                               const int8_t *filter_y,
                               int32_t height)
{
    hevc_hv_4t_8multx4mult_lsx(src, src_stride, dst, dst_stride,
                               filter_x, filter_y, height, 3);
}

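/* 32-wide hv filtering: four 8-wide column passes. */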
static void hevc_hv_4t_32w_lsx(uint8_t *src,
                               int32_t src_stride,
                               int16_t *dst,
                               int32_t dst_stride,
                               const int8_t *filter_x,
                               const int8_t *filter_y,
                               int32_t height)
{
    hevc_hv_4t_8multx4mult_lsx(src, src_stride, dst, dst_stride,
                               filter_x, filter_y, height, 4);
}

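/* MC_COPY(WIDTH) instantiates the public pel_pixels (integer-mv) entry
 * point for one block width.  The destination is FFmpeg's 16-bit
 * intermediate buffer, always MAX_PB_SIZE samples per row; mx, my and
 * width are unused for a plain copy. */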
#define MC_COPY(WIDTH)                                                    \
void ff_hevc_put_hevc_pel_pixels##WIDTH##_8_lsx(int16_t *dst,             \
                                                uint8_t *src,             \
                                                ptrdiff_t src_stride,     \
                                                int height,               \
                                                intptr_t mx,              \
                                                intptr_t my,              \
                                                int width)                \
{                                                                         \
    hevc_copy_##WIDTH##w_lsx(src, src_stride, dst, MAX_PB_SIZE, height);  \
}

MC_COPY(4);
MC_COPY(6);
MC_COPY(8);
MC_COPY(12);
MC_COPY(16);
MC_COPY(24);
MC_COPY(32);
MC_COPY(48);
MC_COPY(64);

#undef MC_COPY

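/* MC(PEL, DIR, WIDTH, TAP, DIR1, FILT_DIR) instantiates a one-direction
 * (horizontal or vertical) put function.  FILT_DIR names the fractional
 * motion-vector component (mx or my) that selects the qpel/epel filter;
 * the filter tables start at phase 1, hence the "- 1" index. */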
#define MC(PEL, DIR, WIDTH, TAP, DIR1, FILT_DIR)                          \
void ff_hevc_put_hevc_##PEL##_##DIR##WIDTH##_8_lsx(int16_t *dst,          \
                                                   uint8_t *src,          \
                                                   ptrdiff_t src_stride,  \
                                                   int height,            \
                                                   intptr_t mx,           \
                                                   intptr_t my,           \
                                                   int width)             \
{                                                                         \
    const int8_t *filter = ff_hevc_##PEL##_filters[FILT_DIR - 1];         \
                                                                          \
    hevc_##DIR1##_##TAP##t_##WIDTH##w_lsx(src, src_stride, dst,           \
                                          MAX_PB_SIZE, filter, height);   \
}

MC(qpel, h, 4, 8, hz, mx);
MC(qpel, h, 8, 8, hz, mx);
MC(qpel, h, 12, 8, hz, mx);
MC(qpel, h, 16, 8, hz, mx);
MC(qpel, h, 24, 8, hz, mx);
MC(qpel, h, 32, 8, hz, mx);
MC(qpel, h, 48, 8, hz, mx);
MC(qpel, h, 64, 8, hz, mx);

MC(qpel, v, 4, 8, vt, my);
MC(qpel, v, 8, 8, vt, my);
MC(qpel, v, 12, 8, vt, my);
MC(qpel, v, 16, 8, vt, my);
MC(qpel, v, 24, 8, vt, my);
MC(qpel, v, 32, 8, vt, my);
MC(qpel, v, 48, 8, vt, my);
MC(qpel, v, 64, 8, vt, my);

MC(epel, h, 32, 4, hz, mx);

MC(epel, v, 16, 4, vt, my);
MC(epel, v, 24, 4, vt, my);
MC(epel, v, 32, 4, vt, my);

#undef MC

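/* MC_HV(PEL, WIDTH, TAP) instantiates a combined horizontal + vertical
 * put function, selecting the x and y filters independently from mx
 * and my. */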
#define MC_HV(PEL, WIDTH, TAP)                                          \
void ff_hevc_put_hevc_##PEL##_hv##WIDTH##_8_lsx(int16_t *dst,           \
                                                uint8_t *src,           \
                                                ptrdiff_t src_stride,   \
                                                int height,             \
                                                intptr_t mx,            \
                                                intptr_t my,            \
                                                int width)              \
{                                                                       \
    const int8_t *filter_x = ff_hevc_##PEL##_filters[mx - 1];           \
    const int8_t *filter_y = ff_hevc_##PEL##_filters[my - 1];           \
                                                                        \
    hevc_hv_##TAP##t_##WIDTH##w_lsx(src, src_stride, dst, MAX_PB_SIZE,  \
                                    filter_x, filter_y, height);        \
}

MC_HV(qpel, 4, 8);
MC_HV(qpel, 8, 8);
MC_HV(qpel, 12, 8);
MC_HV(qpel, 16, 8);
MC_HV(qpel, 24, 8);
MC_HV(qpel, 32, 8);
MC_HV(qpel, 48, 8);
MC_HV(qpel, 64, 8);

MC_HV(epel, 8, 4);
MC_HV(epel, 12, 4);
MC_HV(epel, 16, 4);
MC_HV(epel, 24, 4);
MC_HV(epel, 32, 4);

#undef MC_HV