/*
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 * Contributed by Hao Chen <chenhao@loongson.cn>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/vp9dsp.h"
#include "libavutil/loongarch/loongson_intrinsics.h"
#include "vp9dsp_loongarch.h"

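/* Store eight 16-byte vectors to eight consecutive rows of _dst.
 * _stride2/_stride3/_stride4 must be 2x, 3x and 4x _stride; the base
 * pointer is advanced by _stride4 halfway through so only vst/vstx
 * forms with small offsets are needed. */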
#define LSX_ST_8(_dst0, _dst1, _dst2, _dst3, _dst4,   \
                 _dst5, _dst6, _dst7, _dst, _stride,  \
                 _stride2, _stride3, _stride4)        \
{                                                     \
    __lsx_vst(_dst0, _dst, 0);                        \
    __lsx_vstx(_dst1, _dst, _stride);                 \
    __lsx_vstx(_dst2, _dst, _stride2);                \
    __lsx_vstx(_dst3, _dst, _stride3);                \
    _dst += _stride4;                                 \
    __lsx_vst(_dst4, _dst, 0);                        \
    __lsx_vstx(_dst5, _dst, _stride);                 \
    __lsx_vstx(_dst6, _dst, _stride2);                \
    __lsx_vstx(_dst7, _dst, _stride3);                \
}

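/* Store eight vectors to eight consecutive 32-byte-wide rows, writing the
 * same vector at byte offsets 0 and 16 of each row. Only suitable when both
 * halves of a row hold identical data (32x32 hor/dc prediction). */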
#define LSX_ST_8X16(_dst0, _dst1, _dst2, _dst3, _dst4,   \
                    _dst5, _dst6, _dst7, _dst, _stride)  \
{                                                        \
    __lsx_vst(_dst0, _dst, 0);                           \
    __lsx_vst(_dst0, _dst, 16);                          \
    _dst += _stride;                                     \
    __lsx_vst(_dst1, _dst, 0);                           \
    __lsx_vst(_dst1, _dst, 16);                          \
    _dst += _stride;                                     \
    __lsx_vst(_dst2, _dst, 0);                           \
    __lsx_vst(_dst2, _dst, 16);                          \
    _dst += _stride;                                     \
    __lsx_vst(_dst3, _dst, 0);                           \
    __lsx_vst(_dst3, _dst, 16);                          \
    _dst += _stride;                                     \
    __lsx_vst(_dst4, _dst, 0);                           \
    __lsx_vst(_dst4, _dst, 16);                          \
    _dst += _stride;                                     \
    __lsx_vst(_dst5, _dst, 0);                           \
    __lsx_vst(_dst5, _dst, 16);                          \
    _dst += _stride;                                     \
    __lsx_vst(_dst6, _dst, 0);                           \
    __lsx_vst(_dst6, _dst, 16);                          \
    _dst += _stride;                                     \
    __lsx_vst(_dst7, _dst, 0);                           \
    __lsx_vst(_dst7, _dst, 16);                          \
    _dst += _stride;                                     \
}

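/* Vertical prediction 16x16: replicate the 16 top neighbours (src) into
 * all 16 rows; the left edge is unused. */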
void ff_vert_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *left,
                       const uint8_t *src)
{
    __m128i src0;
    ptrdiff_t stride2 = dst_stride << 1;
    ptrdiff_t stride3 = stride2 + dst_stride;
    ptrdiff_t stride4 = stride2 << 1;
    src0 = __lsx_vld(src, 0);
    LSX_ST_8(src0, src0, src0, src0, src0, src0, src0, src0, dst,
             dst_stride, stride2, stride3, stride4);
    dst += stride4;
    LSX_ST_8(src0, src0, src0, src0, src0, src0, src0, src0, dst,
             dst_stride, stride2, stride3, stride4);
}

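/* Vertical prediction 32x32: copy the 32 top neighbours into all 32 rows. */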
void ff_vert_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *left,
                       const uint8_t *src)
{
    uint32_t row;
    __m128i src0, src1;

    DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src1);
    for (row = 32; row--;) {
        __lsx_vst(src0, dst, 0);
        __lsx_vst(src1, dst, 16);
        dst += dst_stride;
    }
}

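/* Horizontal prediction 16x16: fill each row with its left neighbour.
 * FFmpeg passes the left edge bottom-to-top, hence the reversed byte
 * indices (row 0 uses src[15]). */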
void ff_hor_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src,
                      const uint8_t *top)
{
    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i src8, src9, src10, src11, src12, src13, src14, src15;
    ptrdiff_t stride2 = dst_stride << 1;
    ptrdiff_t stride3 = stride2 + dst_stride;
    ptrdiff_t stride4 = stride2 << 1;

    src15 = __lsx_vldrepl_b(src, 0);
    src14 = __lsx_vldrepl_b(src, 1);
    src13 = __lsx_vldrepl_b(src, 2);
    src12 = __lsx_vldrepl_b(src, 3);
    src11 = __lsx_vldrepl_b(src, 4);
    src10 = __lsx_vldrepl_b(src, 5);
    src9  = __lsx_vldrepl_b(src, 6);
    src8  = __lsx_vldrepl_b(src, 7);
    src7  = __lsx_vldrepl_b(src, 8);
    src6  = __lsx_vldrepl_b(src, 9);
    src5  = __lsx_vldrepl_b(src, 10);
    src4  = __lsx_vldrepl_b(src, 11);
    src3  = __lsx_vldrepl_b(src, 12);
    src2  = __lsx_vldrepl_b(src, 13);
    src1  = __lsx_vldrepl_b(src, 14);
    src0  = __lsx_vldrepl_b(src, 15);
    LSX_ST_8(src0, src1, src2, src3, src4, src5, src6, src7, dst,
             dst_stride, stride2, stride3, stride4);
    dst += stride4;
    LSX_ST_8(src8, src9, src10, src11, src12, src13, src14, src15, dst,
             dst_stride, stride2, stride3, stride4);
}

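/* Horizontal prediction 32x32: as the 16x16 case, with the left edge again
 * read in reverse order (row 0 uses src[31]). */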
void ff_hor_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src,
                      const uint8_t *top)
{
    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i src8, src9, src10, src11, src12, src13, src14, src15;
    __m128i src16, src17, src18, src19, src20, src21, src22, src23;
    __m128i src24, src25, src26, src27, src28, src29, src30, src31;

    src31 = __lsx_vldrepl_b(src, 0);
    src30 = __lsx_vldrepl_b(src, 1);
    src29 = __lsx_vldrepl_b(src, 2);
    src28 = __lsx_vldrepl_b(src, 3);
    src27 = __lsx_vldrepl_b(src, 4);
    src26 = __lsx_vldrepl_b(src, 5);
    src25 = __lsx_vldrepl_b(src, 6);
    src24 = __lsx_vldrepl_b(src, 7);
    src23 = __lsx_vldrepl_b(src, 8);
    src22 = __lsx_vldrepl_b(src, 9);
    src21 = __lsx_vldrepl_b(src, 10);
    src20 = __lsx_vldrepl_b(src, 11);
    src19 = __lsx_vldrepl_b(src, 12);
    src18 = __lsx_vldrepl_b(src, 13);
    src17 = __lsx_vldrepl_b(src, 14);
    src16 = __lsx_vldrepl_b(src, 15);
    src15 = __lsx_vldrepl_b(src, 16);
    src14 = __lsx_vldrepl_b(src, 17);
    src13 = __lsx_vldrepl_b(src, 18);
    src12 = __lsx_vldrepl_b(src, 19);
    src11 = __lsx_vldrepl_b(src, 20);
    src10 = __lsx_vldrepl_b(src, 21);
    src9  = __lsx_vldrepl_b(src, 22);
    src8  = __lsx_vldrepl_b(src, 23);
    src7  = __lsx_vldrepl_b(src, 24);
    src6  = __lsx_vldrepl_b(src, 25);
    src5  = __lsx_vldrepl_b(src, 26);
    src4  = __lsx_vldrepl_b(src, 27);
    src3  = __lsx_vldrepl_b(src, 28);
    src2  = __lsx_vldrepl_b(src, 29);
    src1  = __lsx_vldrepl_b(src, 30);
    src0  = __lsx_vldrepl_b(src, 31);
    LSX_ST_8X16(src0, src1, src2, src3, src4, src5, src6, src7,
                dst, dst_stride);
    LSX_ST_8X16(src8, src9, src10, src11, src12, src13, src14, src15,
                dst, dst_stride);
    LSX_ST_8X16(src16, src17, src18, src19, src20, src21, src22, src23,
                dst, dst_stride);
    LSX_ST_8X16(src24, src25, src26, src27, src28, src29, src30, src31,
                dst, dst_stride);
}

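/* DC prediction 4x4: fill the block with (sum of 4 top + 4 left
 * neighbours + 4) >> 3, computed with a horizontal widening-add chain. */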
void ff_dc_4x4_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src_left,
                   const uint8_t *src_top)
{
    __m128i tmp0, tmp1, dst0;

    tmp0 = __lsx_vldrepl_w(src_top, 0);
    tmp1 = __lsx_vldrepl_w(src_left, 0);
    dst0 = __lsx_vilvl_w(tmp1, tmp0);
    dst0 = __lsx_vhaddw_hu_bu(dst0, dst0);
    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
    dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
    dst0 = __lsx_vsrari_w(dst0, 3);
    dst0 = __lsx_vshuf4i_b(dst0, 0);
    __lsx_vstelm_w(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_w(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_w(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_w(dst0, dst, 0, 0);
}

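/* DC prediction 4x4 from a single edge (top-only or left-only):
 * (sum of 4 neighbours + 2) >> 2. */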
#define INTRA_DC_TL_4X4(dir)                                            \
void ff_dc_##dir##_4x4_lsx(uint8_t *dst, ptrdiff_t dst_stride,          \
                          const uint8_t *left,                          \
                          const uint8_t *top)                           \
{                                                                       \
    __m128i tmp0, dst0;                                                 \
                                                                        \
    tmp0 = __lsx_vldrepl_w(dir, 0);                                     \
    dst0 = __lsx_vhaddw_hu_bu(tmp0, tmp0);                              \
    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);                              \
    dst0 = __lsx_vsrari_w(dst0, 2);                                     \
    dst0 = __lsx_vshuf4i_b(dst0, 0);                                    \
    __lsx_vstelm_w(dst0, dst, 0, 0);                                    \
    dst += dst_stride;                                                  \
    __lsx_vstelm_w(dst0, dst, 0, 0);                                    \
    dst += dst_stride;                                                  \
    __lsx_vstelm_w(dst0, dst, 0, 0);                                    \
    dst += dst_stride;                                                  \
    __lsx_vstelm_w(dst0, dst, 0, 0);                                    \
}
INTRA_DC_TL_4X4(top);
INTRA_DC_TL_4X4(left);

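/* DC prediction 8x8: (sum of 8 top + 8 left neighbours + 8) >> 4. */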
void ff_dc_8x8_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src_left,
                   const uint8_t *src_top)
{
    __m128i tmp0, tmp1, dst0;

    tmp0 = __lsx_vldrepl_d(src_top, 0);
    tmp1 = __lsx_vldrepl_d(src_left, 0);
    dst0 = __lsx_vilvl_d(tmp1, tmp0);
    dst0 = __lsx_vhaddw_hu_bu(dst0, dst0);
    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
    dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
    dst0 = __lsx_vhaddw_qu_du(dst0, dst0);
    dst0 = __lsx_vsrari_w(dst0, 4);
    dst0 = __lsx_vreplvei_b(dst0, 0);
    __lsx_vstelm_d(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(dst0, dst, 0, 0);
}

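/* DC prediction 8x8 from a single edge: (sum of 8 neighbours + 4) >> 3. */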
#define INTRA_DC_TL_8X8(dir)                                                  \
void ff_dc_##dir##_8x8_lsx(uint8_t *dst, ptrdiff_t dst_stride,                \
                           const uint8_t *left,                               \
                           const uint8_t *top)                                \
{                                                                             \
    __m128i tmp0, dst0;                                                       \
                                                                              \
    tmp0 = __lsx_vldrepl_d(dir, 0);                                           \
    dst0 = __lsx_vhaddw_hu_bu(tmp0, tmp0);                                    \
    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);                                    \
    dst0 = __lsx_vhaddw_du_wu(dst0, dst0);                                    \
    dst0 = __lsx_vsrari_w(dst0, 3);                                           \
    dst0 = __lsx_vreplvei_b(dst0, 0);                                         \
    __lsx_vstelm_d(dst0, dst, 0, 0);                                          \
    dst += dst_stride;                                                        \
    __lsx_vstelm_d(dst0, dst, 0, 0);                                          \
    dst += dst_stride;                                                        \
    __lsx_vstelm_d(dst0, dst, 0, 0);                                          \
    dst += dst_stride;                                                        \
    __lsx_vstelm_d(dst0, dst, 0, 0);                                          \
    dst += dst_stride;                                                        \
    __lsx_vstelm_d(dst0, dst, 0, 0);                                          \
    dst += dst_stride;                                                        \
    __lsx_vstelm_d(dst0, dst, 0, 0);                                          \
    dst += dst_stride;                                                        \
    __lsx_vstelm_d(dst0, dst, 0, 0);                                          \
    dst += dst_stride;                                                        \
    __lsx_vstelm_d(dst0, dst, 0, 0);                                          \
}

INTRA_DC_TL_8X8(top);
INTRA_DC_TL_8X8(left);

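/* DC prediction 16x16: (sum of 16 top + 16 left neighbours + 16) >> 5. */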
void ff_dc_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride,
                     const uint8_t *src_left, const uint8_t *src_top)
{
    __m128i tmp0, tmp1, dst0;
    ptrdiff_t stride2 = dst_stride << 1;
    ptrdiff_t stride3 = stride2 + dst_stride;
    ptrdiff_t stride4 = stride2 << 1;

    tmp0 = __lsx_vld(src_top, 0);
    tmp1 = __lsx_vld(src_left, 0);
    DUP2_ARG2(__lsx_vhaddw_hu_bu, tmp0, tmp0, tmp1, tmp1, tmp0, tmp1);
    dst0 = __lsx_vadd_h(tmp0, tmp1);
    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
    dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
    dst0 = __lsx_vhaddw_qu_du(dst0, dst0);
    dst0 = __lsx_vsrari_w(dst0, 5);
    dst0 = __lsx_vreplvei_b(dst0, 0);
    LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst,
             dst_stride, stride2, stride3, stride4);
    dst += stride4;
    LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst,
             dst_stride, stride2, stride3, stride4);
}

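/* DC prediction 16x16 from a single edge: (sum of 16 neighbours + 8) >> 4. */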
#define INTRA_DC_TL_16X16(dir)                                                \
void ff_dc_##dir##_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride,              \
                             const uint8_t *left,                             \
                             const uint8_t *top)                              \
{                                                                             \
    __m128i tmp0, dst0;                                                       \
    ptrdiff_t stride2 = dst_stride << 1;                                      \
    ptrdiff_t stride3 = stride2 + dst_stride;                                 \
    ptrdiff_t stride4 = stride2 << 1;                                         \
                                                                              \
    tmp0 = __lsx_vld(dir, 0);                                                 \
    dst0 = __lsx_vhaddw_hu_bu(tmp0, tmp0);                                    \
    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);                                    \
    dst0 = __lsx_vhaddw_du_wu(dst0, dst0);                                    \
    dst0 = __lsx_vhaddw_qu_du(dst0, dst0);                                    \
    dst0 = __lsx_vsrari_w(dst0, 4);                                           \
    dst0 = __lsx_vreplvei_b(dst0, 0);                                         \
    LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst,             \
             dst_stride, stride2, stride3, stride4);                          \
    dst += stride4;                                                           \
    LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst,             \
             dst_stride, stride2, stride3, stride4);                          \
}

INTRA_DC_TL_16X16(top);
INTRA_DC_TL_16X16(left);

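/* DC prediction 32x32: (sum of 32 top + 32 left neighbours + 32) >> 6. */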
void ff_dc_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride,
                     const uint8_t *src_left, const uint8_t *src_top)
{
    __m128i tmp0, tmp1, tmp2, tmp3, dst0;

    DUP2_ARG2(__lsx_vld, src_top, 0, src_top, 16, tmp0, tmp1);
    DUP2_ARG2(__lsx_vld, src_left, 0, src_left, 16, tmp2, tmp3);
    DUP4_ARG2(__lsx_vhaddw_hu_bu, tmp0, tmp0, tmp1, tmp1, tmp2, tmp2,
              tmp3, tmp3, tmp0, tmp1, tmp2, tmp3);
    DUP2_ARG2(__lsx_vadd_h, tmp0, tmp1, tmp2, tmp3, tmp0, tmp1);
    dst0 = __lsx_vadd_h(tmp0, tmp1);
    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
    dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
    dst0 = __lsx_vhaddw_qu_du(dst0, dst0);
    dst0 = __lsx_vsrari_w(dst0, 6);
    dst0 = __lsx_vreplvei_b(dst0, 0);
    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
                dst, dst_stride);
    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
                dst, dst_stride);
    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
                dst, dst_stride);
    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
                dst, dst_stride);
}

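/* DC prediction 32x32 from a single edge: (sum of 32 neighbours + 16) >> 5. */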
#define INTRA_DC_TL_32X32(dir)                                               \
void ff_dc_##dir##_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride,             \
                             const uint8_t *left,                            \
                             const uint8_t *top)                             \
{                                                                            \
    __m128i tmp0, tmp1, dst0;                                                \
                                                                             \
    DUP2_ARG2(__lsx_vld, dir, 0, dir, 16, tmp0, tmp1);                       \
    DUP2_ARG2(__lsx_vhaddw_hu_bu, tmp0, tmp0, tmp1, tmp1, tmp0, tmp1);       \
    dst0 = __lsx_vadd_h(tmp0, tmp1);                                         \
    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);                                   \
    dst0 = __lsx_vhaddw_du_wu(dst0, dst0);                                   \
    dst0 = __lsx_vhaddw_qu_du(dst0, dst0);                                   \
    dst0 = __lsx_vsrari_w(dst0, 5);                                          \
    dst0 = __lsx_vreplvei_b(dst0, 0);                                        \
    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,              \
                dst, dst_stride);                                            \
    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,              \
                dst, dst_stride);                                            \
    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,              \
                dst, dst_stride);                                            \
    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,              \
                dst, dst_stride);                                            \
}

INTRA_DC_TL_32X32(top);
INTRA_DC_TL_32X32(left);

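/* Constant-fill "DC" prediction 16x16 with 127, 128 or 129, used when the
 * corresponding edge pixels are not available. */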
#define INTRA_PREDICT_VALDC_16X16_LSX(val)                             \
void ff_dc_##val##_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride,       \
                             const uint8_t *left, const uint8_t *top)  \
{                                                                      \
    __m128i out = __lsx_vldi(val);                                     \
    ptrdiff_t stride2 = dst_stride << 1;                               \
    ptrdiff_t stride3 = stride2 + dst_stride;                          \
    ptrdiff_t stride4 = stride2 << 1;                                  \
                                                                       \
    LSX_ST_8(out, out, out, out, out, out, out, out, dst,              \
             dst_stride, stride2, stride3, stride4);                   \
    dst += stride4;                                                    \
    LSX_ST_8(out, out, out, out, out, out, out, out, dst,              \
             dst_stride, stride2, stride3, stride4);                   \
}

INTRA_PREDICT_VALDC_16X16_LSX(127);
INTRA_PREDICT_VALDC_16X16_LSX(128);
INTRA_PREDICT_VALDC_16X16_LSX(129);

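/* Constant-fill "DC" prediction 32x32 with 127, 128 or 129. */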
#define INTRA_PREDICT_VALDC_32X32_LSX(val)                               \
void ff_dc_##val##_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride,         \
                             const uint8_t *left, const uint8_t *top)    \
{                                                                        \
    __m128i out = __lsx_vldi(val);                                       \
                                                                         \
    LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride);\
    LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride);\
    LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride);\
    LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride);\
}

INTRA_PREDICT_VALDC_32X32_LSX(127);
INTRA_PREDICT_VALDC_32X32_LSX(128);
INTRA_PREDICT_VALDC_32X32_LSX(129);

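/* TM (TrueMotion) prediction: pred(x, y) = clip(left[y] + top[x] - top_left).
 * left[y] + top[x] is formed in 16-bit lanes (vilvl_b + vhaddw_hu_bu),
 * vssub_hu subtracts top_left with saturation at 0, and vsat_hu/vpickev_b
 * clamp to 255 and pack back to bytes. */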
void ff_tm_4x4_lsx(uint8_t *dst, ptrdiff_t dst_stride,
                   const uint8_t *src_left, const uint8_t *src_top_ptr)
{
    uint8_t top_left = src_top_ptr[-1];
    __m128i tmp0, tmp1, tmp2, tmp3, reg0, reg1;
    __m128i src0, src1, src2, src3;
    __m128i dst0, dst1, dst2, dst3;

    reg0 = __lsx_vreplgr2vr_h(top_left);
    reg1 = __lsx_vld(src_top_ptr, 0);
    DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2, src_left,
              3, tmp3, tmp2, tmp1, tmp0);
    DUP4_ARG2(__lsx_vilvl_b, tmp0, reg1, tmp1, reg1, tmp2, reg1, tmp3, reg1,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vhaddw_hu_bu, src0, src0, src1, src1, src2, src2, src3,
              src3, dst0, dst1, dst2, dst3);
    DUP4_ARG2(__lsx_vssub_hu, dst0, reg0, dst1, reg0, dst2, reg0, dst3, reg0,
              dst0, dst1, dst2, dst3);
    DUP4_ARG2(__lsx_vsat_hu, dst0, 7, dst1, 7, dst2, 7, dst3, 7,
              dst0, dst1, dst2, dst3);
    DUP2_ARG2(__lsx_vpickev_b, dst1, dst0, dst3, dst2, dst0, dst1);
    __lsx_vstelm_w(dst0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_w(dst0, dst, 0, 2);
    dst += dst_stride;
    __lsx_vstelm_w(dst1, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_w(dst1, dst, 0, 2);
}

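/* TM prediction 8x8: same formula as the 4x4 case, two rows per vector. */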
void ff_tm_8x8_lsx(uint8_t *dst, ptrdiff_t dst_stride,
                   const uint8_t *src_left, const uint8_t *src_top_ptr)
{
    uint8_t top_left = src_top_ptr[-1];
    __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i reg0, reg1;

    reg0 = __lsx_vreplgr2vr_h(top_left);
    reg1 = __lsx_vld(src_top_ptr, 0);
    DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2, src_left,
              3, tmp7, tmp6, tmp5, tmp4);
    DUP4_ARG2(__lsx_vldrepl_b, src_left, 4, src_left, 5, src_left, 6, src_left,
              7, tmp3, tmp2, tmp1, tmp0);
    DUP4_ARG2(__lsx_vilvl_b, tmp0, reg1, tmp1, reg1, tmp2, reg1, tmp3, reg1,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vilvl_b, tmp4, reg1, tmp5, reg1, tmp6, reg1, tmp7, reg1,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vhaddw_hu_bu, src0, src0, src1, src1, src2, src2, src3,
              src3, src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vhaddw_hu_bu, src4, src4, src5, src5, src6, src6, src7,
              src7, src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3, reg0,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7, reg0,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vpickev_b, src1, src0, src3, src2, src5, src4, src7, src6,
              src0, src1, src2, src3);
    __lsx_vstelm_d(src0, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(src0, dst, 0, 1);
    dst += dst_stride;
    __lsx_vstelm_d(src1, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(src1, dst, 0, 1);
    dst += dst_stride;
    __lsx_vstelm_d(src2, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(src2, dst, 0, 1);
    dst += dst_stride;
    __lsx_vstelm_d(src3, dst, 0, 0);
    dst += dst_stride;
    __lsx_vstelm_d(src3, dst, 0, 1);
}

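/* TM prediction 16x16: even/odd top bytes are widened and added separately
 * (vaddwev/vaddwod) and re-interleaved with vpackev_b after clamping. */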
void ff_tm_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride,
                     const uint8_t *src_left, const uint8_t *src_top_ptr)
{
    uint8_t top_left = src_top_ptr[-1];
    __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
    __m128i tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i reg0, reg1;
    ptrdiff_t stride2 = dst_stride << 1;
    ptrdiff_t stride3 = stride2 + dst_stride;
    ptrdiff_t stride4 = stride2 << 1;

    reg0 = __lsx_vreplgr2vr_h(top_left);
    reg1 = __lsx_vld(src_top_ptr, 0);
    DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2, src_left,
              3, tmp15, tmp14, tmp13, tmp12);
    DUP4_ARG2(__lsx_vldrepl_b, src_left, 4, src_left, 5, src_left, 6, src_left,
              7, tmp11, tmp10, tmp9, tmp8);
    DUP4_ARG2(__lsx_vldrepl_b, src_left, 8, src_left, 9, src_left, 10,
              src_left, 11, tmp7, tmp6, tmp5, tmp4);
    DUP4_ARG2(__lsx_vldrepl_b, src_left, 12, src_left, 13, src_left, 14,
              src_left, 15, tmp3, tmp2, tmp1, tmp0);
    DUP4_ARG2(__lsx_vaddwev_h_bu, tmp0, reg1, tmp1, reg1, tmp2, reg1, tmp3,
              reg1, src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vaddwod_h_bu, tmp0, reg1, tmp1, reg1, tmp2, reg1, tmp3,
              reg1, src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3, reg0,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7, reg0,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2, src7, src3,
              tmp0, tmp1, tmp2, tmp3);
    DUP4_ARG2(__lsx_vaddwev_h_bu, tmp4, reg1, tmp5, reg1, tmp6, reg1, tmp7,
              reg1, src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vaddwod_h_bu, tmp4, reg1, tmp5, reg1, tmp6, reg1, tmp7,
              reg1, src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3, reg0,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7, reg0,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2, src7, src3,
              tmp4, tmp5, tmp6, tmp7);
    DUP4_ARG2(__lsx_vaddwev_h_bu, tmp8, reg1, tmp9, reg1, tmp10, reg1, tmp11,
              reg1, src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vaddwod_h_bu, tmp8, reg1, tmp9, reg1, tmp10, reg1, tmp11,
              reg1, src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3, reg0,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7, reg0,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2, src7, src3,
              tmp8, tmp9, tmp10, tmp11);
    DUP4_ARG2(__lsx_vaddwev_h_bu, tmp12, reg1, tmp13, reg1, tmp14, reg1,
              tmp15, reg1, src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vaddwod_h_bu, tmp12, reg1, tmp13, reg1, tmp14, reg1,
              tmp15, reg1, src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3, reg0,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7, reg0,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
              src0, src1, src2, src3);
    DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
              src4, src5, src6, src7);
    DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2, src7, src3,
              tmp12, tmp13, tmp14, tmp15);
    LSX_ST_8(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, dst,
             dst_stride, stride2, stride3, stride4);
    dst += stride4;
    LSX_ST_8(tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, dst,
             dst_stride, stride2, stride3, stride4);
}

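/* TM prediction 32x32: processes four rows per iteration, walking the
 * reversed left edge from src_left[31] down to src_left[0] (hence the
 * += 28 before the loop and the -= 4 inside it). */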
void ff_tm_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride,
                     const uint8_t *src_left, const uint8_t *src_top_ptr)
{
    uint8_t top_left = src_top_ptr[-1];
    uint32_t loop_cnt;
    __m128i tmp0, tmp1, tmp2, tmp3, reg0, reg1, reg2;
    __m128i src0, src1, src2, src3, src4, src5, src6, src7;
    __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;

    reg0 = __lsx_vreplgr2vr_h(top_left);
    DUP2_ARG2(__lsx_vld, src_top_ptr, 0, src_top_ptr, 16, reg1, reg2);

    src_left += 28;
    for (loop_cnt = 8; loop_cnt--;) {
        DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2,
                  src_left, 3, tmp3, tmp2, tmp1, tmp0);
        src_left -= 4;
        DUP4_ARG2(__lsx_vaddwev_h_bu, tmp0, reg1, tmp1, reg1, tmp2, reg1,
                  tmp3, reg1, src0, src1, src2, src3);
        DUP4_ARG2(__lsx_vaddwod_h_bu, tmp0, reg1, tmp1, reg1, tmp2, reg1,
                  tmp3, reg1, src4, src5, src6, src7);
        DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3,
                  reg0, src0, src1, src2, src3);
        DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7,
                  reg0, src4, src5, src6, src7);
        DUP4_ARG2(__lsx_vaddwev_h_bu, tmp0, reg2, tmp1, reg2, tmp2, reg2,
                  tmp3, reg2, dst0, dst1, dst2, dst3);
        DUP4_ARG2(__lsx_vaddwod_h_bu, tmp0, reg2, tmp1, reg2, tmp2, reg2,
                  tmp3, reg2, dst4, dst5, dst6, dst7);
        DUP4_ARG2(__lsx_vssub_hu, dst0, reg0, dst1, reg0, dst2, reg0, dst3,
                  reg0, dst0, dst1, dst2, dst3);
        DUP4_ARG2(__lsx_vssub_hu, dst4, reg0, dst5, reg0, dst6, reg0, dst7,
                  reg0, dst4, dst5, dst6, dst7);
        DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
                  src0, src1, src2, src3);
        DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
                  src4, src5, src6, src7);
        DUP4_ARG2(__lsx_vsat_hu, dst0, 7, dst1, 7, dst2, 7, dst3, 7,
                  dst0, dst1, dst2, dst3);
        DUP4_ARG2(__lsx_vsat_hu, dst4, 7, dst5, 7, dst6, 7, dst7, 7,
                  dst4, dst5, dst6, dst7);
        DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2, src7,
                  src3, src0, src1, src2, src3);
        DUP4_ARG2(__lsx_vpackev_b, dst4, dst0, dst5, dst1, dst6, dst2, dst7,
                  dst3, dst0, dst1, dst2, dst3);
        __lsx_vst(src0, dst, 0);
        __lsx_vst(dst0, dst, 16);
        dst += dst_stride;
        __lsx_vst(src1, dst, 0);
        __lsx_vst(dst1, dst, 16);
        dst += dst_stride;
        __lsx_vst(src2, dst, 0);
        __lsx_vst(dst2, dst, 16);
        dst += dst_stride;
        __lsx_vst(src3, dst, 0);
        __lsx_vst(dst3, dst, 16);
        dst += dst_stride;
    }
}