/*
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 * Contributed by Xiwei Gu <guxiwei-hf@loongson.cn>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/bit_depth_template.c"
#include "h264dsp_lasx.h"
#include "libavutil/loongarch/loongson_intrinsics.h"

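/* Compute the boundary strengths for one filtering direction.  For each
 * edge, "out" is a per-4x4-block byte mask that is non-zero when the two
 * blocks across the edge use different reference frames or their motion
 * vectors differ by at least the allowed threshold; it is then combined
 * with the non-zero-coefficient flags so that each strength becomes 2 if
 * either block has coded coefficients, 1 if only refs/MVs differ, and 0
 * otherwise.  When the ref/MV check is skipped because of mask_mv, the
 * previous result is reused for dir == 0 (mask_dir is all ones) and
 * forced to zero for dir == 1 (mask_dir is zero). */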
#define H264_LOOP_FILTER_STRENGTH_ITERATION_LASX(edges, step, mask_mv, dir, \
                                                 d_idx, mask_dir)           \
do {                                                                        \
    int b_idx = 0; \
    int step_x4 = step << 2; \
    int d_idx_12 = d_idx + 12; \
    int d_idx_52 = d_idx + 52; \
    int d_idx_x4 = d_idx << 2; \
    int d_idx_x4_48 = d_idx_x4 + 48; \
    int dir_x32  = dir * 32; \
    uint8_t *ref_t = (uint8_t*)ref; \
    uint8_t *mv_t  = (uint8_t*)mv; \
    uint8_t *nnz_t = (uint8_t*)nnz; \
    uint8_t *bS_t  = (uint8_t*)bS; \
    mask_mv <<= 3; \
    for (; b_idx < edges; b_idx += step) { \
        out &= mask_dir; \
        if (!(mask_mv & b_idx)) { \
            if (bidir) { \
                ref2 = __lasx_xvldx(ref_t, d_idx_12); \
                ref3 = __lasx_xvldx(ref_t, d_idx_52); \
                ref0 = __lasx_xvld(ref_t, 12); \
                ref1 = __lasx_xvld(ref_t, 52); \
                ref2 = __lasx_xvilvl_w(ref3, ref2); \
                ref0 = __lasx_xvilvl_w(ref0, ref0); \
                ref1 = __lasx_xvilvl_w(ref1, ref1); \
                ref3 = __lasx_xvshuf4i_w(ref2, 0xB1); \
                ref0 = __lasx_xvsub_b(ref0, ref2); \
                ref1 = __lasx_xvsub_b(ref1, ref3); \
                ref0 = __lasx_xvor_v(ref0, ref1); \
\
                tmp2 = __lasx_xvldx(mv_t, d_idx_x4_48);   \
                tmp3 = __lasx_xvld(mv_t, 48); \
                tmp4 = __lasx_xvld(mv_t, 208); \
                tmp5 = __lasx_xvld(mv_t + d_idx_x4, 208); \
                DUP2_ARG3(__lasx_xvpermi_q, tmp2, tmp2, 0x20, tmp5, tmp5, \
                          0x20, tmp2, tmp5); \
                tmp3 =  __lasx_xvpermi_q(tmp4, tmp3, 0x20); \
                tmp2 = __lasx_xvsub_h(tmp2, tmp3); \
                tmp5 = __lasx_xvsub_h(tmp5, tmp3); \
                DUP2_ARG2(__lasx_xvsat_h, tmp2, 7, tmp5, 7, tmp2, tmp5); \
                tmp0 = __lasx_xvpickev_b(tmp5, tmp2); \
                tmp0 = __lasx_xvpermi_d(tmp0, 0xd8); \
                tmp0 = __lasx_xvadd_b(tmp0, cnst_1); \
                tmp0 = __lasx_xvssub_bu(tmp0, cnst_0); \
                tmp0 = __lasx_xvsat_h(tmp0, 7); \
                tmp0 = __lasx_xvpickev_b(tmp0, tmp0); \
                tmp0 = __lasx_xvpermi_d(tmp0, 0xd8); \
                tmp1 = __lasx_xvpickod_d(tmp0, tmp0); \
                out = __lasx_xvor_v(ref0, tmp0); \
                tmp1 = __lasx_xvshuf4i_w(tmp1, 0xB1); \
                out = __lasx_xvor_v(out, tmp1); \
                tmp0 = __lasx_xvshuf4i_w(out, 0xB1); \
                out = __lasx_xvmin_bu(out, tmp0); \
            } else { \
                ref0 = __lasx_xvldx(ref_t, d_idx_12); \
                ref3 = __lasx_xvld(ref_t, 12); \
                tmp2 = __lasx_xvldx(mv_t, d_idx_x4_48); \
                tmp3 = __lasx_xvld(mv_t, 48); \
                tmp4 = __lasx_xvsub_h(tmp3, tmp2); \
                tmp1 = __lasx_xvsat_h(tmp4, 7); \
                tmp1 = __lasx_xvpickev_b(tmp1, tmp1); \
                tmp1 = __lasx_xvadd_b(tmp1, cnst_1); \
                out = __lasx_xvssub_bu(tmp1, cnst_0); \
                out = __lasx_xvsat_h(out, 7); \
                out = __lasx_xvpickev_b(out, out); \
                ref0 = __lasx_xvsub_b(ref3, ref0); \
                out = __lasx_xvor_v(out, ref0); \
            } \
        } \
        tmp0 = __lasx_xvld(nnz_t, 12); \
        tmp1 = __lasx_xvldx(nnz_t, d_idx_12); \
        tmp0 = __lasx_xvor_v(tmp0, tmp1); \
        tmp0 = __lasx_xvmin_bu(tmp0, cnst_2); \
        out  = __lasx_xvmin_bu(out, cnst_2); \
        tmp0 = __lasx_xvslli_h(tmp0, 1); \
        tmp0 = __lasx_xvmax_bu(out, tmp0); \
        tmp0 = __lasx_vext2xv_hu_bu(tmp0); \
        __lasx_xvstelm_d(tmp0, bS_t + dir_x32, 0, 0); \
        ref_t += step; \
        mv_t  += step_x4; \
        nnz_t += step; \
        bS_t  += step; \
    } \
} while(0)

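/* Fill bS[dir][edge] with the H.264 deblocking filter boundary strengths
 * of the non-intra edges of one macroblock: dir 0 holds the strengths of
 * the vertical edges, dir 1 those of the horizontal edges. */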
void ff_h264_loop_filter_strength_lasx(int16_t bS[2][4][4], uint8_t nnz[40],
                                       int8_t ref[2][40], int16_t mv[2][40][2],
                                       int bidir, int edges, int step,
                                       int mask_mv0, int mask_mv1, int field)
{
    __m256i out;
    __m256i ref0, ref1, ref2, ref3;
    __m256i tmp0, tmp1;
    __m256i tmp2, tmp3, tmp4, tmp5;
    __m256i cnst_0, cnst_1, cnst_2;
    __m256i zero = __lasx_xvldi(0);
    __m256i one  = __lasx_xvnor_v(zero, zero);
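    /* MV difference check: cnst_1 is added to the saturated difference and
     * cnst_0 is subtracted with unsigned saturation, leaving a non-zero
     * byte exactly when |diff| >= 4 (quarter-pel units).  In field mode
     * the vertical components use the interleaved 1/2 pair instead, so
     * they trigger already at |diff_y| >= 2. */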
    int64_t cnst3 = 0x0206020602060206, cnst4 = 0x0103010301030103;
    if (field) {
        cnst_0 = __lasx_xvreplgr2vr_d(cnst3);
        cnst_1 = __lasx_xvreplgr2vr_d(cnst4);
        cnst_2 = __lasx_xvldi(0x01);
    } else {
        DUP2_ARG1(__lasx_xvldi, 0x06, 0x03, cnst_0, cnst_1);
        cnst_2 = __lasx_xvldi(0x01);
    }
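    /* b_idx walks the nnz/ref/mv caches in their scan8-style layout
     * (stride 8, current MB starting at offset 12), hence the scaling of
     * step/edges by 8 here and the fixed offsets of 12 (refs/nnz) and
     * 48 = 12 * 4 (mv) in the macro above. */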
    step  <<= 3;
    edges <<= 3;

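    /* First pass: dir 1, comparing each block with the one above it
     * (d_idx = -8); second pass: dir 0, comparing with the block to the
     * left (d_idx = -1) over the four rows of the macroblock. */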
    H264_LOOP_FILTER_STRENGTH_ITERATION_LASX(edges, step, mask_mv1,
                                             1, -8, zero);
    H264_LOOP_FILTER_STRENGTH_ITERATION_LASX(32, 8, mask_mv0, 0, -1, one);

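    /* The dir 0 pass stores one row of blocks per 8-byte group, so the
     * 4x4 block of 16-bit strengths in bS[0] ends up transposed; put it
     * back into the [edge][pos] layout expected by the caller. */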
    DUP2_ARG2(__lasx_xvld, (int8_t*)bS, 0, (int8_t*)bS, 16, tmp0, tmp1);
    DUP2_ARG2(__lasx_xvilvh_d, tmp0, tmp0, tmp1, tmp1, tmp2, tmp3);
    LASX_TRANSPOSE4x4_H(tmp0, tmp2, tmp1, tmp3, tmp2, tmp3, tmp4, tmp5);
    __lasx_xvstelm_d(tmp2, (int8_t*)bS, 0, 0);
    __lasx_xvstelm_d(tmp3, (int8_t*)bS + 8, 0, 0);
    __lasx_xvstelm_d(tmp4, (int8_t*)bS + 16, 0, 0);
    __lasx_xvstelm_d(tmp5, (int8_t*)bS + 24, 0, 0);
}