/*
 * MIPS SIMD optimized H.264 deblocking code
 *
 * Copyright (c) 2020 Loongson Technology Corporation Limited
 *                    Gu Xiwei <guxiwei-hf@loongson.cn>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/bit_depth_template.c"
#include "h264dsp_mips.h"
#include "libavutil/mips/generic_macros_msa.h"

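/*
 * Compute the boundary strengths for all edges of one filtering direction.
 * For each edge the reference indices and motion vectors on both sides are
 * compared (both lists when bidir is set), the result is OR'ed with the
 * non-zero coefficient flags, and the four strengths of the edge are widened
 * to 16 bits and stored into bS. Uses the vector temporaries and parameters
 * declared in ff_h264_loop_filter_strength_msa().
 */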
#define h264_loop_filter_strength_iteration_msa(edges, step, mask_mv, dir, \
                                                d_idx, mask_dir)           \
do {                                                                       \
    int b_idx = 0; \
    int step_x4 = step << 2; \
    int d_idx_12 = d_idx + 12; \
    int d_idx_52 = d_idx + 52; \
    int d_idx_x4 = d_idx << 2; \
    int d_idx_x4_48 = d_idx_x4 + 48; \
    int dir_x32  = dir * 32; \
    uint8_t *ref_t = (uint8_t*)ref; \
    uint8_t *mv_t  = (uint8_t*)mv; \
    uint8_t *nnz_t = (uint8_t*)nnz; \
    uint8_t *bS_t  = (uint8_t*)bS; \
    mask_mv <<= 3; \
    for (; b_idx < edges; b_idx += step) { \
        out &= mask_dir; \
        if (!(mask_mv & b_idx)) { \
            if (bidir) { \
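                /* Compare L0/L1 reference indices across the edge for both list pairings. */ \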
                ref_2 = LD_SB(ref_t + d_idx_12); \
                ref_3 = LD_SB(ref_t + d_idx_52); \
                ref_0 = LD_SB(ref_t + 12); \
                ref_1 = LD_SB(ref_t + 52); \
                ref_2 = (v16i8)__msa_ilvr_w((v4i32)ref_3, (v4i32)ref_2); \
                ref_0 = (v16i8)__msa_ilvr_w((v4i32)ref_0, (v4i32)ref_0); \
                ref_1 = (v16i8)__msa_ilvr_w((v4i32)ref_1, (v4i32)ref_1); \
                ref_3 = (v16i8)__msa_shf_h((v8i16)ref_2, 0x4e); \
                ref_0 -= ref_2; \
                ref_1 -= ref_3; \
                ref_0 = (v16i8)__msa_or_v((v16u8)ref_0, (v16u8)ref_1); \
\
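                /* Motion vector differences against the neighbour's L0 vectors. */ \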
                tmp_2 = LD_SH(mv_t + d_idx_x4_48); \
                tmp_3 = LD_SH(mv_t + 48); \
                tmp_4 = LD_SH(mv_t + 208); \
                tmp_5 = tmp_2 - tmp_3; \
                tmp_6 = tmp_2 - tmp_4; \
                SAT_SH2_SH(tmp_5, tmp_6, 7); \
                tmp_0 = __msa_pckev_b((v16i8)tmp_6, (v16i8)tmp_5); \
                tmp_0 += cnst_1; \
                tmp_0 = (v16i8)__msa_subs_u_b((v16u8)tmp_0, (v16u8)cnst_0); \
                tmp_0 = (v16i8)__msa_sat_s_h((v8i16)tmp_0, 7); \
                tmp_0 = __msa_pckev_b(tmp_0, tmp_0); \
                out   = (v16i8)__msa_or_v((v16u8)ref_0, (v16u8)tmp_0); \
\
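                /* Same motion vector check against the neighbour's L1 vectors. */ \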
                tmp_2 = LD_SH(mv_t + 208 + d_idx_x4); \
                tmp_5 = tmp_2 - tmp_3; \
                tmp_6 = tmp_2 - tmp_4; \
                SAT_SH2_SH(tmp_5, tmp_6, 7); \
                tmp_1 = __msa_pckev_b((v16i8)tmp_6, (v16i8)tmp_5); \
                tmp_1 += cnst_1; \
                tmp_1 = (v16i8)__msa_subs_u_b((v16u8)tmp_1, (v16u8)cnst_0); \
                tmp_1 = (v16i8)__msa_sat_s_h((v8i16)tmp_1, 7); \
                tmp_1 = __msa_pckev_b(tmp_1, tmp_1); \
\
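                /* Merge the checks and take the minimum over the straight and crossed list pairings. */ \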
                tmp_1 = (v16i8)__msa_shf_h((v8i16)tmp_1, 0x4e); \
                out   = (v16i8)__msa_or_v((v16u8)out, (v16u8)tmp_1); \
                tmp_0 = (v16i8)__msa_shf_h((v8i16)out, 0x4e); \
                out   = (v16i8)__msa_min_u_b((v16u8)out, (v16u8)tmp_0); \
            } else { \
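                /* Single reference list: compare L0 reference indices and motion vectors across the edge. */ \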
                ref_0 = LD_SB(ref_t + d_idx_12); \
                ref_3 = LD_SB(ref_t + 12); \
                tmp_2 = LD_SH(mv_t + d_idx_x4_48); \
                tmp_3 = LD_SH(mv_t + 48); \
                tmp_4 = tmp_3 - tmp_2; \
                tmp_1 = (v16i8)__msa_sat_s_h(tmp_4, 7); \
                tmp_1 = __msa_pckev_b(tmp_1, tmp_1); \
                tmp_1 += cnst_1; \
                out   = (v16i8)__msa_subs_u_b((v16u8)tmp_1, (v16u8)cnst_0); \
                out   = (v16i8)__msa_sat_s_h((v8i16)out, 7); \
                out   = __msa_pckev_b(out, out); \
                ref_0 = ref_3 - ref_0; \
                out   = (v16i8)__msa_or_v((v16u8)out, (v16u8)ref_0); \
            } \
        } \
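        /* Non-zero coefficients on either side force bS = 2, otherwise the mv/ref result clamped to 1 is used. */ \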
        tmp_0 = LD_SB(nnz_t + 12); \
        tmp_1 = LD_SB(nnz_t + d_idx_12); \
        tmp_0 = (v16i8)__msa_or_v((v16u8)tmp_0, (v16u8)tmp_1); \
        tmp_0 = (v16i8)__msa_min_u_b((v16u8)tmp_0, (v16u8)cnst_2); \
        out   = (v16i8)__msa_min_u_b((v16u8)out, (v16u8)cnst_2); \
        tmp_0 = (v16i8)((v8i16)tmp_0 << 1); \
        tmp_0 = (v16i8)__msa_max_u_b((v16u8)out, (v16u8)tmp_0); \
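        /* Widen the four strengths of this edge to 16 bits and store one bS row. */ \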
        tmp_0 = __msa_ilvr_b(zero, tmp_0); \
        ST_D1(tmp_0, 0, bS_t + dir_x32); \
        ref_t += step; \
        mv_t  += step_x4; \
        nnz_t += step; \
        bS_t  += step; \
    } \
} while(0)

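/*
 * Compute the deblocking boundary strengths of one macroblock.
 * The first pass covers the dir = 1 edges ('edges'/'step', mask_mv1,
 * neighbour offset -8), the second always covers the four dir = 0 edges
 * (mask_mv0, offset -1). The dir = 0 strengths are produced transposed
 * and are rearranged in bS[0] by the final 4x4 transpose.
 */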
void ff_h264_loop_filter_strength_msa(int16_t bS[2][4][4], uint8_t nnz[40],
                                      int8_t ref[2][40], int16_t mv[2][40][2],
                                      int bidir, int edges, int step,
                                      int mask_mv0, int mask_mv1, int field)
{
    v16i8 out;
    v16i8 ref_0, ref_1, ref_2, ref_3;
    v16i8 tmp_0, tmp_1;
    v8i16 tmp_2, tmp_3, tmp_4, tmp_5, tmp_6;
    v16i8 cnst_0, cnst_1, cnst_2;
    v16i8 zero = { 0 };
    v16i8 one  = __msa_fill_b(0xff);
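    /* Field macroblocks use a motion vector threshold of 2 instead of 4
     * quarter-pel units for the vertical component. */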
    if (field) {
        cnst_0 = (v16i8)__msa_fill_h(0x206);
        cnst_1 = (v16i8)__msa_fill_h(0x103);
        cnst_2 = (v16i8)__msa_fill_h(0x101);
    } else {
        cnst_0 = __msa_fill_b(0x6);
        cnst_1 = __msa_fill_b(0x3);
        cnst_2 = __msa_fill_b(0x1);
    }
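    /* Scale the edge counters to byte units: one edge row is 8 bytes in
     * ref/nnz/bS and 32 bytes in mv. */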
    step  <<= 3;
    edges <<= 3;

    h264_loop_filter_strength_iteration_msa(edges, step, mask_mv1, 1, -8, zero);
    h264_loop_filter_strength_iteration_msa(32, 8, mask_mv0, 0, -1, one);

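    /* Transpose the 4x4 block of dir = 0 strengths in bS[0] into the
     * layout expected by the filter. */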
    LD_SB2((int8_t*)bS, 16, tmp_0, tmp_1);
    tmp_2 = (v8i16)__msa_ilvl_d((v2i64)tmp_0, (v2i64)tmp_0);
    tmp_3 = (v8i16)__msa_ilvl_d((v2i64)tmp_1, (v2i64)tmp_1);
    TRANSPOSE4x4_SH_SH(tmp_0, tmp_2, tmp_1, tmp_3, tmp_2, tmp_3, tmp_4, tmp_5);
    tmp_0 = (v16i8)__msa_ilvr_d((v2i64)tmp_3, (v2i64)tmp_2);
    tmp_1 = (v16i8)__msa_ilvr_d((v2i64)tmp_5, (v2i64)tmp_4);
    ST_SB2(tmp_0, tmp_1, (int8_t*)bS, 16);
}