/*
 * Copyright (c) 2015 Manojkumar Bhosale (Manojkumar.Bhosale@imgtec.com)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/mips/generic_macros_msa.h"
#include "h263dsp_mips.h"

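/* H.263 deblocking filter strength indexed by qscale; same values as the
 * generic ff_h263_loop_filter_strength table. */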
static const uint8_t h263_loop_filter_strength_msa[32] = {
    0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 7,
    7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11, 11, 12, 12, 12
};

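/* Filter the 8-pixel-high vertical block edge at 'src' (horizontal
 * filtering).  Eight rows of the four pixels straddling the edge
 * (src[-2]..src[1]) are loaded, transposed into per-column vectors,
 * run through the H.263 deblocking arithmetic and stored back. */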
static void h263_h_loop_filter_msa(uint8_t *src, int32_t stride, int32_t qscale)
{
    int32_t strength = h263_loop_filter_strength_msa[qscale];
    v16u8 in0, in1, in2, in3, in4, in5, in6, in7;
    v8i16 temp0, temp1, temp2;
    v8i16 diff0, diff2, diff4, diff6, diff8;
    v8i16 d0, a_d0, str_x2, str;

    src -= 2;
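    /* load 8 rows of the four pixels straddling the edge and transpose so
     * that in0/in3/in2/in1 hold the p0/p1/p2/p3 columns of all 8 rows
     * (p0/p1 left of the edge, p2/p3 right of it) */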
    LD_UB8(src, stride, in0, in1, in2, in3, in4, in5, in6, in7);
    TRANSPOSE8x4_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7,
                       in0, in3, in2, in1);

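    /* a_d0 = p0 - p3; diff0 = (p0 - p3) + 4 * (p2 - p1) = 8 * d;
     * diff2 is d rounded toward zero for negative diff0, diff4 below
     * covers the non-negative case */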
    temp0 = (v8i16) __msa_ilvr_b((v16i8) in0, (v16i8) in1);
    a_d0 = __msa_hsub_u_h((v16u8) temp0, (v16u8) temp0);
    temp2 = (v8i16) __msa_ilvr_b((v16i8) in2, (v16i8) in3);
    temp2 = __msa_hsub_u_h((v16u8) temp2, (v16u8) temp2);
    temp2 <<= 2;
    diff0 = a_d0 + temp2;
    diff2 = -(-diff0 >> 3);
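    /* piecewise-linear clamp of d against the filter strength:
     * |d| <= strength passes through, values up to 2 * strength map to
     * 2 * strength - |d| (keeping the sign of d), larger values give 0 */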
    str_x2 = __msa_fill_h(-(strength << 1));
    temp0 = (str_x2 <= diff2);
    diff2 = (v8i16) __msa_bmz_v((v16u8) diff2, (v16u8) temp0, (v16u8) temp0);
    temp2 = str_x2 - diff2;
    str = __msa_fill_h(-strength);
    temp0 = (diff2 < str);
    diff2 = (v8i16) __msa_bmnz_v((v16u8) diff2, (v16u8) temp2, (v16u8) temp0);
    diff4 = diff0 >> 3;
    str_x2 = __msa_fill_h(strength << 1);
    temp0 = (diff4 <= str_x2);
    diff4 = (v8i16) __msa_bmz_v((v16u8) diff4, (v16u8) temp0, (v16u8) temp0);
    temp2 = str_x2 - diff4;
    str = __msa_fill_h(strength);
    temp0 = (str < diff4);
    diff4 = (v8i16) __msa_bmnz_v((v16u8) diff4, (v16u8) temp2, (v16u8) temp0);
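    /* select the branch matching the sign of diff0: d0 holds the p1/p2
     * delta d1, diff8 holds ad1 = |d1| >> 1 */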
    temp0 = __msa_clti_s_h(diff0, 0);
    d0 = (v8i16) __msa_bmnz_v((v16u8) diff4, (v16u8) diff2, (v16u8) temp0);
    diff2 = -diff2 >> 1;
    diff4 >>= 1;
    diff8 = (v8i16) __msa_bmnz_v((v16u8) diff4, (v16u8) diff2, (v16u8) temp0);
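    /* diff6 = d2 = clip((p0 - p3) / 4, -ad1, ad1), the p0/p3 delta */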
    diff6 = (-a_d0) >> 2;
    diff6 = -(diff6);
    temp2 = -diff8;
    temp0 = (diff6 < temp2);
    diff6 = (v8i16) __msa_bmnz_v((v16u8) diff6, (v16u8) temp2, (v16u8) temp0);
    diff2 = a_d0 >> 2;
    temp0 = (diff2 <= diff8);
    diff2 = (v8i16) __msa_bmz_v((v16u8) diff2, (v16u8) diff8, (v16u8) temp0);
    temp0 = __msa_clti_s_h(a_d0, 0);
    diff6 = (v8i16) __msa_bmz_v((v16u8) diff6, (v16u8) diff2, (v16u8) temp0);
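    /* pack the deltas to bytes and apply them: p0 -= d2, p3 += d2,
     * p1 += d1 and p2 -= d1, the latter two with unsigned saturation */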
    PCKEV_B2_SH(a_d0, diff6, a_d0, d0, diff6, d0);
    in0 = (v16u8) ((v16i8) in0 - (v16i8) diff6);
    in1 = (v16u8) ((v16i8) in1 + (v16i8) diff6);
    in3 = __msa_xori_b(in3, 128);
    in3 = (v16u8) __msa_adds_s_b((v16i8) in3, (v16i8) d0);
    in3 = __msa_xori_b(in3, 128);
    in2 = __msa_subsus_u_b(in2, (v16i8) d0);
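    /* re-interleave the filtered columns into rows and store the four
     * pixels of each of the eight rows back */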
    ILVR_B2_SH(in3, in0, in1, in2, temp0, temp1);
    in0 = (v16u8) __msa_ilvr_h(temp1, temp0);
    in3 = (v16u8) __msa_ilvl_h(temp1, temp0);
    ST_W8(in0, in3, 0, 1, 2, 3, 0, 1, 2, 3, src, stride);
}

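/* Filter the 8-pixel-wide horizontal block edge at 'src' (vertical
 * filtering).  The four rows straddling the edge are loaded directly, so
 * no transpose is needed; the arithmetic is identical to the horizontal
 * filter above. */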
static void h263_v_loop_filter_msa(uint8_t *src, int32_t stride, int32_t qscale)
{
    int32_t strength = h263_loop_filter_strength_msa[qscale];
    uint64_t res0, res1, res2, res3;
    v16u8 in0, in1, in2, in3;
    v8i16 temp0, temp2, diff0, diff2, diff4, diff6, diff8;
    v8i16 d0, a_d0, str_x2, str;

    src -= 2 * stride;
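    /* in0/in3/in2/in1 = the p0/p1/p2/p3 rows (two above and two below the
     * edge), matching the register layout of the horizontal filter */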
    LD_UB4(src, stride, in0, in3, in2, in1);
    temp0 = (v8i16) __msa_ilvr_b((v16i8) in0, (v16i8) in1);
    a_d0 = __msa_hsub_u_h((v16u8) temp0, (v16u8) temp0);
    temp2 = (v8i16) __msa_ilvr_b((v16i8) in2, (v16i8) in3);
    temp2 = __msa_hsub_u_h((v16u8) temp2, (v16u8) temp2);
    temp2 <<= 2;
    diff0 = a_d0 + temp2;
    diff2 = -(-diff0 >> 3);
    str_x2 = __msa_fill_h(-(strength << 1));
    temp0 = (str_x2 <= diff2);
    diff2 = (v8i16) __msa_bmz_v((v16u8) diff2, (v16u8) temp0, (v16u8) temp0);
    temp2 = str_x2 - diff2;
    str = __msa_fill_h(-strength);
    temp0 = (diff2 < str);
    diff2 = (v8i16) __msa_bmnz_v((v16u8) diff2, (v16u8) temp2, (v16u8) temp0);
    diff4 = diff0 >> 3;
    str_x2 = __msa_fill_h(strength << 1);
    temp0 = (diff4 <= str_x2);
    diff4 = (v8i16) __msa_bmz_v((v16u8) diff4, (v16u8) temp0, (v16u8) temp0);
    temp2 = str_x2 - diff4;
    str = __msa_fill_h(strength);
    temp0 = (str < diff4);
    diff4 = (v8i16) __msa_bmnz_v((v16u8) diff4, (v16u8) temp2, (v16u8) temp0);
    temp0 = __msa_clti_s_h(diff0, 0);
    d0 = (v8i16) __msa_bmnz_v((v16u8) diff4, (v16u8) diff2, (v16u8) temp0);
    diff2 = -diff2 >> 1;
    diff4 >>= 1;
    diff8 = (v8i16) __msa_bmnz_v((v16u8) diff4, (v16u8) diff2, (v16u8) temp0);
    diff6 = (-a_d0) >> 2;
    diff6 = -(diff6);
    temp2 = -diff8;
    temp0 = (diff6 < temp2);
    diff6 = (v8i16) __msa_bmnz_v((v16u8) diff6, (v16u8) temp2, (v16u8) temp0);
    diff2 = a_d0 >> 2;
    temp0 = (diff2 <= diff8);
    diff2 = (v8i16) __msa_bmz_v((v16u8) diff2, (v16u8) diff8, (v16u8) temp0);
    temp0 = __msa_clti_s_h(a_d0, 0);
    diff6 = (v8i16) __msa_bmz_v((v16u8) diff6, (v16u8) diff2, (v16u8) temp0);
    PCKEV_B2_SH(a_d0, diff6, a_d0, d0, diff6, d0);
    in0 = (v16u8) ((v16i8) in0 - (v16i8) diff6);
    in1 = (v16u8) ((v16i8) in1 + (v16i8) diff6);
    in3 = __msa_xori_b(in3, 128);
    in3 = (v16u8) __msa_adds_s_b((v16i8) in3, (v16i8) d0);
    in3 = __msa_xori_b(in3, 128);
    in2 = __msa_subsus_u_b(in2, (v16i8) d0);
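    /* store the low 8 filtered bytes of each of the four rows */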
    res0 = __msa_copy_u_d((v2i64) in0, 0);
    res1 = __msa_copy_u_d((v2i64) in3, 0);
    res2 = __msa_copy_u_d((v2i64) in2, 0);
    res3 = __msa_copy_u_d((v2i64) in1, 0);
    SD4(res0, res1, res2, res3, src, stride);
}

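/* exported wrappers around the static implementations above */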
void ff_h263_h_loop_filter_msa(uint8_t *src, int32_t stride, int32_t q_scale)
{
    h263_h_loop_filter_msa(src, stride, q_scale);
}

void ff_h263_v_loop_filter_msa(uint8_t *src, int32_t stride, int32_t q_scale)
{
    h263_v_loop_filter_msa(src, stride, q_scale);
}