/*
 * Copyright (c) 2015 Manojkumar Bhosale (Manojkumar.Bhosale@imgtec.com)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <string.h>
#include "libavcodec/vp8dsp.h"
#include "libavutil/mips/generic_macros_msa.h"
#include "vp8dsp_mips.h"

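/* Multipliers for the VP8 inverse DCT in Q16 fixed point:
 * 20091 = round((sqrt(2) * cos(pi / 8) - 1) * 65536)
 * 35468 = round(sqrt(2) * sin(pi / 8) * 65536) */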
static const int cospi8sqrt2minus1 = 20091;
static const int sinpi8sqrt2 = 35468;

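/* One 1-D pass of the VP8 4x4 inverse DCT over four v4i32 vectors.
 * The multiplies use the Q16 constants above; precision is kept at
 * 32 bits so that rounding happens only once, after the second pass. */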
#define VP8_IDCT_1D_W(in0, in1, in2, in3, out0, out1, out2, out3)    \
{                                                                    \
    v4i32 a1_m, b1_m, c1_m, d1_m;                                    \
    v4i32 c_tmp1_m, c_tmp2_m, d_tmp1_m, d_tmp2_m;                    \
    v4i32 const_cospi8sqrt2minus1_m, sinpi8_sqrt2_m;                 \
                                                                     \
    const_cospi8sqrt2minus1_m = __msa_fill_w(cospi8sqrt2minus1);     \
    sinpi8_sqrt2_m = __msa_fill_w(sinpi8sqrt2);                      \
    a1_m = in0 + in2;                                                \
    b1_m = in0 - in2;                                                \
    c_tmp1_m = ((in1) * sinpi8_sqrt2_m) >> 16;                       \
    c_tmp2_m = in3 + (((in3) * const_cospi8sqrt2minus1_m) >> 16);    \
    c1_m = c_tmp1_m - c_tmp2_m;                                      \
    d_tmp1_m = (in1) + (((in1) * const_cospi8sqrt2minus1_m) >> 16);  \
    d_tmp2_m = ((in3) * sinpi8_sqrt2_m) >> 16;                       \
    d1_m = d_tmp1_m + d_tmp2_m;                                      \
    BUTTERFLY_4(a1_m, b1_m, c1_m, d1_m, out0, out1, out2, out3);     \
}

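/* Full inverse 4x4 DCT of one dequantized block, added to the 4x4
 * prediction at dst. The coefficients are cleared afterwards, matching
 * the C reference ff_vp8_idct_add_c. */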
void ff_vp8_idct_add_msa(uint8_t *dst, int16_t input[16], ptrdiff_t stride)
{
    v8i16 input0, input1;
    v4i32 in0, in1, in2, in3, hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3;
    v4i32 res0, res1, res2, res3;
    v16i8 zero = { 0 };
    v16i8 pred0, pred1, pred2, pred3, dest0, dest1;
    v16i8 mask = { 0, 4, 8, 12, 16, 20, 24, 28, 0, 0, 0, 0, 0, 0, 0, 0 };

    /* load short vector elements of 4x4 block */
    LD_SH2(input, 8, input0, input1);
    UNPCK_SH_SW(input0, in0, in1);
    UNPCK_SH_SW(input1, in2, in3);
    /* first 1-D transform pass */
    VP8_IDCT_1D_W(in0, in1, in2, in3, hz0, hz1, hz2, hz3);
    /* transpose the block */
    TRANSPOSE4x4_SW_SW(hz0, hz1, hz2, hz3, hz0, hz1, hz2, hz3);
    /* second 1-D transform pass, with rounding right shift by 3 */
    VP8_IDCT_1D_W(hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3);
    SRARI_W4_SW(vt0, vt1, vt2, vt3, 3);
    /* transpose the block */
    TRANSPOSE4x4_SW_SW(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3);
    /* add the residue to the prediction and clamp to [0, 255] */
    LD_SB4(dst, stride, pred0, pred1, pred2, pred3);
    ILVR_B4_SW(zero, pred0, zero, pred1, zero, pred2, zero, pred3,
               res0, res1, res2, res3);
    ILVR_H4_SW(zero, res0, zero, res1, zero, res2, zero, res3,
               res0, res1, res2, res3);
    ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3);
    CLIP_SW4_0_255(res0, res1, res2, res3);
    /* pack the four rows back to bytes and store them */
    VSHF_B2_SB(res0, res1, res2, res3, mask, mask, dest0, dest1);
    ST_W2(dest0, 0, 1, dst, stride);
    ST_W2(dest1, 0, 1, dst + 2 * stride, stride);

    /* coefficients are consumed, clear them for the next block */
    memset(input, 0, 4 * 4 * sizeof(*input));
}

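/* DC-only variant: with only in_dc[0] non-zero the transform reduces
 * to adding the rounded DC value, (dc + 4) >> 3, to every pixel of
 * the 4x4 block. */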
void ff_vp8_idct_dc_add_msa(uint8_t *dst, int16_t in_dc[16], ptrdiff_t stride)
{
    v8i16 vec;
    v8i16 res0, res1, res2, res3;
    v16i8 zero = { 0 };
    v16i8 pred0, pred1, pred2, pred3, dest0, dest1;
    v16i8 mask = { 0, 2, 4, 6, 16, 18, 20, 22, 0, 0, 0, 0, 0, 0, 0, 0 };

    /* broadcast the rounded DC value, (dc + 4) >> 3 */
    vec = __msa_fill_h(in_dc[0]);
    vec = __msa_srari_h(vec, 3);
    /* add it to the prediction, clamp, pack and store */
    LD_SB4(dst, stride, pred0, pred1, pred2, pred3);
    ILVR_B4_SH(zero, pred0, zero, pred1, zero, pred2, zero, pred3,
               res0, res1, res2, res3);
    ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3);
    CLIP_SH4_0_255(res0, res1, res2, res3);
    VSHF_B2_SB(res0, res1, res2, res3, mask, mask, dest0, dest1);
    ST_W2(dest0, 0, 1, dst, stride);
    ST_W2(dest1, 0, 1, dst + 2 * stride, stride);

    in_dc[0] = 0;
}

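/* Inverse 4x4 Walsh-Hadamard transform of the luma DC coefficients,
 * scattering the 16 results to the DC position of each of the 16
 * luma sub-blocks of the macroblock. */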
void ff_vp8_luma_dc_wht_msa(int16_t block[4][4][16], int16_t input[16])
{
    int16_t *mb_dq_coeff = &block[0][0][0];
    v8i16 input0, input1;
    v4i32 in0, in1, in2, in3, a1, b1, c1, d1;
    v4i32 hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3;

    /* load short vector elements of 4x4 block */
    LD_SH2(input, 8, input0, input1);
    UNPCK_SH_SW(input0, in0, in1);
    UNPCK_SH_SW(input1, in2, in3);
    /* first 1-D Walsh-Hadamard pass */
    BUTTERFLY_4(in0, in1, in2, in3, a1, b1, c1, d1);
    BUTTERFLY_4(a1, d1, c1, b1, hz0, hz1, hz3, hz2);
    /* transpose the block */
    TRANSPOSE4x4_SW_SW(hz0, hz1, hz2, hz3, hz0, hz1, hz2, hz3);
    /* second 1-D pass, then (x + 3) >> 3 rounding */
    BUTTERFLY_4(hz0, hz1, hz2, hz3, a1, b1, c1, d1);
    BUTTERFLY_4(a1, d1, c1, b1, vt0, vt1, vt3, vt2);
    ADD4(vt0, 3, vt1, 3, vt2, 3, vt3, 3, vt0, vt1, vt2, vt3);
    SRA_4V(vt0, vt1, vt2, vt3, 3);
    /* scatter the results to the DC slot (every 16th coefficient)
     * of the 16 luma sub-blocks */
    mb_dq_coeff[0] = __msa_copy_s_h((v8i16) vt0, 0);
    mb_dq_coeff[16] = __msa_copy_s_h((v8i16) vt1, 0);
    mb_dq_coeff[32] = __msa_copy_s_h((v8i16) vt2, 0);
    mb_dq_coeff[48] = __msa_copy_s_h((v8i16) vt3, 0);
    mb_dq_coeff[64] = __msa_copy_s_h((v8i16) vt0, 2);
    mb_dq_coeff[80] = __msa_copy_s_h((v8i16) vt1, 2);
    mb_dq_coeff[96] = __msa_copy_s_h((v8i16) vt2, 2);
    mb_dq_coeff[112] = __msa_copy_s_h((v8i16) vt3, 2);
    mb_dq_coeff[128] = __msa_copy_s_h((v8i16) vt0, 4);
    mb_dq_coeff[144] = __msa_copy_s_h((v8i16) vt1, 4);
    mb_dq_coeff[160] = __msa_copy_s_h((v8i16) vt2, 4);
    mb_dq_coeff[176] = __msa_copy_s_h((v8i16) vt3, 4);
    mb_dq_coeff[192] = __msa_copy_s_h((v8i16) vt0, 6);
    mb_dq_coeff[208] = __msa_copy_s_h((v8i16) vt1, 6);
    mb_dq_coeff[224] = __msa_copy_s_h((v8i16) vt2, 6);
    mb_dq_coeff[240] = __msa_copy_s_h((v8i16) vt3, 6);

    memset(input, 0, 4 * 4 * sizeof(int16_t));
}

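/* DC-only idct_add over four horizontally adjacent luma blocks
 * (one 16x4 strip of the macroblock). */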
void ff_vp8_idct_dc_add4y_msa(uint8_t *dst, int16_t block[4][16],
                              ptrdiff_t stride)
{
    ff_vp8_idct_dc_add_msa(dst, &block[0][0], stride);
    ff_vp8_idct_dc_add_msa(dst + 4, &block[1][0], stride);
    ff_vp8_idct_dc_add_msa(dst + 8, &block[2][0], stride);
    ff_vp8_idct_dc_add_msa(dst + 12, &block[3][0], stride);
}

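/* DC-only idct_add over the four 4x4 blocks of one 8x8 chroma area,
 * laid out 2x2. */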
void ff_vp8_idct_dc_add4uv_msa(uint8_t *dst, int16_t block[4][16],
                               ptrdiff_t stride)
{
    ff_vp8_idct_dc_add_msa(dst, &block[0][0], stride);
    ff_vp8_idct_dc_add_msa(dst + 4, &block[1][0], stride);
    ff_vp8_idct_dc_add_msa(dst + stride * 4, &block[2][0], stride);
    ff_vp8_idct_dc_add_msa(dst + stride * 4 + 4, &block[3][0], stride);
}