/*
 * VC-1 and WMV3 decoder - DSP functions AltiVec-optimized
 * Copyright (c) 2006 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/ppc/cpu.h"
#include "libavutil/ppc/util_altivec.h"

#include "libavcodec/vc1dsp.h"

#if HAVE_ALTIVEC

// main steps of 8x8 transform
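/* STEP8 applies one pass of the 8-point VC-1 inverse transform to eight
 * vectors of 32-bit coefficients, using only shifts and adds:
 *   even part: 12*(s0 +/- s4), 16*s2 + 6*s6 and 6*s2 - 16*s6
 *   odd part:  the coefficient set {16, 15, 9, 4} applied to s1, s3, s5, s7
 * e.g. t0 = ((s0 + s4) << 2), then t0 = (t0 << 1) + t0, gives 12*(s0 + s4).
 * vec_rnd carries the rounding constant (4 for the first pass, 64 for the
 * second); t0..t7 must be declared by the caller. */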
#define STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_rnd) \
do { \
    t0 = vec_sl(vec_add(s0, s4), vec_2); \
    t0 = vec_add(vec_sl(t0, vec_1), t0); \
    t0 = vec_add(t0, vec_rnd); \
    t1 = vec_sl(vec_sub(s0, s4), vec_2); \
    t1 = vec_add(vec_sl(t1, vec_1), t1); \
    t1 = vec_add(t1, vec_rnd); \
    t2 = vec_add(vec_sl(s6, vec_2), vec_sl(s6, vec_1)); \
    t2 = vec_add(t2, vec_sl(s2, vec_4)); \
    t3 = vec_add(vec_sl(s2, vec_2), vec_sl(s2, vec_1)); \
    t3 = vec_sub(t3, vec_sl(s6, vec_4)); \
    t4 = vec_add(t0, t2); \
    t5 = vec_add(t1, t3); \
    t6 = vec_sub(t1, t3); \
    t7 = vec_sub(t0, t2); \
\
    t0 = vec_sl(vec_add(s1, s3), vec_4); \
    t0 = vec_add(t0, vec_sl(s5, vec_3)); \
    t0 = vec_add(t0, vec_sl(s7, vec_2)); \
    t0 = vec_add(t0, vec_sub(s5, s3)); \
\
    t1 = vec_sl(vec_sub(s1, s5), vec_4); \
    t1 = vec_sub(t1, vec_sl(s7, vec_3)); \
    t1 = vec_sub(t1, vec_sl(s3, vec_2)); \
    t1 = vec_sub(t1, vec_add(s1, s7)); \
\
    t2 = vec_sl(vec_sub(s7, s3), vec_4); \
    t2 = vec_add(t2, vec_sl(s1, vec_3)); \
    t2 = vec_add(t2, vec_sl(s5, vec_2)); \
    t2 = vec_add(t2, vec_sub(s1, s7)); \
\
    t3 = vec_sl(vec_sub(s5, s7), vec_4); \
    t3 = vec_sub(t3, vec_sl(s3, vec_3)); \
    t3 = vec_add(t3, vec_sl(s1, vec_2)); \
    t3 = vec_sub(t3, vec_add(s3, s5)); \
\
    s0 = vec_add(t4, t0); \
    s1 = vec_add(t5, t1); \
    s2 = vec_add(t6, t2); \
    s3 = vec_add(t7, t3); \
    s4 = vec_sub(t7, t3); \
    s5 = vec_sub(t6, t2); \
    s6 = vec_sub(t5, t1); \
    s7 = vec_sub(t4, t0); \
} while (0)

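/* First-pass scaling: each result is divided by 8 (arithmetic shift right
 * by 3) after the +4 rounding added in STEP8. */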
#define SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7) \
do { \
    s0 = vec_sra(s0, vec_3); \
    s1 = vec_sra(s1, vec_3); \
    s2 = vec_sra(s2, vec_3); \
    s3 = vec_sra(s3, vec_3); \
    s4 = vec_sra(s4, vec_3); \
    s5 = vec_sra(s5, vec_3); \
    s6 = vec_sra(s6, vec_3); \
    s7 = vec_sra(s7, vec_3); \
} while (0)

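/* Second-pass scaling: each result is divided by 128 (shift right by 7)
 * after the +64 rounding added in STEP8; outputs 4-7 get an extra +1
 * before the shift, matching the asymmetric rounding of the reference
 * C implementation. */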
#define SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7) \
do { \
    s0 = vec_sra(s0, vec_7); \
    s1 = vec_sra(s1, vec_7); \
    s2 = vec_sra(s2, vec_7); \
    s3 = vec_sra(s3, vec_7); \
    s4 = vec_sra(vec_add(s4, vec_1s), vec_7); \
    s5 = vec_sra(vec_add(s5, vec_1s), vec_7); \
    s6 = vec_sra(vec_add(s6, vec_1s), vec_7); \
    s7 = vec_sra(vec_add(s7, vec_1s), vec_7); \
} while (0)

/* main steps of 4x4 transform */
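/* STEP4 is the 4-point counterpart, again built from shifts and adds:
 *   even part: 17*(s0 +/- s2)
 *   odd part:  22*s1 + 10*s3 and 10*s1 - 22*s3
 * e.g. (s0 << 4) + s0 yields 17*s0. */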
#define STEP4(s0, s1, s2, s3, vec_rnd) \
do { \
    t1 = vec_add(vec_sl(s0, vec_4), s0); \
    t1 = vec_add(t1, vec_rnd); \
    t2 = vec_add(vec_sl(s2, vec_4), s2); \
    t0 = vec_add(t1, t2); \
    t1 = vec_sub(t1, t2); \
    t3 = vec_sl(vec_sub(s3, s1), vec_1); \
    t3 = vec_add(t3, vec_sl(t3, vec_2)); \
    t2 = vec_add(t3, vec_sl(s1, vec_5)); \
    t3 = vec_add(t3, vec_sl(s3, vec_3)); \
    t3 = vec_add(t3, vec_sl(s3, vec_2)); \
    s0 = vec_add(t0, t2); \
    s1 = vec_sub(t1, t3); \
    s2 = vec_add(t1, t3); \
    s3 = vec_sub(t0, t2); \
} while (0)

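/* 4-point counterparts of the scaling macros above: >>3 after the first
 * (horizontal) pass, >>7 after the second (vertical) pass. */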
#define SHIFT_HOR4(s0, s1, s2, s3) \
do { \
    s0 = vec_sra(s0, vec_3); \
    s1 = vec_sra(s1, vec_3); \
    s2 = vec_sra(s2, vec_3); \
    s3 = vec_sra(s3, vec_3); \
} while (0)

#define SHIFT_VERT4(s0, s1, s2, s3) \
do { \
    s0 = vec_sra(s0, vec_7); \
    s1 = vec_sra(s1, vec_7); \
    s2 = vec_sra(s2, vec_7); \
    s3 = vec_sra(s3, vec_7); \
} while (0)

/** Do the inverse transform on an 8x8 block. */
static void vc1_inv_trans_8x8_altivec(int16_t block[64])
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector  signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector  signed int vec_1s = vec_splat_s32(1);
    const vector unsigned int vec_1 = vec_splat_u32(1);

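    /* Load the eight rows of 16-bit coefficients; vec_ld requires 16-byte
     * alignment, which the coefficient buffer is expected to provide. */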
    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

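    /* Widen to 32 bits for the transform arithmetic: s0..s7 hold one half
     * of each row, s8..sF the other. */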
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
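    /* First pass: 8-point transform with +4 rounding, scaled by >>3,
     * then repack and transpose for the second pass. */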
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

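    /* Second pass on the transposed data: +64 rounding, scaled by >>7. */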
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_64);
    SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_64);
    SHIFT_VERT8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);

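    /* Pack back to 16 bits and store the eight rows. */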
    vec_st(src0,  0, block);
    vec_st(src1, 16, block);
    vec_st(src2, 32, block);
    vec_st(src3, 48, block);
    vec_st(src4, 64, block);
    vec_st(src5, 80, block);
    vec_st(src6, 96, block);
    vec_st(src7,112, block);
}

/** Do the inverse transform on an 8x4 part of a block. */
static void vc1_inv_trans_8x4_altivec(uint8_t *dest, ptrdiff_t stride,
                                      int16_t *block)
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_5 = vec_splat_u32(5);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector  signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector unsigned int vec_1 = vec_splat_u32(1);
    vector unsigned char tmp;
    vector signed short tmp2, tmp3;
    vector unsigned char perm0, perm1, p0, p1, p;

    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

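    /* Transpose so the 8-point pass runs along the rows of the block,
     * then transpose back for the 4-point pass down the columns. */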
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

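    /* Vertical 4-point pass on the four rows: +64 rounding, scaled by >>7. */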
    s0 = vec_unpackh(src0);
    s1 = vec_unpackh(src1);
    s2 = vec_unpackh(src2);
    s3 = vec_unpackh(src3);
    s8 = vec_unpackl(src0);
    s9 = vec_unpackl(src1);
    sA = vec_unpackl(src2);
    sB = vec_unpackl(src3);
    STEP4(s0, s1, s2, s3, vec_64);
    SHIFT_VERT4(s0, s1, s2, s3);
    STEP4(s8, s9, sA, sB, vec_64);
    SHIFT_VERT4(s8, s9, sA, sB);
    src0 = vec_pack(s0, s8);
    src1 = vec_pack(s1, s9);
    src2 = vec_pack(s2, sA);
    src3 = vec_pack(s3, sB);

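    /* Add the transform result to the destination pixels: widen each 8-pixel
     * destination row to 16 bits, add with saturation, pack back to unsigned
     * bytes, and store the 8 bytes as two 32-bit element stores. On
     * big-endian builds, vec_lvsl-based permutes zero-extend the possibly
     * unaligned row; on little-endian VSX builds, vec_mergeh with a zero
     * vector does the same. */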
#if HAVE_BIGENDIAN
    p0 = vec_lvsl (0, dest);
    p1 = vec_lvsl (stride, dest);
    p = vec_splat_u8 (-1);
    perm0 = vec_mergeh (p, p0);
    perm1 = vec_mergeh (p, p1);
#define GET_TMP2(dst, p)        \
    tmp = vec_ld (0, dst);      \
    tmp2 = (vector signed short)vec_perm (tmp, vec_splat_u8(0), p);
#else
#define GET_TMP2(dst,p)         \
    tmp = vec_vsx_ld (0, dst);  \
    tmp2 = (vector signed short)vec_mergeh (tmp, vec_splat_u8(0));
#endif

#define ADD(dest,src,perm)                                              \
    GET_TMP2(dest, perm);                                               \
    tmp3 = vec_adds (tmp2, src);                                        \
    tmp = vec_packsu (tmp3, tmp3);                                      \
    vec_ste ((vector unsigned int)tmp, 0, (unsigned int *)dest);        \
    vec_ste ((vector unsigned int)tmp, 4, (unsigned int *)dest);

    ADD (dest, src0, perm0)      dest += stride;
    ADD (dest, src1, perm1)      dest += stride;
    ADD (dest, src2, perm0)      dest += stride;
    ADD (dest, src3, perm1)
}

#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)

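/* Instantiate the no-rounding VC-1 chroma MC functions from the shared
 * AltiVec H.264 chroma template, once with a plain store (put) and once
 * with averaging (avg). */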
#define OP_U8_ALTIVEC                          PUT_OP_U8_ALTIVEC
#define PREFIX_no_rnd_vc1_chroma_mc8_altivec   put_no_rnd_vc1_chroma_mc8_altivec
#include "h264chroma_template.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec

#define OP_U8_ALTIVEC                          AVG_OP_U8_ALTIVEC
#define PREFIX_no_rnd_vc1_chroma_mc8_altivec   avg_no_rnd_vc1_chroma_mc8_altivec
#include "h264chroma_template.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec

#endif /* HAVE_ALTIVEC */

av_cold void ff_vc1dsp_init_ppc(VC1DSPContext *dsp)
{
#if HAVE_ALTIVEC
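    /* Install the AltiVec versions only if the runtime CPU supports them. */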
    if (!PPC_ALTIVEC(av_get_cpu_flags()))
        return;

    dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_altivec;
    dsp->vc1_inv_trans_8x4 = vc1_inv_trans_8x4_altivec;
    dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = put_no_rnd_vc1_chroma_mc8_altivec;
    dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = avg_no_rnd_vc1_chroma_mc8_altivec;
#endif /* HAVE_ALTIVEC */
}