/*
 * GMC (Global Motion Compensation), AltiVec-enabled
 *
 * Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/mem_internal.h"
#include "libavutil/ppc/cpu.h"
#include "libavutil/ppc/util_altivec.h"

#include "libavcodec/mpegvideodsp.h"

#if HAVE_ALTIVEC
/* AltiVec-enhanced gmc1. At the moment this code assumes stride is a
 * multiple of 8 to preserve proper dst alignment. */
static void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align 1 */,
                         int stride, int h, int x16, int y16, int rounder)
{
    int i;
    const DECLARE_ALIGNED(16, unsigned short, rounder_a) = rounder;
    const DECLARE_ALIGNED(16, unsigned short, ABCD)[8] = {
        (16 - x16) * (16 - y16), /* A */
             (x16) * (16 - y16), /* B */
        (16 - x16) * (y16),      /* C */
             (x16) * (y16),      /* D */
        0, 0, 0, 0               /* padding */
    };
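    /* Each output pixel is the bilinear blend
     *   dst[x] = (A * src[x]          + B * src[x + 1] +
     *             C * src[x + stride] + D * src[x + stride + 1] + rounder) >> 8
     * computed below on 8 pixels per row per loop iteration. */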
    register const vector unsigned char vczero =
        (const vector unsigned char) vec_splat_u8(0);
    register const vector unsigned short vcsr8 =
        (const vector unsigned short) vec_splat_u16(8);
    register vector unsigned char dstv, dstv2, srcvB, srcvC, srcvD;
    register vector unsigned short tempB, tempC, tempD;
    unsigned long dst_odd        = (unsigned long) dst & 0x0000000F;
    unsigned long src_really_odd = (unsigned long) src & 0x0000000F;
    register vector unsigned short tempA =
        vec_ld(0, (const unsigned short *) ABCD);
    register vector unsigned short Av = vec_splat(tempA, 0);
    register vector unsigned short Bv = vec_splat(tempA, 1);
    register vector unsigned short Cv = vec_splat(tempA, 2);
    register vector unsigned short Dv = vec_splat(tempA, 3);
    register vector unsigned short rounderV =
        vec_splat((vec_u16) vec_lde(0, &rounder_a), 0);

    /* We'll be able to pick up our 9 char elements at src from those
     * 32 bytes. We load the first batch here, as inside the loop we can
     * reuse 'src + stride' from one iteration as the 'src' of the next. */
    register vector unsigned char src_0 = vec_ld(0, src);
    register vector unsigned char src_1 = vec_ld(16, src);
    register vector unsigned char srcvA = vec_perm(src_0, src_1,
                                                   vec_lvsl(0, src));

    if (src_really_odd != 0x0000000F)
        /* If (src & 0xF) == 0xF, then (src + 1) is properly aligned
         * on the second vector. */
        srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
    else
        srcvB = src_1;
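    /* Interleaving with zero widens the first 8 source bytes to 16-bit
     * elements, ready for the multiply-accumulates below. */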
    srcvA = vec_mergeh(vczero, srcvA);
    srcvB = vec_mergeh(vczero, srcvB);

    for (i = 0; i < h; i++) {
        dst_odd        =   (unsigned long) dst            & 0x0000000F;
        src_really_odd = (((unsigned long) src) + stride) & 0x0000000F;

        dstv = vec_ld(0, dst);

        /* We'll be able to pick up our 9 char elements at src + stride from
         * those 32 bytes, then reuse the resulting 2 vectors srcvC and srcvD
         * as the next srcvA and srcvB. */
        src_0 = vec_ld(stride +  0, src);
        src_1 = vec_ld(stride + 16, src);
        srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));

        if (src_really_odd != 0x0000000F)
            /* If (src & 0xF) == 0xF, then (src + 1) is properly aligned
             * on the second vector. */
            srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
        else
            srcvD = src_1;

        srcvC = vec_mergeh(vczero, srcvC);
        srcvD = vec_mergeh(vczero, srcvD);

        /* OK, now we (finally) do the math :-)
         * Those four instructions replace 32 int muls & 32 int adds.
         * Isn't AltiVec nice? */
        tempA = vec_mladd((vector unsigned short) srcvA, Av, rounderV);
        tempB = vec_mladd((vector unsigned short) srcvB, Bv, tempA);
        tempC = vec_mladd((vector unsigned short) srcvC, Cv, tempB);
        tempD = vec_mladd((vector unsigned short) srcvD, Dv, tempC);

        srcvA = srcvC;
        srcvB = srcvD;

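        /* The weights sum to (16 - x16 + x16) * (16 - y16 + y16) = 256,
         * so shifting right by 8 (with the rounder already added in)
         * normalizes the accumulated sums back to 8-bit pixel values. */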
        tempD = vec_sr(tempD, vcsr8);

        dstv2 = vec_pack(tempD, (vector unsigned short) vczero);

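        /* dst is only guaranteed to be 8-byte aligned, while vec_st writes a
         * full 16-byte aligned block, so blend the 8 new pixels into the
         * correct half of the block previously loaded into dstv. */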
        if (dst_odd)
            dstv2 = vec_perm(dstv, dstv2, vcprm(0, 1, s0, s1));
        else
            dstv2 = vec_perm(dstv, dstv2, vcprm(s0, s1, 2, 3));

        vec_st(dstv2, 0, dst);

        dst += stride;
        src += stride;
    }
}
#endif /* HAVE_ALTIVEC */

av_cold void ff_mpegvideodsp_init_ppc(MpegVideoDSPContext *c)
{
#if HAVE_ALTIVEC
    if (!PPC_ALTIVEC(av_get_cpu_flags()))
        return;

    c->gmc1 = gmc1_altivec;
#endif /* HAVE_ALTIVEC */
}