/*
 * Copyright (c) 2012
 *      MIPS Technologies, Inc., California.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Author: Branimir Vasic (bvasic@mips.com)
 * Author: Zoran Lukic (zoranl@mips.com)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Reference: libavutil/float_dsp.c
 */

#include "config.h"
#include "libavutil/float_dsp.h"
#include "libavutil/mips/asmdefs.h"

#if HAVE_INLINE_ASM && HAVE_MIPSFPU
#if !HAVE_MIPS32R6 && !HAVE_MIPS64R6
/* dst[i] = src0[i] * src1[i]; the unrolled asm path is used only when len is
 * a multiple of 4, otherwise a plain C loop handles the general case. */
static void vector_fmul_mips(float *dst, const float *src0, const float *src1,
                             int len)
{
    int i;

    if (len & 3) {
        for (i = 0; i < len; i++)
            dst[i] = src0[i] * src1[i];
    } else {
        float *d     = (float *)dst;
        float *d_end = d + len;
        float *s0    = (float *)src0;
        float *s1    = (float *)src1;

        float src0_0, src0_1, src0_2, src0_3;
        float src1_0, src1_1, src1_2, src1_3;

        /* loop unrolled 4 times */
        __asm__ volatile (
            "1:                                             \n\t"
            "lwc1   %[src0_0],  0(%[s0])                    \n\t"
            "lwc1   %[src1_0],  0(%[s1])                    \n\t"
            "lwc1   %[src0_1],  4(%[s0])                    \n\t"
            "lwc1   %[src1_1],  4(%[s1])                    \n\t"
            "lwc1   %[src0_2],  8(%[s0])                    \n\t"
            "lwc1   %[src1_2],  8(%[s1])                    \n\t"
            "lwc1   %[src0_3],  12(%[s0])                   \n\t"
            "lwc1   %[src1_3],  12(%[s1])                   \n\t"
            "mul.s  %[src0_0],  %[src0_0],  %[src1_0]       \n\t"
            "mul.s  %[src0_1],  %[src0_1],  %[src1_1]       \n\t"
            "mul.s  %[src0_2],  %[src0_2],  %[src1_2]       \n\t"
            "mul.s  %[src0_3],  %[src0_3],  %[src1_3]       \n\t"
            "swc1   %[src0_0],  0(%[d])                     \n\t"
            "swc1   %[src0_1],  4(%[d])                     \n\t"
            "swc1   %[src0_2],  8(%[d])                     \n\t"
            "swc1   %[src0_3],  12(%[d])                    \n\t"
            PTR_ADDIU "%[s0],   %[s0],      16              \n\t"
            PTR_ADDIU "%[s1],   %[s1],      16              \n\t"
            PTR_ADDIU "%[d],    %[d],       16              \n\t"
            "bne    %[d],       %[d_end],   1b              \n\t"

            : [src0_0]"=&f"(src0_0), [src0_1]"=&f"(src0_1),
              [src0_2]"=&f"(src0_2), [src0_3]"=&f"(src0_3),
              [src1_0]"=&f"(src1_0), [src1_1]"=&f"(src1_1),
              [src1_2]"=&f"(src1_2), [src1_3]"=&f"(src1_3),
              [d]"+r"(d), [s0]"+r"(s0), [s1]"+r"(s1)
            : [d_end]"r"(d_end)
            : "memory"
        );
    }
}

/* dst[i] = src[i] * mul; assumes len is a positive multiple of 4 */
static void vector_fmul_scalar_mips(float *dst, const float *src, float mul,
                                    int len)
{
    float temp0, temp1, temp2, temp3;
    float *local_src = (float *)src;
    float *end       = local_src + len;

    /* loop unrolled 4 times */
    __asm__ volatile (
        ".set push                                      \n\t"
        ".set noreorder                                 \n\t"
        "1:                                             \n\t"
        "lwc1   %[temp0],   0(%[src])                   \n\t"
        "lwc1   %[temp1],   4(%[src])                   \n\t"
        "lwc1   %[temp2],   8(%[src])                   \n\t"
        "lwc1   %[temp3],   12(%[src])                  \n\t"
        PTR_ADDIU "%[dst],  %[dst],     16              \n\t"
        "mul.s  %[temp0],   %[temp0],   %[mul]          \n\t"
        "mul.s  %[temp1],   %[temp1],   %[mul]          \n\t"
        "mul.s  %[temp2],   %[temp2],   %[mul]          \n\t"
        "mul.s  %[temp3],   %[temp3],   %[mul]          \n\t"
        PTR_ADDIU "%[src],  %[src],     16              \n\t"
        "swc1   %[temp0],   -16(%[dst])                 \n\t"
        "swc1   %[temp1],   -12(%[dst])                 \n\t"
        "swc1   %[temp2],   -8(%[dst])                  \n\t"
        "bne    %[src],     %[end],     1b              \n\t"
        " swc1  %[temp3],   -4(%[dst])                  \n\t"
        ".set pop                                       \n\t"

        : [temp0]"=&f"(temp0), [temp1]"=&f"(temp1),
          [temp2]"=&f"(temp2), [temp3]"=&f"(temp3),
          [dst]"+r"(dst), [src]"+r"(local_src)
        : [end]"r"(end), [mul]"f"(mul)
        : "memory"
    );
}

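/*
 * vector_fmul_window: windowed overlap-add multiply.  In scalar form, the
 * unrolled assembly below computes, for i in [0, len) and j = 2 * len - 1 - i:
 *
 *     s0     = src0[i];
 *     s1     = src1[len - 1 - i];
 *     dst[i] = s0 * win[j] - s1 * win[i];
 *     dst[j] = s0 * win[i] + s1 * win[j];
 *
 * Four (i, j) pairs are handled per iteration, so len is assumed to be a
 * positive multiple of 4 (the float_dsp API requires a multiple of 4).
 */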
static void vector_fmul_window_mips(float *dst, const float *src0,
                                    const float *src1, const float *win, int len)
{
    float *dst_j, *win_j, *src0_i, *src1_j, *dst_i, *win_i;
    float temp, temp1, temp2, temp3;
    float s0, s01, s1, s11;
    float wi, wi1, wi2, wi3;
    float wj, wj1, wj2, wj3;
    const float *lp_end = win + len;

    win_i  = (float *)win;
    win_j  = (float *)(win + 2 * len - 1);
    src1_j = (float *)(src1 + len - 1);
    src0_i = (float *)src0;
    dst_i  = (float *)dst;
    dst_j  = (float *)(dst + 2 * len - 1);

    /* loop unrolled 4 times */
    __asm__ volatile (
        "1:"
        "lwc1   %[s1],      0(%[src1_j])                \n\t"
        "lwc1   %[wi],      0(%[win_i])                 \n\t"
        "lwc1   %[wj],      0(%[win_j])                 \n\t"
        "lwc1   %[s11],     -4(%[src1_j])               \n\t"
        "lwc1   %[wi1],     4(%[win_i])                 \n\t"
        "lwc1   %[wj1],     -4(%[win_j])                \n\t"
        "lwc1   %[s0],      0(%[src0_i])                \n\t"
        "lwc1   %[s01],     4(%[src0_i])                \n\t"
        "mul.s  %[temp],    %[s1],      %[wi]           \n\t"
        "mul.s  %[temp1],   %[s1],      %[wj]           \n\t"
        "mul.s  %[temp2],   %[s11],     %[wi1]          \n\t"
        "mul.s  %[temp3],   %[s11],     %[wj1]          \n\t"
        "lwc1   %[s1],      -8(%[src1_j])               \n\t"
        "lwc1   %[wi2],     8(%[win_i])                 \n\t"
        "lwc1   %[wj2],     -8(%[win_j])                \n\t"
        "lwc1   %[s11],     -12(%[src1_j])              \n\t"
        "msub.s %[temp],    %[temp],    %[s0],  %[wj]   \n\t"
        "madd.s %[temp1],   %[temp1],   %[s0],  %[wi]   \n\t"
        "msub.s %[temp2],   %[temp2],   %[s01], %[wj1]  \n\t"
        "madd.s %[temp3],   %[temp3],   %[s01], %[wi1]  \n\t"
        "lwc1   %[wi3],     12(%[win_i])                \n\t"
        "lwc1   %[wj3],     -12(%[win_j])               \n\t"
        "lwc1   %[s0],      8(%[src0_i])                \n\t"
        "lwc1   %[s01],     12(%[src0_i])               \n\t"
        PTR_ADDIU "%[src1_j], -16                       \n\t"
        PTR_ADDIU "%[win_i],  16                        \n\t"
        PTR_ADDIU "%[win_j],  -16                       \n\t"
        PTR_ADDIU "%[src0_i], 16                        \n\t"
        "swc1   %[temp],    0(%[dst_i])                 \n\t" /* dst[i]   = s0*wj   - s1*wi;   */
        "swc1   %[temp1],   0(%[dst_j])                 \n\t" /* dst[j]   = s0*wi   + s1*wj;   */
        "swc1   %[temp2],   4(%[dst_i])                 \n\t" /* dst[i+1] = s01*wj1 - s11*wi1; */
        "swc1   %[temp3],   -4(%[dst_j])                \n\t" /* dst[j-1] = s01*wi1 + s11*wj1; */
        "mul.s  %[temp],    %[s1],      %[wi2]          \n\t"
        "mul.s  %[temp1],   %[s1],      %[wj2]          \n\t"
        "mul.s  %[temp2],   %[s11],     %[wi3]          \n\t"
        "mul.s  %[temp3],   %[s11],     %[wj3]          \n\t"
        "msub.s %[temp],    %[temp],    %[s0],  %[wj2]  \n\t"
        "madd.s %[temp1],   %[temp1],   %[s0],  %[wi2]  \n\t"
        "msub.s %[temp2],   %[temp2],   %[s01], %[wj3]  \n\t"
        "madd.s %[temp3],   %[temp3],   %[s01], %[wi3]  \n\t"
        "swc1   %[temp],    8(%[dst_i])                 \n\t" /* dst[i+2] = s0*wj2  - s1*wi2;  */
        "swc1   %[temp1],   -8(%[dst_j])                \n\t" /* dst[j-2] = s0*wi2  + s1*wj2;  */
        "swc1   %[temp2],   12(%[dst_i])                \n\t" /* dst[i+3] = s01*wj3 - s11*wi3; */
        "swc1   %[temp3],   -12(%[dst_j])               \n\t" /* dst[j-3] = s01*wi3 + s11*wj3; */
        PTR_ADDIU "%[dst_i],  16                        \n\t"
        PTR_ADDIU "%[dst_j],  -16                       \n\t"
        "bne    %[win_i],   %[lp_end],  1b              \n\t"
        : [temp]"=&f"(temp), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
          [temp3]"=&f"(temp3), [src0_i]"+r"(src0_i), [win_i]"+r"(win_i),
          [src1_j]"+r"(src1_j), [win_j]"+r"(win_j), [dst_i]"+r"(dst_i),
          [dst_j]"+r"(dst_j), [s0]"=&f"(s0), [s01]"=&f"(s01), [s1]"=&f"(s1),
          [s11]"=&f"(s11), [wi]"=&f"(wi), [wj]"=&f"(wj), [wi2]"=&f"(wi2),
          [wj2]"=&f"(wj2), [wi3]"=&f"(wi3), [wj3]"=&f"(wj3), [wi1]"=&f"(wi1),
          [wj1]"=&f"(wj1)
        : [lp_end]"r"(lp_end)
        : "memory"
    );
}

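/*
 * butterflies_float: in-place butterfly of two vectors.  In scalar form the
 * assembly below computes, for each i:
 *
 *     t     = v1[i] - v2[i];
 *     v1[i] = v1[i] + v2[i];
 *     v2[i] = t;
 *
 * Four elements are processed per iteration and the last group of four is
 * finished after the loop (label "2:"), so len is assumed to be a positive
 * multiple of 4.
 */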
static void butterflies_float_mips(float *av_restrict v1, float *av_restrict v2,
                                   int len)
{
    float temp0, temp1, temp2, temp3, temp4;
    float temp5, temp6, temp7, temp8, temp9;
    float temp10, temp11, temp12, temp13, temp14, temp15;
    int pom;
    pom = (len >> 2) - 1;

    /* loop unrolled 4 times */
    __asm__ volatile (
        "lwc1   %[temp0],   0(%[v1])                    \n\t"
        "lwc1   %[temp1],   4(%[v1])                    \n\t"
        "lwc1   %[temp2],   8(%[v1])                    \n\t"
        "lwc1   %[temp3],   12(%[v1])                   \n\t"
        "lwc1   %[temp4],   0(%[v2])                    \n\t"
        "lwc1   %[temp5],   4(%[v2])                    \n\t"
        "lwc1   %[temp6],   8(%[v2])                    \n\t"
        "lwc1   %[temp7],   12(%[v2])                   \n\t"
        "beq    %[pom],     $zero,      2f              \n\t"
        "1:                                             \n\t"
        "sub.s  %[temp8],   %[temp0],   %[temp4]        \n\t"
        "add.s  %[temp9],   %[temp0],   %[temp4]        \n\t"
        "sub.s  %[temp10],  %[temp1],   %[temp5]        \n\t"
        "add.s  %[temp11],  %[temp1],   %[temp5]        \n\t"
        "sub.s  %[temp12],  %[temp2],   %[temp6]        \n\t"
        "add.s  %[temp13],  %[temp2],   %[temp6]        \n\t"
        "sub.s  %[temp14],  %[temp3],   %[temp7]        \n\t"
        "add.s  %[temp15],  %[temp3],   %[temp7]        \n\t"
        PTR_ADDIU "%[v1],   %[v1],      16              \n\t"
        PTR_ADDIU "%[v2],   %[v2],      16              \n\t"
        "addiu  %[pom],     %[pom],     -1              \n\t"
        "lwc1   %[temp0],   0(%[v1])                    \n\t"
        "lwc1   %[temp1],   4(%[v1])                    \n\t"
        "lwc1   %[temp2],   8(%[v1])                    \n\t"
        "lwc1   %[temp3],   12(%[v1])                   \n\t"
        "lwc1   %[temp4],   0(%[v2])                    \n\t"
        "lwc1   %[temp5],   4(%[v2])                    \n\t"
        "lwc1   %[temp6],   8(%[v2])                    \n\t"
        "lwc1   %[temp7],   12(%[v2])                   \n\t"
        "swc1   %[temp9],   -16(%[v1])                  \n\t"
        "swc1   %[temp8],   -16(%[v2])                  \n\t"
        "swc1   %[temp11],  -12(%[v1])                  \n\t"
        "swc1   %[temp10],  -12(%[v2])                  \n\t"
        "swc1   %[temp13],  -8(%[v1])                   \n\t"
        "swc1   %[temp12],  -8(%[v2])                   \n\t"
        "swc1   %[temp15],  -4(%[v1])                   \n\t"
        "swc1   %[temp14],  -4(%[v2])                   \n\t"
        "bgtz   %[pom],     1b                          \n\t"
        "2:                                             \n\t"
        "sub.s  %[temp8],   %[temp0],   %[temp4]        \n\t"
        "add.s  %[temp9],   %[temp0],   %[temp4]        \n\t"
        "sub.s  %[temp10],  %[temp1],   %[temp5]        \n\t"
        "add.s  %[temp11],  %[temp1],   %[temp5]        \n\t"
        "sub.s  %[temp12],  %[temp2],   %[temp6]        \n\t"
        "add.s  %[temp13],  %[temp2],   %[temp6]        \n\t"
        "sub.s  %[temp14],  %[temp3],   %[temp7]        \n\t"
        "add.s  %[temp15],  %[temp3],   %[temp7]        \n\t"
        "swc1   %[temp9],   0(%[v1])                    \n\t"
        "swc1   %[temp8],   0(%[v2])                    \n\t"
        "swc1   %[temp11],  4(%[v1])                    \n\t"
        "swc1   %[temp10],  4(%[v2])                    \n\t"
        "swc1   %[temp13],  8(%[v1])                    \n\t"
        "swc1   %[temp12],  8(%[v2])                    \n\t"
        "swc1   %[temp15],  12(%[v1])                   \n\t"
        "swc1   %[temp14],  12(%[v2])                   \n\t"

        : [v1]"+r"(v1), [v2]"+r"(v2), [pom]"+r"(pom), [temp0]"=&f"(temp0),
          [temp1]"=&f"(temp1), [temp2]"=&f"(temp2), [temp3]"=&f"(temp3),
          [temp4]"=&f"(temp4), [temp5]"=&f"(temp5), [temp6]"=&f"(temp6),
          [temp7]"=&f"(temp7), [temp8]"=&f"(temp8), [temp9]"=&f"(temp9),
          [temp10]"=&f"(temp10), [temp11]"=&f"(temp11), [temp12]"=&f"(temp12),
          [temp13]"=&f"(temp13), [temp14]"=&f"(temp14), [temp15]"=&f"(temp15)
        :
        : "memory"
    );
}

/* dst[i] = src0[i] * src1[len - 1 - i]; processes four elements per pass */
static void vector_fmul_reverse_mips(float *dst, const float *src0,
                                     const float *src1, int len)
{
    int i;
    float temp0, temp1, temp2, temp3, temp4, temp5, temp6, temp7;
    src1 += len - 1;

    for (i = 0; i < (len >> 2); i++) {
        /* loop unrolled 4 times */
        __asm__ volatile (
            "lwc1   %[temp0],   0(%[src0])              \n\t"
            "lwc1   %[temp1],   0(%[src1])              \n\t"
            "lwc1   %[temp2],   4(%[src0])              \n\t"
            "lwc1   %[temp3],   -4(%[src1])             \n\t"
            "lwc1   %[temp4],   8(%[src0])              \n\t"
            "lwc1   %[temp5],   -8(%[src1])             \n\t"
            "lwc1   %[temp6],   12(%[src0])             \n\t"
            "lwc1   %[temp7],   -12(%[src1])            \n\t"
            "mul.s  %[temp0],   %[temp1],   %[temp0]    \n\t"
            "mul.s  %[temp2],   %[temp3],   %[temp2]    \n\t"
            "mul.s  %[temp4],   %[temp5],   %[temp4]    \n\t"
            "mul.s  %[temp6],   %[temp7],   %[temp6]    \n\t"
            PTR_ADDIU "%[src0], %[src0],    16          \n\t"
            PTR_ADDIU "%[src1], %[src1],    -16         \n\t"
            PTR_ADDIU "%[dst],  %[dst],     16          \n\t"
            "swc1   %[temp0],   -16(%[dst])             \n\t"
            "swc1   %[temp2],   -12(%[dst])             \n\t"
            "swc1   %[temp4],   -8(%[dst])              \n\t"
            "swc1   %[temp6],   -4(%[dst])              \n\t"

            : [dst]"+r"(dst), [src0]"+r"(src0), [src1]"+r"(src1),
              [temp0]"=&f"(temp0), [temp1]"=&f"(temp1), [temp2]"=&f"(temp2),
              [temp3]"=&f"(temp3), [temp4]"=&f"(temp4), [temp5]"=&f"(temp5),
              [temp6]"=&f"(temp6), [temp7]"=&f"(temp7)
            :
            : "memory"
        );
    }
}
#endif /* !HAVE_MIPS32R6 && !HAVE_MIPS64R6 */
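/*
 * The code above is restricted to classic (pre-R6) MIPS cores with an FPU: it
 * relies on inline assembly and on the non-fused madd.s/msub.s forms that are
 * no longer available on MIPS32R6/MIPS64R6.  Other configurations keep the
 * generic C implementations from libavutil/float_dsp.c.
 */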
#endif /* HAVE_INLINE_ASM && HAVE_MIPSFPU */

/* Install the MIPS-optimized routines; called from the generic float-DSP
 * initialization in libavutil/float_dsp.c when building for MIPS. */
void ff_float_dsp_init_mips(AVFloatDSPContext *fdsp)
{
#if HAVE_INLINE_ASM && HAVE_MIPSFPU
#if !HAVE_MIPS32R6 && !HAVE_MIPS64R6
    fdsp->vector_fmul         = vector_fmul_mips;
    fdsp->vector_fmul_scalar  = vector_fmul_scalar_mips;
    fdsp->vector_fmul_window  = vector_fmul_window_mips;
    fdsp->butterflies_float   = butterflies_float_mips;
    fdsp->vector_fmul_reverse = vector_fmul_reverse_mips;
#endif /* !HAVE_MIPS32R6 && !HAVE_MIPS64R6 */
#endif /* HAVE_INLINE_ASM && HAVE_MIPSFPU */
}