/third_party/ffmpeg/libavcodec/
faandct.c
   67  FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  in row_fdct() local
   78  tmp5= data[2 + i] - data[5 + i];  in row_fdct()
   95  tmp4 += tmp5;  in row_fdct()
   96  tmp5 += tmp6;  in row_fdct()
  102  tmp5*=A1;  in row_fdct()
  104  z11= tmp7 + tmp5;  in row_fdct()
  105  z13= tmp7 - tmp5;  in row_fdct()
  116  FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  in ff_faandct() local
  132  tmp5= temp[8*2 + i] - temp[8*5 + i];  in ff_faandct()
  149  tmp4 += tmp5;  in ff_faandct()
  170  FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  in ff_faandct248() local
  [all...]
jfdctint_template.c
  184  int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  in row_fdct() local
  201  tmp5 = dataptr[2] - dataptr[5];  in row_fdct()
  229  z2 = tmp5 + tmp6;  in row_fdct()
  231  z4 = tmp5 + tmp7;  in row_fdct()
  235  tmp5 = MULTIPLY(tmp5, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */  in row_fdct()
  247  dataptr[5] = (int16_t) DESCALE(tmp5 + z2 + z4, CONST_BITS-PASS1_BITS);  in row_fdct()
  262  int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  in ff_jpeg_fdct_islow() local
  282  tmp5 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*5];  in ff_jpeg_fdct_islow()
  310  z2 = tmp5 ...  in ff_jpeg_fdct_islow()
  344  int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  in ff_fdct248_islow() local
  [all...]
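Note: the jfdctint hits above (z2 = tmp5 + tmp6, z4 = tmp5 + tmp7, the FIX_2_053119869 multiply, and the dataptr[5] store) all belong to a single output of the "islow" odd part. A minimal floating-point sketch of how those lines combine, assuming the standard libjpeg islow odd-part structure and ignoring the fixed-point DESCALE rounding; this is a reconstruction for orientation, not code copied from the file:

    /* Output 5 of an 8-point row, from the differences t4..t7 the excerpts
     * compute (t5 = d[2] - d[5], etc.).  The result equals sqrt(2) times the
     * true DCT-II coefficient 5 of the row, matching the
     * "sqrt(2) * ( c1+c3-c5+c7)" comment beside FIX_2_053119869. */
    static double islow_data5(const double d[8])
    {
        double t4 = d[3] - d[4], t5 = d[2] - d[5];
        double t6 = d[1] - d[6], t7 = d[0] - d[7];
        double z2 = t5 + t6, z3 = t4 + t6, z4 = t5 + t7;
        double z5 = (z3 + z4) * 1.175875602;   /* sqrt(2) *   c3           */
        return t5 * 2.053119869                /* sqrt(2) * ( c1+c3-c5+c7) */
             + z2 * -2.562915447               /* sqrt(2) * (-c1-c3)       */
             + z4 * -0.390180644               /* sqrt(2) * ( c5-c3)       */
             + z5;
    }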
jfdctfst.c
  145  int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  in row_fdct() local
  160  tmp5 = dataptr[2] - dataptr[5];  in row_fdct()
  180  tmp10 = tmp4 + tmp5; /* phase 2 */  in row_fdct()
  181  tmp11 = tmp5 + tmp6;  in row_fdct()
  209  int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  in ff_fdct_ifast() local
  226  tmp5 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*5];  in ff_fdct_ifast()
  246  tmp10 = tmp4 + tmp5; /* phase 2 */  in ff_fdct_ifast()
  247  tmp11 = tmp5 + tmp6;  in ff_fdct_ifast()
  275  int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  in ff_fdct_ifast248() local
  292  tmp5 ...  in ff_fdct_ifast248()
  [all...]
hq_hqadsp.c
   37  int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8, tmp9, tmpA;  in idct_row() local
   45  tmp5 = IDCTMUL(tmp0 + tmp2, FIX_1_847);  in idct_row()
   46  tmp6 = IDCTMUL(tmp2, FIX_1_082) - tmp5;  in idct_row()
   47  tmp7 = tmp5 - IDCTMUL(tmp0, FIX_2_613) * 2;  in idct_row()
   75  int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8, tmp9, tmpA;  in idct_col() local
   83  tmp5 = IDCTMUL(tmp0 + tmp2, FIX_1_847);  in idct_col()
   84  tmp6 = IDCTMUL(tmp2, FIX_1_082) - tmp5;  in idct_col()
   85  tmp7 = tmp5 - IDCTMUL(tmp0, FIX_2_613) * 2;  in idct_col()
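Note: the hq_hqadsp.c lines show the shared-multiply rotation that also appears in jidctfst.c: one product of the sum (x + y) replaces one of the four multiplies a 2x2 rotation would otherwise need. A small C check of that algebraic identity, with the constants spelled out from the FIX_* names above (illustration only, not the file's code):

    /* Three-multiply form, as in the excerpts. */
    static void rotate3(double x, double y, double *u, double *v)
    {
        double p = 1.847759065 * (x + y);   /* FIX_1_847: 2*cos(2*pi/16) */
        *u = 1.082392200 * x - p;           /* FIX_1_082                 */
        *v = p - 2.613125930 * y;           /* FIX_2_613                 */
    }

    /* Same result written as a plain four-multiply scaled rotation. */
    static void rotate4(double x, double y, double *u, double *v)
    {
        *u = -0.765366865 * x - 1.847759065 * y;
        *v =  1.847759065 * x - 0.765366865 * y;
    }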
fft_template.c
  326  unsigned tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;  in fft_calc_c() local
  338  tmp5 = tmpz[2].re + (unsigned)tmpz[3].re;  in fft_calc_c()
  346  tmpz[0].re = tmp1 + tmp5;  in fft_calc_c()
  347  tmpz[2].re = tmp1 - tmp5;  in fft_calc_c()
  369  tmp5 = tmp1 + tmp3;  in fft_calc_c()
  379  tmpz[4].re = tmpz[0].re - tmp5;  in fft_calc_c()
  380  tmpz[0].re = tmpz[0].re + tmp5;  in fft_calc_c()
  389  tmp5 = (int32_t)((accu + 0x40000000) >> 31);  in fft_calc_c()
  396  tmp1 = tmp5 + tmp7;  in fft_calc_c()
  397  tmp3 = tmp5 ...  in fft_calc_c()
  [all...]
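Note: in the fft_template.c hits, tmp5 holds tmpz[2].re + tmpz[3].re and the outputs are formed as sums and differences of such partial sums. A self-contained sketch of that pattern for a 4-point transform, assuming the inputs are already stored in the bit-reversed order FFmpeg's FFT uses (x0, x2, x1, x3); a generic illustration, not the template's exact code:

    typedef struct { float re, im; } cplx;

    /* z[0..3] hold x0, x2, x1, x3; on return they hold X0..X3. */
    static void dft4(cplx z[4])
    {
        float tmp1 = z[0].re + z[1].re, tmp2 = z[0].re - z[1].re;
        float tmp3 = z[0].im + z[1].im, tmp4 = z[0].im - z[1].im;
        float tmp5 = z[2].re + z[3].re, tmp6 = z[2].re - z[3].re;
        float tmp7 = z[2].im + z[3].im, tmp8 = z[2].im - z[3].im;

        z[0].re = tmp1 + tmp5;  z[0].im = tmp3 + tmp7;   /* X0           */
        z[2].re = tmp1 - tmp5;  z[2].im = tmp3 - tmp7;   /* X2           */
        z[1].re = tmp2 + tmp8;  z[1].im = tmp4 - tmp6;   /* X1: -j twiddle */
        z[3].re = tmp2 - tmp8;  z[3].im = tmp4 + tmp6;   /* X3: +j twiddle */
    }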
/third_party/skia/third_party/externals/libjpeg-turbo/ |
jfdctint.c
  145  JLONG tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  in jpeg_fdct_islow() local
  163  tmp5 = dataptr[2] - dataptr[5];  in jpeg_fdct_islow()
  191  z2 = tmp5 + tmp6;  in jpeg_fdct_islow()
  193  z4 = tmp5 + tmp7;  in jpeg_fdct_islow()
  197  tmp5 = MULTIPLY(tmp5, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */  in jpeg_fdct_islow()
  209  dataptr[5] = (DCTELEM)DESCALE(tmp5 + z2 + z4, CONST_BITS - PASS1_BITS);  in jpeg_fdct_islow()
  228  tmp5 = dataptr[DCTSIZE * 2] - dataptr[DCTSIZE * 5];  in jpeg_fdct_islow()
  258  z2 = tmp5 + tmp6;  in jpeg_fdct_islow()
  260  z4 = tmp5 ...  in jpeg_fdct_islow()
  [all...]
jidctflt.c
   76  FAST_FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  in jpeg_idct_float() local
  147  tmp5 = DEQUANTIZE(inptr[DCTSIZE * 3], quantptr[DCTSIZE * 3] * _0_125);  in jpeg_idct_float()
  151  z13 = tmp6 + tmp5; /* phase 6 */  in jpeg_idct_float()
  152  z10 = tmp6 - tmp5;  in jpeg_idct_float()
  164  tmp5 = tmp11 - tmp6;  in jpeg_idct_float()
  165  tmp4 = tmp10 - tmp5;  in jpeg_idct_float()
  171  wsptr[DCTSIZE * 2] = tmp2 + tmp5;  in jpeg_idct_float()
  172  wsptr[DCTSIZE * 5] = tmp2 - tmp5;  in jpeg_idct_float()
  222  tmp5 = tmp11 - tmp6;  in jpeg_idct_float()
  223  tmp4 = tmp10 - tmp5;  in jpeg_idct_float()
  [all...]
jidctfst.c
  175  DCTELEM tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  in jpeg_idct_ifast() local
  246  tmp5 = DEQUANTIZE(inptr[DCTSIZE * 3], quantptr[DCTSIZE * 3]);  in jpeg_idct_ifast()
  250  z13 = tmp6 + tmp5; /* phase 6 */  in jpeg_idct_ifast()
  251  z10 = tmp6 - tmp5;  in jpeg_idct_ifast()
  263  tmp5 = tmp11 - tmp6;  in jpeg_idct_ifast()
  264  tmp4 = tmp10 + tmp5;  in jpeg_idct_ifast()
  270  wsptr[DCTSIZE * 2] = (int)(tmp2 + tmp5);  in jpeg_idct_ifast()
  271  wsptr[DCTSIZE * 5] = (int)(tmp2 - tmp5);  in jpeg_idct_ifast()
  345  tmp5 = tmp11 - tmp6;  in jpeg_idct_ifast()
  346  tmp4 = tmp10 + tmp5;  in jpeg_idct_ifast()
  [all...]
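Note: the jidctfst.c hits are the odd ("phase 6" onward) part of the AAN inverse DCT; tmp5 starts as the dequantized coefficient 3 and is rebuilt as tmp11 - tmp6 before feeding output rows 2 and 5. A floating-point sketch of that odd-part dataflow, with the fixed-point MULTIPLY() constants written out as the values the FIX_* names suggest; a reconstruction for orientation, not the file's code:

    /* Inputs: dequantized (and AAN-prescaled) odd coefficients c1, c3, c5, c7.
     * Outputs o4..o7 combine with the even part as in the excerpts:
     * out[2] = even2 + o5, out[5] = even2 - o5, and so on. */
    static void ifast_odd_part(double c1, double c3, double c5, double c7,
                               double *o4, double *o5, double *o6, double *o7)
    {
        double tmp4 = c1, tmp5 = c3, tmp6 = c5, tmp7 = c7;

        double z13 = tmp6 + tmp5;                  /* phase 6 */
        double z10 = tmp6 - tmp5;
        double z11 = tmp4 + tmp7;
        double z12 = tmp4 - tmp7;

        tmp7 = z11 + z13;                          /* phase 5 */
        double tmp11 = (z11 - z13) * 1.414213562;  /* 2*c4       */
        double z5    = (z10 + z12) * 1.847759065;  /* 2*c2       */
        double tmp10 = 1.082392200 * z12 - z5;     /* 2*(c2-c6)  */
        double tmp12 = -2.613125930 * z10 + z5;    /* -2*(c2+c6) */

        tmp6 = tmp12 - tmp7;                       /* phase 2 */
        tmp5 = tmp11 - tmp6;                       /* the hits at 263/345 */
        tmp4 = tmp10 + tmp5;                       /* the hits at 264/346 */

        *o4 = tmp4; *o5 = tmp5; *o6 = tmp6; *o7 = tmp7;
    }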
jfdctflt.c
   62  FAST_FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  in jpeg_fdct_float() local
   77  tmp5 = dataptr[2] - dataptr[5];  in jpeg_fdct_float()
   97  tmp10 = tmp4 + tmp5; /* phase 2 */  in jpeg_fdct_float()
   98  tmp11 = tmp5 + tmp6;  in jpeg_fdct_float()
  127  tmp5 = dataptr[DCTSIZE * 2] - dataptr[DCTSIZE * 5];  in jpeg_fdct_float()
  147  tmp10 = tmp4 + tmp5; /* phase 2 */  in jpeg_fdct_float()
  148  tmp11 = tmp5 + tmp6;  in jpeg_fdct_float()
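Note: jfdctflt.c is the floating-point AAN forward DCT; tmp5 = dataptr[2] - dataptr[5] is one of the eight opening butterflies, and tmp10/tmp11 start the odd part. A self-contained sketch of one row pass in that style (outputs carry the usual AAN scale factors, which JPEG folds into the quantization table); a reconstruction in the spirit of the file, not a verbatim copy:

    static void aan_fdct8(float d[8])
    {
        float tmp0 = d[0] + d[7], tmp7 = d[0] - d[7];
        float tmp1 = d[1] + d[6], tmp6 = d[1] - d[6];
        float tmp2 = d[2] + d[5], tmp5 = d[2] - d[5];   /* the hit at line 77 */
        float tmp3 = d[3] + d[4], tmp4 = d[3] - d[4];

        /* Even part */
        float tmp10 = tmp0 + tmp3, tmp13 = tmp0 - tmp3;
        float tmp11 = tmp1 + tmp2, tmp12 = tmp1 - tmp2;
        d[0] = tmp10 + tmp11;
        d[4] = tmp10 - tmp11;
        float z1 = (tmp12 + tmp13) * 0.707106781f;      /* c4 */
        d[2] = tmp13 + z1;
        d[6] = tmp13 - z1;

        /* Odd part */
        tmp10 = tmp4 + tmp5;                            /* phase 2 (hits 97/147) */
        tmp11 = tmp5 + tmp6;                            /* hits 98/148 */
        tmp12 = tmp6 + tmp7;
        float z5 = (tmp10 - tmp12) * 0.382683433f;      /* c6    */
        float z2 = 0.541196100f * tmp10 + z5;           /* c2-c6 */
        float z4 = 1.306562965f * tmp12 + z5;           /* c2+c6 */
        float z3 = tmp11 * 0.707106781f;                /* c4    */
        float z11 = tmp7 + z3, z13 = tmp7 - z3;
        d[5] = z13 + z2;
        d[3] = z13 - z2;
        d[1] = z11 + z4;
        d[7] = z11 - z4;
    }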
jfdctfst.c
  119  DCTELEM tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  in jpeg_fdct_ifast() local
  135  tmp5 = dataptr[2] - dataptr[5];  in jpeg_fdct_ifast()
  155  tmp10 = tmp4 + tmp5; /* phase 2 */  in jpeg_fdct_ifast()
  156  tmp11 = tmp5 + tmp6;  in jpeg_fdct_ifast()
  185  tmp5 = dataptr[DCTSIZE * 2] - dataptr[DCTSIZE * 5];  in jpeg_fdct_ifast()
  205  tmp10 = tmp4 + tmp5; /* phase 2 */  in jpeg_fdct_ifast()
  206  tmp11 = tmp5 + tmp6;  in jpeg_fdct_ifast()
/third_party/ffmpeg/libavcodec/loongarch/ |
h264_deblock_lasx.c
   60  tmp5 = __lasx_xvld(mv_t + d_idx_x4, 208); \
   61  DUP2_ARG3(__lasx_xvpermi_q, tmp2, tmp2, 0x20, tmp5, tmp5, \
   62      0x20, tmp2, tmp5); \
   65  tmp5 = __lasx_xvsub_h(tmp5, tmp3); \
   66  DUP2_ARG2(__lasx_xvsat_h, tmp2, 7, tmp5, 7, tmp2, tmp5); \
   67  tmp0 = __lasx_xvpickev_b(tmp5, tmp2); \
  120  __m256i tmp2, tmp3, tmp4, tmp5;  in ff_h264_loop_filter_strength_lasx() local
  [all...]
vp8_lpf_lsx.c
  337  __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  in ff_vp8_h_loop_filter16_lsx() local
  379  tmp5 = __lsx_vilvh_b(q2, q1);  in ff_vp8_h_loop_filter16_lsx()
  398  VP8_ST6x1_UB(tmp6, 0, tmp5, 0, temp_src, 4);  in ff_vp8_h_loop_filter16_lsx()
  400  VP8_ST6x1_UB(tmp6, 1, tmp5, 1, temp_src, 4);  in ff_vp8_h_loop_filter16_lsx()
  402  VP8_ST6x1_UB(tmp6, 2, tmp5, 2, temp_src, 4);  in ff_vp8_h_loop_filter16_lsx()
  404  VP8_ST6x1_UB(tmp6, 3, tmp5, 3, temp_src, 4);  in ff_vp8_h_loop_filter16_lsx()
  406  VP8_ST6x1_UB(tmp7, 0, tmp5, 4, temp_src, 4);  in ff_vp8_h_loop_filter16_lsx()
  408  VP8_ST6x1_UB(tmp7, 1, tmp5, 5, temp_src, 4);  in ff_vp8_h_loop_filter16_lsx()
  410  VP8_ST6x1_UB(tmp7, 2, tmp5, 6, temp_src, 4);  in ff_vp8_h_loop_filter16_lsx()
  412  VP8_ST6x1_UB(tmp7, 3, tmp5, ...  in ff_vp8_h_loop_filter16_lsx()
  424  __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  in ff_vp8_h_loop_filter8uv_lsx() local
  543  __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  in ff_vp8_h_loop_filter16_inner_lsx() local
  [all...]
h264idct_lasx.c
   85  __m256i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  in ff_h264_idct8_addblk_lasx() local
  134  tmp5 = __lasx_xvsrai_h(vec2, 2);  in ff_h264_idct8_addblk_lasx()
  135  tmp5 = __lasx_xvadd_h(tmp5, vec1);  in ff_h264_idct8_addblk_lasx()
  141  LASX_BUTTERFLY_8_H(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,  in ff_h264_idct8_addblk_lasx()
  149      tmp4, tmp5, tmp6, tmp7);  in ff_h264_idct8_addblk_lasx()
  164  vec0 = __lasx_xvsub_w(tmp5, vec0);  in ff_h264_idct8_addblk_lasx()
  173  vec2 = __lasx_xvsrai_w(tmp5, 1);  in ff_h264_idct8_addblk_lasx()
  176  vec2 = __lasx_xvadd_w(vec2, tmp5);  in ff_h264_idct8_addblk_lasx()
  180  vec3 = __lasx_xvadd_w(vec3, tmp5);  in ff_h264_idct8_addblk_lasx()
  [all...]
/third_party/skia/third_party/externals/libjpeg-turbo/simd/arm/ |
jidctfst-neon.c
  148  int16x4_t tmp5 = vmul_s16(vget_high_s16(row3), quant_row3);  in jsimd_idct_ifast_neon() local
  152  int16x4_t z13 = vadd_s16(tmp6, tmp5); /* phase 6 */  in jsimd_idct_ifast_neon()
  153  int16x4_t neg_z10 = vsub_s16(tmp5, tmp6);  in jsimd_idct_ifast_neon()
  173  tmp5 = vsub_s16(tmp11, tmp6);  in jsimd_idct_ifast_neon()
  174  tmp4 = vadd_s16(tmp10, tmp5);  in jsimd_idct_ifast_neon()
  180  row2 = vcombine_s16(dcval, vadd_s16(tmp2, tmp5));  in jsimd_idct_ifast_neon()
  181  row5 = vcombine_s16(dcval, vsub_s16(tmp2, tmp5));  in jsimd_idct_ifast_neon()
  223  int16x4_t tmp5 = vmul_s16(vget_low_s16(row3), quant_row3);  in jsimd_idct_ifast_neon() local
  227  int16x4_t z13 = vadd_s16(tmp6, tmp5); /* phase 6 */  in jsimd_idct_ifast_neon()
  228  int16x4_t neg_z10 = vsub_s16(tmp5, tmp ...  in jsimd_idct_ifast_neon()
  293  int16x8_t tmp5 = vmulq_s16(row3, quant_row3);  in jsimd_idct_ifast_neon() local
  399  int16x8_t tmp5 = vsubq_s16(tmp11, tmp6);  in jsimd_idct_ifast_neon() local
  [all...]
jfdctfst-neon.c
   94  int16x8_t tmp5 = vsubq_s16(col2, col5);  in jsimd_fdct_ifast_neon() local
  112  tmp10 = vaddq_s16(tmp4, tmp5); /* phase 2 */  in jsimd_fdct_ifast_neon()
  113  tmp11 = vaddq_s16(tmp5, tmp6);  in jsimd_fdct_ifast_neon()
  168  tmp5 = vsubq_s16(row2, row5);  in jsimd_fdct_ifast_neon()
  186  tmp10 = vaddq_s16(tmp4, tmp5); /* phase 2 */  in jsimd_fdct_ifast_neon()
  187  tmp11 = vaddq_s16(tmp5, tmp6);  in jsimd_fdct_ifast_neon()
jfdctint-neon.c
  128  int16x8_t tmp5 = vsubq_s16(col2, col5);  in jsimd_fdct_islow_neon() local
  163  int16x8_t z2 = vaddq_s16(tmp5, tmp6);  in jsimd_fdct_islow_neon()
  165  int16x8_t z4 = vaddq_s16(tmp5, tmp7);  in jsimd_fdct_islow_neon()
  176  int32x4_t tmp5_l = vmull_lane_s16(vget_low_s16(tmp5), consts.val[2], 1);  in jsimd_fdct_islow_neon()
  177  int32x4_t tmp5_h = vmull_lane_s16(vget_high_s16(tmp5), consts.val[2], 1);  in jsimd_fdct_islow_neon()
  267  tmp5 = vsubq_s16(row2, row5);  in jsimd_fdct_islow_neon()
  300  z2 = vaddq_s16(tmp5, tmp6);  in jsimd_fdct_islow_neon()
  302  z4 = vaddq_s16(tmp5, tmp7);  in jsimd_fdct_islow_neon()
  313  tmp5_l = vmull_lane_s16(vget_low_s16(tmp5), consts.val[2], 1);  in jsimd_fdct_islow_neon()
  314  tmp5_h = vmull_lane_s16(vget_high_s16(tmp5), const ...  in jsimd_fdct_islow_neon()
  [all...]
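Note: the vmull_lane_s16(vget_low_s16(tmp5), consts.val[2], 1) hits widen tmp5 to 32 bits while multiplying every lane by one broadcast 16-bit constant (lane 1 of a constants vector), which is how the islow FIX_* multiplies are done in NEON. Element-wise, each such call amounts to the following scalar loop over one 4-lane half; a paraphrase for orientation, not the file's code:

    #include <stdint.h>

    /* Scalar equivalent of one vmull_lane_s16() call: widen four int16 lanes
     * and multiply each by the same 16-bit constant, keeping the full 32-bit
     * products (no saturation; rounding/descale happens later). */
    static void mull_lane_s16(const int16_t src[4], int16_t constant, int32_t dst[4])
    {
        for (int i = 0; i < 4; i++)
            dst[i] = (int32_t)src[i] * constant;
    }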
/third_party/node/deps/openssl/openssl/crypto/sha/asm/ |
sha1-ia64.pl
   81  dep.z tmp5=$a,5,27 };;  // a<<5
   89  or tmp5=tmp1,tmp5  // ROTATE(a,5)
   91  { .mii; add $e=$e,tmp5  // e+=ROTATE(a,5)
  104  dep.z tmp5=$a,5,27 };;  // a<<5
  114  { .mmi; or tmp1=tmp1,tmp5  // ROTATE(a,5)
  132  dep.z tmp5=$a,5,27 }  // a<<5
  144  { .mmi; or tmp1=tmp1,tmp5  // ROTATE(a,5)
  163  dep.z tmp5=$a,5,27 }  // a<<5
  173  { .mmi; or tmp1=tmp1,tmp5  // ROTAT...
  [all...]
/third_party/openssl/crypto/sha/asm/ |
sha1-ia64.pl
   81  dep.z tmp5=$a,5,27 };;  // a<<5
   89  or tmp5=tmp1,tmp5  // ROTATE(a,5)
   91  { .mii; add $e=$e,tmp5  // e+=ROTATE(a,5)
  104  dep.z tmp5=$a,5,27 };;  // a<<5
  114  { .mmi; or tmp1=tmp1,tmp5  // ROTATE(a,5)
  132  dep.z tmp5=$a,5,27 }  // a<<5
  144  { .mmi; or tmp1=tmp1,tmp5  // ROTATE(a,5)
  163  dep.z tmp5=$a,5,27 }  // a<<5
  173  { .mmi; or tmp1=tmp1,tmp5  // ROTAT...
  [all...]
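Note: in both copies of sha1-ia64.pl, tmp5 is rotate scratch: dep.z tmp5=$a,5,27 deposits a shifted left by 5 with zero fill, the or merges in the other half of the word (a>>27, presumably produced by a shift elsewhere in the template), and the result ROTATE(a,5) is added into $e. In portable C the same round step looks like this; a generic SHA-1 sketch, not a transcription of the Perl template:

    #include <stdint.h>

    static inline uint32_t rotl32(uint32_t x, unsigned n)   /* n in 1..31 */
    {
        return (x << n) | (x >> (32 - n));
    }

    /* One SHA-1 round: e gains ROTATE(a,5) plus the round function, round
     * constant and message word, then the registers slide down with b
     * rotated by 30. */
    static void sha1_round(uint32_t v[5], uint32_t f_bcd, uint32_t k, uint32_t w)
    {
        uint32_t a = v[0], b = v[1], c = v[2], d = v[3], e = v[4];
        uint32_t t = rotl32(a, 5) + f_bcd + e + k + w;   /* e += ROTATE(a,5) + ... */
        v[4] = d;                /* e' = d           */
        v[3] = c;                /* d' = c           */
        v[2] = rotl32(b, 30);    /* c' = ROTL(b,30)  */
        v[1] = a;                /* b' = a           */
        v[0] = t;                /* a' = t           */
    }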
/third_party/ffmpeg/libavfilter/ |
vf_fspp.c
  250  int_simd16_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  in column_fidct_c() local
  274  tmp5 = dataptr[DCTSIZE * 2] - dataptr[DCTSIZE * 5];  in column_fidct_c()
  313  tmp10 = tmp4 + tmp5;  in column_fidct_c()
  314  tmp11 = tmp5 + tmp6;  in column_fidct_c()
  333  THRESHOLD(tmp5, d3, threshold[3 * 8]);  in column_fidct_c()
  337  //Simd version uses here a shortcut for the tmp5,tmp6,tmp7 == 0  in column_fidct_c()
  338  z13 = tmp6 + tmp5;  in column_fidct_c()
  339  z10 = (tmp6 - tmp5) << 1;  in column_fidct_c()
  350  tmp5 = tmp11 - tmp6;  in column_fidct_c()
  351  tmp4 = tmp10 + tmp5;  in column_fidct_c()
  373  int_simd16_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  in row_idct_c() local
  435  int_simd16_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  in row_fdct_c() local
  [all...]
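Note: column_fidct_c() runs a forward DCT down each column, applies THRESHOLD() to the coefficients (the hit at line 333), and only then runs the inverse; the comment at line 337 says the SIMD path takes a shortcut when tmp5, tmp6 and tmp7 all end up zero after thresholding. A plausible scalar reading of such a threshold step, assuming the usual dead-zone behaviour of the fspp filter (small coefficients are treated as noise and dropped); the exact macro in vf_fspp.c may differ in detail:

    /* Dead-zone threshold: coefficients within +/-threshold collapse to 0,
     * larger ones pass through unchanged. */
    static int dead_zone_threshold(int coeff, int threshold)
    {
        return (coeff > threshold || coeff < -threshold) ? coeff : 0;
    }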
/third_party/ffmpeg/libavcodec/mips/ |
vp8_lpf_msa.c
  382  v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  in ff_vp8_h_loop_filter16_msa() local
  403  ILVRL_B2_SH(q2, q1, tmp2, tmp5);  in ff_vp8_h_loop_filter16_msa()
  422  VP8_ST6x1_UB(tmp6, 0, tmp5, 0, temp_src, 4);  in ff_vp8_h_loop_filter16_msa()
  424  VP8_ST6x1_UB(tmp6, 1, tmp5, 1, temp_src, 4);  in ff_vp8_h_loop_filter16_msa()
  426  VP8_ST6x1_UB(tmp6, 2, tmp5, 2, temp_src, 4);  in ff_vp8_h_loop_filter16_msa()
  428  VP8_ST6x1_UB(tmp6, 3, tmp5, 3, temp_src, 4);  in ff_vp8_h_loop_filter16_msa()
  430  VP8_ST6x1_UB(tmp7, 0, tmp5, 4, temp_src, 4);  in ff_vp8_h_loop_filter16_msa()
  432  VP8_ST6x1_UB(tmp7, 1, tmp5, 5, temp_src, 4);  in ff_vp8_h_loop_filter16_msa()
  434  VP8_ST6x1_UB(tmp7, 2, tmp5, 6, temp_src, 4);  in ff_vp8_h_loop_filter16_msa()
  436  VP8_ST6x1_UB(tmp7, 3, tmp5, ...  in ff_vp8_h_loop_filter16_msa()
  447  v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  in ff_vp8_h_loop_filter8uv_msa() local
  597  v4i32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;  in ff_vp8_h_loop_filter8uv_inner_msa() local
  651  v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;  in ff_vp8_h_loop_filter16_inner_msa() local
  [all...]
fft_mips.c
   66  FFTSample tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;  in ff_fft_calc_mips() local
   84  tmp5 = tmpz[2].re + tmpz[3].re;  in ff_fft_calc_mips()
   92  tmpz[0].re = tmp1 + tmp5;  in ff_fft_calc_mips()
   93  tmpz[2].re = tmp1 - tmp5;  in ff_fft_calc_mips()
  126  "add.s %[tmp5], %[tmp1], %[tmp3] \n\t"  // tmp5 = tmp1 + tmp3;  in ff_fft_calc_mips()
  142  "sub.s %[pom1], %[pom], %[tmp5] \n\t"  in ff_fft_calc_mips()
  144  "add.s %[pom3], %[pom], %[tmp5] \n\t"  in ff_fft_calc_mips()
  147  "swc1 %[pom1], 32(%[tmpz]) \n\t"  // tmpz[4].re = tmpz[0].re - tmp5;  in ff_fft_calc_mips()
  148  "swc1 %[pom3], 0(%[tmpz]) \n\t"  // tmpz[0].re = tmpz[0].re + tmp5;  in ff_fft_calc_mips()
  [all...]
/third_party/skia/third_party/externals/libjpeg-turbo/simd/i386/ |
jfdctint-mmx.asm
  191  psubw mm3, mm6  ; mm3=data2-data5=tmp5
  253  movq mm6, mm3  ; mm3=tmp5
  281  ; z1 = tmp4 + tmp7; z2 = tmp5 + tmp6;
  282  ; tmp4 = tmp4 * 0.298631336; tmp5 = tmp5 * 2.053119869;
  285  ; data7 = tmp4 + z1 + z3; data5 = tmp5 + z2 + z4;
  290  ; tmp5 = tmp5 * (2.053119869 - 2.562915447) + tmp6 * -2.562915447;
  291  ; tmp6 = tmp5 * -2.562915447 + tmp6 * (3.072711026 - 2.562915447);
  293  ; data7 = tmp4 + z3; data5 = tmp5 ...
  [all...]
jfdctint-sse2.asm
  208  psubw xmm5, xmm0  ; xmm5=data2-data5=tmp5
  270  movdqa xmm0, xmm5  ; xmm5=tmp5
  298  ; z1 = tmp4 + tmp7; z2 = tmp5 + tmp6;
  299  ; tmp4 = tmp4 * 0.298631336; tmp5 = tmp5 * 2.053119869;
  302  ; data7 = tmp4 + z1 + z3; data5 = tmp5 + z2 + z4;
  307  ; tmp5 = tmp5 * (2.053119869 - 2.562915447) + tmp6 * -2.562915447;
  308  ; tmp6 = tmp5 * -2.562915447 + tmp6 * (3.072711026 - 2.562915447);
  310  ; data7 = tmp4 + z3; data5 = tmp5 ...
  [all...]
jfdctint-avx2.asm
  175  ; z1 = tmp4 + tmp7; z2 = tmp5 + tmp6;
  176  ; tmp4 = tmp4 * 0.298631336; tmp5 = tmp5 * 2.053119869;
  179  ; data7 = tmp4 + z1 + z3; data5 = tmp5 + z2 + z4;
  184  ; tmp5 = tmp5 * (2.053119869 - 2.562915447) + tmp6 * -2.562915447;
  185  ; tmp6 = tmp5 * -2.562915447 + tmp6 * (3.072711026 - 2.562915447);
  187  ; data7 = tmp4 + z3; data5 = tmp5 + z4;
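Note: the comment block repeated in the three jfdctint-*.asm files describes the same constant-folding trick: instead of scaling z2 = tmp5 + tmp6 separately, the SIMD code folds z2's multiplier into the per-term constants so a single multiply-accumulate per pair is enough. The two formulations are algebraically identical; a small C check, where z4_term is a stand-in name chosen here for the already-scaled z4 + z5 contribution:

    /* Straightforward form, as in the "data5 = tmp5 + z2 + z4" comment. */
    static double data5_reference(double t5, double t6, double z4_term)
    {
        double z2 = t5 + t6;
        return t5 * 2.053119869 + z2 * -2.562915447 + z4_term;
    }

    /* Folded form, constants merged exactly as in the
     * "tmp5 = tmp5 * (2.053119869 - 2.562915447) + tmp6 * -2.562915447"
     * comment; returns the same value. */
    static double data5_folded(double t5, double t6, double z4_term)
    {
        return t5 * (2.053119869 - 2.562915447) + t6 * -2.562915447 + z4_term;
    }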
/third_party/skia/third_party/externals/libjpeg-turbo/simd/x86_64/ |
jfdctint-sse2.asm
  200  psubw xmm5, xmm0  ; xmm5=data2-data5=tmp5
  262  movdqa xmm0, xmm5  ; xmm5=tmp5
  290  ; z1 = tmp4 + tmp7; z2 = tmp5 + tmp6;
  291  ; tmp4 = tmp4 * 0.298631336; tmp5 = tmp5 * 2.053119869;
  294  ; data7 = tmp4 + z1 + z3; data5 = tmp5 + z2 + z4;
  299  ; tmp5 = tmp5 * (2.053119869 - 2.562915447) + tmp6 * -2.562915447;
  300  ; tmp6 = tmp5 * -2.562915447 + tmp6 * (3.072711026 - 2.562915447);
  302  ; data7 = tmp4 + z3; data5 = tmp5 ...
  [all...]