/third_party/node/deps/openssl/openssl/crypto/cast/asm/ |
cast-586.pl | 32 $tmp4="edx"; 95 &E_CAST( 0,$S,$L,$R,$K,@F1,$tmp1,$tmp2,$tmp3,$tmp4); 96 &E_CAST( 1,$S,$R,$L,$K,@F2,$tmp1,$tmp2,$tmp3,$tmp4); 97 &E_CAST( 2,$S,$L,$R,$K,@F3,$tmp1,$tmp2,$tmp3,$tmp4); 98 &E_CAST( 3,$S,$R,$L,$K,@F1,$tmp1,$tmp2,$tmp3,$tmp4); 99 &E_CAST( 4,$S,$L,$R,$K,@F2,$tmp1,$tmp2,$tmp3,$tmp4); 100 &E_CAST( 5,$S,$R,$L,$K,@F3,$tmp1,$tmp2,$tmp3,$tmp4); 101 &E_CAST( 6,$S,$L,$R,$K,@F1,$tmp1,$tmp2,$tmp3,$tmp4); 102 &E_CAST( 7,$S,$R,$L,$K,@F2,$tmp1,$tmp2,$tmp3,$tmp4); 103 &E_CAST( 8,$S,$L,$R,$K,@F3,$tmp1,$tmp2,$tmp3,$tmp4); [all...] |
/third_party/openssl/crypto/cast/asm/ |
cast-586.pl | 32 $tmp4="edx"; 95 &E_CAST( 0,$S,$L,$R,$K,@F1,$tmp1,$tmp2,$tmp3,$tmp4); 96 &E_CAST( 1,$S,$R,$L,$K,@F2,$tmp1,$tmp2,$tmp3,$tmp4); 97 &E_CAST( 2,$S,$L,$R,$K,@F3,$tmp1,$tmp2,$tmp3,$tmp4); 98 &E_CAST( 3,$S,$R,$L,$K,@F1,$tmp1,$tmp2,$tmp3,$tmp4); 99 &E_CAST( 4,$S,$L,$R,$K,@F2,$tmp1,$tmp2,$tmp3,$tmp4); 100 &E_CAST( 5,$S,$R,$L,$K,@F3,$tmp1,$tmp2,$tmp3,$tmp4); 101 &E_CAST( 6,$S,$L,$R,$K,@F1,$tmp1,$tmp2,$tmp3,$tmp4); 102 &E_CAST( 7,$S,$R,$L,$K,@F2,$tmp1,$tmp2,$tmp3,$tmp4); 103 &E_CAST( 8,$S,$L,$R,$K,@F3,$tmp1,$tmp2,$tmp3,$tmp4); [all...] |
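The two cast-586.pl copies above are the same generator: they unroll the 16-round CAST-128 Feistel network, cycling the three round-function types (@F1, @F2, @F3) and alternating $L,$R / $R,$L because the halves swap every round. A minimal C sketch of that structure, following RFC 2144 (S1..S4 and the km/kr subkeys come from the key schedule; this is an illustration, not OpenSSL's implementation):

```c
#include <stdint.h>

#define ROTL32(x, n) (((x) << ((n) & 31)) | ((x) >> ((32 - (n)) & 31)))

/* The four 8x32-bit CAST S-boxes from RFC 2144 (contents omitted here). */
extern const uint32_t S1[256], S2[256], S3[256], S4[256];

static uint32_t f1(uint32_t d, uint32_t km, uint8_t kr) {   /* rounds 0,3,6,... */
    uint32_t i = ROTL32(km + d, kr);
    return ((S1[i >> 24] ^ S2[(i >> 16) & 0xff]) - S3[(i >> 8) & 0xff]) + S4[i & 0xff];
}
static uint32_t f2(uint32_t d, uint32_t km, uint8_t kr) {   /* rounds 1,4,7,... */
    uint32_t i = ROTL32(km ^ d, kr);
    return ((S1[i >> 24] - S2[(i >> 16) & 0xff]) + S3[(i >> 8) & 0xff]) ^ S4[i & 0xff];
}
static uint32_t f3(uint32_t d, uint32_t km, uint8_t kr) {   /* rounds 2,5,8,... */
    uint32_t i = ROTL32(km - d, kr);
    return ((S1[i >> 24] + S2[(i >> 16) & 0xff]) ^ S3[(i >> 8) & 0xff]) - S4[i & 0xff];
}

/* One 64-bit block, 16 rounds; km[]/kr[] are the masking/rotation subkeys. */
static void cast128_encrypt_block(uint32_t *L, uint32_t *R,
                                  const uint32_t km[16], const uint8_t kr[16])
{
    for (int i = 0; i < 16; i++) {
        uint32_t t;
        switch (i % 3) {            /* F1, F2, F3, F1, ... as in the unrolled E_CAST calls */
        case 0:  t = f1(*R, km[i], kr[i]); break;
        case 1:  t = f2(*R, km[i], kr[i]); break;
        default: t = f3(*R, km[i], kr[i]); break;
        }
        t ^= *L;                    /* Feistel step: new R = L ^ F(R, Km, Kr) */
        *L = *R;                    /* swap halves -- hence the $L/$R alternation above */
        *R = t;
    }
}
```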
/third_party/node/deps/openssl/openssl/crypto/bf/asm/ |
bf-586.pl | 27 $tmp4="edx"; 69 &BF_ENCRYPT($i+1,$R,$L,$P,$tmp1,$tmp2,$tmp3,$tmp4,1); 73 &BF_ENCRYPT($i+2,$L,$R,$P,$tmp1,$tmp2,$tmp3,$tmp4,1); 76 &mov($tmp4,&DWP(($BF_ROUNDS+1)*4,$P,"",0)); 88 &BF_ENCRYPT($i,$R,$L,$P,$tmp1,$tmp2,$tmp3,$tmp4,0); 91 &BF_ENCRYPT($i-1,$L,$R,$P,$tmp1,$tmp2,$tmp3,$tmp4,0); 94 &mov($tmp4,&DWP(0,$P,"",0)); 97 &xor($R,$tmp4); 106 local($i,$L,$R,$P,$tmp1,$tmp2,$tmp3,$tmp4,$enc)=@_; 108 &mov( $tmp4, [all...] |
/third_party/openssl/crypto/bf/asm/ |
bf-586.pl | 27 $tmp4="edx"; 69 &BF_ENCRYPT($i+1,$R,$L,$P,$tmp1,$tmp2,$tmp3,$tmp4,1); 73 &BF_ENCRYPT($i+2,$L,$R,$P,$tmp1,$tmp2,$tmp3,$tmp4,1); 76 &mov($tmp4,&DWP(($BF_ROUNDS+1)*4,$P,"",0)); 88 &BF_ENCRYPT($i,$R,$L,$P,$tmp1,$tmp2,$tmp3,$tmp4,0); 91 &BF_ENCRYPT($i-1,$L,$R,$P,$tmp1,$tmp2,$tmp3,$tmp4,0); 94 &mov($tmp4,&DWP(0,$P,"",0)); 97 &xor($R,$tmp4); 106 local($i,$L,$R,$P,$tmp1,$tmp2,$tmp3,$tmp4,$enc)=@_; 108 &mov( $tmp4, [all...] |
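bf-586.pl is the matching Blowfish unroll: BF_ENCRYPT emits one Feistel round, the swapped ($R,$L)/($L,$R) argument pairs encode the half-swap, and the P[0] / P[BF_ROUNDS+1] loads visible above are the whitening XORs at the ends of the cipher. A sketch of the textbook algorithm (Schneier's description, not the exact register scheduling of the assembly):

```c
#include <stdint.h>

#define BF_ROUNDS 16

/* P-array (18 words) and S-boxes (4 x 256 words) come from the key schedule. */
extern const uint32_t P[BF_ROUNDS + 2], S[4][256];

static uint32_t bf_F(uint32_t x) {
    return ((S[0][x >> 24] + S[1][(x >> 16) & 0xff]) ^ S[2][(x >> 8) & 0xff])
           + S[3][x & 0xff];
}

static void bf_encrypt_block(uint32_t *xl, uint32_t *xr) {
    uint32_t L = *xl, R = *xr;
    for (int i = 0; i < BF_ROUNDS; i += 2) {  /* two rounds per pass, swap folded away */
        L ^= P[i];
        R ^= bf_F(L);
        R ^= P[i + 1];
        L ^= bf_F(R);
    }
    *xl = R ^ P[BF_ROUNDS + 1];               /* output whitening */
    *xr = L ^ P[BF_ROUNDS];
}
```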
/third_party/optimized-routines/string/aarch64/ |
strcpy.S | 37 #define tmp4 x11 define 119 rev tmp4, data2 120 sub tmp3, tmp4, zeroones 121 orr tmp4, tmp4, #REP8_7f 128 orr tmp4, data2, #REP8_7f 130 bics has_nul2, tmp3, tmp4 212 orr tmp4, data2, #REP8_7f 214 bics has_nul2, tmp3, tmp4 271 orr tmp4, data [all...] |
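The strcpy.S hits are the usual SWAR null-byte scan: with zeroones = 0x0101…01 and REP8_7f = 0x7f7f…7f, a 64-bit word contains a zero byte exactly when (word − zeroones) & ~(word | REP8_7f) is non-zero, which is what the sub/orr/bics sequence computes (bics is AND-with-complement, setting flags). The C equivalent of the predicate:

```c
#include <stdint.h>

#define REP8_01 0x0101010101010101ULL
#define REP8_7F 0x7f7f7f7f7f7f7f7fULL

/* Non-zero iff any byte of `word` is 0x00 -- the sub/orr/bics idiom above.
 * Equivalent to the classic (v - 0x01..01) & ~v & 0x80..80 test. */
static inline uint64_t has_zero_byte(uint64_t word) {
    return (word - REP8_01) & ~(word | REP8_7F);
}
```

The `rev tmp4, data2` before the subtraction appears to normalise byte order so the position of the first NUL can later be recovered with a bit scan.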
/third_party/skia/third_party/externals/libjpeg-turbo/ |
jfdctint.c | 145 JLONG tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; in jpeg_fdct_islow() local 165 tmp4 = dataptr[3] - dataptr[4]; in jpeg_fdct_islow() 187 * i0..i3 in the paper are tmp4..tmp7 here. in jpeg_fdct_islow() 190 z1 = tmp4 + tmp7; in jpeg_fdct_islow() 192 z3 = tmp4 + tmp6; in jpeg_fdct_islow() 196 tmp4 = MULTIPLY(tmp4, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */ in jpeg_fdct_islow() 208 dataptr[7] = (DCTELEM)DESCALE(tmp4 + z1 + z3, CONST_BITS - PASS1_BITS); in jpeg_fdct_islow() 230 tmp4 = dataptr[DCTSIZE * 3] - dataptr[DCTSIZE * 4]; in jpeg_fdct_islow() 254 * i0..i3 in the paper are tmp4 in jpeg_fdct_islow() [all...] |
jidctflt.c | 76 FAST_FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; in jpeg_idct_float() local 146 tmp4 = DEQUANTIZE(inptr[DCTSIZE * 1], quantptr[DCTSIZE * 1] * _0_125); in jpeg_idct_float() 153 z11 = tmp4 + tmp7; in jpeg_idct_float() 154 z12 = tmp4 - tmp7; in jpeg_idct_float() 165 tmp4 = tmp10 - tmp5; in jpeg_idct_float() 173 wsptr[DCTSIZE * 3] = tmp3 + tmp4; in jpeg_idct_float() 174 wsptr[DCTSIZE * 4] = tmp3 - tmp4; in jpeg_idct_float() 223 tmp4 = tmp10 - tmp5; in jpeg_idct_float() 233 outptr[3] = range_limit[((int)(tmp3 + tmp4)) & RANGE_MASK]; in jpeg_idct_float() 234 outptr[4] = range_limit[((int)(tmp3 - tmp4)) in jpeg_idct_float() [all...] |
jidctfst.c | 175 DCTELEM tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; in jpeg_idct_ifast() local 245 tmp4 = DEQUANTIZE(inptr[DCTSIZE * 1], quantptr[DCTSIZE * 1]); in jpeg_idct_ifast() 252 z11 = tmp4 + tmp7; in jpeg_idct_ifast() 253 z12 = tmp4 - tmp7; in jpeg_idct_ifast() 264 tmp4 = tmp10 + tmp5; in jpeg_idct_ifast() 272 wsptr[DCTSIZE * 4] = (int)(tmp3 + tmp4); in jpeg_idct_ifast() 273 wsptr[DCTSIZE * 3] = (int)(tmp3 - tmp4); in jpeg_idct_ifast() 346 tmp4 = tmp10 + tmp5; in jpeg_idct_ifast() 363 range_limit[IDESCALE(tmp3 + tmp4, PASS1_BITS + 3) & RANGE_MASK]; in jpeg_idct_ifast() 365 range_limit[IDESCALE(tmp3 - tmp4, PASS1_BIT in jpeg_idct_ifast() [all...] |
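jidctflt.c and jidctfst.c (and the NEON port further down) are the AAN 8-point inverse DCT: tmp4 starts as the dequantized row-1 coefficient, passes through the z11/z12 butterfly, and ends up as the value added to and subtracted from tmp3 to form output rows 3 and 4. For orientation, a float sketch of the textbook 1-D AAN inverse pass (per-coefficient scale factors assumed folded into the quantization table, as libjpeg does; the fixed-point ifast version differs only in representation and a couple of sign choices):

```c
/* One 8-point AAN inverse DCT over in[0..7] (already dequantized and scaled),
 * writing the spatial samples to out[0..7].  Illustrative sketch only. */
static void aan_idct8(const float in[8], float out[8])
{
    /* Even part: coefficients 0, 2, 4, 6. */
    float tmp10 = in[0] + in[4];
    float tmp11 = in[0] - in[4];
    float tmp13 = in[2] + in[6];
    float tmp12 = (in[2] - in[6]) * 1.414213562f - tmp13;   /* 2*cos(pi/4) */

    float tmp0 = tmp10 + tmp13;
    float tmp3 = tmp10 - tmp13;
    float tmp1 = tmp11 + tmp12;
    float tmp2 = tmp11 - tmp12;

    /* Odd part: coefficients 1, 3, 5, 7 (tmp4..tmp7 in the C files above). */
    float z13 = in[5] + in[3];
    float z10 = in[5] - in[3];
    float z11 = in[1] + in[7];
    float z12 = in[1] - in[7];

    float tmp7   = z11 + z13;
    float tmp11b = (z11 - z13) * 1.414213562f;
    float z5     = (z10 + z12) * 1.847759065f;              /* 2*cos(pi/8) */
    float tmp10b = 1.082392200f * z12 - z5;
    float tmp12b = -2.613125930f * z10 + z5;

    float tmp6 = tmp12b - tmp7;
    float tmp5 = tmp11b - tmp6;
    float tmp4 = tmp10b + tmp5;

    out[0] = tmp0 + tmp7;  out[7] = tmp0 - tmp7;
    out[1] = tmp1 + tmp6;  out[6] = tmp1 - tmp6;
    out[2] = tmp2 + tmp5;  out[5] = tmp2 - tmp5;
    out[4] = tmp3 + tmp4;  out[3] = tmp3 - tmp4;
}
```

The float and ifast hits above use slightly different sign conventions for the final tmp4 (tmp10 − tmp5 versus tmp10 + tmp5, with rows 3/4 swapped accordingly), but the structure is the same.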
jfdctflt.c | 62 FAST_FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; in jpeg_fdct_float() local 79 tmp4 = dataptr[3] - dataptr[4]; in jpeg_fdct_float() 97 tmp10 = tmp4 + tmp5; /* phase 2 */ in jpeg_fdct_float() 129 tmp4 = dataptr[DCTSIZE * 3] - dataptr[DCTSIZE * 4]; in jpeg_fdct_float() 147 tmp10 = tmp4 + tmp5; /* phase 2 */ in jpeg_fdct_float() |
jfdctfst.c | 119 DCTELEM tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; in jpeg_fdct_ifast() local 137 tmp4 = dataptr[3] - dataptr[4]; in jpeg_fdct_ifast() 155 tmp10 = tmp4 + tmp5; /* phase 2 */ in jpeg_fdct_ifast() 187 tmp4 = dataptr[DCTSIZE * 3] - dataptr[DCTSIZE * 4]; in jpeg_fdct_ifast() 205 tmp10 = tmp4 + tmp5; /* phase 2 */ in jpeg_fdct_ifast() |
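On the forward side, jfdctflt.c, jfdctfst.c and jfdctint.c all open with the same butterfly: sums of mirrored samples feed the even half of the transform, and the differences (tmp4..tmp7, with tmp4 = dataptr[3] − dataptr[4]) feed the odd half. A sketch of that first stage:

```c
/* First stage of the 8-point forward DCT used by all jfdct* variants:
 * even[] goes on to produce coefficients 0/2/4/6, odd[] (tmp4..tmp7)
 * produces 1/3/5/7. */
static void fdct8_butterfly(const float d[8], float even[4], float odd[4])
{
    even[0] = d[0] + d[7];   odd[3] = d[0] - d[7];   /* tmp0 / tmp7 */
    even[1] = d[1] + d[6];   odd[2] = d[1] - d[6];   /* tmp1 / tmp6 */
    even[2] = d[2] + d[5];   odd[1] = d[2] - d[5];   /* tmp2 / tmp5 */
    even[3] = d[3] + d[4];   odd[0] = d[3] - d[4];   /* tmp3 / tmp4 */
}
```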
/third_party/ffmpeg/libavcodec/ |
faandct.c | 67 FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; in row_fdct() local 80 tmp4= data[3 + i] - data[4 + i]; in row_fdct() 95 tmp4 += tmp5; in row_fdct() 99 z2= tmp4*(A2+A5) - tmp6*A5; in row_fdct() 100 z4= tmp6*(A4-A5) + tmp4*A5; in row_fdct() 116 FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; in ff_faandct() local 134 tmp4= temp[8*3 + i] - temp[8*4 + i]; in ff_faandct() 149 tmp4 += tmp5; in ff_faandct() 153 z2= tmp4*(A2+A5) - tmp6*A5; in ff_faandct() 154 z4= tmp6*(A4-A5) + tmp4*A in ff_faandct() 170 FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; ff_faandct248() local [all...] |
jfdctint_template.c | 184 int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; in row_fdct() local 203 tmp4 = dataptr[3] - dataptr[4]; in row_fdct() 225 * i0..i3 in the paper are tmp4..tmp7 here. in row_fdct() 228 z1 = tmp4 + tmp7; in row_fdct() 230 z3 = tmp4 + tmp6; in row_fdct() 234 tmp4 = MULTIPLY(tmp4, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */ in row_fdct() 246 dataptr[7] = (int16_t) DESCALE(tmp4 + z1 + z3, CONST_BITS-PASS1_BITS); in row_fdct() 262 int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; in ff_jpeg_fdct_islow() local 284 tmp4 in ff_jpeg_fdct_islow() 344 int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; ff_fdct248_islow() local [all...] |
jfdctfst.c | 145 int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; in row_fdct() local 162 tmp4 = dataptr[3] - dataptr[4]; in row_fdct() 180 tmp10 = tmp4 + tmp5; /* phase 2 */ in row_fdct() 209 int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; in ff_fdct_ifast() local 228 tmp4 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*4]; in ff_fdct_ifast() 246 tmp10 = tmp4 + tmp5; /* phase 2 */ in ff_fdct_ifast() 275 int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; in ff_fdct_ifast248() local 291 tmp4 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*1]; in ff_fdct_ifast248() 310 tmp10 = tmp4 + tmp7; in ff_fdct_ifast248() 313 tmp13 = tmp4 in ff_fdct_ifast248() [all...] |
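faandct.c and the two FFmpeg jfdct* files use the same DCT factorizations as libjpeg; the combined constants (A2+A5, A4−A5 in faandct.c, the folded FIX_* products documented in the SIMD sources at the end of this list) essentially come from rewriting plane rotations so that multiplications can be shared or precomputed. The underlying three-multiplication identity, as a generic illustration (not code from any of these files):

```c
#include <math.h>

/* Rotate (x, y) by angle t with 3 multiplies and 3 additions:
 *   z  = cos(t) * (x + y)
 *   xr = z - (cos(t) + sin(t)) * y   ==  x*cos(t) - y*sin(t)
 *   yr = z + (sin(t) - cos(t)) * x   ==  x*sin(t) + y*cos(t)
 * The DCT code precomputes the two combined constants instead of cos/sin. */
static void rotate3(double x, double y, double t, double *xr, double *yr)
{
    const double c = cos(t), s = sin(t), z = c * (x + y);
    *xr = z - (c + s) * y;
    *yr = z + (s - c) * x;
}
```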
hq_hqadsp.c | 37 int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8, tmp9, tmpA; in idct_row() local 44 tmp4 = tmp3 - tmp1; in idct_row() 50 tmpA = IDCTMUL(tmp4, FIX_1_414) * 4 - tmp9; in idct_row() 75 int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8, tmp9, tmpA; in idct_col() local 82 tmp4 = tmp3 - tmp1; in idct_col() 88 tmpA = IDCTMUL(tmp4, FIX_1_414) * 2 - tmp9; in idct_col() |
fft_template.c | 326 unsigned tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8; in fft_calc_c() local 343 tmp4 = tmpz[0].im - (unsigned)tmpz[1].im; in fft_calc_c() 352 tmpz[1].im = tmp4 - tmp7; in fft_calc_c() 353 tmpz[3].im = tmp4 + tmp7; in fft_calc_c() 368 tmp4 = tmpz[6].im + (unsigned)tmpz[7].im; in fft_calc_c() 371 tmp6 = tmp2 + tmp4; in fft_calc_c() 372 tmp8 = tmp2 - tmp4; in fft_calc_c() 377 tmp4 = tmpz[6].im - (unsigned)tmpz[7].im; in fft_calc_c() 390 accu = (int64_t)Q31(M_SQRT1_2)*(int)(tmp3 - tmp4); in fft_calc_c() 394 accu = (int64_t)Q31(M_SQRT1_2)*(int)(tmp3 + tmp4); in fft_calc_c() [all...] |
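fft_calc_c() in fft_template.c is FFmpeg's FFT kernel; the tmp1..tmp8 values shown are real/imaginary sums and differences inside its hard-coded small-size passes, with M_SQRT1_2 supplying the ±45° twiddles. The shape is easiest to see on a plain 4-point decimation-in-time butterfly (forward-transform sign convention; FFmpeg's fixed-point variant and data layout differ):

```c
#include <complex.h>

/* In-place 4-point decimation-in-time FFT. */
static void fft4(float complex z[4])
{
    float complex e0 = z[0] + z[2], e1 = z[0] - z[2];  /* even-index pair */
    float complex o0 = z[1] + z[3], o1 = z[1] - z[3];  /* odd-index pair  */

    z[0] = e0 + o0;
    z[2] = e0 - o0;
    z[1] = e1 - I * o1;   /* twiddle W_4^1 = -j */
    z[3] = e1 + I * o1;
}
```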
/third_party/node/deps/openssl/openssl/crypto/rc5/asm/ |
rc5-586.pl | 27 $tmp4="edx"; 47 &mov($tmp4,&wparam(0)); 51 &mov($A,&DWP(0,$tmp4,"",0)); 52 &mov($B,&DWP(4,$tmp4,"",0)); 110 &mov(&DWP(0,$tmp4,"",0),$A); 111 &mov(&DWP(4,$tmp4,"",0),$B); |
/third_party/openssl/crypto/rc5/asm/ |
rc5-586.pl | 27 $tmp4="edx"; 47 &mov($tmp4,&wparam(0)); 51 &mov($A,&DWP(0,$tmp4,"",0)); 52 &mov($B,&DWP(4,$tmp4,"",0)); 110 &mov(&DWP(0,$tmp4,"",0),$A); 111 &mov(&DWP(4,$tmp4,"",0),$B); |
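In rc5-586.pl, $tmp4 is simply the data pointer: the two 32-bit halves A and B are loaded from offsets 0 and 4, transformed, and stored back. For context, textbook RC5-32/r encryption (Rivest, 1994), which is what the unrolled rounds implement (illustrative, not OpenSSL's code):

```c
#include <stdint.h>

#define ROTL32(x, n) (((x) << ((n) & 31)) | ((x) >> ((32 - (n)) & 31)))

/* RC5-32/r encryption of one 8-byte block.  S[] is the expanded key table of
 * 2*(rounds+1) words; data[0..1] are the two words A and B, loaded and stored
 * much as the $tmp4-relative moves above. */
static void rc5_encrypt_block(uint32_t data[2], const uint32_t *S, int rounds)
{
    uint32_t A = data[0] + S[0];
    uint32_t B = data[1] + S[1];
    for (int i = 1; i <= rounds; i++) {
        A = ROTL32(A ^ B, B) + S[2 * i];        /* data-dependent rotation */
        B = ROTL32(B ^ A, A) + S[2 * i + 1];
    }
    data[0] = A;
    data[1] = B;
}
```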
/third_party/skia/third_party/externals/libjpeg-turbo/simd/arm/ |
jidctfst-neon.c | 147 int16x4_t tmp4 = vmul_s16(vget_high_s16(row1), quant_row1); in jsimd_idct_ifast_neon() local 154 int16x4_t z11 = vadd_s16(tmp4, tmp7); in jsimd_idct_ifast_neon() 155 int16x4_t z12 = vsub_s16(tmp4, tmp7); in jsimd_idct_ifast_neon() 174 tmp4 = vadd_s16(tmp10, tmp5); in jsimd_idct_ifast_neon() 182 row4 = vcombine_s16(dcval, vadd_s16(tmp3, tmp4)); in jsimd_idct_ifast_neon() 183 row3 = vcombine_s16(dcval, vsub_s16(tmp3, tmp4)); in jsimd_idct_ifast_neon() 222 int16x4_t tmp4 = vmul_s16(vget_low_s16(row1), quant_row1); in jsimd_idct_ifast_neon() local 229 int16x4_t z11 = vadd_s16(tmp4, tmp7); in jsimd_idct_ifast_neon() 230 int16x4_t z12 = vsub_s16(tmp4, tmp7); in jsimd_idct_ifast_neon() 249 tmp4 in jsimd_idct_ifast_neon() 292 int16x8_t tmp4 = vmulq_s16(row1, quant_row1); jsimd_idct_ifast_neon() local 400 int16x8_t tmp4 = vaddq_s16(tmp10, tmp5); jsimd_idct_ifast_neon() local [all...] |
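jidctfst-neon.c is the same ifast IDCT with every tmpN widened to a vector, so one butterfly step processes four (int16x4_t) or eight (int16x8_t) columns at once. A minimal sketch of that scalar-to-vector mapping for the z11/z12 step visible in the hits (illustrative, not the file's code):

```c
#include <arm_neon.h>

/* The AAN odd-part butterfly z11 = tmp4 + tmp7, z12 = tmp4 - tmp7,
 * performed for four int16 columns in parallel. */
static inline void aan_z11_z12(int16x4_t tmp4, int16x4_t tmp7,
                               int16x4_t *z11, int16x4_t *z12)
{
    *z11 = vadd_s16(tmp4, tmp7);
    *z12 = vsub_s16(tmp4, tmp7);
}
```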
/third_party/node/deps/openssl/openssl/crypto/poly1305/asm/ |
poly1305-mips.pl | 71 ($in0,$in1,$tmp0,$tmp1,$tmp2,$tmp3,$tmp4) = ($a4,$a5,$a6,$a7,$at,$t0,$t1); 124 dsrl $tmp4,$in1,24 128 and $tmp4,$tmp0 131 or $tmp3,$tmp4 133 and $tmp4,$in1,$tmp0 137 dsll $tmp4,8 141 or $tmp3,$tmp4 243 dsrl $tmp4,$in1,24 247 and $tmp4,$tmp0 250 or $tmp3,$tmp4 [all...] |
/third_party/openssl/crypto/poly1305/asm/ |
poly1305-mips.pl | 71 ($in0,$in1,$tmp0,$tmp1,$tmp2,$tmp3,$tmp4) = ($a4,$a5,$a6,$a7,$at,$t0,$t1); 124 dsrl $tmp4,$in1,24 128 and $tmp4,$tmp0 131 or $tmp3,$tmp4 133 and $tmp4,$in1,$tmp0 137 dsll $tmp4,8 141 or $tmp3,$tmp4 243 dsrl $tmp4,$in1,24 247 and $tmp4,$tmp0 250 or $tmp3,$tmp4 [all...] |
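In poly1305-mips.pl the dsrl/dsll/and/or chains around $tmp3/$tmp4 assemble a byte-swapped copy of each input word, i.e. a little-endian load on a big-endian MIPS64 build, since Poly1305 interprets every 16-byte block as a little-endian number (plus the 2^128 pad bit) before multiplying it into the accumulator. A portable C equivalent of that load:

```c
#include <stdint.h>
#include <stddef.h>

/* Little-endian 64-bit load, independent of host byte order; the MIPS asm
 * open-codes the equivalent byte swap when built big-endian. */
static inline uint64_t load_le64(const uint8_t *p)
{
    uint64_t v = 0;
    for (size_t i = 0; i < 8; i++)
        v |= (uint64_t)p[i] << (8 * i);
    return v;
}
```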
/third_party/ffmpeg/libavcodec/loongarch/ |
h264_deblock_lasx.c | 59 tmp4 = __lasx_xvld(mv_t, 208); \ 63 tmp3 = __lasx_xvpermi_q(tmp4, tmp3, 0x20); \ 85 tmp4 = __lasx_xvsub_h(tmp3, tmp2); \ 86 tmp1 = __lasx_xvsat_h(tmp4, 7); \ 120 __m256i tmp2, tmp3, tmp4, tmp5; in ff_h264_loop_filter_strength_lasx() local 142 LASX_TRANSPOSE4x4_H(tmp0, tmp2, tmp1, tmp3, tmp2, tmp3, tmp4, tmp5); in ff_h264_loop_filter_strength_lasx() 145 __lasx_xvstelm_d(tmp4, (int8_t*)bS + 16, 0, 0); in ff_h264_loop_filter_strength_lasx() |
/third_party/ffmpeg/libavfilter/ |
vf_fspp.c | 250 int_simd16_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; in column_fidct_c() local 277 tmp4 = dataptr[DCTSIZE * 3] - dataptr[DCTSIZE * 4]; in column_fidct_c() 313 tmp10 = tmp4 + tmp5; in column_fidct_c() 332 THRESHOLD(tmp4, d1, threshold[1 * 8]); in column_fidct_c() 340 z11 = tmp4 + tmp7; in column_fidct_c() 341 z12 = (tmp4 - tmp7) << 1; in column_fidct_c() 351 tmp4 = tmp10 + tmp5; in column_fidct_c() 356 wsptr[DCTSIZE * 3] += (tmp3 - tmp4); in column_fidct_c() 357 wsptr[DCTSIZE * 4] += (tmp3 + tmp4); in column_fidct_c() 373 int_simd16_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp in row_idct_c() local 435 int_simd16_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; row_fdct_c() local [all...] |
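vf_fspp.c runs a forward DCT per column (the same tmp4 = dataptr[3] − dataptr[4] butterfly), thresholds the coefficients, and accumulates the inverse transform into wsptr[] — that is the THRESHOLD(tmp4, d1, threshold[1*8]) line sitting between the two halves. A generic sketch of the thresholding step (the filter's actual macro works on a fixed-point representation and its comparison details may differ):

```c
/* DCT-domain hard threshold: small coefficients are treated as noise or
 * ringing and zeroed, larger ones pass through. */
static inline int dct_hard_threshold(int coef, int threshold)
{
    return (coef > threshold || coef < -threshold) ? coef : 0;
}
```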
/third_party/ffmpeg/libswscale/ppc/ |
swscale_vsx.c | 539 vec_s32 tmp, tmp2, tmp3, tmp4; in yuv2rgb_full_X_vsx_template() local 599 tmp4 = vec_mergel(tmp, tmp2); in yuv2rgb_full_X_vsx_template() 602 vy32_r = vec_adds(vy32_r, tmp4); in yuv2rgb_full_X_vsx_template() 610 tmp4 = vec_mergel(tmp, tmp2); in yuv2rgb_full_X_vsx_template() 613 vu32_r = vec_adds(vu32_r, tmp4); in yuv2rgb_full_X_vsx_template() 619 tmp4 = vec_mergel(tmp, tmp2); in yuv2rgb_full_X_vsx_template() 622 vv32_r = vec_adds(vv32_r, tmp4); in yuv2rgb_full_X_vsx_template() 666 tmp4 = vec_mergel(tmp, tmp2); \ 675 tmp4 = vec_add(tmp4, tmp 696 vec_s32 tmp, tmp2, tmp3, tmp4, tmp5, tmp6; yuv2rgb_full_2_vsx_template() local 818 vec_s32 tmp, tmp2, tmp3, tmp4, tmp5, tmp6; yuv2rgb_2_vsx_template() local 1437 vec_s32 vy32[4], vu32[2], vv32[2], tmp, tmp2, tmp3, tmp4; yuv2422_X_vsx_template() local 1548 vec_s32 tmp, tmp2, tmp3, tmp4, tmp5, tmp6; yuv2422_2_vsx_template() local [all...] |
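swscale_vsx.c is the VSX YUV→RGB path: tmp/tmp2/tmp3/tmp4 hold widened 32-bit partial products that are merged (vec_mergel) and accumulated (vec_adds) per channel. The per-pixel arithmetic being vectorised is the usual matrix conversion; for reference, the textbook BT.601 full-range form (an assumption for illustration — libswscale itself uses table-driven fixed-point coefficients selected from the colorspace details, not these literals):

```c
#include <stdint.h>

/* Full-range BT.601 YCbCr -> RGB for one pixel, with clamping and rounding. */
static void yuv_to_rgb_bt601(uint8_t y, uint8_t cb, uint8_t cr,
                             uint8_t *r, uint8_t *g, uint8_t *b)
{
    const float Y = y, Cb = cb - 128.0f, Cr = cr - 128.0f;
    const float rf = Y + 1.402f * Cr;
    const float gf = Y - 0.344136f * Cb - 0.714136f * Cr;
    const float bf = Y + 1.772f * Cb;

    *r = (uint8_t)(rf < 0.0f ? 0.0f : rf > 255.0f ? 255.0f : rf + 0.5f);
    *g = (uint8_t)(gf < 0.0f ? 0.0f : gf > 255.0f ? 255.0f : gf + 0.5f);
    *b = (uint8_t)(bf < 0.0f ? 0.0f : bf > 255.0f ? 255.0f : bf + 0.5f);
}
```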
/third_party/skia/third_party/externals/libjpeg-turbo/simd/i386/ |
jfdctint-mmx.asm | 190 psubw mm2, mm1 ; mm2=data3-data4=tmp4 252 movq mm0, mm2 ; mm2=tmp4 281 ; z1 = tmp4 + tmp7; z2 = tmp5 + tmp6; 282 ; tmp4 = tmp4 * 0.298631336; tmp5 = tmp5 * 2.053119869; 285 ; data7 = tmp4 + z1 + z3; data5 = tmp5 + z2 + z4; 289 ; tmp4 = tmp4 * (0.298631336 - 0.899976223) + tmp7 * -0.899976223; 292 ; tmp7 = tmp4 * -0.899976223 + tmp7 * (1.501321110 - 0.899976223); 293 ; data7 = tmp4 [all...] |
jfdctint-sse2.asm | 207 psubw xmm2, xmm4 ; xmm2=data3-data4=tmp4 269 movdqa xmm6, xmm2 ; xmm2=tmp4 298 ; z1 = tmp4 + tmp7; z2 = tmp5 + tmp6; 299 ; tmp4 = tmp4 * 0.298631336; tmp5 = tmp5 * 2.053119869; 302 ; data7 = tmp4 + z1 + z3; data5 = tmp5 + z2 + z4; 306 ; tmp4 = tmp4 * (0.298631336 - 0.899976223) + tmp7 * -0.899976223; 309 ; tmp7 = tmp4 * -0.899976223 + tmp7 * (1.501321110 - 0.899976223); 310 ; data7 = tmp4 [all...] |
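The MMX and SSE2 comments above document why the packed constants differ from the scalar FIX_* values: with z1 = tmp4 + tmp7, the scalar tmp4·0.298631336 + z1·(−0.899976223) is refolded as tmp4·(0.298631336 − 0.899976223) + tmp7·(−0.899976223), so a single pmaddwd over the interleaved pair (tmp4, tmp7) produces the whole sum. What one pmaddwd lane computes, in scalar form (illustrative):

```c
#include <stdint.h>

/* One pmaddwd lane: multiply two adjacent int16 pairs and add the products
 * into an int32.  Packing the constants (a + b, b) against the data pair
 * (tmp4, tmp7) therefore yields tmp4*a + (tmp4 + tmp7)*b in one instruction. */
static int32_t pmaddwd_lane(int16_t x0, int16_t x1, int16_t c0, int16_t c1)
{
    return (int32_t)x0 * c0 + (int32_t)x1 * c1;
}
```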