/third_party/node/deps/openssl/openssl/crypto/bn/asm/

  ppc64-mont.pl
    146  $t7="r31";
    289  mulld $t7,$a0,$t3 ; ap[0]*bp[0]
    300  mulld $t7,$t7,$n0 ; tp[0]*n0
    302  extrdi $t4,$t7,16,48
    303  extrdi $t5,$t7,16,32
    304  extrdi $t6,$t7,16,16
    305  extrdi $t7,$t7,16,0
    309  std $t7,` [all...]
/third_party/openssl/crypto/bn/asm/

  ppc64-mont.pl
    146  $t7="r31";
    289  mulld $t7,$a0,$t3 ; ap[0]*bp[0]
    300  mulld $t7,$t7,$n0 ; tp[0]*n0
    302  extrdi $t4,$t7,16,48
    303  extrdi $t5,$t7,16,32
    304  extrdi $t6,$t7,16,16
    305  extrdi $t7,$t7,16,0
    309  std $t7,` [all...]
/third_party/typescript/tests/baselines/reference/

  dynamicNames.js
    110  let t7: N.T7;
    122  t4 = t5, t4 = t6, t4 = t7, t5 = t4, t5 = t6, t5 = t7, t6 = t4, t6 = t5, t6 = t7, t7 = t4, t7 = t5, t7 = t6;
    189  let t7;
    200  t4 = t5, t4 = t6, t4 = t7, t5 = t4, t5 = t6, t5 = t7, t [all...]

  readonlyTupleAndArrayElaboration.js
    36  const t7: [1] = [1];
    37  const t8: [] = t7;
    96  var t7 = [1];
    97  var t8 = t7;

  duplicateObjectLiteralProperty_computedName.js
    32  const t7 = {
    76  var t7 = (_g = {

  typeSatisfaction.js
    24  let t7 = { a: 'test' } satisfies A;
    35  var t7 = { a: 'test' };
/third_party/python/Lib/test/

  test_pkg.py
    241  ("t7.py", ""),
    242  ("t7", None),
    243  ("t7 __init__.py", ""),
    244  ("t7 sub.py",
    246  ("t7 sub", None),
    247  ("t7 sub __init__.py", ""),
    248  ("t7 sub .py",
    250  ("t7 sub subsub", None),
    251  ("t7 sub subsub __init__.py",
    257  t7, su [all...]
/third_party/ffmpeg/libavcodec/mips/

  vc1dsp_msa.c
    144  v4i32 t1, t2, t3, t4, t5, t6, t7, t8;  in ff_vc1_inv_trans_4x8_msa() local
    176  t7 = cnst_22 * in_r5 + cnst_10 * in_r7;  in ff_vc1_inv_trans_4x8_msa()
    181  in_r4 = (t5 + t7) >> 3;  in ff_vc1_inv_trans_4x8_msa()
    187  in_r7 = (t5 - t7) >> 3;  in ff_vc1_inv_trans_4x8_msa()
    199  t7 = t2 - t4, t8 = t1 - t3;  in ff_vc1_inv_trans_4x8_msa()
    214  in_r2 = (t7 + t3) >> 7;  in ff_vc1_inv_trans_4x8_msa()
    217  in_r5 = (t7 - t3 + cnst_1) >> 7;  in ff_vc1_inv_trans_4x8_msa()
    234  v4i32 t1, t2, t3, t4, t5, t6, t7, t8;  in ff_vc1_inv_trans_8x4_msa() local
    261  t7 = t2 - t4, t8 = t1 - t3;  in ff_vc1_inv_trans_8x4_msa()
    268  in2 = (t7  in ff_vc1_inv_trans_8x4_msa()
    318  v8i16 t0, t1, t2, t3, t4, t5, t6, t7;  put_vc1_mspel_mc_h_v_msa() local
    [all...]

  mpegaudiodsp_mips_fixed.c
    361  int t4, t5, t6, t8, t7;  in imdct36_mips_fixed() local
    395  "lw %[t7], 11*4(%[in]) \n\t"  in imdct36_mips_fixed()
    400  "addu %[t6], %[t6], %[t7] \n\t"  in imdct36_mips_fixed()
    401  "addu %[t7], %[t7], %[t8] \n\t"  in imdct36_mips_fixed()
    403  "addu %[t5], %[t5], %[t7] \n\t"  in imdct36_mips_fixed()
    410  "addu %[t7], %[t7], %[t1] \n\t"  in imdct36_mips_fixed()
    413  "sw %[t7], 11*4(%[in]) \n\t"  in imdct36_mips_fixed()
    425  "lw %[t7],  in imdct36_mips_fixed()
    [all...]
/third_party/ffmpeg/libavcodec/loongarch/

  vc1dsp_lasx.c
    30  __m256i temp0, temp1, temp2, temp3, t1, t2, t3, t4, t5, t6, t7, t8;  in ff_vc1_inv_trans_8x8_lasx() local
    69  t7 = __lasx_xvsub_w(t2, t4);  in ff_vc1_inv_trans_8x8_lasx()
    82  DUP4_ARG2(__lasx_xvadd_w, t1, t5, t6, t2, t7, t3, t8, t4,  in ff_vc1_inv_trans_8x8_lasx()
    84  DUP4_ARG2(__lasx_xvsub_w, t8, t4, t7, t3, t6, t2, t5, t1,  in ff_vc1_inv_trans_8x8_lasx()
    106  t7 = __lasx_xvsub_w(t2, t4);  in ff_vc1_inv_trans_8x8_lasx()
    119  DUP4_ARG2(__lasx_xvadd_w, t5, t1, t6, t2, t7, t3, t8, t4,  in ff_vc1_inv_trans_8x8_lasx()
    121  DUP4_ARG2(__lasx_xvsub_w, t8, t4, t7, t3, t6, t2, t5, t1,  in ff_vc1_inv_trans_8x8_lasx()
    527  __m256i t0, t1, t2, t3, t4, t5, t6, t7;  in put_vc1_mspel_mc_h_v_lasx() local
    595  t7 = __lasx_xvdp2_h_bu(temp0, const_para1_2);  in put_vc1_mspel_mc_h_v_lasx()
    596  t7  in put_vc1_mspel_mc_h_v_lasx()
    815  __m256i t0, t1, t2, t3, t4, t5, t6, t7;  put_vc1_mspel_mc_h_lasx() local
    [all...]
/third_party/pulseaudio/speex/libspeexdsp/

  smallft.c
    278  int t0,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;  in dradfg() local
    424  t7=idl1;  in dradfg()
    427  ch2[t4++]=c2[ik]+ar1*c2[t7++];  in dradfg()
    447  t7=t2;  in dradfg()
    452  ch2[t7++]+=ai2*c2[t9++];  in dradfg()
    502  t7=t4;  in dradfg()
    506  cc[t5]=ch[t7];  in dradfg()
    509  t7+=ido;  in dradfg()
    526  t7=t3;  in dradfg()
    532  cc[i+t7  in dradfg()
    692  int i,k,t0,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10;  dradb3() local
    754  int i,k,t0,t1,t2,t3,t4,t5,t6,t7,t8;  dradb4() local
    845  int idij,ipph,i,j,k,l,ik,is,t0,t1,t2,t3,t4,t5,t6,t7,t8,t9,t10,  dradbg() local
    [all...]
/third_party/ffmpeg/libavcodec/

  hqxdsp.c
    29  int t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, tA, tB, tC, tD, tE, tF;  in idct_col() local
    49  t7 = t3 * 2 + t6;  in idct_col()
    51  t9 = (int)(t7 * 11585U) >> 14;  in idct_col()
    75  int t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, tA, tB, tC, tD, tE, tF;  in idct_row() local
    85  t7 = t3 * 2 + t6;  in idct_row()
    87  t9 = (t7 * 11585) >> 14;  in idct_row()

  ivi_dsp.c
    246  t0, t1, t2, t3, t4, t5, t6, t7, t8) {\
    249  IVI_HAAR_BFLY(t5, s7, t5, t7, t0); IVI_HAAR_BFLY(t1, s2, t1, t2, t0);\
    251  IVI_HAAR_BFLY(t7, s8, t7, t8, t0);\
    258  d7 = COMPENSATE(t7);\
    278  int t0, t1, t2, t3, t4, t5, t6, t7, t8;  in ff_ivi_inverse_haar_8x8() local
    296  t0, t1, t2, t3, t4, t5, t6, t7, t8);  in ff_ivi_inverse_haar_8x8()
    318  t0, t1, t2, t3, t4, t5, t6, t7, t8);  in ff_ivi_inverse_haar_8x8()
    330  int t0, t1, t2, t3, t4, t5, t6, t7, t8;  in ff_ivi_row_haar8() local
    343  t0, t1, t2, t3, t4, t5, t6, t7, t  in ff_ivi_row_haar8()
    355  int t0, t1, t2, t3, t4, t5, t6, t7, t8;  ff_ivi_col_haar8() local
    543  int t0, t1, t2, t3, t4, t5, t6, t7, t8;  ff_ivi_inverse_slant_8x8() local
    633  int t0, t1, t2, t3, t4, t5, t6, t7, t8;  ff_ivi_row_slant8() local
    671  int t0, t1, t2, t3, t4, t5, t6, t7, t8;  ff_ivi_col_slant8() local
    [all...]

  mss34dsp.c
    72  const unsigned t7 = 77062U * t5 - 51491U * t4; \
    79  blk[1 * step] = (int)( t3 + t7 + t8 + tA) >> shift; \
    81  blk[3 * step] = (int)( t0 + t7 - t9 + tB) >> shift; \
    82  blk[4 * step] = (int)(-(t0 + t7) - t9 + tB) >> shift; \
    84  blk[6 * step] = (int)(-(t3 + t7) + t8 + tA) >> shift; \

  h264pred.c
    155  src[2+3*stride]=(t5 + t7 + 2*t6 + 2 + l5 + l7 + 2*l6 + 2)>>3;  in pred4x4_down_left_rv40_c()
    156  src[3+3*stride]=(t6 + t7 + 1 + l6 + l7 + 1)>>2;  in pred4x4_down_left_rv40_c()
    181  src[2+3*stride]=(t5 + t7 + 2*t6 + 2 + l3*4 + 2)>>3;  in pred4x4_down_left_rv40_nodown_c()
    182  src[3+3*stride]=(t6 + t7 + 1 + 2*l3 + 1)>>2;  in pred4x4_down_left_rv40_nodown_c()
    250  src[3+3*stride]=(t5 + 2*t6 + t7 + 2)>>2;  in pred4x4_vertical_left_vp8_c()
    268  src[0+2*stride]=(t5 + 2*t6 + t7 + 2*l2 + 2*l3 + 4)>>3;  in pred4x4_horizontal_up_rv40_c()
    270  src[1+2*stride]=(t6 + 3*t7 + l2 + 3*l3 + 4)>>3;  in pred4x4_horizontal_up_rv40_c()
    274  src[2+2*stride]=(t6 + t7 + l3 + l4 + 2)>>2;  in pred4x4_horizontal_up_rv40_c()
    294  src[0+2*stride]=(t5 + 2*t6 + t7 + 2*l2 + 2*l3 + 4)>>3;  in pred4x4_horizontal_up_rv40_nodown_c()
    296  src[1+2*stride]=(t6 + 3*t7  in pred4x4_horizontal_up_rv40_nodown_c()
    [all...]

  vc1dsp.c
    271  register int t1, t2, t3, t4, t5, t6, t7, t8;  in vc1_inv_trans_8x8_c() local
    284  t7 = t2 - t4;  in vc1_inv_trans_8x8_c()
    294  dst[2] = (t7 + t3) >> 3;  in vc1_inv_trans_8x8_c()
    297  dst[5] = (t7 - t3) >> 3;  in vc1_inv_trans_8x8_c()
    315  t7 = t2 - t4;  in vc1_inv_trans_8x8_c()
    325  dst[16] = (t7 + t3) >> 7;  in vc1_inv_trans_8x8_c()
    328  dst[40] = (t7 - t3 + 1) >> 7;  in vc1_inv_trans_8x8_c()
    362  register int t1, t2, t3, t4, t5, t6, t7, t8;  in vc1_inv_trans_8x4_c() local
    376  t7 = t2 - t4;  in vc1_inv_trans_8x4_c()
    386  dst[2] = (t7  in vc1_inv_trans_8x4_c()
    435  register int t1, t2, t3, t4, t5, t6, t7, t8;  vc1_inv_trans_4x8_c() local
    [all...]
/third_party/ffmpeg/libavcodec/alpha/

  me_cmp_mvi_asm.S
    73  t7: right_u -> right hi -> right  label
    93  ldq_u t7, 16(a2) # right_u
    114  extqh t7, a2, t7 # right hi
    116  or t7, tb, t7 # right
    117  perr t9, t7, td # error right
    139  ldq t7, 8(a1) # ref right
    162  perr t5, t7, t1 # error right

  hpeldsp_alpha_asm.S
    67  ldq_u t7, 8(a1)
    86  extqh t7, a1, t7
    87  or t6, t7, t6

  idctdsp_alpha_asm.S
    110  ldl t7, 0(te) # pix2 (try to hit cache line soon)
    121  unpkbw t7, t7 # 2 0
    138  addq t6, t7, t6 # 2 3
/third_party/node/deps/openssl/openssl/crypto/aes/asm/

  aes-mips.pl
    126  my ($t0,$t1,$t2,$t3,$t4,$t5,$t6,$t7,$t8,$t9,$t10,$t11) = map("\$$_",(12..23));
    171  lwxs $t7,$i3($Tbl) # Te2[s1>>8]
    191  rotr $t7,$t7,16
    199  xor $t3,$t7
    200  lwxs $t7,$i3($Tbl) # Te0[s3>>24]
    219  xor $t3,$t7
    254  ext $t7,$s1,8,8
    258  $PTR_INS $i3,$t7,2,8
    306  lw $t7, [all...]
/third_party/openssl/crypto/aes/asm/

  aes-mips.pl
    126  my ($t0,$t1,$t2,$t3,$t4,$t5,$t6,$t7,$t8,$t9,$t10,$t11) = map("\$$_",(12..23));
    171  lwxs $t7,$i3($Tbl) # Te2[s1>>8]
    191  rotr $t7,$t7,16
    199  xor $t3,$t7
    200  lwxs $t7,$i3($Tbl) # Te0[s3>>24]
    219  xor $t3,$t7
    254  ext $t7,$s1,8,8
    258  $PTR_INS $i3,$t7,2,8
    306  lw $t7, [all...]
/third_party/ffmpeg/libavcodec/ppc/

  idctdsp.c
    51  t7 = vec_mradds(a2, vx5, vx3); \
    61  t8 = vec_subs(t1, t7); \
    62  t1 = vec_adds(t1, t7); \
    65  t7 = vec_adds(t5, t2); \
    73  vy0 = vec_adds(t7, t1); \
    74  vy7 = vec_subs(t7, t1); \
    84  vec_s16 t0, t1, t2, t3, t4, t5, t6, t7, t8; \
/third_party/node/deps/v8/src/codegen/loong64/

  macro-assembler-loong64.cc
    409  DCHECK(rj != t7);  in CallRecordWriteStub()
    1424  DCHECK(rj != t7);  in CallRecordWriteStub()
    1426  Bstrpick_d(t7, rj, 31, 0);  in CallRecordWriteStub()
    1427  movgr2fr_d(fd, t7);  in CallRecordWriteStub()
    1439  DCHECK(rj != t7);  in CallRecordWriteStub()
    1446  andi(t7, rj, 1);  in CallRecordWriteStub()
    1448  or_(t7, t7, rj);  in CallRecordWriteStub()
    1449  movgr2fr_d(fd, t7);  in CallRecordWriteStub()
    1470  DCHECK(rj != t7);  in CallRecordWriteStub()
    [all...]
/third_party/ffmpeg/libavcodec/arm/

  vc1dsp_neon.S
    175  vsub.i16 q11, q14, q15 @ t8|t7 = old t1|t2 - old t3|t4
    184  vswp d22, d23 @ q11 = t7|t8
    197  @ t7 d22
    203  vadd.i16 q2, q15, q11 @ line[5,4] = t7|t8 + 1
    208  vhadd.s16 q1, q9, q11 @ line[2,3] = (t3|t4 + t7|t8) >> 1
    211  vhsub.s16 q2, q2, q9 @ line[5,4] = (t7|t8 - t3|t4 + 1) >> 1
    214  vhsub.s16 q2, q11, q9 @ line[5,4] = (t7|t8 - t3|t4) >> 1
    296  @ Compute t5, t6, t7, t8 from old t1, t2, t3, t4. Actually, it computes
    297  @ half of t5, t6, t7, t8 since t1, t2, t3, t4 are halved.
    301  vsub.i16 q2, q12, q2 @ t7 [all...]
/third_party/ffmpeg/libavcodec/x86/

  vp9itxfm.asm
    387  SUMSUB_BA w, 3, 6, 5 ; m3=t0+t7, m6=t0-t7
    417  SUMSUB_BA w, 3, 7, 0 ; m3=t7a+t6a (t7), m7=t7a-t6a (t6a)
    455  SUMSUB_BA w, 3, 7, 4 ; m3=t7a+t6a (t7), m7=t7a-t6a (t6a)
    468  pmulhrsw m3, m1, W_16069x2_REG ; m3=t7
    476  psubw m6, m0, m3 ; m6=t0-t7
    477  paddw m3, m0 ; m3=t0+t7
    743  VP9_UNPACK_MULSUB_2D_4X 1, 6, 4, 7, 4756, 15679 ; m1/4=t7[d], m6/7=t6[d]
    747  VP9_RND_SH_SUMSUB_BA 1, 5, 4, 0, 3, D_8192_REG ; m1=t3[w], m5=t7[w]
    770  ; m4=t0, m3=t1, m6=t2, m1=t3, m0=t4, m7=t5, m2=t6, m5=t7
    [all...]