/third_party/ffmpeg/libavcodec/
  faanidct.c
      54  FLOAT s04, d04, s17, d17, s26, d26, s53, d53;  in p8idct() local
      74  s26 = temp[2*x + i] + temp[6*x + i];  in p8idct()
      77  d26-= s26;  in p8idct()
      82  os07= s04 + s26;  in p8idct()
      83  os34= s04 - s26;  in p8idct()
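The s26/d26 pairs in the faanidct.c hits follow the even-part butterfly of an 8-point IDCT: s26 is the sum of inputs 2 and 6, and it is folded with the 0/4 pair into output sums and differences (os07, os34). A minimal, self-contained C++ sketch of that pattern follows; the scale factors and the initial value of d26 are illustrative assumptions, not the actual FAAN IDCT constants.

    // Even-part butterfly pattern suggested by the hits above.  A2/A4 and the
    // way d26 is first formed are placeholders, NOT the real FAAN constants.
    #include <cstdio>

    int main() {
      const float in[8] = {1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f};  // one column
      const float A2 = 0.5f, A4 = 0.7f;  // placeholder scale factors (assumption)

      float s04 = in[0] + in[4];            // sum of inputs 0 and 4
      float s26 = in[2] + in[6];            // hit 74: s26 = temp[2*x + i] + temp[6*x + i]
      float d26 = A2 * in[2] - A4 * in[6];  // placeholder for the scaled difference
      d26 -= s26;                           // hit 77: d26 -= s26
      float os07 = s04 + s26;               // hit 82: os07 = s04 + s26
      float os34 = s04 - s26;               // hit 83: os34 = s04 - s26

      std::printf("os07=%f os34=%f d26=%f\n", os07, os34, d26);
      return 0;
    }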
/third_party/ffmpeg/libavutil/arm/
  float_dsp_vfp.S
      143  vmul.f s26, s2, s10
      155  vmls.f s26, s18, s22
      203  vstmia DST0!, {s26-s27}
      247  vstmia DST0!, {s26-s27}
      313  vmulge.f32 s26, s17, s26
      395  vsub.f s26, s2, s10
      400  vstr d13, [BASE2, #8-16*1] @ s26,s27
      427  vstr d13, [BASE2, #8-16*3] @ s26,s27
      443  vstr d13, [BASE2, #8-16*2] @ s26,s2
      [all...]
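The "@ s26,s27" comments on the vstr d13 hits rely on the VFP register aliasing rule: single-precision registers s(2n) and s(2n+1) are the low and high halves of double register d(n), so storing d13 writes s26 and s27 in one instruction. A trivial C++ sketch of that index mapping:

    // VFP aliasing: s(2n) / s(2n+1) occupy the low / high half of d(n).
    #include <cstdio>

    int main() {
      for (int s = 26; s <= 27; ++s) {
        std::printf("s%d lives in the %s half of d%d\n",
                    s, (s % 2) ? "high" : "low", s / 2);
      }
      return 0;
    }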
/third_party/ffmpeg/libavcodec/arm/
  fmtconvert_vfp.S
      70   vcvt.f32.s32 s26, s26
      113  vcvt.f32.s32 s26, s26
      151  vcvt.f32.s32 s26, s26
  mdct_vfp.S
      136  TCOS_D1_HEAD .req d13  @ s26,s27
      321  1: postrotation_innerloop_rolled tail, head, s24, s25, s26, s27, s20, 16, 0
  fft_vfp.S
      291  vadd.f s0, s26, s24
      294  vsub.f s3, s26, s24
/third_party/vixl/test/aarch64/
  test-assembler-fp-aarch64.cc
      2258  __ Fmov(s26, INT32_MIN);  in TEST()
      2278  __ Frint32x(s13, s26);  in TEST()
      2402  __ Fmov(s26, INT32_MIN);  in TEST()
      2422  __ Frint32z(s13, s26);  in TEST()
      2546  __ Fmov(s26, INT64_MIN);  in TEST()
      2567  __ Frint64x(s13, s26);  in TEST()
      2692  __ Fmov(s26, INT64_MIN);
      2713  __ Frint64z(s13, s26);
      2834  __ Fmov(s26, -0.0);
      2847  __ Frinta(s10, s26);
      [all...]
  test-disasm-fp-aarch64.cc
      157  COMPARE(fmin(s25, s26, s27), "fmin s25, s26, s27");  in TEST()
      320  COMPARE(scvtf(s26, x0), "scvtf s26, x0");  in TEST()
  test-api-aarch64.cc
      525   VIXL_CHECK(AreConsecutive(s26, s27, s28, s29));  in TEST()
      1471  temps.Include(CPURegList(s24, s25, s26));  in TEST()
      1507  temps.Exclude(CPURegList(s24, s25, s26));  in TEST()
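The test-api hits exercise a pool of scratch registers (Include, Exclude, IsAvailable over a CPURegList) plus an AreConsecutive check. The sketch below is a hypothetical, dependency-free mock of that behaviour for illustration only; it is not the real vixl CPURegList or scratch-register-scope API.

    #include <cassert>
    #include <initializer_list>
    #include <set>

    // Hypothetical stand-in for a scratch-register pool; register codes are
    // plain ints (26 stands for s26).  Not the real vixl API.
    struct MockScratchPool {
      std::set<int> available;
      void Include(std::initializer_list<int> regs) {
        available.insert(regs.begin(), regs.end());
      }
      void Exclude(std::initializer_list<int> regs) {
        for (int r : regs) available.erase(r);
      }
      bool IsAvailable(int r) const { return available.count(r) != 0; }
    };

    // Mirrors the idea behind AreConsecutive(s26, s27, s28, s29).
    bool AreConsecutive(std::initializer_list<int> regs) {
      int prev = 0;
      bool first = true;
      for (int r : regs) {
        if (!first && r != prev + 1) return false;
        prev = r;
        first = false;
      }
      return true;
    }

    int main() {
      MockScratchPool temps;
      temps.Include({24, 25, 26});    // like temps.Include(CPURegList(s24, s25, s26))
      assert(temps.IsAvailable(26));  // like VIXL_CHECK(temps.IsAvailable(s26))
      temps.Exclude({24, 25, 26});    // like temps.Exclude(CPURegList(s24, s25, s26))
      assert(!temps.IsAvailable(26));
      assert(AreConsecutive({26, 27, 28, 29}));
      return 0;
    }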
  test-trace-aarch64.cc
      418   __ facgt(s12, s26, s12);  in GenerateTestSequenceFP()
      502   __ fcvtpu(wzr, s26);  in GenerateTestSequenceFP()
      531   __ fdiv(s26, s5, s25);  in GenerateTestSequenceFP()
      559   __ fnmadd(s0, s18, s26, s18);  in GenerateTestSequenceFP()
      561   __ fnmsub(s29, s0, s11, s26);  in GenerateTestSequenceFP()
      603   __ scvtf(s26, x12, 38);  in GenerateTestSequenceFP()
      1268  __ mov(s26, v19.S(), 0);  in GenerateTestSequenceNEON()
      1581  __ sqdmull(d25, s2, s26);  in GenerateTestSequenceNEON()
      1619  __ sqrshl(s26, s18, s2);  in GenerateTestSequenceNEON()
      2366  __ uqshrn(h28, s26, 1  in GenerateTestSequenceNEON()
      [all...]
  test-assembler-neon-aarch64.cc
      4562  __ Sqdmlsl(s26, h0, v1.H(), 7);  in TEST()
      5615  __ Sqneg(s26, s2);  in TEST()
      5699  __ Sqabs(s26, s2);  in TEST()
      8874  __ Uqshl(s26, s1, s2);  in TEST()
      8926  __ Sqshl(s26, s1, s2);  in TEST()
      9034  __ Uqrshl(s26, s1, s2);  in TEST()
      9086  __ Sqrshl(s26, s1, s2);  in TEST()
  test-assembler-aarch64.cc
      4387  __ ldr(s26, &after_s);
      4420  ASSERT_EQUAL_FP32(2.5, s26);
      4474  __ ldr(s26, &after_s);
      4506  ASSERT_EQUAL_FP32(2.5, s26);
      4804  __ ldr(s26, &after_s);
      4834  ASSERT_EQUAL_FP32(2.5, s26);
  test-disasm-aarch64.cc
      1483  COMPARE(ldp(s26, s27, MemOperand(x28, 252, PreIndex)),  in TEST()
      1484          "ldp s26, s27, [x28, #252]!");  in TEST()
      1548  COMPARE(stp(s26, s27, MemOperand(x28, 252, PreIndex)),  in TEST()
      1549          "stp s26, s27, [x28, #252]!");  in TEST()
  test-disasm-neon-aarch64.cc
      125  COMPARE(str(s26, MemOperand(sp, 4, PreIndex)), "str s26, [sp, #4]!");  in TEST()
      183  COMPARE(ldr(s26, MemOperand(sp, -4, PreIndex)), "ldr s26, [sp, #-4]!");  in TEST()
  test-disasm-sve-aarch64.cc
      1543  COMPARE(faddv(s26, p6, z16.VnS()), "faddv s26, p6, z16.s");  in TEST()
/third_party/node/deps/v8/src/codegen/arm/
  register-arm.h
      26  V(s24) V(s25) V(s26) V(s27) V(s28) V(s29) V(s30) V(s31)
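The V(s24) V(s25) V(s26) ... hit is an X-macro register list: the same list is expanded with different definitions of V to generate enums, name tables, and similar artifacts that must stay in sync. register-arm64.h further down uses the same pattern. Below is a self-contained C++ sketch of how such a list is typically consumed; the macro and enum names are illustrative, not the actual V8 definitions.

    #include <cstdio>

    // Illustrative X-macro list (same shape as the register-arm.h hit above).
    #define ILLUSTRATIVE_S_REGISTERS(V) \
      V(s24) V(s25) V(s26) V(s27) V(s28) V(s29) V(s30) V(s31)

    // One expansion produces an enum of register codes...
    enum IllustrativeSRegCode {
    #define DECLARE_CODE(name) kCode_##name,
      ILLUSTRATIVE_S_REGISTERS(DECLARE_CODE)
    #undef DECLARE_CODE
    };

    // ...and another produces the matching printable names.
    static const char* const kSRegNames[] = {
    #define DECLARE_NAME(name) #name,
      ILLUSTRATIVE_S_REGISTERS(DECLARE_NAME)
    #undef DECLARE_NAME
    };

    int main() {
      std::printf("s26 -> illustrative code %d, name %s\n",
                  static_cast<int>(kCode_s26), kSRegNames[kCode_s26]);
      return 0;
    }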
/third_party/node/deps/v8/src/execution/arm/
  simulator-arm.h
      110  s26,  enumerator
/third_party/node/deps/v8/src/codegen/arm64/
  register-arm64.h
      52  V(s24) V(s25) V(s26) V(s27) V(s28) V(s29) V(s30) V(s31)
/third_party/vixl/test/aarch32/
  test-assembler-aarch32.cc
      3371  temps.Include(s26);  in TEST()
      3390  VIXL_CHECK(temps.IsAvailable(s26));  in TEST()