/third_party/vixl/test/aarch64/
test-trace-aarch64.cc
  632  __ abs(v29.V8H(), v13.V8H());                 in GenerateTestSequenceNEON()
  640  __ add(v4.V8H(), v2.V8H(), v1.V8H());         in GenerateTestSequenceNEON()
  643  __ addhn(v31.V8B(), v12.V8H(), v22.V8H());    in GenerateTestSequenceNEON()
  644  __ addhn2(v16.V16B(), v21.V8H(), v20.V8H());  in GenerateTestSequenceNEON()
  646  __ addhn2(v31.V8H(), v…                       in GenerateTestSequenceNEON()
  [more matches elided]
test-cpu-features-aarch64.cc
  759  TEST_NEON(abs_3, abs(v0.V8H(), v1.V8H()))
  764  TEST_NEON(addhn_0, addhn(v0.V8B(), v1.V8H(), v2.V8H()))
  767  TEST_NEON(addhn2_0, addhn2(v0.V16B(), v1.V8H(), v2.V8H()))
  768  TEST_NEON(addhn2_1, addhn2(v0.V8H(), v1.V4S(), v2.V4S()))
  774  TEST_NEON(addp_4, addp(v0.V8H(), v1.V8H(), v2.V8H()))
  [more matches elided]
test-disasm-neon-aarch64.cc
  308  V(V8H(), "8h") \
  317  V(V8H(), "8h", V16B(), "16b") \
  318  V(V4S(), "4s", V8H(), "8h") \
  322  V(V8H(), "8h", V8B(), "8b") \
  327  V(V8H(), "8h", V16B(), "16b") \
  328  V(V4S(), "4s", V8H(), "8h") \
  335  V(V8H(), "8h") \
  341  V(V8H(), "8h") \
  397  COMPARE_MACRO(Ld1(v4.V8H(), v5.V8H(), MemOperan…  in TEST()
  [more matches elided]
test-assembler-neon-aarch64.cc
  433  __ Ld1(v5.V8H(), v6.V8H(), v7.V8H(), MemOperand(x17));                 in TEST()
  479  __ Ld1(v5.V8H(), v6.V8H(), v7.V8H(), MemOperand(x19, 48, PostIndex));  in TEST()
  675  __ Ld2(v6.V8H(), v7.V8H(), MemOperand(x17));                           in TEST()
  717  __ Ld2(v6.V8H(), v7.V8H(), MemOperan…                                  in TEST()
  [more matches elided]
test-api-aarch64.cc
  268  VIXL_CHECK(VRegister(7, kQRegSize, 8).Is(v7.V8H()));  in TEST()
  280  VIXL_CHECK(VRegister(8, kFormat8H).Is(v8.V8H()));     in TEST()
  374  CPURegisterByValueHelper(v31.V8H());                  in TEST()
  681  VIXL_CHECK(Helper::GetVariant(v11.V8H()) == kNEON);   in TEST()
test-assembler-sve-aarch64.cc
  328  __ Saddlv(s12, v12.V8H());
  330  __ Uaddl(v14.V8H(), v14.V8B(), v14.V8B());
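Taken together, the test listings above exercise V8H(), which views a vector
register as eight 16-bit half-word lanes. A minimal sketch of that usage
against the public VIXL MacroAssembler API (function name and register
choices are illustrative, not taken from the tests):

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    #define __ masm->

    // Emit a few NEON instructions on 8x16-bit lanes, mirroring the
    // shapes seen in the matches above.
    void EmitV8HSamples(MacroAssembler* masm) {
      __ Abs(v29.V8H(), v13.V8H());          // lane-wise absolute value
      __ Add(v4.V8H(), v2.V8H(), v1.V8H());  // lane-wise 16-bit add
      // Narrowing add: two 8H inputs yield one 8B result; the "2" variant
      // writes the upper half of a 16B destination instead.
      __ Addhn(v31.V8B(), v12.V8H(), v22.V8H());
      __ Addhn2(v16.V16B(), v21.V8H(), v20.V8H());
    }

    #undef __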
/third_party/node/deps/v8/src/wasm/baseline/arm64/
liftoff-assembler-arm64.h
  1685  Sxtl(dst.fp().V8H(), dst.fp().V8B());                      in LoadTransform()
  1688  Uxtl(dst.fp().V8H(), dst.fp().V8B());                      in LoadTransform()
  1714  ld1r(dst.fp().V8H(), src_op);                              in LoadTransform()
  2326  Smull2(tmp2, lhs.fp().V8H(), rhs.fp().V8H());              in emit_i32x4_dot_i16x8_s()
  2332  Saddlp(dst.fp().V4S(), src.fp().V8H());                    in emit_i32x4_extadd_pairwise_i16x8_s()
  2337  Uaddlp(dst.fp().V4S(), src.fp().V8H());                    in emit_i32x4_extadd_pairwise_i16x8_u()
  2355  Smull2(dst.fp().V4S(), src1.fp().V8H(), src2.fp().V8H());  in emit_i32x4_extmul_high_i16x8_s()
  2361  Umull2(dst.fp().V4S(), src1.fp().V8H(), src…               in emit_i32x4_extmul_high_i16x8_u()
  [more matches elided]
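In these Liftoff lowerings, V8H() is the intermediate width in widening
chains: bytes are extended to half-words, and half-words are widened or
multiplied up to 32-bit lanes. A sketch of the same chain using the VIXL
macro assembler (register choices arbitrary; V8's internal assembler
mirrors these mnemonics):

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    #define __ masm->

    void EmitWidenThroughV8H(MacroAssembler* masm) {
      __ Sxtl(v0.V8H(), v1.V8B());    // i8x16 -> i16x8, signed, low half
      __ Uxtl(v2.V8H(), v3.V8B());    // i8x16 -> i16x8, unsigned, low half
      __ Saddlp(v4.V4S(), v0.V8H());  // pairwise widening add: i16x8 -> i32x4
      __ Smull2(v5.V4S(), v0.V8H(), v2.V8H());  // widening multiply, high half
    }

    #undef __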
/third_party/vixl/benchmarks/aarch64/
bench-utils.cc
  380  __ Sqrshl(PickV().V8H(), PickV().V8H(), PickV().V8H());    in GenerateNEONSequence()
  382  __ Sqdmlal2(PickV().V4S(), PickV().V8H(), PickV().V8H());  in GenerateNEONSequence()
  398  __ Fmaxv(PickV().H(), PickV().V8H());                      in GenerateNEONSequence()
/third_party/node/deps/v8/src/compiler/backend/arm64/
code-generator-arm64.cc
  2488  __ Smull2(tmp2, lhs.V8H(), rhs.V8H());             in AssembleArchInstruction()
  2505  ASSEMBLE_SIMD_SHIFT_LEFT(Shl, 4, V8H, Sshl, W);    in AssembleArchInstruction()
  2509  ASSEMBLE_SIMD_SHIFT_RIGHT(Sshr, 4, V8H, Sshl, W);  in AssembleArchInstruction()
  2523  __ Sqxtn2(dst.V8H(), src1.V4S());                  in AssembleArchInstruction()
  2530  ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 4, V8H, Ushl, W);  in AssembleArchInstruction()
  2544  __ Sqxtun2(dst.V8H(), src1.V4S());                 in AssembleArchInstruction()
  2557  __ Sshr(tmp.V8H(), src.V8H(), 15);                 in AssembleArchInstruction()
  2562  __ Addv(tmp.H(), tmp.V8H());                       in AssembleArchInstruction()
  [more matches elided]
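The Sshr/Addv pair at lines 2557 and 2562 is the usual sign-extraction plus
horizontal-reduction idiom over 16-bit lanes. A sketch of just that pattern,
again via VIXL (register parameters are illustrative):

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    #define __ masm->

    void EmitSignReduceV8H(MacroAssembler* masm,
                           const VRegister& tmp, const VRegister& src) {
      __ Sshr(tmp.V8H(), src.V8H(), 15);  // each lane becomes 0 or -1
      __ Addv(tmp.H(), tmp.V8H());        // sum all lanes into a scalar h-reg
    }

    #undef __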
/third_party/node/deps/v8/src/codegen/arm64/
register-arm64.h
  351  VRegister V8H() const {   declaration of v8::internal::VRegister::V8H()
macro-assembler-arm64.cc
  497  Movi16bitHelper(vd.Is64Bits() ? vd.V4H() : vd.V8H(), imm & 0xFFFF);  in Movi32bitHelper()
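The ternary at line 497 picks the half-word format that matches the
destination width: four 16-bit lanes for a 64-bit D register, eight for a
128-bit Q register. A hypothetical standalone helper making that choice
explicit (VIXL types shown; V8's internal VRegister mirrors them):

    #include "aarch64/registers-aarch64.h"

    using namespace vixl::aarch64;

    VRegister HalfWordFormatFor(const VRegister& vd) {
      // D registers hold 4 half-words, Q registers hold 8.
      return vd.Is64Bits() ? vd.V4H() : vd.V8H();
    }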
/third_party/vixl/src/aarch64/
registers-aarch64.h
  608  inline VRegister V8H() const;
macro-assembler-aarch64.cc
  1111  Movi16bitHelper(vd.Is64Bits() ? vd.V4H() : vd.V8H(), imm & 0xffff);  in Emit()