/third_party/vixl/test/aarch64/
test-cpu-features-aarch64.cc
    758  TEST_NEON(abs_2, abs(v0.V4H(), v1.V4H()))
    765  TEST_NEON(addhn_1, addhn(v0.V4H(), v1.V4S(), v2.V4S()))
    773  TEST_NEON(addp_3, addp(v0.V4H(), v1.V4H(), v2.V4H()))
    780  TEST_NEON(addv_2, addv(h0, v1.V4H()))
    785  TEST_NEON(add_2, add(v0.V4H(), v1.V4H(), v2.V4H()))
    [all...]
test-trace-aarch64.cc
    629  __ abs(v21.V4H(), v27.V4H());  [in GenerateTestSequenceNEON()]
    637  __ add(v27.V4H(), v23.V4H(), v17.V4H());  [in GenerateTestSequenceNEON()]
    642  __ addhn(v10.V4H(), v30.V4S(), v26.V4S());  [in GenerateTestSequenceNEON()]
    651  __ addp(v29.V4H(), v24.V4H(), v14.V4H());  [in GenerateTestSequenceNEON()]
    657  __ addv(h27, v30.V4H());  [in GenerateTestSequenceNEON()]
    [all...]
test-disasm-neon-aarch64.cc
    307  V(V4H(), "4h") \
    314  V(V4H(), "4h", V8B(), "8b") \
    315  V(V2S(), "2s", V4H(), "4h") \
    323  V(V4S(), "4s", V4H(), "4h") \
    334  V(V4H(), "4h") \
    340  V(V4H(), "4h") \
    395  COMPARE_MACRO(Ld1(v2.V4H(), v3.V4H(), MemOperand(x17, 16, PostIndex)), ...  [in TEST()]
    422  COMPARE_MACRO(Ld2(v2.V4H(), v3.V4H(), MemOperand(...  [truncated, in TEST()]
    [all...]
test-assembler-neon-aarch64.cc
    315  __ Ld1(v5.V4H(), v6.V4H(), v7.V4H(), MemOperand(x17));  [in TEST()]
    369  __ Ld1(v5.V4H(), v6.V4H(), v7.V4H(), MemOperand(x19, 24, PostIndex));  [in TEST()]
    596  __ Ld2(v6.V4H(), v7.V4H(), MemOperand(x17));  [in TEST()]
    633  __ Ld2(v5.V4H(), v6.V4H(), MemOperand(...  [truncated, in TEST()]
    [all...]
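These assembler tests exercise the multi-register structure loads on the 4H format. As a hedged sketch of what the forms above do (the masm pointer and register choices are illustrative, following VIXL's MacroAssembler API; the __ shorthand in the tests expands to a masm pointer):

    // Ld1 with three registers loads 3 x 8 contiguous bytes into v5..v7.
    masm->Ld1(v5.V4H(), v6.V4H(), v7.V4H(), MemOperand(x17));
    // Post-index form: same load, then x19 += 24 (the bytes consumed).
    masm->Ld1(v5.V4H(), v6.V4H(), v7.V4H(), MemOperand(x19, 24, PostIndex));
    // Ld2 de-interleaves 2-element structures: halfwords at even memory
    // positions land in v6, odd positions in v7.
    masm->Ld2(v6.V4H(), v7.V4H(), MemOperand(x17));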
test-api-aarch64.cc
    263   VIXL_CHECK(VRegister(3, kDRegSize, 4).Is(v3.V4H()));  [in TEST()]
    279   VIXL_CHECK(VRegister(7, kFormat4H).Is(v7.V4H()));  [in TEST()]
    1477  temps.Include(CPURegister(v22.V4H()));  [in TEST()]
    1513  temps.Exclude(CPURegister(v22.V4H()));  [in TEST()]
test-assembler-sve-aarch64.cc
    327  __ Addv(h11, v11.V4H());
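Across these tests, V4H() selects the view of a V register as four 16-bit lanes in the low 64 bits. A minimal sketch of the operations the hits exercise, written against VIXL's MacroAssembler (the helper name and register choices here are illustrative only, not part of VIXL):

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    // Hypothetical helper sketching the 4H hits above.
    void EmitV4HSamples(MacroAssembler* masm) {
      masm->Add(v0.V4H(), v1.V4H(), v2.V4H());      // four parallel 16-bit adds
      masm->Addp(v29.V4H(), v24.V4H(), v14.V4H());  // pairwise add across both sources
      masm->Addv(h27, v30.V4H());                   // reduce all four lanes into scalar h27
      masm->Addhn(v10.V4H(), v30.V4S(), v26.V4S()); // add, keep high halves: 4S+4S -> 4H
    }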
/third_party/node/deps/v8/src/wasm/baseline/arm64/
liftoff-assembler-arm64.h
    1691  Sxtl(dst.fp().V4S(), dst.fp().V4H());  [in LoadTransform()]
    1694  Uxtl(dst.fp().V4S(), dst.fp().V4H());  [in LoadTransform()]
    2325  Smull(tmp1, lhs.fp().V4H(), rhs.fp().V4H());  [in emit_i32x4_dot_i16x8_s()]
    2343  Smull(dst.fp().V4S(), src1.fp().V4H(), src2.fp().V4H());  [in emit_i32x4_extmul_low_i16x8_s()]
    2349  Umull(dst.fp().V4S(), src1.fp().V4H(), src2.fp().V4H());  [in emit_i32x4_extmul_low_i16x8_u()]
    2977  Sqxtn(dst.fp().V4H(), lhs.fp().V4S());  [in emit_i8x16_bitmask()]
    2991  Sqxtun(dst.fp().V4H(), lh...  [truncated, in emit_i8x16_bitmask()]
    [all...]
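In Liftoff, the V4H view feeds the widening instructions: Sxtl/Uxtl extend four 16-bit lanes to 32 bits for the load_extend transforms, and Smull/Umull implement the i32x4.extmul_low_i16x8_{s,u} lowerings seen above. A hedged sketch of those patterns (illustrative registers; the instruction names are the same in V8's and VIXL's assemblers):

    masm->Sxtl(v0.V4S(), v0.V4H());             // sign-extend 4x16 -> 4x32
    masm->Uxtl(v1.V4S(), v1.V4H());             // zero-extend 4x16 -> 4x32
    masm->Smull(v2.V4S(), v3.V4H(), v4.V4H());  // signed widening multiply, low half
    masm->Umull(v5.V4S(), v6.V4H(), v7.V4H());  // unsigned widening multiply, low half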
/third_party/node/deps/v8/src/codegen/arm64/
register-arm64.h
    348  VRegister V4H() const {  [function in v8::internal::VRegister]
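The accessor rebinds the same register code to a 64-bit format with four 16-bit lanes, which is why the VIXL API test above can check v3.V4H() against VRegister(3, kDRegSize, 4). A simplified sketch of the likely shape (the real definitions carry format assertions; names such as GetCode and kDRegSize follow VIXL's conventions):

    VRegister V4H() const {
      // Same register number, reinterpreted as 4 lanes x 16 bits = 64 bits.
      return VRegister(GetCode(), kDRegSize, 4);
    }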
macro-assembler-arm64.cc
    497  Movi16bitHelper(vd.Is64Bits() ? vd.V4H() : vd.V8H(), imm & 0xFFFF);  [in Movi32bitHelper()]
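This is the halfword-splat path: when a 32-bit immediate is a repeated 16-bit pattern, Movi32bitHelper re-emits it over halfword lanes, choosing V4H() or V8H() to match the destination width. A usage-level sketch of the effect (illustrative values):

    masm->Movi(v0.V4H(), 0x1234);  // D-sized v0 = 0x1234123412341234
    masm->Movi(v1.V8H(), 0x1234);  // Q-sized v1 = the same pattern in 8 lanes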
/third_party/vixl/src/aarch64/
registers-aarch64.h
    607  inline VRegister V4H() const;
macro-assembler-aarch64.cc
    1111  Movi16bitHelper(vd.Is64Bits() ? vd.V4H() : vd.V8H(), imm & 0xffff);  [in Emit()]
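Upstream VIXL's macro assembler contains the same V4H()/V8H() halfword-splat dispatch as V8's Movi32bitHelper, so the Movi sketch after the V8 hit above applies here as well.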
/third_party/node/deps/v8/src/compiler/backend/arm64/
code-generator-arm64.cc
    2487  __ Smull(tmp1, lhs.V4H(), rhs.V4H());  [in AssembleArchInstruction()]
    2522  __ Sqxtn(dst.V4H(), src0.V4S());  [in AssembleArchInstruction()]
    2543  __ Sqxtun(dst.V4H(), src0.V4S());  [in AssembleArchInstruction()]
    2803  __ Ldr(i.OutputSimd128Register().V4H(), i.MemoryOperand(0));  [in AssembleArchInstruction()]
    2804  __ Sxtl(i.OutputSimd128Register().V4S(), i.OutputSimd128Register().V4H());  [in AssembleArchInstruction()]
    2809  __ Ldr(i.OutputSimd128Register().V4H(), i.MemoryOperand(0));  [in AssembleArchInstruction()]
    2810  __ Uxtl(i.OutputSimd128Register().V4S(), i.OutputSimd128Register().V4H());  [in AssembleArchInstruction()]
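TurboFan's code generator uses the same view on the narrowing side: Sqxtn/Sqxtun saturate four 32-bit lanes down to 16 bits, plausibly the i16x8.narrow_i32x4_{s,u} lowerings, while the Ldr + Sxtl/Uxtl pairs mirror Liftoff's load_extend above. A hedged sketch of the narrowing pattern (illustrative registers; Sqxtn2 packs a second source into the high half):

    masm->Sqxtn(v0.V4H(), v1.V4S());   // signed saturating narrow -> low 4 lanes
    masm->Sqxtn2(v0.V8H(), v2.V4S());  // second source -> high 4 lanes of v0
    masm->Sqxtun(v3.V4H(), v1.V4S());  // signed input, unsigned saturation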