/third_party/vixl/test/aarch64/
  test-trace-aarch64.cc
      630  __ abs(v16.V4S(), v1.V4S());                  in GenerateTestSequenceNEON()
      638  __ add(v25.V4S(), v28.V4S(), v29.V4S());      in GenerateTestSequenceNEON()
      642  __ addhn(v10.V4H(), v30.V4S(), v26.V4S());    in GenerateTestSequenceNEON()
      645  __ addhn2(v0.V4S(), v2.V2D(), v17.V2D());     in GenerateTestSequenceNEON()
      646  __ addhn2(v31.V8H(), v7.V4S(), v17.V4S());    in GenerateTestSequenceNEON()
      [all...]
  test-cpu-features-aarch64.cc
      761  TEST_NEON(abs_5, abs(v0.V4S(), v1.V4S()))
      765  TEST_NEON(addhn_1, addhn(v0.V4H(), v1.V4S(), v2.V4S()))
      768  TEST_NEON(addhn2_1, addhn2(v0.V8H(), v1.V4S(), v2.V4S()))
      769  TEST_NEON(addhn2_2, addhn2(v0.V4S(), v1.V2D(), v2.V2D()))
      776  TEST_NEON(addp_6, addp(v0.V4S(), v1.V4S(), v2.V4S()))
      [all...]
  test-disasm-neon-aarch64.cc
      310  V(V4S(), "4s")               \
      318  V(V4S(), "4s", V8H(), "8h")  \
      319  V(V2D(), "2d", V4S(), "4s")
      323  V(V4S(), "4s", V4H(), "4h")  \
      328  V(V4S(), "4s", V8H(), "8h")  \
      329  V(V2D(), "2d", V4S(), "4s")
      337  V(V4S(), "4s")
      343  V(V4S(), "4s")
      404  COMPARE_MACRO(Ld1(v16.V4S(),   in TEST()
      405                    v17.V4S(),   in TEST()
      [all...]
  test-assembler-neon-aarch64.cc
      435  __ Ld1(v16.V4S(), v17.V4S(), v18.V4S(), v19.V4S(), MemOperand(x17));  in TEST()
      480  __ Ld1(v16.V4S(),   in TEST()
      481         v17.V4S(),   in TEST()
      482         v18.V4S(),   in TEST()
      483         v19.V4S(),   in TEST()
      677  __ Ld2(v16.V4S(), v17.V4S(), MemOperand(...   in TEST()
      [all...]
  test-api-aarch64.cc
      267  VIXL_CHECK(VRegister(6, kQRegSize, 4).Is(v6.V4S()));   in TEST()
      282  VIXL_CHECK(VRegister(10, kFormat4S).Is(v10.V4S()));    in TEST()
      682  VIXL_CHECK(Helper::GetVariant(v12.V4S()) == kNEON);    in TEST()
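The three API checks above pin down what a V4S view is: register code 6, a 128-bit Q register, four 32-bit lanes. A minimal sketch of the same equivalences in plain VIXL, assuming the usual headers and namespace (the function name is ours, not from the tests):

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    void CheckV4SViews() {
      VRegister a = v6.V4S();        // reinterpret an existing register
      VRegister b(6, kQRegSize, 4);  // code, size in bits, lane count
      VIXL_CHECK(a.Is(b));           // both name q6 as four 32-bit lanes
      VIXL_CHECK(VRegister(10, kFormat4S).Is(v10.V4S()));  // format-based spelling
    }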
  test-assembler-sve-aarch64.cc
      19659  __ Smmla(v2.V4S(), v0.V16B(), v1.V16B());
      19661  __ Ummla(v4.V4S(), v0.V16B(), v1.V16B());
      19663  __ Usmmla(v6.V4S(), v0.V16B(), v1.V16B());
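These three hits exercise the Armv8.6 I8MM matrix multiply-accumulate instructions: each multiplies a 2x8 block of bytes by an 8x2 block and accumulates into a 2x2 matrix of 32-bit results, held in the destination's V4S view. A minimal sketch of how the three variants might be driven together (the Movi zeroing and the generator function are our additions, not from the test):

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;
    #define __ masm->

    void GenerateI8mmSample(MacroAssembler* masm) {
      __ Movi(v2.V2D(), 0);                       // clear the int32 accumulators
      __ Movi(v4.V2D(), 0);
      __ Movi(v6.V2D(), 0);
      __ Smmla(v2.V4S(), v0.V16B(), v1.V16B());   // signed   x signed
      __ Ummla(v4.V4S(), v0.V16B(), v1.V16B());   // unsigned x unsigned
      __ Usmmla(v6.V4S(), v0.V16B(), v1.V16B());  // unsigned x signed
    }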
/third_party/vixl/examples/aarch64/
  neon-matrix-multiply.cc
      38   // __ Fmul(v<v_out>.V4S(), v4.V4S(), v<s_column>.S(), 0);
      39   // __ Fmla(v<v_out>.V4S(), v5.V4S(), v<s_column>.S(), 1);
      40   // __ Fmla(v<v_out>.V4S(), v6.V4S(), v<s_column>.S(), 2);
      41   // __ Fmla(v<v_out>.V4S(), v7.V4S(), v<s_column>.S(), 3);
      54   __ Fmul(v_out, v4.V4S(), v_in, 0);  // e.g. (v0.V4S(), v...   in GenerateMultiplyColumn()
      [all...]
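The example computes one column of a 4x4 single-precision matrix product: v4..v7 hold the columns of the left-hand matrix, and each element of the input column scales one of them through the by-element forms of Fmul/Fmla. A sketch of the generator around the hit at line 54, with the register roles taken from the comments at lines 38-41:

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;
    #define __ masm->

    // out[i] = sum over k of lhs[i][k] * in[k], with lhs stored by column.
    void GenerateMultiplyColumnSketch(MacroAssembler* masm,
                                      const VRegister& v_out,  // e.g. v0.V4S()
                                      const VRegister& v_in) { // e.g. v8.S()
      __ Fmul(v_out, v4.V4S(), v_in, 0);  //  column 0 * in[0]
      __ Fmla(v_out, v5.V4S(), v_in, 1);  // += column 1 * in[1]
      __ Fmla(v_out, v6.V4S(), v_in, 2);  // += column 2 * in[2]
      __ Fmla(v_out, v7.V4S(), v_in, 3);  // += column 3 * in[3]
    }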
/third_party/node/deps/v8/src/wasm/baseline/arm64/
  liftoff-assembler-arm64.h
      215   // AnyTrue does not depend on the number of lanes, so we can use V4S for all.   in EmitAnyTrue()
      218   assm->Umaxv(temp, src.fp().V4S());   in EmitAnyTrue()
      1691  Sxtl(dst.fp().V4S(), dst.fp().V4H());   in LoadTransform()
      1694  Uxtl(dst.fp().V4S(), dst.fp().V4H());   in LoadTransform()
      1716  ld1r(dst.fp().V4S(), src_op);   in LoadTransform()
      1921  Dup(dst.fp().V4S(), src.fp().S(), 0);   in emit_f32x4_splat()
      1927  Mov(dst.fp().S(), lhs.fp().V4S(), imm_lane_idx);   in emit_f32x4_extract_lane()
      1935  Mov(dst.fp().V4S(), src1.fp().V4S());   in emit_f32x4_replace_lane()
      1937  Mov(dst.fp().V4S(), imm_lane_id...   in emit_f32x4_replace_lane()
      [all...]
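The EmitAnyTrue hit shows the trick its comment describes: "any lane set" does not depend on the lane arrangement, so one Umaxv over the V4S view reduces the whole 128-bit register. A sketch of the full sequence in plain VIXL (V8 wraps this in LiftoffAssembler; the register choices here are ours):

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;
    #define __ masm->

    // w0 = 1 if any bit of v0 is set, else 0.
    void GenerateAnyTrueSketch(MacroAssembler* masm) {
      __ Umaxv(s31, v0.V4S());  // horizontal unsigned max across the four S lanes
      __ Fmov(w0, s31);         // move the scalar result to a GP register
      __ Cmp(w0, 0);
      __ Cset(w0, ne);          // non-zero max means some lane was non-zero
    }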
/third_party/vixl/benchmarks/aarch64/
  bench-utils.cc
      382  __ Sqdmlal2(PickV().V4S(), PickV().V8H(), PickV().V8H());   in GenerateNEONSequence()
      395  __ Ld3(vt.V4S(), vt2.V4S(), vt3.V4S(), MemOperand(scratch));   in GenerateNEONSequence()
      399  __ Fminp(PickV().V4S(), PickV().V4S(), PickV().V4S());   in GenerateNEONSequence()
/third_party/node/deps/v8/src/compiler/backend/arm64/
  code-generator-arm64.cc
      2315  VRegister dst = i.OutputSimd128Register().V4S();   in AssembleArchInstruction()
      2316  VRegister lhs = i.InputSimd128Register(0).V4S();   in AssembleArchInstruction()
      2317  VRegister rhs = i.InputSimd128Register(1).V4S();   in AssembleArchInstruction()
      2326  VRegister dst = i.OutputSimd128Register().V4S();   in AssembleArchInstruction()
      2327  VRegister lhs = i.InputSimd128Register(0).V4S();   in AssembleArchInstruction()
      2328  VRegister rhs = i.InputSimd128Register(1).V4S();   in AssembleArchInstruction()
      2394  __ Rev64(tmp2.V4S(), src2.V4S());   in AssembleArchInstruction()
      2398  __ Mul(tmp2.V4S(), tmp2.V4S(), src...   in AssembleArchInstruction()
      [all...]
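The hits at 2315-2328 are the shared prologue of V8's f32x4 lane-wise binops: view all three Simd128 operands as V4S, then emit a single NEON instruction. A hedged sketch of that pattern in plain VIXL (Fadd stands in for whichever instruction those cases actually emit, and the registers replace V8's instruction-operand accessors):

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;
    #define __ masm->

    void GenerateF32x4BinopSketch(MacroAssembler* masm) {
      VRegister dst = v0.V4S();  // i.OutputSimd128Register().V4S() in V8
      VRegister lhs = v1.V4S();  // i.InputSimd128Register(0).V4S()
      VRegister rhs = v2.V4S();  // i.InputSimd128Register(1).V4S()
      __ Fadd(dst, lhs, rhs);    // one lane-wise NEON op covers all four floats
    }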
/third_party/node/deps/v8/src/codegen/arm64/
  register-arm64.h
      357  VRegister V4S() const {   function in v8::internal::VRegister
  macro-assembler-arm64.cc
      529  Movi32bitHelper(vd.Is64Bits() ? vd.V2S() : vd.V4S(), imm & 0xFFFFFFFF);   in Movi64bitHelper()
/third_party/vixl/src/aarch64/
  registers-aarch64.h
      610  inline VRegister V4S() const;
  macro-assembler-aarch64.cc
      1144  Movi32bitHelper(vd.Is64Bits() ? vd.V2S() : vd.V4S(), imm & 0xffffffff);   in Emit()
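This line and its V8 copy above implement the same shortcut in the 64-bit immediate path: when the two 32-bit halves of the immediate are equal, the helper re-enters the 32-bit path with the register viewed as two S lanes (D form) or four S lanes (Q form). A usage-level sketch through the public Movi entry point, assuming it dispatches as the hit suggests:

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;
    #define __ masm->

    void GenerateRepeatingPattern(MacroAssembler* masm) {
      // Halves of the immediate match, so the 64-bit helper can hand the
      // register to Movi32bitHelper as v0.V4S() and set all four S lanes.
      __ Movi(v0.V2D(), 0x0123456701234567);
      // Distinct halves cannot take that shortcut.
      __ Movi(v1.V2D(), 0x0123456789abcdef);
    }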