
Searched refs: V16B (Results 1 - 19 of 19) sorted by relevance

/third_party/vixl/test/aarch64/
test-assembler-neon-aarch64.cc
429 __ Ld1(v2.V16B(), MemOperand(x17)); in TEST()
431 __ Ld1(v3.V16B(), v4.V16B(), MemOperand(x17)); in TEST()
477 __ Ld1(v2.V16B(), MemOperand(x17, x22, PostIndex)); in TEST()
478 __ Ld1(v3.V16B(), v4.V16B(), MemOperand(x18, 32, PostIndex)); in TEST()
671 __ Ld2(v2.V16B(), v3.V16B(), MemOperand(x17)); in TEST()
673 __ Ld2(v4.V16B(), v5.V16B(), MemOperan in TEST()
[all...]
test-trace-aarch64.cc
626 __ abs(v16.V16B(), v11.V16B()); in GenerateTestSequenceNEON()
634 __ add(v31.V16B(), v15.V16B(), v23.V16B()); in GenerateTestSequenceNEON()
644 __ addhn2(v16.V16B(), v21.V8H(), v20.V8H()); in GenerateTestSequenceNEON()
648 __ addp(v3.V16B(), v8.V16B(), v28.V16B()); in GenerateTestSequenceNEON()
655 __ addv(b27, v23.V16B()); in GenerateTestSequenceNEON()
[all...]
test-cpu-features-aarch64.cc
757 TEST_NEON(abs_1, abs(v0.V16B(), v1.V16B()))
767 TEST_NEON(addhn2_0, addhn2(v0.V16B(), v1.V8H(), v2.V8H()))
772 TEST_NEON(addp_2, addp(v0.V16B(), v1.V16B(), v2.V16B()))
779 TEST_NEON(addv_1, addv(b0, v1.V16B()))
784 TEST_NEON(add_1, add(v0.V16B(), v1.V16B(), v2.V16B()))
[all...]
test-disasm-neon-aarch64.cc
306 V(V16B(), "16b") \
317 V(V8H(), "8h", V16B(), "16b") \
327 V(V8H(), "8h", V16B(), "16b") \
333 V(V16B(), "16b") \
393 COMPARE_MACRO(Ld1(v1.V16B(), MemOperand(x16, 16, PostIndex)), in TEST()
509 COMPARE_MACRO(St1(v1.V16B(), MemOperand(x16, 16, PostIndex)), in TEST()
537 COMPARE_MACRO(St2(v1.V16B(), v2.V16B(), MemOperand(x16, 32, PostIndex)), in TEST()
661 COMPARE_MACRO(Ld1(v1.V16B(), 1, MemOperand(x16)), "ld1 {v1.b}[1], [x16]"); in TEST()
677 COMPARE_MACRO(Ld1(v1.V16B(), in TEST()
[all...]
test-simulator-aarch64.cc
1487 VRegister vn = v1.V16B(); in Test1OpNEON_Helper()
1488 VRegister vntmp = v3.V16B(); in Test1OpNEON_Helper()
1516 __ Movi(vd.V16B(), 0); in Test1OpNEON_Helper()
1704 VRegister vn_ext = (kDRegSize == vn_bits) ? vn.V8B() : vn.V16B(); in Test1OpAcrossNEON_Helper()
1705 VRegister vntmp_ext = (kDRegSize == vn_bits) ? vntmp.V8B() : vntmp.V16B(); in Test1OpAcrossNEON_Helper()
1932 VRegister vd = v0.V16B(); in Test2OpNEON_Helper()
1933 VRegister vn = v1.V16B(); in Test2OpNEON_Helper()
1934 VRegister vm = v2.V16B(); in Test2OpNEON_Helper()
1935 VRegister vntmp = v3.V16B(); in Test2OpNEON_Helper()
1936 VRegister vmtmp = v4.V16B(); in Test2OpNEON_Helper()
[all...]
test-api-aarch64.cc
269 VIXL_CHECK(VRegister(8, kQRegSize, 16).Is(v8.V16B())); in TEST()
277 VIXL_CHECK(VRegister(5, kFormat16B).Is(v5.V16B())); in TEST()
680 VIXL_CHECK(Helper::GetVariant(v10.V16B()) == kNEON); in TEST()
test-assembler-sve-aarch64.cc
326 __ Sminv(b10, v10.V16B());
19659 __ Smmla(v2.V4S(), v0.V16B(), v1.V16B());
19661 __ Ummla(v4.V4S(), v0.V16B(), v1.V16B());
19663 __ Usmmla(v6.V4S(), v0.V16B(), v1.V16B());
test-assembler-aarch64.cc
12329 __ Ld1(v0.V16B(), MemOperand(x10, x11, PostIndex));
15002 __ Movi(v0.V16B(), 0xFF);
/third_party/vixl/examples/aarch64/
add2-vectors.cc
59 __ Ld1(v0.V16B(), MemOperand(x0)); in GenerateAdd2Vectors()
60 __ Ld1(v1.V16B(), MemOperand(x1, 16, PostIndex)); in GenerateAdd2Vectors()
61 __ Add(v0.V16B(), v0.V16B(), v1.V16B()); in GenerateAdd2Vectors()
62 __ St1(v0.V16B(), MemOperand(x0, 16, PostIndex)); in GenerateAdd2Vectors()
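
Taken together, the four add2-vectors.cc hits above form a complete 16-lane byte addition. Below is a minimal sketch of how that sequence is emitted, assuming vixl's aarch64 MacroAssembler API and the "#define __ masm->" convention of the vixl examples; the include path, the omitted buffer setup, and the final Ret() are assumptions, not part of the hits above.

#include "aarch64/macro-assembler-aarch64.h"   // path assumed from the vixl source layout

using namespace vixl::aarch64;
#define __ masm->

// Adds two 16-byte arrays lane by lane; x0 and x1 hold their addresses on entry.
void GenerateAdd2Vectors(MacroAssembler* masm) {
  __ Ld1(v0.V16B(), MemOperand(x0));                 // load 16 bytes from [x0]
  __ Ld1(v1.V16B(), MemOperand(x1, 16, PostIndex));  // load 16 bytes from [x1], then x1 += 16
  __ Add(v0.V16B(), v0.V16B(), v1.V16B());           // 16 parallel byte additions
  __ St1(v0.V16B(), MemOperand(x0, 16, PostIndex));  // store the result to [x0], then x0 += 16
  __ Ret();                                          // assumed; not among the hits above
}
#undef __
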
neon-matrix-multiply.cc
81 __ Movi(v0.V16B(), 0); in GenerateNEONMatrixMultiply()
82 __ Movi(v1.V16B(), 0); in GenerateNEONMatrixMultiply()
83 __ Movi(v2.V16B(), 0); in GenerateNEONMatrixMultiply()
84 __ Movi(v3.V16B(), 0); in GenerateNEONMatrixMultiply()
/third_party/node/deps/v8/src/wasm/baseline/arm64/
liftoff-assembler-arm64.h
1712 ld1r(dst.fp().V16B(), src_op); in LoadTransform()
1775 Tbl(dst.fp().V16B(), lhs.fp().V16B(), rhs.fp().V16B()); in emit_i8x16_swizzle()
1878 Bsl(tmp.V16B(), rhs.fp().V16B(), lhs.fp().V16B()); in emit_f64x2_pmin()
1895 Bsl(tmp.V16B(), rhs.fp().V16B(), lhs.fp().V16B()); in emit_f64x2_pmax()
[all...]
/third_party/vixl/benchmarks/aarch64/
bench-utils.cc
379 __ And(PickV().V16B(), PickV().V16B(), PickV().V16B()); in GenerateNEONSequence()
396 __ St4(vt.V16B(), vt2.V16B(), vt3.V16B(), vt4.V16B(), MemOperand(scratch)); in GenerateNEONSequence()
/third_party/node/deps/v8/src/compiler/backend/arm64/
code-generator-arm64.cc
2286 __ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B()); in AssembleArchInstruction()
2296 __ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B()); in AssembleArchInstruction()
2322 __ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B()); in AssembleArchInstruction()
2332 __ Bsl(dst.V16B(), rh in AssembleArchInstruction()
[all...]
/third_party/node/deps/v8/src/codegen/arm64/
register-arm64.h
345 VRegister V16B() const { in V16B() function in v8::internal::VRegister
assembler-arm64.cc
3204 orr(vd.V16B(), vn.V16B(), vn.V16B()); in mov()
3245 not_(vd.V16B(), vn.V16B()); in mvn()
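
The two assembler-arm64.cc hits above (and the matching vixl hits further down) show why V16B() appears even for plain register moves: in A64, MOV Vd, Vn is an alias of ORR Vd.16B, Vn.16B, Vn.16B, and MVN is an alias of NOT on the same 16-byte arrangement, so the helpers route every full-register move through the byte view. A stand-alone illustration of that pattern follows; the wrapper names are hypothetical and this is not the v8 or vixl source.

// Hypothetical helpers mirroring the alias pattern from the hits above.
void EmitVectorMov(Assembler* assm, const VRegister& vd, const VRegister& vn) {
  // MOV Vd, Vn is encoded as ORR Vd.16B, Vn.16B, Vn.16B.
  assm->orr(vd.V16B(), vn.V16B(), vn.V16B());
}

void EmitVectorMvn(Assembler* assm, const VRegister& vd, const VRegister& vn) {
  // MVN Vd, Vn is encoded as NOT Vd.16B, Vn.16B.
  assm->not_(vd.V16B(), vn.V16B());
}
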
macro-assembler-arm64.cc
416 movi(vd.Is64Bits() ? vd.V8B() : vd.V16B(), byte1); in Movi16bitHelper()
/third_party/vixl/src/aarch64/
registers-aarch64.h
605 inline VRegister V16B() const;
assembler-aarch64.cc
4525 orr(vd.V16B(), vn.V16B(), vn.V16B());
4578 not_(vd.V16B(), vn.V16B());
macro-assembler-aarch64.cc
1029 movi(vd.Is64Bits() ? vd.V8B() : vd.V16B(), byte1); in Emit()
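
Both Movi helpers above pick the arrangement from the destination's width: a byte splat targets V8B() for a 64-bit D register and V16B() for a 128-bit Q register, so one MOVI form covers both operand sizes. A condensed, hypothetical form of that check (the wrapper name is illustrative only):

// Sketch of the width check seen in both macro-assembler hits above.
void MoviByteSplat(MacroAssembler* masm, const VRegister& vd, uint8_t byte) {
  // 64-bit destinations use the 8-byte arrangement, 128-bit ones the 16-byte one.
  masm->movi(vd.Is64Bits() ? vd.V8B() : vd.V16B(), byte);
}
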

Completed in 79 milliseconds