/third_party/vixl/test/aarch64/

test-disasm-neon-aarch64.cc (all matches inside TEST() bodies)
  1831  COMPARE_MACRO(Fmls(v17.V8H(), v18.V8H(), v19.V8H()),
  1833  COMPARE_MACRO(Fmls(v20.V4H(), v21.V4H(), v22.V4H()),
  2017  COMPARE_MACRO(Fmls(v4.M, v5.M, v6.M), "fmls v4." S ", v5." S ", v6." S);
  2540  COMPARE_MACRO(Fmls(v0.V4H(), v1.V4H(), v2.H(), 0),
  2542  COMPARE_MACRO(Fmls(v2.V8H(), v3.V8H(), v15.H(), 7),
  2544  COMPARE_MACRO(Fmls(v0.V2S(), v1.V2S(), v2.S(), 0),
  2546  COMPARE_MACRO(Fmls(v2.V4S(), v3.V4S(), v15.S(), 3),
  2548  COMPARE_MACRO(Fmls(v2.V4S(), v3.V4S(), v31.S(), 3),
  2550  COMPARE_MACRO(Fmls(v0.V2D(), v1.V2D(), v2.D(), 0),
  2552  COMPARE_MACRO(Fmls(v
  [all...]
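
These lines are disassembly round-trip checks: COMPARE_MACRO assembles the MacroAssembler call and compares the disassembler's output against an expected string (mostly elided in the excerpt above). A minimal sketch of the pattern, with the expected strings reconstructed from standard AArch64 assembly syntax rather than copied from the file:

  // Three-register, same-size form (here 8 half-precision lanes).
  COMPARE_MACRO(Fmls(v17.V8H(), v18.V8H(), v19.V8H()),
                "fmls v17.8h, v18.8h, v19.8h");

  // By-element form: the trailing immediate selects one lane of the
  // third operand, which is broadcast across the multiply.
  COMPARE_MACRO(Fmls(v2.V4S(), v3.V4S(), v15.S(), 3),
                "fmls v2.4s, v3.4s, v15.s[3]");
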
test-disasm-sve-aarch64.cc (all matches inside TEST() bodies)
  1638  COMPARE_MACRO(Fmls(z0.VnD(), p1.Merging(), z0.VnD(), z2.VnD(), z4.VnD()),
  1640  COMPARE_MACRO(Fmls(z3.VnS(), p2.Merging(), z4.VnS(), z3.VnS(), z5.VnS()),
  1642  COMPARE_MACRO(Fmls(z4.VnH(), p3.Merging(), z5.VnH(), z6.VnH(), z4.VnH()),
  1646  COMPARE_MACRO(Fmls(z5.VnD(), p4.Merging(), z6.VnD(), z7.VnD(), z8.VnD()),
  1699  COMPARE_MACRO(Fmls(z0.VnD(), p1.Merging(), z0.VnD(), z2.VnD(), z4.VnD()),
  1701  COMPARE_MACRO(Fmls(z3.VnS(), p2.Merging(), z4.VnS(), z3.VnS(), z5.VnS()),
  1703  COMPARE_MACRO(Fmls(z4.VnH(), p3.Merging(), z5.VnH(), z6.VnH(), z4.VnH()),
  1705  COMPARE_MACRO(Fmls(z5.VnD(), p4.Merging(), z6.VnD(), z7.VnD(), z8.VnD()),
  1785  COMPARE_MACRO(Fmls(z10.VnH(), z11.VnH(), z12.VnH(), z4.VnH(), 7),
  1788  COMPARE_MACRO(Fmls(z1
  [all...]
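
These checks drive the five-operand macro form Fmls(zd, pg, za, zn, zm), meaning zd = za - zn * zm under predicate pg, which must lower onto the destructive architectural encoding fmls zda, pg/m, zn, zm. A hedged sketch of the three register-aliasing cases the lines above appear to cover (the expected expansions are reconstructed, not copied from the file):

  // zd aliases za: a single destructive fmls suffices.
  //   Fmls(z0.VnD(), p1.Merging(), z0.VnD(), z2.VnD(), z4.VnD())
  //     -> fmls z0.d, p1/m, z2.d, z4.d
  // zd aliases a multiplicand: fmsb (subtract-from-addend, with the
  // operands rearranged) still needs only one instruction.
  //   Fmls(z3.VnS(), p2.Merging(), z4.VnS(), z3.VnS(), z5.VnS())
  //     -> fmsb z3.s, p2/m, z5.s, z4.s
  // zd is distinct from every source: movprfx pairs with the fmls.
  //   Fmls(z5.VnD(), p4.Merging(), z6.VnD(), z7.VnD(), z8.VnD())
  //     -> movprfx z5.d, p4/m, z6.d
  //        fmls z5.d, p4/m, z7.d, z8.d
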
test-assembler-neon-aarch64.cc (all matches inside TEST() bodies)
  9311  __ Fmls(v19.V2S(), v1.V2S(), v2.V2S());
  9312  __ Fmls(v20.V4S(), v1.V4S(), v2.V4S());
  9313  __ Fmls(v21.V2D(), v1.V2D(), v2.V2D());
  9398  __ Fmls(v16.V8H(), v0.V8H(), v1.V8H());
  9399  __ Fmls(v17.V8H(), v2.V8H(), v3.V8H());
  9400  __ Fmls(v18.V8H(), v2.V8H(), v6.V8H());
  9401  __ Fmls(v19.V8H(), v3.V8H(), v6.V8H());
  9402  __ Fmls(v20.V4H(), v0.V4H(), v1.V4H());
  9403  __ Fmls(v21.V4H(), v2.V4H(), v3.V4H());
  9404  __ Fmls(v2
  [all...]
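
These are execution tests: each __ Fmls(...) emits a real instruction whose result registers are later compared against precomputed values. Semantically, every lane performs a fused multiply-subtract. A self-contained reference model, assuming nothing beyond the architectural definition of FMLS:

  #include <cmath>
  #include <cstddef>

  // One NEON lane of FMLS: d = d - n * m with a single rounding step.
  // std::fma reproduces the fused behaviour as fma(-n, m, d).
  static inline float FmlsLane(float d, float n, float m) {
    return std::fma(-n, m, d);
  }

  // Whole-vector helper covering the 2S/4S-style forms above (a sketch,
  // not a VIXL API; `lanes` is 2 for V2S, 4 for V4S, and so on).
  static void FmlsRef(float* d, const float* n, const float* m, size_t lanes) {
    for (size_t i = 0; i < lanes; ++i) d[i] = FmlsLane(d[i], n[i], m[i]);
  }
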
test-assembler-sve-aarch64.cc
  16681  // Fmls `ditto` fmls, fmsb and movprfx + fmls
  16794  &MacroAssembler::Fmls,
  16808  &MacroAssembler::Fmls,
  16822  &MacroAssembler::Fmls,
  16991  // Indexed form of Fmla and Fmls won't swap argument, passing strict NaN
  17060  // Using the vector form of Fmla and Fmls to verify the indexed form.
  17069  &MacroAssembler::Fmls,  // vector form
  17070  &MacroAssembler::Fmls,  // indexed form
  17089  &MacroAssembler::Fmls,  // vector form
  17090  &MacroAssembler::Fmls,  // indexe
  [all...]
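
The comments at 16991 and 17060 give away the test strategy: the indexed (by-element) form is cross-checked against the plain vector form, and because the indexed form never swaps its operands, strict NaN propagation can be asserted for it. A sketch of the indexed-form reference such a comparison implies, using a flat lane model (real SVE selects the zm element within each 128-bit segment, which this simplification ignores):

  #include <cmath>
  #include <cstddef>

  // Indexed FMLS reference: every lane subtracts zn[i] times the single
  // selected element of zm. (Hypothetical helper, not a VIXL API.)
  static void FmlsIndexedRef(float* zd, const float* zn, const float* zm,
                             int index, size_t lanes) {
    for (size_t i = 0; i < lanes; ++i) {
      zd[i] = std::fma(-zn[i], zm[index], zd[i]);  // zd[i] -= zn[i] * zm[index]
    }
  }
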
/third_party/node/deps/v8/src/codegen/arm64/

macro-assembler-arm64.h
  216  V(fmls, Fmls) \
  383  V(fmls, Fmls) \
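
Both hits are entries in X-macro instruction lists: a generator macro stamps out one MacroAssembler method per (assembler, MacroAssembler) name pair. An illustrative expansion for V(fmls, Fmls), with the exact generator shape assumed rather than taken from the header:

  // Assumed shape of the generated wrapper: check that macro-level
  // emission is permitted, then forward to the raw assembler.
  void Fmls(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    DCHECK(allow_macro_instructions());
    fmls(vd, vn, vm);
  }
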
/third_party/vixl/src/aarch64/

macro-assembler-sve-aarch64.cc
  1857  V(Fmls, fmls, FourRegOneImmDestructiveHelper) \
  2036  void MacroAssembler::Fmls(const ZRegister& zd,   [definition, in vixl::aarch64::MacroAssembler]
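
Line 1857 routes the indexed form through a shared four-register helper; line 2036 begins the hand-written predicated overload. Read together with the "fmls, fmsb and movprfx + fmls" comment in the SVE assembler tests above, the dispatch plausibly looks like the sketch below (the Aliases() checks are assumptions, and the real definition also threads a NaN-propagation option that this sketch omits):

  void MacroAssembler::Fmls(const ZRegister& zd, const PRegisterM& pg,
                            const ZRegister& za, const ZRegister& zn,
                            const ZRegister& zm) {
    if (zd.Aliases(za)) {
      fmls(zd, pg, zn, zm);  // zd is already the accumulator
    } else if (zd.Aliases(zn)) {
      fmsb(zd, pg, zm, za);  // zd doubles as a multiplicand: zd = za - zd * zm
    } else if (zd.Aliases(zm)) {
      fmsb(zd, pg, zn, za);
    } else {
      movprfx(zd, pg, za);   // copy the accumulator, then subtract in place
      fmls(zd, pg, zn, zm);
    }
  }
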
macro-assembler-aarch64.h
  2880  V(fmls, Fmls) \
  3110  V(fmls, Fmls) \
  4653  void Fmls(
  4660  void Fmls(const ZRegister& zd,
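
The declarations at 4653 and 4660 are the SVE overloads (the NEON forms come from the V(fmls, Fmls) generator lists at 2880 and 3110). Plausible shapes, with the argument names and the NaN-propagation parameter inferred from the tests rather than read from the header:

  // Indexed (by-element) form: zd = za - zn * zm[index].
  void Fmls(const ZRegister& zd,
            const ZRegister& za,
            const ZRegister& zn,
            const ZRegister& zm,
            int index);

  // Predicated form: zd = za - zn * zm under pg, with the NaN-propagation
  // policy steering whether operand-swapping encodings may be chosen.
  void Fmls(const ZRegister& zd,
            const PRegisterM& pg,
            const ZRegister& za,
            const ZRegister& zn,
            const ZRegister& zm,
            FPMacroNaNPropagationOption nan_option);
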
/third_party/node/deps/v8/src/compiler/backend/arm64/

code-generator-arm64.cc (matches inside AssembleArchInstruction())
  2277  SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F64x2Qfms, Fmls, 2D);
  2313  SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F32x4Qfms, Fmls, 4S);
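
Here v8 lowers the WebAssembly relaxed-SIMD "qfms" (fused multiply-subtract) operations directly onto Fmls. The destructive-binop macro encodes the constraint that fmls overwrites its accumulator, so the instruction selector must tie the output to input 0. An assumed expansion for the 4S case (the macro body is a reconstruction, not copied from the file):

  case kArm64F32x4Qfms: {
    VRegister dst = i.OutputSimd128Register().V4S();
    DCHECK_EQ(dst, i.InputSimd128Register(0).V4S());
    __ Fmls(dst, i.InputSimd128Register(1).V4S(),
            i.InputSimd128Register(2).V4S());
    break;
  }
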