/third_party/vixl/test/aarch64/
test-assembler-neon-aarch64.cc
  2211  __ Movi(v0.V2D(), 0x0001020304050607, 0x08090a0b0c0d0e0f);  in TEST()
  2212  __ Movi(v1.V2D(), 0x1011121314151617, 0x18191a1b1c1d1e1f);  in TEST()
  2304  __ Movi(v0.V2D(), 0x0001020304050607, 0x08090a0b0c0d0e0f);  in TEST()
  2305  __ Movi(v1.V2D(), 0x1011121314151617, 0x18191a1b1c1d1e1f);  in TEST()
  2306  __ Movi(v2.V2D(), 0x2021222324252627, 0x28292a2b2c2d2e2f);  in TEST()
  2403  __ Movi(v0.V2D(), 0x0001020304050607, 0x08090a0b0c0d0e0f);  in TEST()
  2404  __ Movi(v1.V2D(), 0x1011121314151617, 0x18191a1b1c1d1e1f);  in TEST()
  2405  __ Movi(v2.V2D(), 0x2021222324252627, 0x28292a2b2c2d2e2f);  in TEST()
  2406  __ Movi(v3.V2D(), 0x2021222324252627, 0x28292a2b2c2d2e2f);  in TEST()
  3463  __ Movi(v…  in TEST()
  (further matches elided)
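These NEON assembler tests all follow the same shape: load a recognizable 128-bit byte pattern with the two-argument Movi overload, execute, and compare both 64-bit halves of the result. A minimal sketch in that style, assuming VIXL's test harness macros (the test name is made up; harness macro names vary across VIXL versions):

    TEST(movi_128bit_pattern) {
      SETUP_WITH_FEATURES(CPUFeatures::kNEON);
      START();
      // The first immediate fills lane 1, the second lane 0 of the V2D view.
      __ Movi(v0.V2D(), 0x0001020304050607, 0x08090a0b0c0d0e0f);
      END();
      if (CAN_RUN()) {
        RUN();
        ASSERT_EQUAL_128(0x0001020304050607, 0x08090a0b0c0d0e0f, q0);
      }
    }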
test-disasm-neon-aarch64.cc
  3179  COMPARE_MACRO(Movi(v4.V8B(), 0xaa), "movi v4.8b, #0xaa");  in TEST()
  3180  COMPARE_MACRO(Movi(v1.V16B(), 0xcc), "movi v1.16b, #0xcc");  in TEST()
  3182  COMPARE_MACRO(Movi(v4.V4H(), 0xaa, LSL, 0), "movi v4.4h, #0xaa, lsl #0");  in TEST()
  3183  COMPARE_MACRO(Movi(v1.V8H(), 0xcc, LSL, 8), "movi v1.8h, #0xcc, lsl #8");  in TEST()
  3185  COMPARE_MACRO(Movi(v4.V2S(), 0xaa, LSL, 0), "movi v4.2s, #0xaa, lsl #0");  in TEST()
  3186  COMPARE_MACRO(Movi(v1.V2S(), 0xcc, LSL, 8), "movi v1.2s, #0xcc, lsl #8");  in TEST()
  3187  COMPARE_MACRO(Movi(v4.V4S(), 0xaa, LSL, 16), "movi v4.4s, #0xaa, lsl #16");  in TEST()
  3188  COMPARE_MACRO(Movi(v1.V4S(), 0xcc, LSL, 24), "movi v1.4s, #0xcc, lsl #24");  in TEST()
  3190  COMPARE_MACRO(Movi(v4.V2S(), 0xaa, MSL, 8), "movi v4.2s, #0xaa, msl #8");  in TEST()
  3191  COMPARE_MACRO(Movi(v…  in TEST()
  (further matches elided)
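These disassembly expectations exercise the three immediate forms the Movi macro accepts: a plain replicated byte, an LSL-shifted immediate for halfword and word lanes, and the MSL ("modified shift left", shifting ones in from the bottom) form for word lanes. A short sketch of the emitter side, with arbitrary register choices and a hypothetical helper name:

    void EmitMoviForms(MacroAssembler* masm) {  // illustrative helper
    #define __ masm->
      __ Movi(v0.V16B(), 0xaa);          // movi v0.16b, #0xaa
      __ Movi(v1.V8H(), 0xcc, LSL, 8);   // movi v1.8h, #0xcc, lsl #8
      __ Movi(v2.V4S(), 0xaa, LSL, 16);  // movi v2.4s, #0xaa, lsl #16
      __ Movi(v3.V2S(), 0xaa, MSL, 8);   // movi v3.2s, #0xaa, msl #8
    #undef __
    }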
test-simulator-aarch64.cc
  1516  __ Movi(vd.V16B(), 0);  in Test1OpNEON_Helper()
  2471  __ Movi(vd.V16B(), 0);  in Test2OpImmNEON_Helper()
test-assembler-sve-aarch64.cc
  19650  __ Movi(v0.V2D(), 0xffeeddccbbaa9988, 0x77665544332211);
  19651  __ Movi(v1.V2D(), 0xaa5555aa55555555, 0x55aaaa55aaaaaa);
  19652  __ Movi(v2.V2D(), 0, 0);
  19653  __ Movi(v3.V2D(), 0, 0);
  19654  __ Movi(v4.V2D(), 0, 0);
  19655  __ Movi(v5.V2D(), 0, 0);
  19656  __ Movi(v6.V2D(), 0, 0);
  19657  __ Movi(v7.V2D(), 0, 0);
test-assembler-aarch64.cc
  15002  __ Movi(v0.V16B(), 0xFF);
  15006  __ Movi(v0.V1D(), 0xDECAFC0FFEE);
/third_party/vixl/examples/aarch64/
neon-matrix-multiply.cc
  81  __ Movi(v0.V16B(), 0);  in GenerateNEONMatrixMultiply()
  82  __ Movi(v1.V16B(), 0);  in GenerateNEONMatrixMultiply()
  83  __ Movi(v2.V16B(), 0);  in GenerateNEONMatrixMultiply()
  84  __ Movi(v3.V16B(), 0);  in GenerateNEONMatrixMultiply()
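The example zeroes its four accumulator registers with Movi before entering the multiply-accumulate loop. A condensed sketch of that setup, using the plain three-register Fmla rather than the example's by-element form (register roles are illustrative):

    __ Movi(v0.V16B(), 0);                  // clear a result accumulator
    __ Fmla(v0.V4S(), v4.V4S(), v8.V4S());  // v0 += v4 * v8, lane-wise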
/third_party/vixl/src/aarch64/
macro-assembler-aarch64.cc
  1162  void MacroAssembler::Movi(const VRegister& vd,  function in vixl::aarch64::MacroAssembler
  1187  void MacroAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) {  function in vixl::aarch64::MacroAssembler
  1191  Movi(vd.V2D(), lo);  in Movi()
  1195  Movi(vd.V1D(), lo);  in Movi()
  1668  Movi(vd, rawbits);  in Fmov()
  1708  Movi(vd, rawbits);  in Fmov()
  1744  Movi(vd, static_cast<uint64_t>(rawbits));  in Fmov()
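The two-argument overload defined at line 1187 splits a 128-bit constant into two 64-bit halves. A hedged paraphrase of its dispatch, reconstructed from the calls visible above (the scratch-register tail is the usual VIXL pattern, not a verbatim copy):

    void MoviSketch(MacroAssembler* masm, const VRegister& vd,
                    uint64_t hi, uint64_t lo) {
      if (hi == lo) {
        masm->Movi(vd.V2D(), lo);      // one replicated 64-bit pattern
        return;
      }
      masm->Movi(vd.V1D(), lo);        // set lane 0, zeroing the upper lane
      if (hi != 0) {
        UseScratchRegisterScope temps(masm);
        Register temp = temps.AcquireX();
        masm->Mov(temp, hi);           // materialize hi in a GP register
        masm->Ins(vd.V2D(), 1, temp);  // insert it into lane 1
      }
    }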
macro-assembler-aarch64.h
  3478  void Movi(const VRegister& vd,
  3482  void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);
/third_party/node/deps/v8/src/wasm/baseline/arm64/
liftoff-assembler-arm64.h
  2234  Movi(mask.V2D(), 0x0000'0008'0000'0004, 0x0000'0002'0000'0001);  in emit_i32x4_bitmask()
  2410  Movi(mask.V2D(), 0x0080'0040'0020'0010, 0x0008'0004'0002'0001);  in emit_i16x8_bitmask()
  2551  Movi(temp.V16B(), imms[1], imms[0]);  in emit_i8x16_shuffle()
  2616  Movi(mask.V2D(), 0x8040'2010'0804'0201);  in emit_i8x16_bitmask()
  2882  Movi(dst.fp().V16B(), vals[1], vals[0]);  in emit_s128_const()
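The bitmask masks above give each lane a distinct power-of-two weight, so ANDing them with the sign-extended lanes and adding across the vector packs one bit per lane into a scalar. A sketch of the i32x4 case, assuming the usual arm64 lowering rather than V8's exact register assignment:

    __ Sshr(tmp.V4S(), src.V4S(), 31);   // each lane becomes 0 or -1
    __ Movi(mask.V2D(), 0x0000'0008'0000'0004, 0x0000'0002'0000'0001);
    __ And(tmp.V16B(), tmp.V16B(), mask.V16B());  // keep weight where set
    __ Addv(tmp.S(), tmp.V4S());         // horizontal add: a value in 0..15
    __ Umov(dst, tmp.V4S(), 0);          // copy lane 0 out to a W register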
/third_party/node/deps/v8/src/codegen/arm64/
macro-assembler-arm64-inl.h
  685  Movi(vd, bits);  in Fmov()
  712  Movi(vd, bits);  in Fmov()
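Both call sites are Fmov's fallback for floating-point immediates that don't fit the instruction's 8-bit immediate encoding: the raw IEEE-754 bits are materialized with Movi instead. A minimal sketch of the idea (helper name is hypothetical; the real Fmov tries the encodable-immediate path first):

    #include <cstring>

    void MaterializeDouble(MacroAssembler* masm, const VRegister& vd,
                           double imm) {
      uint64_t bits;
      std::memcpy(&bits, &imm, sizeof(bits));  // raw bit pattern of the double
      masm->Movi(vd, bits);                    // same fallback as above
    }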
macro-assembler-arm64.cc
  546  void TurboAssembler::Movi(const VRegister& vd, uint64_t imm, Shift shift,  function in v8::internal::TurboAssembler
  567  void TurboAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) {  function in v8::internal::TurboAssembler
  570  Movi(vd.V2D(), lo);  in Movi()
macro-assembler-arm64.h
  938  void Movi(const VRegister& vd, uint64_t imm, Shift shift = LSL,
  940  void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);
/third_party/node/deps/v8/src/compiler/backend/arm64/
code-generator-arm64.cc
  2475  __ Movi(mask.V2D(), 0x0000'0008'0000'0004, 0x0000'0002'0000'0001);  in AssembleArchInstruction()
  2560  __ Movi(mask.V2D(), 0x0080'0040'0020'0010, 0x0008'0004'0002'0001);  in AssembleArchInstruction()
  2616  __ Movi(mask.V2D(), 0x8040'2010'0804'0201);  in AssembleArchInstruction()
  2627  __ Movi(i.OutputSimd128Register().V16B(), imm2, imm1);  in AssembleArchInstruction()
  2753  __ Movi(temp, imm2, imm1);  in AssembleArchInstruction()
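The last match loads two packed immediates into a temporary, the pattern used when lowering i8x16.shuffle: the 16 control-byte lane indices become two 64-bit immediates for Movi, and TBL then performs the permute. A sketch of that pairing with illustrative register names:

    __ Movi(temp.V16B(), control_hi, control_lo);  // 16 packed lane indices
    __ Tbl(dst.V16B(), src.V16B(), temp.V16B());   // table-lookup shuffle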