/third_party/vixl/examples/aarch64/ |
H A D | simulated-runtime-calls.cc | 71 __ Lsl(w0, w0, 2); in GenerateRuntimeCallExamples()
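The example above uses Lsl as a strength-reduced multiply: shifting left by 2 scales the 32-bit argument by 4 before the runtime call. A minimal sketch of that pattern, assuming the vixl aarch64 headers are on the include path (the function name here is illustrative, not from the example):

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    void GenerateTimesFour(MacroAssembler* masm) {
      // w0 = w0 << 2, i.e. multiply the 32-bit argument by 4 without a mul.
      masm->Lsl(w0, w0, 2);
      masm->Ret();
    }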
|
/third_party/skia/third_party/externals/swiftshader/third_party/subzero/src/DartARM32/ |
H A D | assembler_arm.h |
    993 void Lsl(Register rd, Register rm, const Operand& shift_imm,
    996 void Lsl(Register rd, Register rm, Register rs, Condition cond = AL);
   1025 Lsl(reg, reg, Operand(kSmiTagSize), cond); in SmiTag()
   1029 Lsl(dst, src, Operand(kSmiTagSize), cond); in SmiTag()
|
H A D | assembler_arm.cc |
   2445 void Assembler::Lsl(Register rd, Register rm, const Operand& shift_imm,
   2448 ASSERT(shift_imm.encoding() != 0); // Do not use Lsl if no shift is wanted.
   2453 void Assembler::Lsl(Register rd, Register rm, Register rs, Condition cond)
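The assembler asserts that the immediate shift is non-zero (a zero shift should be a plain mov), and SmiTag() builds on it with kSmiTagSize. A sketch of the tagging arithmetic in plain C++, assuming Dart's usual one-bit Smi tag:

    #include <cstdint>

    constexpr int kSmiTagSize = 1;  // assumption: Dart's one-bit Smi tag

    int32_t SmiTag(int32_t value) {
      // Equivalent of Lsl(reg, reg, Operand(kSmiTagSize)): shift left one
      // bit, leaving a 0 tag bit in the least significant position.
      return value << kSmiTagSize;
    }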
|
/third_party/vixl/test/aarch32/ |
H A D | test-disasm-a32.cc |
   3448 COMPARE_T32(Lsl(eq, r0, r1, 16), in TEST()
   3452 COMPARE_T32(Lsl(eq, r0, r1, 0), in TEST()
   3457 COMPARE_T32(Lsl(eq, r0, r1, 32), in TEST()
   3463 COMPARE_T32(Lsl(eq, r7, r7, r3), in TEST()
   3467 COMPARE_T32(Lsl(eq, r8, r8, r3), in TEST()
   4060 CHECK_T32_16(Lsl(DontCare, r0, r1, 31), "lsls r0, r1, #31\n"); in TEST()
   4062 CHECK_T32_16_IT_BLOCK(Lsl(DontCare, eq, r0, r1, 31), in TEST()
   4066 CHECK_T32_16(Lsl(DontCare, r0, r0, r1), "lsls r0, r1\n"); in TEST()
   4068 CHECK_T32_16_IT_BLOCK(Lsl(DontCare, eq, r0, r0, r1), in TEST()
|
H A D | test-simulator-cond-rd-rn-operand-rm-a32.cc | 144 M(Lsl) \
|
H A D | test-simulator-cond-rd-rn-operand-rm-t32.cc | 144 M(Lsl) \
|
H A D | test-assembler-aarch32.cc |
    783 __ Lsl(r3, r1, 4); in TEST()
    807 __ Lsl(r3, r1, r9); in TEST()
   2795 __ Lsl(r4, r3, 28); in TEST()
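A short sketch of the three forms these tests exercise (immediate shift, register shift, and a conditional variant), assuming the vixl aarch32 headers; in T32 the macro assembler emits any IT block a conditional Lsl needs:

    #include "aarch32/macro-assembler-aarch32.h"

    using namespace vixl::aarch32;

    void GenerateShifts(MacroAssembler* masm) {
      masm->Lsl(r3, r1, 4);       // r3 = r1 << 4
      masm->Lsl(r3, r1, r9);      // r3 = r1 << (r9 & 0xff), per the ARM rules
      masm->Lsl(eq, r0, r1, 16);  // only executes when the Z flag is set
    }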
|
/third_party/node/deps/v8/src/codegen/arm64/ |
H A D | macro-assembler-arm64-inl.h |
    773 void TurboAssembler::Lsl(const Register& rd, const Register& rn, in Lsl() function in v8::internal::TurboAssembler
    780 void TurboAssembler::Lsl(const Register& rd, const Register& rn, in Lsl() function in v8::internal::TurboAssembler
   1047 Lsl(dst, src, kSmiShift); in SmiTag()
|
H A D | macro-assembler-arm64.h |
   1048 inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
   1049 inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
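The two declarations cover the immediate- and register-shift forms, and the SmiTag() hit above is just an Lsl by kSmiShift (32 with full 32-bit Smis, 1 when pointer compression narrows Smis to 31 bits). An illustrative fragment only, since TurboAssembler is v8-internal and not buildable standalone:

    void EmitShifts(v8::internal::TurboAssembler* tasm,
                    const v8::internal::Register& dst,
                    const v8::internal::Register& src,
                    const v8::internal::Register& amount) {
      tasm->Lsl(dst, src, 3);       // immediate-shift overload
      tasm->Lsl(dst, src, amount);  // register-shift overload
    }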
|
/third_party/skia/third_party/externals/swiftshader/third_party/subzero/src/ |
H A D | IceInstARM32.h |
    397 Lsl, enumerator
   1008 using InstARM32Lsl = InstARM32ThreeAddrGPR<InstARM32::Lsl>;
|
H A D | IceInstARM32.cpp | 3411 template class InstARM32ThreeAddrGPR<InstARM32::Lsl>;
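In Subzero, one template class implements every three-address GPR instruction; each opcode becomes a type alias over it (plus the explicit instantiation in the .cpp). A self-contained analogue of that design, with hypothetical names:

    #include <cstdio>

    enum class Op { Lsl, Lsr, Asr };

    template <Op K>
    struct ThreeAddrGPR {
      static constexpr Op Kind = K;
      // A real backend would emit "Dest = Src0 <op> Src1" here.
    };

    using InstLsl = ThreeAddrGPR<Op::Lsl>;  // mirrors InstARM32Lsl

    int main() {
      std::printf("kind=%d\n", static_cast<int>(InstLsl::Kind));
      return 0;
    }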
|
/third_party/vixl/test/aarch64/ |
H A D | test-assembler-aarch64.cc |
   6565 __ Lsl(x16, x0, x1);
   6566 __ Lsl(x17, x0, x2);
   6567 __ Lsl(x18, x0, x3);
   6568 __ Lsl(x19, x0, x4);
   6569 __ Lsl(x20, x0, x5);
   6570 __ Lsl(x21, x0, x6);
   6572 __ Lsl(w22, w0, w1);
   6573 __ Lsl(w23, w0, w2);
   6574 __ Lsl(w24, w0, w3);
   6575 __ Lsl(w2 [all...]
H A D | test-assembler-sve-aarch64.cc |
  10709 __ Lsl(zn_s, zn_s, kSRegSize);
  12700 __ Lsl(zd_lsl, zn, shift - 1); // Lsl supports 0 - lane_size-1.
  12799 macro = &MacroAssembler::Lsl;
  12994 __ Lsl(z3.VnB(), p0.Merging(), z0.VnB(), z1.VnB());
  13000 __ Lsl(z6.VnH(), p3.Merging(), z0.VnH(), z1.VnH());
  13006 __ Lsl(z9.VnS(), p0.Merging(), z0.VnS(), z1.VnS());
  13011 __ Lsl(z12.VnD(), p0.Merging(), z0.VnD(), z1.VnD());
  13015 __ Lsl(z14.VnD(), p0.Merging(), z1.VnD(), z11.VnD());
  13069 __ Lsl(z [all...]
H A D | test-disasm-sve-aarch64.cc |
    353 COMPARE_MACRO(Lsl(z4.VnB(), p0.Merging(), z4.VnB(), z30.VnB()), in TEST()
    355 COMPARE_MACRO(Lsl(z4.VnB(), p0.Merging(), z30.VnB(), z4.VnB()), in TEST()
    357 COMPARE_MACRO(Lsl(z4.VnB(), p0.Merging(), z10.VnB(), z14.VnB()), in TEST()
    403 COMPARE_MACRO(Lsl(z29.VnS(), p6.Merging(), z24.VnS(), 0), in TEST()
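A sketch of the two SVE shapes these tests cover, assuming the vixl aarch64 headers and an assembler whose CPUFeatures include SVE. The predicated form shifts each active lane by the matching lane of the second vector, with merging predication keeping inactive destination lanes; the unpredicated form takes an immediate:

    #include "aarch64/macro-assembler-aarch64.h"

    using namespace vixl::aarch64;

    void GenerateSveShifts(MacroAssembler* masm) {
      // Byte lanes of z4 shifted by the matching lanes of z30 where p0 is true.
      masm->Lsl(z4.VnB(), p0.Merging(), z4.VnB(), z30.VnB());
      // Every S lane of z1 shifted left by 3 into z0.
      masm->Lsl(z0.VnS(), z1.VnS(), 3);
    }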
|
/third_party/vixl/src/aarch64/ |
H A D | macro-assembler-aarch64.h |
   2144 void Lsl(const Register& rd, const Register& rn, unsigned shift) { in Lsl() function in vixl::aarch64::MacroAssembler
   2151 void Lsl(const Register& rd, const Register& rn, const Register& rm) { in Lsl() function in vixl::aarch64::MacroAssembler
   5401 void Lsl(const ZRegister& zd, in Lsl() function in vixl::aarch64::MacroAssembler
   5409 void Lsl(const ZRegister& zd,
   5413 void Lsl(const ZRegister& zd, const ZRegister& zn, int shift) { in Lsl() function in vixl::aarch64::MacroAssembler
   5418 void Lsl(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) { in Lsl() function in vixl::aarch64::MacroAssembler
|
H A D | macro-assembler-sve-aarch64.cc | 670 V(Lsl, lsl) \
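V(Lsl, lsl) is one row of an X-macro list: the list is expanded once per desired definition, generating a MacroAssembler method for each (macro name, mnemonic) pair. A self-contained analogue of the pattern:

    #include <cstdio>

    #define SHIFT_LIST(V) \
      V(Lsl, lsl)         \
      V(Lsr, lsr)

    #define DEFINE_PRINTER(NAME, MNEMONIC) \
      void Print##NAME() { std::puts(#MNEMONIC); }
    SHIFT_LIST(DEFINE_PRINTER)
    #undef DEFINE_PRINTER

    int main() {
      PrintLsl();  // prints "lsl"
      PrintLsr();  // prints "lsr"
      return 0;
    }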
|
/third_party/node/deps/v8/src/wasm/baseline/arm64/ |
H A D | liftoff-assembler-arm64.h |
   1109 I32_SHIFTOP(i32_shl, Lsl)
   1118 I64_SHIFTOP(i64_shl, Lsl)
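These rows bind WebAssembly's i32.shl and i64.shl to the Lsl macro instruction. Roughly what the I32 row expands to, as a sketch only (the real v8 macro body may differ in detail):

    void LiftoffAssembler::emit_i32_shl(Register dst, Register src,
                                        Register amount) {
      // AArch64's register shift (lslv) already masks the amount, so no
      // explicit "& 31" is needed for Wasm shift semantics.
      Lsl(dst.W(), src.W(), amount.W());
    }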
|
/third_party/vixl/src/aarch32/ |
H A D | macro-assembler-aarch32.h |
   2643 void Lsl(Condition cond, Register rd, Register rm, const Operand& operand) { in MacroAssembler() function in vixl::aarch32::MacroAssembler
   2664 void Lsl(Register rd, Register rm, const Operand& operand) { in MacroAssembler() function in vixl::aarch32::MacroAssembler
   2665 Lsl(al, rd, rm, operand); in MacroAssembler()
   2667 void Lsl(FlagsUpdate flags, in MacroAssembler() function in vixl::aarch32::MacroAssembler
   2674 Lsl(cond, rd, rm, operand); in MacroAssembler()
   2688 Lsl(cond, rd, rm, operand); in MacroAssembler()
   2693 void Lsl(FlagsUpdate flags, in MacroAssembler() function in vixl::aarch32::MacroAssembler
   2697 Lsl(flags, al, rd, rm, operand); in MacroAssembler()
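The overloads above form a forwarding chain: the condition-free forms default to al, and the FlagsUpdate forms choose between lsl and lsls (DontCare lets the macro assembler pick the shorter 16-bit T32 encoding, as the CHECK_T32_16 tests earlier show). A short sketch:

    #include "aarch32/macro-assembler-aarch32.h"

    using namespace vixl::aarch32;

    void GenerateFlagVariants(MacroAssembler* masm) {
      masm->Lsl(r0, r1, 4);             // forwards to Lsl(al, r0, r1, 4)
      masm->Lsl(SetFlags, r0, r1, 4);   // emits lsls
      masm->Lsl(DontCare, r0, r0, r1);  // free to pick the 16-bit lsls
    }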
|
/third_party/node/deps/v8/src/compiler/backend/arm64/ |
H A D | code-generator-arm64.cc |
   1419 ASSEMBLE_SHIFT(Lsl, 64); in AssembleArchInstruction()
   1422 ASSEMBLE_SHIFT(Lsl, 32); in AssembleArchInstruction()
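Here the TurboFan backend lowers its 64- and 32-bit shift-left opcodes through ASSEMBLE_SHIFT, which selects the matching Lsl overload. A hedged sketch of the register-operand case only (the real macro also handles an immediate second input):

    __ Lsl(i.OutputRegister64(), i.InputRegister64(0), i.InputRegister64(1));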
|
/third_party/node/deps/v8/src/builtins/arm64/ |
H A D | builtins-arm64.cc | 3413 __ Lsl(result, mantissa, exponent); in Generate_DoubleToI()
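Here the shift amount is itself a register: Generate_DoubleToI reconstructs the integer by shifting the extracted mantissa left by the adjusted exponent. In scalar form, noting that AArch64's register-controlled shift takes the amount modulo the register width:

    #include <cstdint>

    uint64_t ShiftMantissa(uint64_t mantissa, uint64_t exponent) {
      // lslv semantics: a 64-bit shift uses only the low six bits of the amount.
      return mantissa << (exponent & 63);
    }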
|