/arkcompiler/runtime_core/static_core/compiler/optimizer/ir/ |
H A D | constants.h | 51 enum ShiftType : uint8_t { LSL, LSR, ASR, ROR, INVALID_SHIFT };
|
/arkcompiler/runtime_core/compiler/optimizer/ir/ |
H A D | constants.h | 64 enum ShiftType : uint8_t { LSL, LSR, ASR, ROR, INVALID_SHIFT };
|
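Both copies of constants.h define the same four ARM-style shift kinds. As a rough, stand-alone sketch of what each kind means for a 32-bit value (plain C++, not code from the repository; ApplyShift32 is a hypothetical helper name):

    #include <cstdint>

    enum ShiftType : uint8_t { LSL, LSR, ASR, ROR, INVALID_SHIFT };

    // Hypothetical helper illustrating the four shift kinds on a 32-bit value.
    inline uint32_t ApplyShift32(uint32_t value, ShiftType type, uint32_t amount)
    {
        amount &= 31U;  // hardware uses only the low bits of the shift amount
        switch (type) {
            case LSL: return value << amount;                                               // logical left, zero fill
            case LSR: return value >> amount;                                               // logical right, zero fill
            case ASR: return static_cast<uint32_t>(static_cast<int32_t>(value) >> amount);  // arithmetic right, sign fill
            case ROR: return amount == 0U ? value : (value >> amount) | (value << (32U - amount));  // rotate right
            default:  return value;  // INVALID_SHIFT treated as a no-op in this sketch
        }
    }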
/arkcompiler/ets_runtime/ecmascript/compiler/ |
H A D | lcr_opcodes.h | 35 V(Lsl, LSL, GateFlags::NONE_FLAG, 0, 0, 2) \
|
H A D | gate.cpp | 146 case OpCode::LSL: in CheckValueInput()
|
H A D | instruction_combine.cpp | 64 case OpCode::LSL: in VisitGate()
|
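The exact rewrite rules this visitor applies to OpCode::LSL live in instruction_combine.cpp and are not shown in the hits above. Purely as an illustration of the kind of folding an instruction combiner typically performs on a left-shift gate (all names and the rule itself are hypothetical, not ArkCompiler's actual logic):

    #include <cstdint>
    #include <optional>

    // Hypothetical sketch: if both inputs of an LSL gate are compile-time constants,
    // replace the gate with the folded constant; otherwise leave it alone.
    inline std::optional<uint32_t> TryFoldLsl(std::optional<uint32_t> lhs, std::optional<uint32_t> rhs)
    {
        if (lhs && rhs) {
            return *lhs << (*rhs & 31U);  // mask the amount to the 32-bit operand width
        }
        return std::nullopt;  // keep the LSL gate; nothing to fold
    }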
/arkcompiler/runtime_core/static_core/compiler/tests/codegen/ |
H A D | codegen_test_2.cpp | 345 TestBinaryOperationWithShiftedOperand<Opcode::AddSR, 10U, 2U, ShiftType::LSL, 1U, 14U>(); in TEST_F()
    363 TestBinaryOperationWithShiftedOperand<Opcode::AndSR, 1U, 1U, ShiftType::LSL, 1U, 0U>(); in TEST_F()
    372 TestBinaryOperationWithShiftedOperand<Opcode::OrSR, 1U, 1U, ShiftType::LSL, 1U, 3U>(); in TEST_F()
    381 TestBinaryOperationWithShiftedOperand<Opcode::XorSR, 3U, 1U, ShiftType::LSL, 1U, 1U>(); in TEST_F()
|
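Read as <opcode, operand1, operand2, shift type, shift amount, expected result>, these cases appear to check op1 OP (op2 << amount). The listed expectations can be verified in isolation (plain C++, independent of the test harness):

    #include <cassert>

    int main()
    {
        assert(10U + (2U << 1U) == 14U);   // AddSR: 10 + (2 << 1)
        assert((1U & (1U << 1U)) == 0U);   // AndSR: 1 & (1 << 1)
        assert((1U | (1U << 1U)) == 3U);   // OrSR:  1 | (1 << 1)
        assert((3U ^ (1U << 1U)) == 1U);   // XorSR: 3 ^ (1 << 1)
        return 0;
    }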
/arkcompiler/runtime_core/compiler/tests/ |
H A D | inst_generator.h | 250 std::vector<ShiftType> only_shifts_ = {ShiftType::LSL, ShiftType::LSR, ShiftType::ASR};
    251 std::vector<ShiftType> shifts_and_rotation_ = {ShiftType::LSL, ShiftType::LSR, ShiftType::ASR, ShiftType::ROR};
|
/arkcompiler/runtime_core/static_core/compiler/tests/ |
H A D | inst_generator.h | 295 std::vector<ShiftType> onlyShifts_ = {ShiftType::LSL, ShiftType::LSR, ShiftType::ASR};
    296 std::vector<ShiftType> shiftsAndRotation_ = {ShiftType::LSL, ShiftType::LSR, ShiftType::ASR, ShiftType::ROR};
|
H A D | inst_generator_test.cpp | 530 case ShiftType::LSL: in DoShift()
    600 return DoShift(param1, ShiftType::LSL, param2, typeSize); in DoLogic()
    605 return DoShift(param1, ShiftType::LSL, param2 & (typeSize - 1L), typeSize); in DoLogic()
|
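Line 605 masks the shift amount with typeSize - 1 before shifting, which models hardware shifters that only consume the low log2(width) bits of the amount. A minimal stand-alone sketch of that idea (hypothetical helper, assuming typeSize is the operand width in bits and a power of two):

    #include <cstdint>

    // Hypothetical: logical shift left with the amount reduced modulo the operand width,
    // mirroring the `param2 & (typeSize - 1L)` pattern above.
    inline uint64_t LslModuloWidth(uint64_t value, uint64_t amount, uint64_t typeSize)
    {
        amount &= typeSize - 1U;  // e.g. 32-bit operands use only the low 5 bits
        uint64_t mask = (typeSize == 64U) ? ~0ULL : ((1ULL << typeSize) - 1U);
        return (value << amount) & mask;  // truncate the result to the operand width
    }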
H A D | lowering_test.cpp | 1690 std::initializer_list<ShiftOpPair> shiftOps = {{Opcode::Shl, Opcode::ShlI, ShiftType::LSL}, in TEST_F()
    1758 std::initializer_list<ShiftOpPair> shiftOps = {{Opcode::Shl, Opcode::ShlI, ShiftType::LSL}, in TEST_F()
    1844 std::initializer_list<ShiftOpPair> shiftOps = {{Opcode::Shl, Opcode::ShlI, ShiftType::LSL}, in TEST_F()
    1865 std::initializer_list<ShiftOpPair> shiftOps = {{Opcode::Shl, Opcode::ShlI, ShiftType::LSL}, in TEST_F()
    1972 std::initializer_list<ShiftOpPair> shiftOps = {{Opcode::Shl, Opcode::ShlI, ShiftType::LSL}, in TEST_F()
    2030 std::initializer_list<ShiftOpPair> shiftOps = {{Opcode::Shl, Opcode::ShlI, ShiftType::LSL}, in TEST_F()
    2090 {Opcode::Shl, ShiftType::LSL}, {Opcode::Shr, ShiftType::LSR}, {Opcode::AShr, ShiftType::ASR}}; in TEST_F()
    2140 {Opcode::Shl, ShiftType::LSL}, {Opcode::Shr, ShiftType::LSR}, {Opcode::AShr, ShiftType::ASR}}; in TEST_F()
    2188 {Opcode::Shl, ShiftType::LSL}, {Opcode::Shr, ShiftType::LSR}, {Opcode::AShr, ShiftType::ASR}}; in TEST_F()
    2218 {Opcode::Shl, ShiftType::LSL}, {Opcod in TEST_F() [all...]
/arkcompiler/ets_runtime/ecmascript/compiler/trampoline/aarch64/ |
H A D | asm_interpreter_call.cpp | 115 __ Add(trampoline, glueRegister, Operand(runtimeId, LSL, 3)); in AsmInterpEntryDispatch()
    534 __ Add(Register(FP), temp, Operand(argc, LSL, 3)); // 3: argc * 8 in CallNativeWithArgv()
    583 __ Add(trampoline, glue, Operand(runtimeId, LSL, 3)); in CallNativeWithArgv()
    732 __ Add(pc, pc, Operand(jumpSizeRegister, LSL, 0)); in ResumeRspAndDispatch()
    1012 __ Add(pc, pc, Operand(jumpSizeRegister, LSL, 0));
    1413 __ Add(X15, X15, Operand(Register(X16), LSL, GCBitset::BYTE_PER_WORD_LOG2));
    1951 __ Add(op, glue, Operand(op, LSL, 3));
|
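The `Operand(runtimeId, LSL, 3)` operands above fold an index scale into a single Add: the result is base + (index << 3), i.e. an offset into a table of 8-byte entries (the source's own comment on line 534, "argc * 8", says the same). The same address arithmetic in portable C++ (the table layout is illustrative, not the actual glue layout):

    #include <cstdint>

    // Illustrative only: base + (index << 3) selects the index-th 8-byte slot,
    // which is what `Add(trampoline, glue, Operand(runtimeId, LSL, 3))` encodes in one instruction.
    inline uintptr_t EntryAddress(uintptr_t base, uint64_t index)
    {
        return base + static_cast<uintptr_t>(index << 3U);  // index * sizeof(uint64_t)
    }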
H A D | common_call.cpp | 184 __ Cmp(op, Operand(numArgs, LSL, 3)); // 3: each args occupies 8 bytes in StackOverflowCheck()
|
H A D | optimized_call.cpp | 82 __ Add(tmp, glue, Operand(tmp, LSL, 3)); in CallRuntime()
    1083 __ Add(tmp, glue, Operand(runtimeId, LSL, 3)); in CallRuntimeWithArgv()
|
/arkcompiler/runtime_core/static_core/compiler/optimizer/code_generator/ |
H A D | operands.h | 411 explicit Shift(Reg base, uint32_t scale) : Shift(base, ShiftType::LSL, scale) {} in Shift()
|
/arkcompiler/ets_runtime/ecmascript/compiler/assembler/aarch64/ |
H A D | assembler_aarch64_constants.h | 59 LSL = 0x0,
|
H A D | assembler_aarch64.h | 164 Operand(Register reg, Shift shift = Shift::LSL, uint8_t shift_amount = 0) in Operand()
|
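The constructor defaults to Shift::LSL with amount 0, so passing a bare register yields an unshifted operand. A condensed sketch of that shape (the Register and Shift types here are stand-ins, not the assembler's real definitions):

    #include <cstdint>

    enum class Shift : uint8_t { LSL = 0x0, LSR, ASR, ROR };
    struct Register { int code; };

    // Stand-in sketch of the constructor shape seen in assembler_aarch64.h: a register operand
    // carries an optional shift, defaulting to LSL #0, i.e. the plain register value.
    class Operand {
    public:
        Operand(Register reg, Shift shift = Shift::LSL, uint8_t shiftAmount = 0)
            : reg_(reg), shift_(shift), shiftAmount_(shiftAmount) {}

    private:
        Register reg_;
        Shift shift_;
        uint8_t shiftAmount_;
    };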
/arkcompiler/runtime_core/static_core/compiler/optimizer/code_generator/target/aarch32/ |
H A D | encode.cpp | 241 return vixl::aarch32::MemOperand(baseReg, VixlReg(indexReg), vixl::aarch32::LSL, shift); in ConvertMem()
    882 return vixl::aarch32::MemOperand(baseReg, VixlReg(indexReg), vixl::aarch32::LSL, shift); in PrepareMemLdS()
    889 GetMasm()->Add(tmp, baseReg, vixl::aarch32::Operand(VixlReg(indexReg), vixl::aarch32::LSL, shift)); in PrepareMemLdS()
    935 GetMasm()->Add(tmp, baseReg, vixl::aarch32::Operand(VixlReg(indexReg), vixl::aarch32::LSL, scale)); in PrepareMemLdSForFloat()
    1643 vixl::aarch32::Operand(VixlRegU(dst), vixl::aarch32::LSL, SHIFT_BITS_DOUBLE)); in EncodeCastDoubleToInt64()
    2287 vixl::aarch32::Operand(VixlRegU(src0), vixl::aarch32::LSL, VixlReg(src1))); in EncodeShl()
    2313 vixl::aarch32::Operand(VixlRegU(src0), vixl::aarch32::LSL, VixlReg(tmpReg))); in EncodeShr()
    2340 vixl::aarch32::Operand(VixlRegU(src0), vixl::aarch32::LSL, VixlReg(tmpReg))); in EncodeAShr()
    2416 vixl::aarch32::Operand(VixlRegU(src), vixl::aarch32::LSL, VixlReg(hiReg))); in EncodeShl()
    2442 vixl::aarch32::Operand(VixlRegU(src), vixl::aarch32::LSL, VixlRe in EncodeShr()
    2468 vixl::aarch32::Operand(VixlRegU(src), vixl::aarch32::LSL, VixlReg(tmpReg))); in EncodeAShr() [all...]
/arkcompiler/ets_runtime/ecmascript/compiler/assembler/tests/ |
H A D | assembler_aarch64_test.cpp | 219 __ Add(Register(X1), Register(X2), Operand(Register(X3), LSL, 3)); in HWTEST_F_L0()
|
/arkcompiler/ets_runtime/ecmascript/compiler/tests/ |
H A D | instruction_combine_test.cpp | 418 EXPECT_EQ(m.Opcode(), OpCode::LSL); in HWTEST_F_L0()
    521 EXPECT_EQ(m.Opcode(), OpCode::LSL); in HWTEST_F_L0()
|
/arkcompiler/runtime_core/static_core/compiler/optimizer/code_generator/target/aarch64/ |
H A D | encode.cpp | 95 case ShiftType::LSL: in Convert()
    96 return vixl::aarch64::Shift::LSL; in Convert()
    163 return vixl::aarch64::MemOperand(VixlReg(baseReg), VixlReg(indexReg), vixl::aarch64::LSL, scale); in ConvertMem()
    2866 * Is the index shift amount, optional and defaulting to #0 when extend is not LSL:
    2879 * Is the index shift amount, optional and defaulting to #0 when extend is not LSL, and can be either #0 or #1.
    2895 return shiftType == ShiftType::LSL || shiftType == ShiftType::LSR || shiftType == ShiftType::ASR; in CanEncodeShiftedOperand()
|
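Line 2895 only accepts LSL, LSR and ASR when deciding whether a shift can be folded into the consuming instruction, which lines up with AArch64's Add/Sub (shifted register) forms, where ROR is not an option. Expressed as a stand-alone predicate over the ShiftType enum from constants.h:

    #include <cstdint>

    enum ShiftType : uint8_t { LSL, LSR, ASR, ROR, INVALID_SHIFT };

    // Mirrors the check on line 2895: only LSL/LSR/ASR may become a shifted operand; ROR never is.
    inline bool CanEncodeShiftedOperand(ShiftType shiftType)
    {
        return shiftType == ShiftType::LSL || shiftType == ShiftType::LSR || shiftType == ShiftType::ASR;
    }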
/arkcompiler/runtime_core/static_core/compiler/optimizer/optimizations/ |
H A D | lowering.cpp | 658 return ShiftType::LSL; in GetShiftTypeByOpcode()
|
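Together with the ShiftOpPair lists in lowering_test.cpp, this points at a straightforward opcode-to-shift-kind mapping used when lowering folds a shift instruction into its user. An illustrative version consistent with those pairs (the enums are trimmed to the opcodes that appear in the hits; the real function lives in lowering.cpp):

    #include <cstdint>

    enum ShiftType : uint8_t { LSL, LSR, ASR, ROR, INVALID_SHIFT };
    enum class Opcode { Shl, Shr, AShr };

    // Illustrative mapping consistent with {Opcode::Shl, ShiftType::LSL}, {Opcode::Shr, ShiftType::LSR},
    // {Opcode::AShr, ShiftType::ASR} from the lowering tests above.
    inline ShiftType GetShiftTypeByOpcode(Opcode opcode)
    {
        switch (opcode) {
            case Opcode::Shl:
                return ShiftType::LSL;
            case Opcode::Shr:
                return ShiftType::LSR;
            case Opcode::AShr:
                return ShiftType::ASR;
            default:
                return ShiftType::INVALID_SHIFT;
        }
    }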
/arkcompiler/ets_runtime/ecmascript/compiler/codegen/maple/ |
H A D | litecg_ir_builder.cpp | 458 case OpCode::LSL: in HandleBB()
    460 InsertUsedOpcodeSet(usedOpcodeSet, OpCode::LSL); in HandleBB()
|
/arkcompiler/runtime_core/static_core/compiler/optimizer/code_generator/target/amd64/ |
H A D | encode.cpp | 1179 case ShiftType::LSL: in MakeShift()
|
/arkcompiler/ets_runtime/ecmascript/compiler/codegen/llvm/ |
H A D | llvm_ir_builder.cpp | 206 {OpCode::LSL, &LLVMIRBuilder::HandleIntLsl}, in InitializeHandlers()
|