Lines matching refs:imm (the numbers are source line numbers; non-matching lines are omitted, which is why the numbering has gaps)
225 uint32_t imm) {
226 if (IsUintN(16, imm)) {
228 mov(cond, tmp, imm & 0xffff);
232 if (ImmediateT32::IsImmediateT32(~imm)) {
234 mvn(cond, tmp, ~imm);
238 if (ImmediateA32::IsImmediateA32(~imm)) {
240 mvn(cond, tmp, ~imm);
245 mov(cond, tmp, imm & 0xffff);
246 movt(cond, tmp, imm >> 16);
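The cluster at 225-246 is the scalar immediate-synthesis fallback: a value that fits in 16 bits gets a single mov, a value whose bitwise inverse is an encodable modified immediate gets a single mvn, and everything else is built from a mov/movt pair covering the low and high halfwords. Below is a minimal standalone sketch of that decision ladder; the function names are invented for illustration, only the A32 modified-immediate rule (an 8-bit value rotated right by an even amount) is modelled, and the separate T32 test seen at line 232 is left out.

#include <cstdint>
#include <cstdio>

// A32 modified immediate: an 8-bit value rotated right by an even amount.
static bool IsModifiedImmediateA32(uint32_t imm) {
  for (unsigned rot = 0; rot < 32; rot += 2) {
    // Rotating left by `rot` undoes a rotate-right encoding of `rot`.
    uint32_t v = (rot == 0) ? imm : ((imm << rot) | (imm >> (32 - rot)));
    if (v <= 0xff) return true;
  }
  return false;
}

// Mirrors the selection order above and prints the sequence it would pick.
static void SynthesizeImmediate(uint32_t imm) {
  if (imm <= 0xffff) {
    printf("movw tmp, #0x%x\n", (unsigned)imm);
  } else if (IsModifiedImmediateA32(~imm)) {
    printf("mvn  tmp, #0x%x\n", (unsigned)~imm);
  } else {
    printf("movw tmp, #0x%x ; movt tmp, #0x%x\n",
           (unsigned)(imm & 0xffff), (unsigned)(imm >> 16));
  }
}

int main() {
  SynthesizeImmediate(0x1234);      // fits in 16 bits -> single movw
  SynthesizeImmediate(0xffff00ff);  // inverse 0xff00 encodes -> single mvn
  SynthesizeImmediate(0x12345678);  // general case -> movw + movt pair
  return 0;
}

The ordering matters for code size: each earlier branch emits one instruction where the final branch emits two.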
751 uint32_t imm = operand.GetImmediate();
757 HandleOutOfBoundsImmediate(cond, rn, imm);
769 HandleOutOfBoundsImmediate(al, scratch, imm);
780 HandleOutOfBoundsImmediate(cond, scratch, imm);
791 HandleOutOfBoundsImmediate(cond, scratch, imm);
801 HandleOutOfBoundsImmediate(cond, scratch, imm);
905 int32_t imm = operand.GetSignedImmediate();
906 if (((type == kOrn) || (type == kOrns)) && IsModifiedImmediate(~imm)) {
910 orr(cond, rd, rn, ~imm);
913 orrs(cond, rd, rn, ~imm);
925 // TODO: orn r0, r1, imm -> orr r0, r1, ~imm if doable
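ORN computes rn | ~imm, so an orn whose immediate does not encode can be folded into an orr with the bitwise inverse whenever that inverse does encode; that is what lines 905-913 do, and lines 1067-1075 further down apply the same identity in the opposite direction, turning an unencodable orr into a T32 orn. A small check of the identity itself, using a pair where only one value is a valid modified immediate (0xffffff00 is not; 0x000000ff trivially is):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t rn = 0x12340000;
  uint32_t imm = 0xffffff00;
  uint32_t orn_result = rn | ~imm;         // what orn rd, rn, #imm yields
  uint32_t orr_result = rn | 0x000000ffu;  // orr rd, rn, #~imm, pre-folded
  assert(orn_result == orr_result);
  return 0;
}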
955 int32_t imm = operand.GetSignedImmediate();
956 HandleOutOfBoundsImmediate(cond, scratch, imm);
994 uint32_t imm,
996 uint32_t high = imm & ~mask;
1003 uint32_t low = imm & mask;
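GenerateSplitInstruction (lines 994-1003) rescues an unencodable immediate by cutting it at a mask boundary into two pieces that each encode, so that a pair of instructions reproduces the single intended one. A minimal check of the arithmetic for the add case, with a hypothetical mask; the real mask is derived from the trailing-zero computation at line 1143:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t rn = 1000;
  uint32_t imm = 0x00012034;   // significant bits span 15 positions: unencodable
  uint32_t mask = 0x00000fff;  // hypothetical split point
  uint32_t high = imm & ~mask;
  uint32_t low = imm & mask;
  assert(high + low == imm);            // the halves recombine exactly
  assert(rn + high + low == rn + imm);  // add #high then add #low == add #imm
  return 0;
}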
1067 int32_t imm = operand.GetSignedImmediate();
1068 if (ImmediateT32::IsImmediateT32(~imm)) {
1072 orn(cond, rd, rn, ~imm);
1075 orns(cond, rd, rn, ~imm);
1082 if (imm < 0) {
1085 // add rd, rn, #imm <-> sub rd, rn, - #imm
1087 // adc rd, rn, #imm <-> sbc rd, rn, NOT #imm
1091 imm = -imm;
1095 imm = -imm;
1099 imm = -imm;
1103 imm = -imm;
1107 imm = ~imm;
1111 imm = ~imm;
1115 imm = ~imm;
1119 imm = ~imm;
1126 (this->*asmcb)(cond, size, rd, rn, Operand(imm));
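The sign fixups at 1082-1126 rewrite an instruction given a negative immediate into its dual before dispatching through the original assembler callback: four cases negate the immediate (the add<->sub family) and four invert it (the adc<->sbc family, which consume the carry flag). Both identities can be checked with ordinary wrap-around arithmetic:

#include <cassert>
#include <cstdint>

int main() {
  // add rd, rn, #imm  ==  sub rd, rn, #-imm
  // adc rd, rn, #imm  ==  sbc rd, rn, #~imm
  // (sbc computes rn - op - (1 - C) = rn + ~op + C, which collapses to
  //  rn + imm + C once op = ~imm.)
  uint32_t rn = 0x80000001;
  uint32_t imm = 0xfffffff7;  // -9 when read as a signed value
  for (uint32_t c = 0; c <= 1; ++c) {
    assert(rn + imm == rn - (0u - imm));           // add <-> sub
    assert(rn + imm + c == rn - ~imm - (1u - c));  // adc <-> sbc
  }
  return 0;
}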
1134 // When type is Add or Sub, imm is always positive (imm < 0 has just been
1135 // handled and imm == 0 would have been generated without the need of a
1139 VIXL_ASSERT(imm > 0);
1143 int trailing_zeroes = CountTrailingZeros(imm) & ~1u;
1147 if (GenerateSplitInstruction(instruction, cond, rd, rn, imm, mask)) {
1161 if (GenerateSplitInstruction(asmcb, cond, rd, rn, -imm, mask)) {
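The split point chosen at line 1143 rounds the trailing-zero count down to an even number because A32 modified immediates rotate by even amounts only; if no split of imm succeeds, line 1161 retries with -imm, which can work when the negated value packs its set bits more conveniently. A short demonstration of the rounding, using a GCC/Clang builtin as a stand-in for CountTrailingZeros:

#include <cassert>

int main() {
  unsigned imm = 0x00002800;  // bits 11 and 13 set -> 11 trailing zeros
  unsigned tz = __builtin_ctz(imm) & ~1u;
  assert(tz == 10);           // rounded down from 11 to an even count
  return 0;
}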
1178 bool offset_is_negative_or_zero = (imm <= 0);
1182 offset_is_negative_or_zero = (imm <= 0);
1186 offset_is_negative_or_zero = (imm >= 0);
1190 offset_is_negative_or_zero = (imm < 0);
1194 offset_is_negative_or_zero = (imm > 0);
1272 static inline bool IsI64BitPattern(T imm) {
1274 if (((imm & mask) != mask) && ((imm & mask) != 0)) return false;
1281 static inline bool IsI8BitPattern(T imm) {
1282 uint8_t imm8 = imm & 0xff;
1284 imm >>= 8;
1285 if ((imm & 0xff) != imm8) return false;
1324 static inline RES replicate(T imm) {
1327 RES res = imm;
1329 res = (res << (sizeof(T) * 8)) | imm;
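These three helpers classify vector immediates: IsI8BitPattern accepts values whose bytes are all identical (materialisable as an I8 splat), IsI64BitPattern accepts values whose bytes are each 0x00 or 0xff (the I64 byte-mask form), and replicate widens a value by repeating it. Completed, standalone versions at concrete widths (the originals are templates over T and RES):

#include <cassert>
#include <cstdint>

// Every byte equals the lowest byte -> usable as an I8 splat.
static bool IsI8BitPattern(uint32_t imm) {
  uint8_t imm8 = imm & 0xff;
  for (int i = 0; i < 3; ++i) {
    imm >>= 8;
    if ((imm & 0xff) != imm8) return false;
  }
  return true;
}

// Every byte is 0x00 or 0xff -> usable as an I64 byte-mask immediate.
static bool IsI64BitPattern(uint32_t imm) {
  for (uint32_t mask = 0xff; mask != 0; mask <<= 8) {
    if (((imm & mask) != mask) && ((imm & mask) != 0)) return false;
  }
  return true;
}

// Widens a 32-bit value to 64 bits by repetition, as replicate<uint64_t>
// does for a 32-bit source.
static uint64_t Replicate64(uint32_t imm) {
  return (static_cast<uint64_t>(imm) << 32) | imm;
}

int main() {
  assert(IsI8BitPattern(0xabababab));
  assert(!IsI8BitPattern(0xab00abab));
  assert(IsI64BitPattern(0x00ff00ff));
  assert(!IsI64BitPattern(0x00f100ff));
  assert(Replicate64(0x00ff00ff) == 0x00ff00ff00ff00ffull);
  return 0;
}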
1379 uint32_t imm = neon_imm.GetImmediate<uint32_t>();
1381 if (IsI8BitPattern(imm)) {
1383 vmov(cond, I8, rd, imm & 0xff);
1388 if (IsI64BitPattern(imm)) {
1390 vmov(cond, I64, rd, replicate<uint64_t>(imm));
1395 if (cond.Is(al) && CanBeInverted(imm)) {
1397 vmvn(I32, rd, ~imm);
1404 uint16_t imm = neon_imm.GetImmediate<uint16_t>();
1406 if (IsI8BitPattern(imm)) {
1408 vmov(cond, I8, rd, imm & 0xff);
1415 uint64_t imm = neon_imm.GetImmediate<uint64_t>();
1417 if (IsI8BitPattern(imm)) {
1419 vmov(cond, I8, rd, imm & 0xff);
1435 mov(cond, scratch, static_cast<uint32_t>(imm & 0xffffffff));
1451 mov(cond, scratch, static_cast<uint32_t>(imm >> 32));
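The D-register path at 1379-1451 tries the ladder in cheapness order: an I8 splat, then an I64 byte-mask (via replicate), then vmvn of the inverse when that inverse encodes, and finally the general fallback of moving the value through a core scratch register one 32-bit word at a time (lines 1435 and 1451). The word split in that fallback is plain masking and shifting:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t imm = 0x0123456789abcdefull;
  uint32_t lo = static_cast<uint32_t>(imm & 0xffffffff);  // first mov operand
  uint32_t hi = static_cast<uint32_t>(imm >> 32);         // second mov operand
  assert(((static_cast<uint64_t>(hi) << 32) | lo) == imm);
  return 0;
}

The Q-register path at 1525-1592 repeats the same ladder for 128-bit destinations.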
1525 uint32_t imm = neon_imm.GetImmediate<uint32_t>();
1527 if (IsI8BitPattern(imm)) {
1529 vmov(cond, I8, rd, imm & 0xff);
1534 if (IsI64BitPattern(imm)) {
1536 vmov(cond, I64, rd, replicate<uint64_t>(imm));
1541 if (CanBeInverted(imm)) {
1543 vmvn(cond, I32, rd, ~imm);
1550 uint16_t imm = neon_imm.GetImmediate<uint16_t>();
1552 if (IsI8BitPattern(imm)) {
1554 vmov(cond, I8, rd, imm & 0xff);
1561 uint64_t imm = neon_imm.GetImmediate<uint64_t>();
1563 if (IsI8BitPattern(imm)) {
1565 vmov(cond, I8, rd, imm & 0xff);
1578 mov(cond, scratch, static_cast<uint32_t>(imm & 0xffffffff));
1592 mov(cond, scratch, static_cast<uint32_t>(imm >> 32));
1747 // Avoid the unpredictable case 'str r0, [r0, imm]!'
1793 // Avoid the unpredictable case 'ldr r0, [r0], imm'
1840 // Avoid the unpredictable case 'str r0, [r0, imm]!'
1890 // Avoid the unpredictable case 'ldr r0, [r0], imm'
2019 // Avoid the unpredictable case 'ldrd r0, r1, [r0], imm'
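These comments all guard the same architectural rule: a load or store with register writeback is UNPREDICTABLE when the base register is also a transfer register (ldrd adds a second transfer register to the check). A minimal sketch of the guard, with invented types, assuming registers compare by code:

#include <cassert>

struct Reg { int code; };

// True when a writeback form would be UNPREDICTABLE, e.g.
// 'str r0, [r0, #imm]!' or 'ldr r0, [r0], #imm'.
static bool UnpredictableWriteback(Reg rt, Reg rn) {
  return rt.code == rn.code;
}

int main() {
  Reg r0{0}, r1{1};
  assert(UnpredictableWriteback(r0, r0));   // must be avoided / rewritten
  assert(!UnpredictableWriteback(r0, r1));  // fine as encoded
  return 0;
}

The excerpted comments mark the points where the macro assembler steers emission away from these encodings.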