Lines matching refs:rt — cross-reference hits for the `rt` operand in V8's RISC-V TurboAssembler (macro-assembler-riscv64.cc). Source line numbers are preserved; lines that did not match the query are elided, so the fragments below have gaps.
38 static inline bool IsZero(const Operand& rt) {
39 if (rt.is_reg()) {
40 return rt.rm() == zero_reg;
42 return rt.immediate() == 0;
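Reconstructed for readability (the intervening line 41 did not match the query), the full helper reads:

```cpp
// An Operand is "zero" when it is the zero register or a literal 0 immediate.
static inline bool IsZero(const Operand& rt) {
  if (rt.is_reg()) {
    return rt.rm() == zero_reg;
  } else {
    return rt.immediate() == 0;
  }
}
```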
340 void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) {
341 if (rt.is_reg()) {
344 ((rt.rm().code() & 0b11000) == 0b01000)) {
345 c_addw(rd, rt.rm());
347 addw(rd, rs, rt.rm());
350 if (FLAG_riscv_c_extension && is_int6(rt.immediate()) &&
352 !MustUseReg(rt.rmode())) {
353 c_addiw(rd, static_cast<int8_t>(rt.immediate()));
354 } else if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
355 addiw(rd, rs, static_cast<int32_t>(rt.immediate()));
356 } else if ((-4096 <= rt.immediate() && rt.immediate() <= -2049) ||
357 (2048 <= rt.immediate() && rt.immediate() <= 4094)) {
358 addiw(rd, rs, rt.immediate() / 2);
359 addiw(rd, rd, rt.immediate() - (rt.immediate() / 2));
364 Li(scratch, rt.immediate());
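Lines 356-359 are the interesting branch: an immediate just outside the signed 12-bit `addiw` range is split into two halves, each of which fits, so the addition costs two instructions instead of a full `Li` materialization. A standalone sketch of the split (the `fits_int12` helper mirrors V8's `is_int12`):

```cpp
#include <cassert>
#include <cstdint>

bool fits_int12(int64_t v) { return v >= -2048 && v <= 2047; }

// Valid for imm in [-4096, -2049] or [2048, 4094]. 4095 is excluded because
// 4095 - 4095 / 2 == 2048, which no longer fits a signed 12-bit field.
void split_imm12(int64_t imm, int64_t* lo, int64_t* hi) {
  *lo = imm / 2;        // first addiw
  *hi = imm - imm / 2;  // second addiw
  assert(fits_int12(*lo) && fits_int12(*hi));
}
```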
370 void TurboAssembler::Add64(Register rd, Register rs, const Operand& rt) {
371 if (rt.is_reg()) {
373 (rt.rm() != zero_reg) && (rs != zero_reg)) {
374 c_add(rd, rt.rm());
376 add(rd, rs, rt.rm());
379 if (FLAG_riscv_c_extension && is_int6(rt.immediate()) &&
380 (rd.code() == rs.code()) && (rd != zero_reg) && (rt.immediate() != 0) &&
381 !MustUseReg(rt.rmode())) {
382 c_addi(rd, static_cast<int8_t>(rt.immediate()));
383 } else if (FLAG_riscv_c_extension && is_int10(rt.immediate()) &&
384 (rt.immediate() != 0) && ((rt.immediate() & 0xf) == 0) &&
386 !MustUseReg(rt.rmode())) {
387 c_addi16sp(static_cast<int16_t>(rt.immediate()));
389 (rs == sp) && is_uint10(rt.immediate()) &&
390 (rt.immediate() != 0) && !MustUseReg(rt.rmode())) {
391 c_addi4spn(rd, static_cast<uint16_t>(rt.immediate()));
392 } else if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
393 addi(rd, rs, static_cast<int32_t>(rt.immediate()));
394 } else if ((-4096 <= rt.immediate() && rt.immediate() <= -2049) ||
395 (2048 <= rt.immediate() && rt.immediate() <= 4094)) {
396 addi(rd, rs, rt.immediate() / 2);
397 addi(rd, rd, rt.immediate() - (rt.immediate() / 2));
403 Li(scratch, rt.immediate());
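Add64 walks a chain of 16-bit RVC encodings before falling back to 32-bit forms. A minimal sketch of the eligibility tests, modeling register operands as plain integer codes (the constraints are the RVC encoding rules; the elided lines around 385 additionally require rd and rs to be sp for c.addi16sp, which implicitly targets the stack pointer):

```cpp
#include <cstdint>

bool is_int6(int64_t v) { return v >= -32 && v <= 31; }

// c.addi: rd must equal rs, rd must not be x0, imm must be a nonzero int6.
bool can_c_addi(int rd, int rs, int64_t imm) {
  return rd == rs && rd != 0 && imm != 0 && is_int6(imm);
}

// c.addi16sp: adjusts sp (x2) by a nonzero multiple of 16 in [-512, 496].
bool can_c_addi16sp(int rd, int rs, int64_t imm) {
  return rd == 2 && rs == 2 && imm != 0 && (imm & 0xf) == 0 &&
         imm >= -512 && imm <= 496;
}
```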
409 void TurboAssembler::Sub32(Register rd, Register rs, const Operand& rt) {
410 if (rt.is_reg()) {
413 ((rt.rm().code() & 0b11000) == 0b01000)) {
414 c_subw(rd, rt.rm());
416 subw(rd, rs, rt.rm());
419 DCHECK(is_int32(rt.immediate()));
421 (rd != zero_reg) && is_int6(-rt.immediate()) &&
422 !MustUseReg(rt.rmode())) {
426 -rt.immediate())); // No c_subiw instr, use c_addiw(x, y, -imm).
427 } else if (is_int12(-rt.immediate()) && !MustUseReg(rt.rmode())) {
430 -rt.immediate())); // No subiw instr, use addiw(x, y, -imm).
431 } else if ((-4096 <= -rt.immediate() && -rt.immediate() <= -2049) ||
432 (2048 <= -rt.immediate() && -rt.immediate() <= 4094)) {
433 addiw(rd, rs, -rt.immediate() / 2);
434 addiw(rd, rd, -rt.immediate() - (-rt.immediate() / 2));
438 if (-rt.immediate() >> 12 == 0 && !MustUseReg(rt.rmode())) {
440 Li(scratch, -rt.immediate());
444 Li(scratch, rt.immediate());
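RISC-V has no subtract-immediate instruction, so Sub32 emits `addiw(rd, rs, -imm)` — which is why line 427 tests `is_int12(-rt.immediate())` rather than `is_int12(rt.immediate())`: it is the negated value that must fit the 12-bit field, and negation shifts the representable range by one. A runnable illustration:

```cpp
#include <cstdint>

// imm == -2048 fails: -imm == 2048 exceeds the addi range [-2048, 2047].
// imm ==  2048 succeeds: -imm == -2048 still fits.
bool fits_as_addi_negated(int32_t imm) {
  int64_t neg = -static_cast<int64_t>(imm);  // widen first; -INT32_MIN is UB
  return neg >= -2048 && neg <= 2047;
}
```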
451 void TurboAssembler::Sub64(Register rd, Register rs, const Operand& rt) {
452 if (rt.is_reg()) {
455 ((rt.rm().code() & 0b11000) == 0b01000)) {
456 c_sub(rd, rt.rm());
458 sub(rd, rs, rt.rm());
461 (rd != zero_reg) && is_int6(-rt.immediate()) &&
462 (rt.immediate() != 0) && !MustUseReg(rt.rmode())) {
465 -rt.immediate())); // No c_subi instr, use c_addi(x, y, -imm).
467 } else if (FLAG_riscv_c_extension && is_int10(-rt.immediate()) &&
468 (rt.immediate() != 0) && ((rt.immediate() & 0xf) == 0) &&
470 !MustUseReg(rt.rmode())) {
471 c_addi16sp(static_cast<int16_t>(-rt.immediate()));
472 } else if (is_int12(-rt.immediate()) && !MustUseReg(rt.rmode())) {
475 -rt.immediate())); // No subi instr, use addi(x, y, -imm).
476 } else if ((-4096 <= -rt.immediate() && -rt.immediate() <= -2049) ||
477 (2048 <= -rt.immediate() && -rt.immediate() <= 4094)) {
478 addi(rd, rs, -rt.immediate() / 2);
479 addi(rd, rd, -rt.immediate() - (-rt.immediate() / 2));
481 int li_count = InstrCountForLi64Bit(rt.immediate());
482 int li_neg_count = InstrCountForLi64Bit(-rt.immediate());
483 if (li_neg_count < li_count && !MustUseReg(rt.rmode())) {
485 DCHECK(rt.immediate() != std::numeric_limits<int32_t>::min());
488 Li(scratch, -rt.immediate());
494 Li(scratch, rt.immediate());
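The final fallback (lines 481-488) compares the cost of materializing imm versus -imm: loading the negated constant and adding can be shorter than loading the constant and subtracting. A sketch of that decision, with the cost function passed in because V8's `InstrCountForLi64Bit` is not reproduced here:

```cpp
#include <cstdint>
#include <limits>

// Prefer Li(scratch, -imm) + add over Li(scratch, imm) + sub when the
// negated constant has a shorter load sequence. Negating the minimum value
// would overflow, so that case can never take the negated path (the original
// code DCHECKs a related bound at line 485).
bool prefer_negated_li(int64_t imm, int (*li_cost)(int64_t)) {
  if (imm == std::numeric_limits<int64_t>::min()) return false;
  return li_cost(-imm) < li_cost(imm);
}
```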
500 void TurboAssembler::Mul32(Register rd, Register rs, const Operand& rt) {
501 if (rt.is_reg()) {
502 mulw(rd, rs, rt.rm());
507 Li(scratch, rt.immediate());
512 void TurboAssembler::Mulh32(Register rd, Register rs, const Operand& rt) {
513 if (rt.is_reg()) {
514 mul(rd, rs, rt.rm());
519 Li(scratch, rt.immediate());
525 void TurboAssembler::Mulhu32(Register rd, Register rs, const Operand& rt,
528 if (rt.is_reg()) {
529 slli(rtz, rt.rm(), 32);
531 Li(rtz, rt.immediate() << 32);
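Mulhu32 uses a shift trick: both 32-bit operands are moved into the upper halves of 64-bit registers, so `(a << 32) * (b << 32) == (a * b) << 64`, and `mulhu` (the high 64 bits of the 128-bit product) returns the full 64-bit product a*b; the trailing `srai` by 32 (elided here) then extracts its top half. A runnable reference of what the sequence computes:

```cpp
#include <cstdint>

// Upper 32 bits of an unsigned 32x32-bit product, sign-extended to 64 bits.
int64_t mulhu32_reference(uint32_t a, uint32_t b) {
  uint64_t full = static_cast<uint64_t>(a) * b;  // == mulhu(a<<32, b<<32)
  return static_cast<int64_t>(full) >> 32;       // == srai(rd, rd, 32)
}
```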
537 void TurboAssembler::Mul64(Register rd, Register rs, const Operand& rt) {
538 if (rt.is_reg()) {
539 mul(rd, rs, rt.rm());
544 Li(scratch, rt.immediate());
549 void TurboAssembler::Mulh64(Register rd, Register rs, const Operand& rt) {
550 if (rt.is_reg()) {
551 mulh(rd, rs, rt.rm());
556 Li(scratch, rt.immediate());
561 void TurboAssembler::Div32(Register res, Register rs, const Operand& rt) {
562 if (rt.is_reg()) {
563 divw(res, rs, rt.rm());
568 Li(scratch, rt.immediate());
573 void TurboAssembler::Mod32(Register rd, Register rs, const Operand& rt) {
574 if (rt.is_reg()) {
575 remw(rd, rs, rt.rm());
580 Li(scratch, rt.immediate());
585 void TurboAssembler::Modu32(Register rd, Register rs, const Operand& rt) {
586 if (rt.is_reg()) {
587 remuw(rd, rs, rt.rm());
592 Li(scratch, rt.immediate());
597 void TurboAssembler::Div64(Register rd, Register rs, const Operand& rt) {
598 if (rt.is_reg()) {
599 div(rd, rs, rt.rm());
604 Li(scratch, rt.immediate());
609 void TurboAssembler::Divu32(Register res, Register rs, const Operand& rt) {
610 if (rt.is_reg()) {
611 divuw(res, rs, rt.rm());
616 Li(scratch, rt.immediate());
621 void TurboAssembler::Divu64(Register res, Register rs, const Operand& rt) {
622 if (rt.is_reg()) {
623 divu(res, rs, rt.rm());
628 Li(scratch, rt.immediate());
633 void TurboAssembler::Mod64(Register rd, Register rs, const Operand& rt) {
634 if (rt.is_reg()) {
635 rem(rd, rs, rt.rm());
640 Li(scratch, rt.immediate());
645 void TurboAssembler::Modu64(Register rd, Register rs, const Operand& rt) {
646 if (rt.is_reg()) {
647 remu(rd, rs, rt.rm());
652 Li(scratch, rt.immediate());
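All of the multiply/divide/remainder wrappers above share one shape: use the register operand directly, otherwise materialize the immediate into a scratch register first. A schematic rendering of that pattern — `emit_rr` and `load_to_scratch` are placeholders for this sketch, not V8 API:

```cpp
#include <cstdint>
#include <functional>

struct Op {
  bool is_reg;
  int reg;      // valid when is_reg
  int64_t imm;  // valid when !is_reg
};

void emit_rr_op(const std::function<void(int, int, int)>& emit_rr,
                const std::function<int(int64_t)>& load_to_scratch,
                int rd, int rs, const Op& rt) {
  if (rt.is_reg) {
    emit_rr(rd, rs, rt.reg);                // e.g. divw(rd, rs, rt.rm())
  } else {
    int scratch = load_to_scratch(rt.imm);  // Li(scratch, rt.immediate())
    emit_rr(rd, rs, scratch);
  }
}
```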
657 void TurboAssembler::And(Register rd, Register rs, const Operand& rt) {
658 if (rt.is_reg()) {
661 ((rt.rm().code() & 0b11000) == 0b01000)) {
662 c_and(rd, rt.rm());
664 and_(rd, rs, rt.rm());
667 if (FLAG_riscv_c_extension && is_int6(rt.immediate()) &&
668 !MustUseReg(rt.rmode()) && (rd.code() == rs.code()) &&
670 c_andi(rd, static_cast<int8_t>(rt.immediate()));
671 } else if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
672 andi(rd, rs, static_cast<int32_t>(rt.immediate()));
677 Li(scratch, rt.immediate());
683 void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) {
684 if (rt.is_reg()) {
687 ((rt.rm().code() & 0b11000) == 0b01000)) {
688 c_or(rd, rt.rm());
690 or_(rd, rs, rt.rm());
693 if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
694 ori(rd, rs, static_cast<int32_t>(rt.immediate()));
699 Li(scratch, rt.immediate());
705 void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) {
706 if (rt.is_reg()) {
709 ((rt.rm().code() & 0b11000) == 0b01000)) {
710 c_xor(rd, rt.rm());
712 xor_(rd, rs, rt.rm());
715 if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
716 xori(rd, rs, static_cast<int32_t>(rt.immediate()));
721 Li(scratch, rt.immediate());
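Note the asymmetry between And and Or/Xor: only And has a 16-bit immediate path, because RVC defines c.andi but no c.ori or c.xori, so Or and Xor go straight to the 32-bit `ori`/`xori` range test. The two ranges involved:

```cpp
#include <cstdint>

bool is_int6(int64_t v) { return v >= -32 && v <= 31; }       // c.andi only
bool is_int12(int64_t v) { return v >= -2048 && v <= 2047; }  // andi/ori/xori
```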
727 void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) {
728 if (rt.is_reg()) {
729 or_(rd, rs, rt.rm());
732 Or(rd, rs, rt);
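Nor is synthesized as OR followed by NOT (the `not_` lines did not match the query); RISC-V's `not` pseudo-instruction is `xori rd, rd, -1`. A runnable reference:

```cpp
#include <cstdint>

uint64_t nor_reference(uint64_t a, uint64_t b) {
  uint64_t r = a | b;  // or_(rd, rs, rt)
  return r ^ ~0ULL;    // not_(rd, rd), i.e. xori rd, rd, -1
}
```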
737 void TurboAssembler::Neg(Register rs, const Operand& rt) {
738 DCHECK(rt.is_reg());
739 neg(rs, rt.rm());
742 void TurboAssembler::Seqz(Register rd, const Operand& rt) {
743 if (rt.is_reg()) {
744 seqz(rd, rt.rm());
746 li(rd, rt.immediate() == 0);
750 void TurboAssembler::Snez(Register rd, const Operand& rt) {
751 if (rt.is_reg()) {
752 snez(rd, rt.rm());
754 li(rd, rt.immediate() != 0);
758 void TurboAssembler::Seq(Register rd, Register rs, const Operand& rt) {
760 Seqz(rd, rt);
761 } else if (IsZero(rt)) {
764 Sub64(rd, rs, rt);
769 void TurboAssembler::Sne(Register rd, Register rs, const Operand& rt) {
771 Snez(rd, rt);
772 } else if (IsZero(rt)) {
775 Sub64(rd, rs, rt);
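Seq and Sne lower `rs == rt` to a subtraction followed by a zero test; when one side is known zero they fall back to Seqz/Snez directly, and for an immediate operand Seqz/Snez fold to `li(rd, imm == 0)` at assembly time (lines 746 and 754). Reference semantics:

```cpp
#include <cstdint>

uint64_t seq_reference(uint64_t rs, uint64_t rt) {
  return (rs - rt) == 0 ? 1 : 0;  // Sub64 then Seqz
}
uint64_t sne_reference(uint64_t rs, uint64_t rt) {
  return (rs - rt) != 0 ? 1 : 0;  // Sub64 then Snez
}
```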
780 void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) {
781 if (rt.is_reg()) {
782 slt(rd, rs, rt.rm());
784 if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
785 slti(rd, rs, static_cast<int32_t>(rt.immediate()));
791 Li(scratch, rt.immediate());
797 void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
798 if (rt.is_reg()) {
799 sltu(rd, rs, rt.rm());
801 if (is_int12(rt.immediate()) && !MustUseReg(rt.rmode())) {
802 sltiu(rd, rs, static_cast<int32_t>(rt.immediate()));
808 Li(scratch, rt.immediate());
814 void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) {
815 if (rt.is_reg()) {
816 slt(rd, rt.rm(), rs);
822 Li(scratch, rt.immediate());
828 void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) {
829 if (rt.is_reg()) {
830 sltu(rd, rt.rm(), rs);
836 Li(scratch, rt.immediate());
842 void TurboAssembler::Sge(Register rd, Register rs, const Operand& rt) {
843 Slt(rd, rs, rt);
847 void TurboAssembler::Sgeu(Register rd, Register rs, const Operand& rt) {
848 Sltu(rd, rs, rt);
852 void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) {
853 if (rt.is_reg()) {
854 slt(rd, rt.rm(), rs);
860 Li(scratch, rt.immediate());
865 void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) {
866 if (rt.is_reg()) {
867 sltu(rd, rt.rm(), rs);
873 Li(scratch, rt.immediate());
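slt/sltu compute only "less than", so the remaining orderings are synthesized: swap the operands for greater-than, and invert the result (the elided `xori rd, rd, 1` lines in Sle/Sleu/Sge/Sgeu) for the non-strict forms. Reference semantics:

```cpp
#include <cstdint>

uint64_t sgt_reference(int64_t rs, int64_t rt) { return rt < rs; }    // swap
uint64_t sge_reference(int64_t rs, int64_t rt) { return !(rs < rt); } // invert
uint64_t sle_reference(int64_t rs, int64_t rt) { return !(rt < rs); } // both
```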
878 void TurboAssembler::Sll32(Register rd, Register rs, const Operand& rt) {
879 if (rt.is_reg()) {
880 sllw(rd, rs, rt.rm());
882 uint8_t shamt = static_cast<uint8_t>(rt.immediate());
887 void TurboAssembler::Sra32(Register rd, Register rs, const Operand& rt) {
888 if (rt.is_reg()) {
889 sraw(rd, rs, rt.rm());
891 uint8_t shamt = static_cast<uint8_t>(rt.immediate());
896 void TurboAssembler::Srl32(Register rd, Register rs, const Operand& rt) {
897 if (rt.is_reg()) {
898 srlw(rd, rs, rt.rm());
900 uint8_t shamt = static_cast<uint8_t>(rt.immediate());
905 void TurboAssembler::Sra64(Register rd, Register rs, const Operand& rt) {
906 if (rt.is_reg()) {
907 sra(rd, rs, rt.rm());
909 ((rd.code() & 0b11000) == 0b01000) && is_int6(rt.immediate())) {
910 uint8_t shamt = static_cast<uint8_t>(rt.immediate());
913 uint8_t shamt = static_cast<uint8_t>(rt.immediate());
918 void TurboAssembler::Srl64(Register rd, Register rs, const Operand& rt) {
919 if (rt.is_reg()) {
920 srl(rd, rs, rt.rm());
922 ((rd.code() & 0b11000) == 0b01000) && is_int6(rt.immediate())) {
923 uint8_t shamt = static_cast<uint8_t>(rt.immediate());
926 uint8_t shamt = static_cast<uint8_t>(rt.immediate());
931 void TurboAssembler::Sll64(Register rd, Register rs, const Operand& rt) {
932 if (rt.is_reg()) {
933 sll(rd, rs, rt.rm());
935 uint8_t shamt = static_cast<uint8_t>(rt.immediate());
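The recurring test `(code & 0b11000) == 0b01000` — seen in the shift, add, and logical paths above — checks that a register is one of x8..x15, the only registers most 16-bit RVC encodings can address (their 3-bit register fields encode x8+n):

```cpp
// Codes 8..15 are 0b01000..0b01111, so masking the top two bits of the
// 5-bit code and comparing against 0b01000 selects exactly x8..x15.
bool is_rvc_register(int code) { return (code & 0b11000) == 0b01000; }
```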
953 void TurboAssembler::Mv(Register rd, const Operand& rt) {
954 if (FLAG_riscv_c_extension && (rd != zero_reg) && (rt.rm() != zero_reg)) {
955 c_mv(rd, rt.rm());
957 mv(rd, rt.rm());
961 void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) {
965 if (rt.is_reg()) {
966 negw(scratch, rt.rm());
968 srlw(rd, rs, rt.rm());
972 int64_t ror_value = rt.immediate() % 32;
986 void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) {
990 if (rt.is_reg()) {
991 negw(scratch, rt.rm());
993 srl(rd, rs, rt.rm());
996 int64_t dror_value = rt.immediate() % 64;
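Without the B extension there is no rotate instruction, so Ror/Dror build one from two shifts and an OR; `negw(scratch, rt.rm())` yields `(32 - n) mod 32` for free because RISC-V register shifts read only the low 5 (or 6) bits of the shift amount. A runnable 32-bit reference:

```cpp
#include <cstdint>

uint32_t ror32_reference(uint32_t x, uint32_t n) {
  n &= 31;  // sllw/srlw mask the shift amount the same way
  return (x >> n) | (x << ((32 - n) & 31));  // n == 0 degenerates to x
}
```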
1009 void TurboAssembler::CalcScaledAddress(Register rd, Register rt, Register rs,
1013 Register tmp = rd == rt ? temps.Acquire() : rd;
1014 DCHECK(tmp != rt);
1016 Add64(rd, rt, tmp);
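CalcScaledAddress computes `rd = rt + (rs << sa)`; the temporary at line 1013 exists because when rd aliases rt, shifting directly into rd would clobber the base before the add reads it. Reference semantics:

```cpp
#include <cstdint>

uint64_t calc_scaled_address(uint64_t rt, uint64_t rs, unsigned sa) {
  uint64_t tmp = rs << sa;  // slli(tmp, rs, sa)
  return rt + tmp;          // Add64(rd, rt, tmp)
}
```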
1770 void TurboAssembler::ExtractBits(Register rt, Register rs, uint16_t pos,
1774 slli(rt, rs, 64 - (pos + size));
1776 srai(rt, rt, 64 - size);
1778 srli(rt, rt, 64 - size);
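ExtractBits isolates the size-bit field starting at bit pos: shift it to the top of the register, then shift back down with `srai` (sign-extending) or `srli` (zero-extending) depending on the requested extension. A runnable reference, assuming two's-complement arithmetic right shift on signed types (guaranteed since C++20, universal in practice):

```cpp
#include <cstdint>

int64_t extract_bits_signed(int64_t rs, unsigned pos, unsigned size) {
  int64_t t = rs << (64 - (pos + size));  // slli: field to the top
  return t >> (64 - size);                // srai: back down, sign-extended
}
uint64_t extract_bits_unsigned(uint64_t rs, unsigned pos, unsigned size) {
  uint64_t t = rs << (64 - (pos + size)); // slli
  return t >> (64 - size);                // srli: zero-extended
}
```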
2129 // slli(rt, rs, 64 - (pos + size));
2131 // srai(rt, rt, 64 - size);
2133 // srli(rt, rt, 64 - size);
2458 void TurboAssembler::CompareI(Register rd, Register rs, const Operand& rt,
2462 Seq(rd, rs, rt);
2465 Sne(rd, rs, rt);
2470 Sgt(rd, rs, rt);
2473 Sge(rd, rs, rt); // rs >= rt
2476 Slt(rd, rs, rt); // rs < rt
2479 Sle(rd, rs, rt); // rs <= rt
2484 Sgtu(rd, rs, rt); // rs > rt
2487 Sgeu(rd, rs, rt); // rs >= rt
2490 Sltu(rd, rs, rt); // rs < rt
2493 Sleu(rd, rs, rt); // rs <= rt
2800 #define BRANCH_ARGS_CHECK(cond, rs, rt) \
2801 DCHECK((cond == cc_always && rs == zero_reg && rt.rm() == zero_reg) || \
2802 (cond != cc_always && (rs != zero_reg || rt.rm() != zero_reg)))
2810 const Operand& rt, Label::Distance near_jump) {
2811 bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt);
2833 const Operand& rt, Label::Distance near_jump) {
2835 if (!BranchShortCheck(0, L, cond, rs, rt)) {
2839 BranchShort(&skip, neg_cond, rs, rt);
2852 BranchShort(&skip, neg_cond, rs, rt);
2860 BranchShort(L, cond, rs, rt);
2895 Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt,
2898 if (rt.is_reg()) {
2899 r2 = rt.rm();
2902 li(r2, rt);
2916 Register* scratch, const Operand& rt) {
2918 *scratch = GetRtAsRegisterHelper(rt, *scratch);
2924 Register rs, const Operand& rt) {
2929 if (!rt.is_reg()) {
2931 li(scratch, rt);
2933 scratch = rt.rm();
2944 // rs == rt
2945 if (rt.is_reg() && rs == rt.rm()) {
2954 // rs != rt
2955 if (rt.is_reg() && rs == rt.rm()) {
2965 // rs > rt
2966 if (rt.is_reg() && rs == rt.rm()) {
2974 // rs >= rt
2975 if (rt.is_reg() && rs == rt.rm()) {
2984 // rs < rt
2985 if (rt.is_reg() && rs == rt.rm()) {
2993 // rs <= rt
2994 if (rt.is_reg() && rs == rt.rm()) {
3005 // rs > rt
3006 if (rt.is_reg() && rs == rt.rm()) {
3014 // rs >= rt
3015 if (rt.is_reg() && rs == rt.rm()) {
3024 // rs < rt
3025 if (rt.is_reg() && rs == rt.rm()) {
3033 // rs <= rt
3034 if (rt.is_reg() && rs == rt.rm()) {
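Every condition case above special-cases `rs == rt.rm()`: comparing a register with itself makes the condition statically known, so eq, ge, le (and their unsigned forms) fold to an unconditional branch while ne, gt, lt emit no branch at all. A sketch of the folding rule:

```cpp
enum class Fold { kAlwaysTaken, kNeverTaken };

// Conditions that hold when both values are equal become unconditional
// branches; the rest are dropped entirely.
Fold fold_self_compare(bool cond_holds_on_equal) {
  return cond_holds_on_equal ? Fold::kAlwaysTaken : Fold::kNeverTaken;
}
```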
3052 Register rs, const Operand& rt) {
3053 BRANCH_ARGS_CHECK(cond, rs, rt);
3057 return BranchShortHelper(offset, nullptr, cond, rs, rt);
3060 return BranchShortHelper(0, L, cond, rs, rt);
3065 const Operand& rt) {
3066 BranchShortCheck(offset, nullptr, cond, rs, rt);
3070 const Operand& rt) {
3071 BranchShortCheck(0, L, cond, rs, rt);
3079 const Operand& rt) {
3080 bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt);
3102 const Operand& rt) {
3104 if (!BranchAndLinkShortCheck(0, L, cond, rs, rt)) {
3107 BranchShort(&skip, neg_cond, rs, rt);
3115 BranchShort(&skip, neg_cond, rs, rt);
3119 BranchAndLinkShortCheck(0, L, cond, rs, rt);
3144 const Operand& rt) {
3157 Operand(GetRtAsRegisterHelper(rt, scratch)));
3167 const Operand& rt) {
3168 BRANCH_ARGS_CHECK(cond, rs, rt);
3172 return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt);
3175 return BranchAndLinkShortHelper(0, L, cond, rs, rt);
3202 const Operand& rt) {
3208 BRANCH_ARGS_CHECK(cond, rs, rt);
3209 Branch(kInstrSize * 2, NegateCondition(cond), rs, rt);
3215 Condition cond, Register rs, const Operand& rt) {
3218 Branch(&skip, NegateCondition(cond), rs, rt);
3230 Register rs, const Operand& rt) {
3232 Jump(static_cast<intptr_t>(target), rmode, cond, rs, rt);
3236 Condition cond, Register rs, const Operand& rt) {
3250 Branch(&skip, NegateCondition(cond), rs, rt);
3261 Jump(t6, cond, rs, rt);
3268 Jump(t6, cond, rs, rt);
3274 Jump(static_cast<intptr_t>(target_index), rmode, cond, rs, rt);
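The `Branch(kInstrSize * 2, NegateCondition(cond), rs, rt)` idiom at lines 3209 and 3290 is how a conditional Jump/Call is synthesized: RISC-V has no conditional jal, so the emitter branches over the unconditional jump when the negated condition holds (an offset of two instruction sizes lands just past the single jump that follows). Schematically, in plain control flow:

```cpp
// Control-flow shape of the lowering; target stands in for the jal/jalr.
void conditional_call(bool cond, void (*target)()) {
  if (!cond) goto skip;  // Branch(..., NegateCondition(cond), rs, rt)
  target();              // the unconditional jump/call
skip:
  return;
}
```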
3284 const Operand& rt) {
3289 BRANCH_ARGS_CHECK(cond, rs, rt);
3290 Branch(kInstrSize * 2, NegateCondition(cond), rs, rt);
3311 Register rs, const Operand& rt) {
3313 Call(t6, cond, rs, rt);
3317 Condition cond, Register rs, const Operand& rt) {
3329 Branch(&skip, NegateCondition(cond), rs, rt);
3341 Call(t6, cond, rs, rt);
3348 Call(t6, cond, rs, rt);
3356 Call(static_cast<Address>(target_index), rmode, cond, rs, rt);
3456 void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt) {
3457 Jump(ra, cond, rs, rt);
4239 Operand rt) {
4240 if (FLAG_debug_code) Check(cc, reason, rs, rt);
4244 Operand rt) {
4246 BranchShort(&L, cc, rs, rt);