Lines matching refs:Rn in arch/arm64/net/bpf_jit.h (the arm64 eBPF JIT's instruction-encoding macros)
/* Unconditional branch (register) */
#define A64_BR(Rn)  aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_NOLINK)
#define A64_BLR(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_LINK)
#define A64_RET(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_RETURN)
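Each macro expands to a call that returns one 32-bit AArch64 instruction word; throughout the header, sf selects the 32-bit (W-register) or 64-bit (X-register) form via A64_VARIANT()/A64_SIZE(), which are defined earlier in the file and so do not appear in this match list. A JIT consumes the words through a small append helper. A minimal sketch, assuming a jit_ctx layout and emit() in the style of the JIT compiler in bpf_jit_comp.c (both are reproduced from memory here, not part of this header):

#include <linux/types.h>

/* Assumed context: an instruction buffer plus a write index. */
struct jit_ctx {
	__le32 *image;	/* NULL during the initial sizing pass */
	int idx;	/* next free instruction slot */
};

static inline void emit(const u32 insn, struct jit_ctx *ctx)
{
	if (ctx->image)
		ctx->image[ctx->idx] = cpu_to_le32(insn);
	ctx->idx++;
}

/* Example: finish an epilogue by returning through the link register. */
static void emit_return(struct jit_ctx *ctx)
{
	emit(A64_RET(A64_LR), ctx);	/* ret x30 */
}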
/* Load/store register (register offset) */
#define A64_LS_REG(Rt, Rn, Rm, size, type) \
	aarch64_insn_gen_load_store_reg(Rt, Rn, Rm, \
		AARCH64_INSN_SIZE_##size, \
		AARCH64_INSN_LDST_##type##_REG_OFFSET)
/* Load/store register (immediate offset) */
#define A64_LS_IMM(Rt, Rn, imm, size, type) \
	aarch64_insn_gen_load_store_imm(Rt, Rn, imm, \
		AARCH64_INSN_SIZE_##size, \
		AARCH64_INSN_LDST_##type##_IMM_OFFSET)
/* Load/store register pair */
#define A64_LS_PAIR(Rt, Rt2, Rn, offset, ls, type) \
	aarch64_insn_gen_load_store_pair(Rt, Rt2, Rn, offset, \
		AARCH64_INSN_VARIANT_64BIT, \
		AARCH64_INSN_LDST_##ls##_PAIR_##type)
/* Rn -= 16; Rn[0] = Rt; Rn[8] = Rt2; */
#define A64_PUSH(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, -16, STORE, PRE_INDEX)
/* Rt = Rn[0]; Rt2 = Rn[8]; Rn += 16; */
#define A64_POP(Rt, Rt2, Rn)  A64_LS_PAIR(Rt, Rt2, Rn, 16, LOAD, POST_INDEX)
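A64_PUSH/A64_POP are the pre-indexed stp and post-indexed ldp forms, which is exactly the shape a prologue and epilogue use to save and restore a callee-saved pair. A sketch of that pattern, reusing the assumed emit() above:

/* Save the frame record, establish x29, and undo both on the way out. */
static void emit_frame(struct jit_ctx *ctx)
{
	emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx); /* stp x29, x30, [sp, #-16]! */
	emit(A64_MOV(1, A64_FP, A64_SP), ctx);	     /* mov x29, sp */
	/* ... function body ... */
	emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);  /* ldp x29, x30, [sp], #16 */
}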
/* Load/store exclusive (A64_SIZE(sf) picks the 32- or 64-bit access size) */
#define A64_SIZE(sf) \
	((sf) ? AARCH64_INSN_SIZE_64 : AARCH64_INSN_SIZE_32)
#define A64_LSX(sf, Rt, Rn, Rs, type) \
	aarch64_insn_gen_load_store_ex(Rt, Rn, Rs, A64_SIZE(sf), \
				       AARCH64_INSN_LDST_##type)
/* Rt = [Rn]; (atomic) */
#define A64_LDXR(sf, Rt, Rn) \
	A64_LSX(sf, Rt, Rn, A64_ZR, LOAD_EX)
/* [Rn] = Rt; (atomic) Rs = [state] */
#define A64_STXR(sf, Rt, Rn, Rs) \
	A64_LSX(sf, Rt, Rn, Rs, STORE_EX)
/* [Rn] = Rt (store release); (atomic) Rs = [state] */
#define A64_STLXR(sf, Rt, Rn, Rs) \
	aarch64_insn_gen_load_store_ex(Rt, Rn, Rs, A64_SIZE(sf), \
				       AARCH64_INSN_LDST_STORE_REL_EX)
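The exclusive pair is the building block for a load-linked/store-conditional retry loop, which is how the JIT lowers atomics on CPUs without LSE. A sketch for an atomic add, with addr/src/tmp/state as assumed register numbers and the backward branch offset (-3 instructions, back to the ldxr) written out by hand; A64_CBNZ is the header's compare-and-branch generator, outside this match list:

/* *(u64 *)addr += src, via an LL/SC retry loop. */
static void emit_ll_sc_add(struct jit_ctx *ctx, u8 addr, u8 src, u8 tmp, u8 state)
{
	emit(A64_LDXR(1, tmp, addr), ctx);	  /* ldxr tmp, [addr] */
	emit(A64_ADD(1, tmp, tmp, src), ctx);	  /* add  tmp, tmp, src */
	emit(A64_STXR(1, tmp, addr, state), ctx); /* stxr w(state), tmp, [addr] */
	emit(A64_CBNZ(0, state, -3), ctx);	  /* retry until the store succeeds */
}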
/* LSE atomics: the ST forms are aliases of the LD forms with XZR as Rt */
#define A64_ST_OP(sf, Rn, Rs, op) \
	aarch64_insn_gen_atomic_ld_op(A64_ZR, Rn, Rs, \
		A64_SIZE(sf), AARCH64_INSN_MEM_ATOMIC_##op, \
		AARCH64_INSN_MEM_ORDER_NONE)
/* [Rn] <op>= Rs */
#define A64_STADD(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, ADD)
#define A64_STCLR(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, CLR)
#define A64_STEOR(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, EOR)
#define A64_STSET(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, SET)
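As the A64_ST_OP expansion shows, the store-only forms discard the old value (XZR destination) and need no ordering. This is the single-instruction LSE path for an atomic add whose result is not fetched; a sketch:

/* *(u32 *)addr += src on an LSE-capable CPU. */
static void emit_lse_add(struct jit_ctx *ctx, u8 addr, u8 src)
{
	emit(A64_STADD(0, addr, src), ctx);	/* stadd w(src), [x(addr)] */
}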
#define A64_LD_OP_AL(sf, Rt, Rn, Rs, op) \
	aarch64_insn_gen_atomic_ld_op(Rt, Rn, Rs, \
		A64_SIZE(sf), AARCH64_INSN_MEM_ATOMIC_##op, \
		AARCH64_INSN_MEM_ORDER_ACQREL)
/* Rt = [Rn] (load acquire); [Rn] <op>= Rs (store release) */
#define A64_LDADDAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, ADD)
#define A64_LDCLRAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, CLR)
#define A64_LDEORAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, EOR)
#define A64_LDSETAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, SET)
/* Rt = [Rn] (load acquire); [Rn] = Rs (store release) */
#define A64_SWPAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, SWP)
/* Rs = CAS(Rn, Rs, Rt) (load acquire & store release) */
#define A64_CASAL(sf, Rt, Rn, Rs) \
	aarch64_insn_gen_cas(Rt, Rn, Rs, A64_SIZE(sf), \
			     AARCH64_INSN_MEM_ORDER_ACQREL)
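The acquire+release (AL) forms return the old value in Rt, and CASAL follows the architectural operand roles: Rs holds the expected value going in and the observed value coming out, while Rt is the replacement. A sketch of a fetch-add and a cmpxchg-style compare-and-swap, with dst/src/r0 as assumed mapped register numbers:

static void emit_fetch_ops(struct jit_ctx *ctx, u8 dst, u8 src, u8 r0)
{
	/* src = atomic_fetch_add(dst, src), fully ordered */
	emit(A64_LDADDAL(1, src, dst, src), ctx); /* ldaddal x(src), x(src), [x(dst)] */
	/* r0 = cmpxchg(dst, r0, src): compare [dst] with r0, store src on match */
	emit(A64_CASAL(1, src, dst, r0), ctx);	  /* casal x(r0), x(src), [x(dst)] */
}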
/* Add/subtract (immediate) */
#define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
	aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \
		A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
/* Rd = Rn OP imm12 */
#define A64_ADD_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD)
#define A64_SUB_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB)
#define A64_ADDS_I(sf, Rd, Rn, imm12) \
	A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD_SETFLAGS)
#define A64_SUBS_I(sf, Rd, Rn, imm12) \
	A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB_SETFLAGS)
/* Rn + imm12; set condition flags */
#define A64_CMN_I(sf, Rn, imm12) A64_ADDS_I(sf, A64_ZR, Rn, imm12)
/* Rn - imm12; set condition flags */
#define A64_CMP_I(sf, Rn, imm12) A64_SUBS_I(sf, A64_ZR, Rn, imm12)
/* Rd = Rn */
#define A64_MOV(sf, Rd, Rn) A64_ADD_I(sf, Rd, Rn, 0)
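Defining A64_MOV as ADD #0 is deliberate: unlike the ORR-register mov alias, the add-immediate form accepts SP as an operand, so the same macro moves to and from the stack pointer. The immediate forms are also how a frame is sized, provided the offset fits the 12-bit immediate. A sketch:

/* Reserve and release a stack area (assumed to fit in imm12). */
static void emit_stack_frame(struct jit_ctx *ctx, u16 stack_size)
{
	emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx); /* sub sp, sp, #n */
	/* ... spills and calls ... */
	emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx); /* add sp, sp, #n */
}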
/* Bitfield move */
#define A64_BITFIELD(sf, Rd, Rn, immr, imms, type) \
	aarch64_insn_gen_bitfield(Rd, Rn, immr, imms, \
		A64_VARIANT(sf), AARCH64_INSN_BITFIELD_MOVE_##type)
/* Signed, with sign replication to left and zeros to right */
#define A64_SBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, SIGNED)
/* Unsigned, with zeros to left and right */
#define A64_UBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, UNSIGNED)
/* Rd = Rn << shift */
#define A64_LSL(sf, Rd, Rn, shift) ({	\
	int sz = (sf) ? 64 : 32;	\
	A64_UBFM(sf, Rd, Rn, (unsigned)-(shift) % sz, sz - 1 - (shift)); \
})
/* Rd = Rn >> shift */
#define A64_LSR(sf, Rd, Rn, shift) A64_UBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
/* Rd = Rn >> shift; signed */
#define A64_ASR(sf, Rd, Rn, shift) A64_SBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
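The shift macros lean on the architectural UBFM/SBFM aliases. A worked check of the encoding arithmetic for a 64-bit shift by 4:

/* Encoding check (values only, nothing emitted):
 *   A64_LSL(1, Rd, Rn, 4): immr = (unsigned)-4 % 64 = 60, imms = 63 - 4 = 59
 *     -> ubfm Rd, Rn, #60, #59, the architectural alias of lsl Rd, Rn, #4
 *   A64_LSR(1, Rd, Rn, 4): immr = 4, imms = 63
 *     -> ubfm Rd, Rn, #4, #63, the alias of lsr Rd, Rn, #4
 */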
/* Zero extend */
#define A64_UXTH(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 15)
#define A64_UXTW(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 31)
/* Sign extend */
#define A64_SXTB(sf, Rd, Rn) A64_SBFM(sf, Rd, Rn, 0, 7)
#define A64_SXTH(sf, Rd, Rn) A64_SBFM(sf, Rd, Rn, 0, 15)
#define A64_SXTW(sf, Rd, Rn) A64_SBFM(sf, Rd, Rn, 0, 31)
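The extends are simply bitfield moves starting at bit 0. Sign extension is what a movsx-style BPF move lowers to; a sketch, with dst/src as assumed mapped register numbers:

/* dst = (s64)(s32)src */
static void emit_movsx32(struct jit_ctx *ctx, u8 dst, u8 src)
{
	emit(A64_SXTW(1, dst, src), ctx);	/* sxtw x(dst), w(src) */
}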
/* Add/subtract (shifted register) */
#define A64_ADDSUB_SREG(sf, Rd, Rn, Rm, type) \
	aarch64_insn_gen_add_sub_shifted_reg(Rd, Rn, Rm, 0, \
		A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
/* Rd = Rn OP Rm */
#define A64_ADD(sf, Rd, Rn, Rm)  A64_ADDSUB_SREG(sf, Rd, Rn, Rm, ADD)
#define A64_SUB(sf, Rd, Rn, Rm)  A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB)
#define A64_SUBS(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB_SETFLAGS)
/* Rn - Rm; set condition flags */
#define A64_CMP(sf, Rn, Rm) A64_SUBS(sf, A64_ZR, Rn, Rm)
/* Data-processing (1 source) */
#define A64_DATA1(sf, Rd, Rn, type) aarch64_insn_gen_data1(Rd, Rn, \
	A64_VARIANT(sf), AARCH64_INSN_DATA1_##type)
/* Rd = BSWAPx(Rn) */
#define A64_REV16(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_16)
#define A64_REV32(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_32)
#define A64_REV64(Rd, Rn)     A64_DATA1(1, Rd, Rn, REVERSE_64)
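The reverse forms back byte-swap lowering. rev16 swaps the bytes within each halfword but leaves the upper halfword in the register, so a 16-bit swap needs an explicit zero-extend afterwards; a sketch:

/* dst = bswap16(dst), zero-extended into the full register. */
static void emit_bswap16(struct jit_ctx *ctx, u8 dst)
{
	emit(A64_REV16(0, dst, dst), ctx);	/* rev16 w(dst), w(dst) */
	emit(A64_UXTH(0, dst, dst), ctx);	/* uxth  w(dst), w(dst) */
}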
/* Data-processing (2 source) */
/* Rd = Rn OP Rm */
#define A64_DATA2(sf, Rd, Rn, Rm, type) aarch64_insn_gen_data2(Rd, Rn, Rm, \
	A64_VARIANT(sf), AARCH64_INSN_DATA2_##type)
#define A64_UDIV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, UDIV)
#define A64_SDIV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, SDIV)
#define A64_LSLV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSLV)
#define A64_LSRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSRV)
#define A64_ASRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, ASRV)
/* Data-processing (3 source) */
/* Rd = Ra + Rn * Rm */
#define A64_MADD(sf, Rd, Ra, Rn, Rm) aarch64_insn_gen_data3(Rd, Ra, Rn, Rm, \
	A64_VARIANT(sf), AARCH64_INSN_DATA3_MADD)
/* Rd = Ra - Rn * Rm */
#define A64_MSUB(sf, Rd, Ra, Rn, Rm) aarch64_insn_gen_data3(Rd, Ra, Rn, Rm, \
	A64_VARIANT(sf), AARCH64_INSN_DATA3_MSUB)
/* Rd = Rn * Rm */
#define A64_MUL(sf, Rd, Rn, Rm) A64_MADD(sf, Rd, A64_ZR, Rn, Rm)
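AArch64 has no remainder instruction, so a modulo is the classic udiv/msub pair, which these macros compose to directly. A sketch, with tmp as an assumed scratch register and src assumed non-zero:

/* dst = dst % src (unsigned) */
static void emit_umod(struct jit_ctx *ctx, int is64, u8 dst, u8 src, u8 tmp)
{
	emit(A64_UDIV(is64, tmp, dst, src), ctx);	/* tmp = dst / src */
	emit(A64_MSUB(is64, dst, dst, tmp, src), ctx);	/* dst = dst - tmp * src */
}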
/* Logical (shifted register) */
#define A64_LOGIC_SREG(sf, Rd, Rn, Rm, type) \
	aarch64_insn_gen_logical_shifted_reg(Rd, Rn, Rm, 0, \
		A64_VARIANT(sf), AARCH64_INSN_LOGIC_##type)
/* Rd = Rn OP Rm */
#define A64_AND(sf, Rd, Rn, Rm)  A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND)
#define A64_ORR(sf, Rd, Rn, Rm)  A64_LOGIC_SREG(sf, Rd, Rn, Rm, ORR)
#define A64_EOR(sf, Rd, Rn, Rm)  A64_LOGIC_SREG(sf, Rd, Rn, Rm, EOR)
#define A64_ANDS(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND_SETFLAGS)
/* Rn & Rm; set condition flags */
#define A64_TST(sf, Rn, Rm) A64_ANDS(sf, A64_ZR, Rn, Rm)
/* Rd = ~Rm (alias of ORN with A64_ZR as Rn) */
#define A64_MVN(sf, Rd, Rm)  \
	A64_LOGIC_SREG(sf, Rd, A64_ZR, Rm, ORN)
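A64_TST discards the AND result and keeps only the flags, which pairs with the header's conditional-branch generator (A64_B_, outside this match list) for test-and-branch lowering. A sketch, with the branch offset left symbolic:

/* if (dst & src) goto target;  (BPF_JSET-style) */
static void emit_jset(struct jit_ctx *ctx, int is64, u8 dst, u8 src, int jmp_offset)
{
	emit(A64_TST(is64, dst, src), ctx);		/* tst dst, src */
	emit(A64_B_(A64_COND_NE, jmp_offset), ctx);	/* b.ne <target> */
}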
/* Logical (immediate) */
#define A64_LOGIC_IMM(sf, Rd, Rn, imm, type) ({ \
	u64 imm64 = (sf) ? (u64)imm : (u64)(u32)imm; \
	aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_##type, \
		A64_VARIANT(sf), Rn, Rd, imm64); \
})
/* Rd = Rn OP imm */
#define A64_AND_I(sf, Rd, Rn, imm)  A64_LOGIC_IMM(sf, Rd, Rn, imm, AND)
#define A64_ORR_I(sf, Rd, Rn, imm)  A64_LOGIC_IMM(sf, Rd, Rn, imm, ORR)
#define A64_EOR_I(sf, Rd, Rn, imm)  A64_LOGIC_IMM(sf, Rd, Rn, imm, EOR)
#define A64_ANDS_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, AND_SETFLAGS)
/* Rn & imm; set condition flags */
#define A64_TST_I(sf, Rn, imm) A64_ANDS_I(sf, A64_ZR, Rn, imm)
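Not every constant is a valid AArch64 logical immediate, and the insn generators signal an unencodable operand by returning AARCH64_BREAK_FAULT, so callers probe the encoding and fall back to materializing the constant in a scratch register. A sketch; emit_a64_mov_i stands in for a mov-immediate helper and is an assumption here:

/* dst &= imm, with a register fallback for unencodable constants. */
static void emit_and_imm(struct jit_ctx *ctx, int is64, u8 dst, s32 imm, u8 tmp)
{
	u32 insn = A64_AND_I(is64, dst, dst, imm);

	if (insn != AARCH64_BREAK_FAULT) {
		emit(insn, ctx);			/* and dst, dst, #imm */
	} else {
		emit_a64_mov_i(is64, tmp, imm, ctx);	/* assumed helper */
		emit(A64_AND(is64, dst, dst, tmp), ctx);
	}
}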