Lines Matching refs:entry
45 copy_entry_size(const struct copy_entry *entry)
47 return (entry->flags & IR3_REG_HALF) ? 1 : 2;
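This helper counts a copy's footprint in half-register (16-bit) slots: a half copy occupies one slot, a full 32-bit copy two adjacent slots, which is the unit the use-count bookkeeping further down works in. A minimal self-contained sketch, with struct copy_entry and the IR3_REG_HALF value reduced to illustrative stand-ins:

#include <assert.h>

#define IR3_REG_HALF (1u << 0)   /* illustrative value, not the real ir3 flag */

struct copy_entry { unsigned flags; };

static unsigned
copy_entry_size(const struct copy_entry *entry)
{
   /* size in 16-bit physreg slots: 1 for a half copy, 2 for a full copy */
   return (entry->flags & IR3_REG_HALF) ? 1 : 2;
}

int main(void)
{
   struct copy_entry half = { .flags = IR3_REG_HALF };
   struct copy_entry full = { .flags = 0 };
   assert(copy_entry_size(&half) == 1);
   assert(copy_entry_size(&full) == 2);
   return 0;
}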
86 const struct copy_entry *entry)
88 assert(!entry->src.flags);
90 if (entry->flags & IR3_REG_HALF) {
100 if (entry->src.reg >= RA_HALF_SIZE) {
102 physreg_t tmp = entry->dst < 2 ? 2 : 0;
107 .src = {.reg = entry->src.reg & ~1u},
109 .flags = entry->flags & ~IR3_REG_HALF,
116 (entry->src.reg & ~1u) == (entry->dst & ~1u) ?
117 tmp + (entry->dst & 1u) : entry->dst;
122 .src = {.reg = tmp + (entry->src.reg & 1)},
124 .flags = entry->flags,
130 .src = {.reg = entry->src.reg & ~1u},
132 .flags = entry->flags & ~IR3_REG_HALF,
140 if (entry->dst >= RA_HALF_SIZE) {
143 .src = {.reg = entry->dst},
144 .dst = entry->src.reg,
145 .flags = entry->flags,
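Lines 100-145 deal with half registers above the directly addressable half range (RA_HALF_SIZE and up): the full register containing the source is first swapped into a two-slot scratch area chosen not to alias the destination, the swap is redone against the relocated half, and an out-of-range destination is handled by flipping src and dst and recursing (lines 143-145). A sketch of just the scratch choice from line 102; pick_scratch is a hypothetical name:

/* Pick a 2-slot scratch region that cannot alias the destination:
 * if dst sits in slots 0-1, stage through slots 2-3, else through 0-1. */
static unsigned
pick_scratch(unsigned dst)
{
   return dst < 2 ? 2 : 0;
}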
151 unsigned src_num = ra_physreg_to_num(entry->src.reg, entry->flags);
152 unsigned dst_num = ra_physreg_to_num(entry->dst, entry->flags);
161 assert(!(entry->flags & IR3_REG_SHARED));
162 do_xor(instr, dst_num, dst_num, src_num, entry->flags);
163 do_xor(instr, src_num, src_num, dst_num, entry->flags);
164 do_xor(instr, dst_num, dst_num, src_num, entry->flags);
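On generations without SWZ, the three do_xor calls at lines 162-164 are the classic in-place XOR swap. A standalone model on plain integers (xor_swap is an illustrative name, not an ir3 helper); note it assumes the two registers are distinct, since aliased operands would zero both:

#include <assert.h>
#include <stdint.h>

static void
xor_swap(uint32_t *dst, uint32_t *src)
{
   /* after the three xors, the two values have traded places
    * without needing a temporary register */
   *dst ^= *src;
   *src ^= *dst;
   *dst ^= *src;
}

int main(void)
{
   uint32_t a = 0xdeadbeef, b = 0x12345678;
   xor_swap(&a, &b);
   assert(a == 0x12345678 && b == 0xdeadbeef);
   return 0;
}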
172 (entry->flags & IR3_REG_SHARED) ? OPC_SWZ_SHARED_MACRO : OPC_SWZ;
174 ir3_dst_create(swz, dst_num, entry->flags);
175 ir3_dst_create(swz, src_num, entry->flags);
176 ir3_src_create(swz, src_num, entry->flags);
177 ir3_src_create(swz, dst_num, entry->flags);
178 swz->cat1.dst_type = (entry->flags & IR3_REG_HALF) ? TYPE_U16 : TYPE_U32;
179 swz->cat1.src_type = (entry->flags & IR3_REG_HALF) ? TYPE_U16 : TYPE_U32;
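The SWZ path at lines 172-179 instead writes both destinations from both sources in a single instruction, i.e. the parallel assignment (dst, src) = (src, dst). A C model of that semantic (swz_model is hypothetical): both reads happen before either write, which is what makes the in-place swap safe:

#include <stdint.h>

static void
swz_model(uint32_t regs[], unsigned dst, unsigned src)
{
   uint32_t first  = regs[src];   /* first src read   */
   uint32_t second = regs[dst];   /* second src read  */
   regs[dst] = first;             /* first dst write  */
   regs[src] = second;            /* second dst write */
}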
187 const struct copy_entry *entry)
189 if (entry->flags & IR3_REG_HALF) {
191 if (entry->dst >= RA_HALF_SIZE) {
193 physreg_t tmp = !entry->src.flags && entry->src.reg < 2 ? 2 : 0;
197 .src = {.reg = entry->dst & ~1u},
199 .flags = entry->flags & ~IR3_REG_HALF,
205 struct copy_src src = entry->src;
206 if (!src.flags && (src.reg & ~1u) == (entry->dst & ~1u))
212 .dst = tmp + (entry->dst & 1),
213 .flags = entry->flags,
218 .src = {.reg = entry->dst & ~1u},
220 .flags = entry->flags & ~IR3_REG_HALF,
225 if (!entry->src.flags && entry->src.reg >= RA_HALF_SIZE) {
226 unsigned src_num = ra_physreg_to_num(entry->src.reg & ~1u,
227 entry->flags & ~IR3_REG_HALF);
228 unsigned dst_num = ra_physreg_to_num(entry->dst, entry->flags);
230 if (entry->src.reg % 2 == 0) {
234 ir3_dst_create(cov, dst_num, entry->flags);
235 ir3_src_create(cov, src_num, entry->flags & ~IR3_REG_HALF);
243 ir3_dst_create(shr, dst_num, entry->flags);
244 ir3_src_create(shr, src_num, entry->flags & ~IR3_REG_HALF);
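Lines 225-244 read a half-register source that sits above the directly addressable range by operating on the full 32-bit register containing it: an even half (line 230) is the low 16 bits and is truncated out with a cov, an odd half is the high 16 bits and is extracted with a shift right by 16. A model of the extraction, assuming half-reg k lives in full reg k/2:

#include <assert.h>
#include <stdint.h>

static uint16_t
read_high_half(const uint32_t *full_regs, unsigned half_reg)
{
   uint32_t word = full_regs[half_reg / 2];
   /* even half -> low 16 bits (the cov path),
    * odd half  -> high 16 bits (the shr-by-16 path) */
   return (half_reg & 1) ? (uint16_t)(word >> 16) : (uint16_t)word;
}

int main(void)
{
   uint32_t regs[1] = { 0xaaaa5555u };
   assert(read_high_half(regs, 0) == 0x5555);
   assert(read_high_half(regs, 1) == 0xaaaa);
   return 0;
}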
252 unsigned src_num = ra_physreg_to_num(entry->src.reg, entry->flags);
253 unsigned dst_num = ra_physreg_to_num(entry->dst, entry->flags);
257 (entry->flags & IR3_REG_SHARED) ? OPC_READ_FIRST_MACRO : OPC_MOV;
259 ir3_dst_create(mov, dst_num, entry->flags);
260 ir3_src_create(mov, src_num, entry->flags | entry->src.flags);
261 mov->cat1.dst_type = (entry->flags & IR3_REG_HALF) ? TYPE_U16 : TYPE_U32;
262 mov->cat1.src_type = (entry->flags & IR3_REG_HALF) ? TYPE_U16 : TYPE_U32;
263 if (entry->src.flags & IR3_REG_IMMED)
264 mov->srcs[0]->uim_val = entry->src.imm;
265 else if (entry->src.flags & IR3_REG_CONST)
266 mov->srcs[0]->num = entry->src.const_num;
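The general path at lines 252-266 is a single mov whose source may be a register, an immediate, or a const-file slot (shared destinations go through the read-first macro instead of a plain mov). A reduced model of the source dispatch; the enum, struct, and fetch() are illustrative stand-ins for the ir3 flag checks:

#include <stdint.h>

enum src_kind { SRC_REG, SRC_IMMED, SRC_CONST };

struct copy_src_model {
   enum src_kind kind;
   unsigned reg;        /* SRC_REG */
   uint32_t imm;        /* SRC_IMMED */
   unsigned const_num;  /* SRC_CONST */
};

/* fetch the copied value from whichever kind of source the entry carries */
static uint32_t
fetch(const struct copy_src_model *s, const uint32_t regs[],
      const uint32_t consts[])
{
   switch (s->kind) {
   case SRC_IMMED: return s->imm;
   case SRC_CONST: return consts[s->const_num];
   default:        return regs[s->reg];
   }
}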
285 entry_blocked(struct copy_entry *entry, struct copy_ctx *ctx)
287 for (unsigned i = 0; i < copy_entry_size(entry); i++) {
288 if (ctx->physreg_use_count[entry->dst + i] != 0)
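entry_blocked() reports whether any destination slot of the entry is still read by a pending copy; until every such use count drops to zero, writing the destination would clobber a value another copy needs. A reduced model over a bare use-count array (types and sizes are illustrative):

#include <stdbool.h>

#define SLOTS 8

/* a copy of 'size' slots into 'dst' is blocked while any destination
 * slot is still the source of a not-yet-emitted copy */
static bool
blocked(const unsigned use_count[SLOTS], unsigned dst, unsigned size)
{
   for (unsigned i = 0; i < size; i++) {
      if (use_count[dst + i] != 0)
         return true;
   }
   return false;
}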
296 split_32bit_copy(struct copy_ctx *ctx, struct copy_entry *entry)
298 assert(!entry->done);
299 assert(!(entry->src.flags & (IR3_REG_IMMED | IR3_REG_CONST)));
300 assert(copy_entry_size(entry) == 2);
303 new_entry->dst = entry->dst + 1;
304 new_entry->src.flags = entry->src.flags;
305 new_entry->src.reg = entry->src.reg + 1;
307 entry->flags |= IR3_REG_HALF;
308 new_entry->flags = entry->flags;
309 ctx->physreg_dst[entry->dst + 1] = new_entry;
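split_32bit_copy() turns one full copy into two independent half copies: the original entry is narrowed to the low half in place (line 307) and a fresh entry takes the high half at dst+1/src+1, with the reverse map updated to point at the new writer (line 309). A standalone sketch over simplified entries (HALF_FLAG and the struct are stand-ins):

#define HALF_FLAG 1u   /* stand-in for IR3_REG_HALF */

struct entry_model { unsigned dst, src, flags; };

/* narrow *e to its low half and fill *hi with the matching high-half copy */
static void
split_full_copy(struct entry_model *e, struct entry_model *hi)
{
   hi->dst = e->dst + 1;
   hi->src = e->src + 1;
   e->flags |= HALF_FLAG;
   hi->flags = e->flags;
}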
321 struct copy_entry *entry = &ctx->entries[i];
322 for (unsigned j = 0; j < copy_entry_size(entry); j++) {
323 if (!entry->src.flags)
324 ctx->physreg_use_count[entry->src.reg + j]++;
327 assert(!ctx->physreg_dst[entry->dst + j]);
328 ctx->physreg_dst[entry->dst + j] = entry;
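The loop at lines 321-328 primes the bookkeeping: every slot a register-sourced copy reads gets its use count bumped, and every destination slot records the entry that writes it, asserting along the way that no two entries write the same slot. A condensed self-contained model (field and array names are illustrative):

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define SLOTS 8

struct ent { unsigned dst, src, size; bool src_is_reg; };

static void
setup(struct ent *e, unsigned n, unsigned use_count[SLOTS],
      struct ent *dst_entry[SLOTS])
{
   for (unsigned i = 0; i < n; i++) {
      for (unsigned j = 0; j < e[i].size; j++) {
         if (e[i].src_is_reg)
            use_count[e[i].src + j]++;       /* slot is still read */
         assert(!dst_entry[e[i].dst + j]);   /* one writer per slot */
         dst_entry[e[i].dst + j] = &e[i];
      }
   }
}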
346 struct copy_entry *entry = &ctx->entries[i];
347 if (!entry->done && !entry_blocked(entry, ctx)) {
348 entry->done = true;
350 do_copy(compiler, instr, entry);
351 for (unsigned j = 0; j < copy_entry_size(entry); j++) {
352 if (!entry->src.flags)
353 ctx->physreg_use_count[entry->src.reg + j]--;
354 ctx->physreg_dst[entry->dst + j] = NULL;
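The loop at lines 346-354 greedily emits every copy whose destination is no longer read, decrementing the source use counts as it goes; rescanning until nothing changes handles chains such as r1 <- r0, r2 <- r1, which have to be emitted back-to-front. Once it stalls, only cycles remain. A self-contained model on an integer register file (struct and function names are illustrative, not the ir3 API):

#include <assert.h>
#include <stdbool.h>

#define SLOTS 8

struct pcopy { unsigned dst, src; bool done; };

/* emit every copy whose destination is free, until no more progress;
 * returns how many copies remain blocked (> 0 means cycles are left) */
static unsigned
emit_unblocked(struct pcopy *c, unsigned n, unsigned regs[SLOTS])
{
   unsigned use[SLOTS] = {0}, left = n;
   for (unsigned i = 0; i < n; i++)
      use[c[i].src]++;

   bool progress;
   do {
      progress = false;
      for (unsigned i = 0; i < n; i++) {
         if (!c[i].done && use[c[i].dst] == 0) {
            regs[c[i].dst] = regs[c[i].src];   /* safe: nobody reads dst */
            use[c[i].src]--;
            c[i].done = true;
            left--;
            progress = true;
         }
      }
   } while (progress);
   return left;
}

int main(void)
{
   unsigned regs[SLOTS] = {10, 11, 12, 13, 14, 15, 16, 17};
   /* the chain r1 <- r0, r2 <- r1: r2 <- r1 must be emitted first */
   struct pcopy chain[] = {{.dst = 1, .src = 0}, {.dst = 2, .src = 1}};
   assert(emit_unblocked(chain, 2, regs) == 0);
   assert(regs[1] == 10 && regs[2] == 11);
   return 0;
}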
373 struct copy_entry *entry = &ctx->entries[i];
374 if (entry->done || entry->flags & IR3_REG_HALF)
377 if (((ctx->physreg_use_count[entry->dst] == 0 ||
378 ctx->physreg_use_count[entry->dst + 1] == 0)) &&
379 !(entry->src.flags & (IR3_REG_IMMED | IR3_REG_CONST))) {
380 split_32bit_copy(ctx, entry);
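Lines 373-380 cover a wrinkle unique to mixed half/full copies: a full copy can be blocked as a whole even though one of its two destination slots is already free. Splitting it with split_32bit_copy() lets the free half proceed on the next pass; immediate and const sources are excluded since the split works by bumping the source register by one slot (line 305). A sketch of just the condition (worth_splitting is a hypothetical name):

#include <stdbool.h>

/* a register-sourced full copy with either destination slot free is
 * worth splitting into two half copies */
static bool
worth_splitting(const unsigned use_count[], unsigned dst, bool src_is_reg)
{
   return src_is_reg &&
          (use_count[dst] == 0 || use_count[dst + 1] == 0);
}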
390 * remaining entry, it has a destination n_2, which (because every
423 struct copy_entry *entry = &ctx->entries[i];
424 if (entry->done)
427 assert(!entry->src.flags);
430 if (entry->dst == entry->src.reg) {
431 entry->done = true;
435 do_swap(compiler, instr, entry);
440 if (entry->flags & IR3_REG_HALF) {
447 if (blocking->src.reg <= entry->dst &&
448 blocking->src.reg + 1 >= entry->dst &&
462 if (blocking->src.reg >= entry->dst &&
463 blocking->src.reg < entry->dst + copy_entry_size(entry)) {
465 entry->src.reg + (blocking->src.reg - entry->dst);
469 entry->done = true;
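The final loop (lines 423-469) breaks the remaining cycles with swaps: each pending entry is resolved by swapping its src and dst, after which every other entry still reading from the swapped destination is redirected to the value's new home in the source (lines 462-465; lines 440-448 handle the extra case of a full copy overlapping a swapped half). A self-contained model on integers, restricted to single-slot copies for brevity (names are illustrative):

#include <assert.h>
#include <stdbool.h>

#define SLOTS 8

struct cyc_copy { unsigned dst, src; bool done; };

/* resolve a cyclic set of copies using only swaps (no free register) */
static void
resolve_cycles(struct cyc_copy *c, unsigned n, unsigned regs[SLOTS])
{
   for (unsigned i = 0; i < n; i++) {
      if (c[i].done)
         continue;
      if (c[i].dst == c[i].src) {   /* cycle closed itself */
         c[i].done = true;
         continue;
      }

      /* swap regs[dst] <-> regs[src] */
      unsigned tmp = regs[c[i].dst];
      regs[c[i].dst] = regs[c[i].src];
      regs[c[i].src] = tmp;

      /* anything still sourcing dst now finds its value in src */
      for (unsigned j = 0; j < n; j++) {
         if (j != i && !c[j].done && c[j].src == c[i].dst)
            c[j].src = c[i].src;
      }
      c[i].done = true;
   }
}

int main(void)
{
   unsigned regs[SLOTS] = {10, 11, 12, 0, 0, 0, 0, 0};
   /* 3-cycle: r0 <- r1, r1 <- r2, r2 <- r0 */
   struct cyc_copy cyc[] = {
      {.dst = 0, .src = 1}, {.dst = 1, .src = 2}, {.dst = 2, .src = 0},
   };
   resolve_cycles(cyc, 3, regs);
   assert(regs[0] == 11 && regs[1] == 12 && regs[2] == 10);
   return 0;
}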