/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include <math.h>
#include "util/half_float.h"
#include "util/u_math.h"

#include "ir3.h"
#include "ir3_compiler.h"
#include "ir3_shader.h"

#define swap(a, b)                                                             \
   do {                                                                        \
      __typeof(a) __tmp = (a);                                                 \
      (a) = (b);                                                               \
      (b) = __tmp;                                                             \
   } while (0)

/*
 * Copy Propagate:
 */
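
/* A minimal sketch of what this pass does (illustrative ir3-style asm, not
 * actual compiler output): given
 *
 *    mov.f32f32 r0.x, r1.y
 *    add.f r2.x, r0.x, r0.z
 *
 * the consumer can reference the mov's source directly:
 *
 *    add.f r2.x, r1.y, r0.z
 *
 * leaving the mov dead for later cleanup.
 */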

struct ir3_cp_ctx {
   struct ir3 *shader;
   struct ir3_shader_variant *so;
   bool progress;
};

/* is it a type-preserving mov, with ok flags?
 *
 * @instr: the mov to consider removing
 * @dst_instr: the instruction consuming the mov (instr)
 *
 * TODO maybe drop allow_flags since this is only false when dst is
 * NULL (ie. outputs)
 */
static bool
is_eligible_mov(struct ir3_instruction *instr,
                struct ir3_instruction *dst_instr, bool allow_flags)
{
   if (is_same_type_mov(instr)) {
      struct ir3_register *dst = instr->dsts[0];
      struct ir3_register *src = instr->srcs[0];
      struct ir3_instruction *src_instr = ssa(src);

      /* only if mov src is SSA (not const/immed): */
      if (!src_instr)
         return false;

      /* no indirect: */
      if (dst->flags & IR3_REG_RELATIV)
         return false;
      if (src->flags & IR3_REG_RELATIV)
         return false;

      if (src->flags & IR3_REG_ARRAY)
         return false;

      if (!allow_flags)
         if (src->flags & (IR3_REG_FABS | IR3_REG_FNEG | IR3_REG_SABS |
                           IR3_REG_SNEG | IR3_REG_BNOT))
            return false;

      return true;
   }
   return false;
}

/* we can end up with extra cmps.s from the frontend, which uses a
 *
 *    cmps.s p0.x, cond, 0
 *
 * as a way to mov into the predicate register. But frequently 'cond'
 * is itself a cmps.s/cmps.f/cmps.u. So detect this special case.
 */
static bool
is_foldable_double_cmp(struct ir3_instruction *cmp)
{
   struct ir3_instruction *cond = ssa(cmp->srcs[0]);
   return (cmp->dsts[0]->num == regid(REG_P0, 0)) && cond &&
          (cmp->srcs[1]->flags & IR3_REG_IMMED) &&
          (cmp->srcs[1]->iim_val == 0) &&
          (cmp->cat2.condition == IR3_COND_NE) &&
          (!cond->address || cond->address->def->instr->block == cmp->block);
}

/* propagate register flags from src to dst. Negates need special
 * handling to cancel each other out.
 */
static void
combine_flags(unsigned *dstflags, struct ir3_instruction *src)
{
   unsigned srcflags = src->srcs[0]->flags;

   /* if what we are combining into already has (abs) flags,
    * we can drop (neg) from src:
    */
   if (*dstflags & IR3_REG_FABS)
      srcflags &= ~IR3_REG_FNEG;
   if (*dstflags & IR3_REG_SABS)
      srcflags &= ~IR3_REG_SNEG;

   if (srcflags & IR3_REG_FABS)
      *dstflags |= IR3_REG_FABS;
   if (srcflags & IR3_REG_SABS)
      *dstflags |= IR3_REG_SABS;
   if (srcflags & IR3_REG_FNEG)
      *dstflags ^= IR3_REG_FNEG;
   if (srcflags & IR3_REG_SNEG)
      *dstflags ^= IR3_REG_SNEG;
   if (srcflags & IR3_REG_BNOT)
      *dstflags ^= IR3_REG_BNOT;

   *dstflags &= ~IR3_REG_SSA;
   *dstflags |= srcflags & IR3_REG_SSA;
   *dstflags |= srcflags & IR3_REG_CONST;
   *dstflags |= srcflags & IR3_REG_IMMED;
   *dstflags |= srcflags & IR3_REG_RELATIV;
   *dstflags |= srcflags & IR3_REG_ARRAY;
   *dstflags |= srcflags & IR3_REG_SHARED;

   /* if src of the src is boolean we can drop the (abs) since we know
    * the source value is already a positive integer. This cleans
    * up the absnegs that get inserted when converting between nir and
    * native boolean (see ir3_b2n/n2b)
    */
   struct ir3_instruction *srcsrc = ssa(src->srcs[0]);
   if (srcsrc && is_bool(srcsrc))
      *dstflags &= ~IR3_REG_SABS;
}
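
/* Worked example for combine_flags() (illustrative, not compiler output):
 * folding `mov dst, (neg)a` into a consumer that reads `(neg)dst` XORs the
 * FNEG bits, so the negates cancel and the consumer just reads `a`; folding
 * it into a consumer that reads `(abs)dst` drops the incoming (neg)
 * entirely, since |-a| == |a|.
 */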

/* Tries lowering an immediate register argument to a const buffer access by
 * adding to the list of immediates to be pushed to the const buffer when
 * switching to this shader.
 */
static bool
lower_immed(struct ir3_cp_ctx *ctx, struct ir3_instruction *instr, unsigned n,
            struct ir3_register *reg, unsigned new_flags)
{
   if (!(new_flags & IR3_REG_IMMED))
      return false;

   new_flags &= ~IR3_REG_IMMED;
   new_flags |= IR3_REG_CONST;

   if (!ir3_valid_flags(instr, n, new_flags))
      return false;

   reg = ir3_reg_clone(ctx->shader, reg);

   /* Half constant registers seem to handle only 32-bit values
    * within floating-point opcodes, so convert back to 32-bit values.
    */
   bool f_opcode = is_cat2_float(instr->opc) || is_cat3_float(instr->opc);
   if (f_opcode && (new_flags & IR3_REG_HALF))
      reg->uim_val = fui(_mesa_half_to_float(reg->uim_val));

   /* in some cases, there are restrictions on (abs)/(neg) plus const..
    * so just evaluate those and clear the flags:
    */
   if (new_flags & IR3_REG_SABS) {
      reg->iim_val = abs(reg->iim_val);
      new_flags &= ~IR3_REG_SABS;
   }

   if (new_flags & IR3_REG_FABS) {
      reg->fim_val = fabs(reg->fim_val);
      new_flags &= ~IR3_REG_FABS;
   }

   if (new_flags & IR3_REG_SNEG) {
      reg->iim_val = -reg->iim_val;
      new_flags &= ~IR3_REG_SNEG;
   }

   if (new_flags & IR3_REG_FNEG) {
      reg->fim_val = -reg->fim_val;
      new_flags &= ~IR3_REG_FNEG;
   }

   /* Reallocate for 4 more elements whenever it's necessary. Note that ir3
    * printing relies on having groups of 4 dwords, so we fill the unused
    * slots with a dummy value.
    */
   struct ir3_const_state *const_state = ir3_const_state(ctx->so);
   if (const_state->immediates_count == const_state->immediates_size) {
      const_state->immediates = rerzalloc(
         const_state, const_state->immediates,
         __typeof__(const_state->immediates[0]), const_state->immediates_size,
         const_state->immediates_size + 4);
      const_state->immediates_size += 4;

      for (int i = const_state->immediates_count;
           i < const_state->immediates_size; i++)
         const_state->immediates[i] = 0xd0d0d0d0;
   }

   /* Reuse an existing entry if this immediate was already pushed: */
   int i;
   for (i = 0; i < const_state->immediates_count; i++) {
      if (const_state->immediates[i] == reg->uim_val)
         break;
   }

   if (i == const_state->immediates_count) {
      /* Add on a new immediate to be pushed, if we have space left in the
       * constbuf.
       */
      if (const_state->offsets.immediate + const_state->immediates_count / 4 >=
          ir3_max_const(ctx->so))
         return false;

      const_state->immediates[i] = reg->uim_val;
      const_state->immediates_count++;
   }

   reg->flags = new_flags;
   reg->num = i + (4 * const_state->offsets.immediate);

   instr->srcs[n] = reg;

   return true;
}
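
/* To make the addressing above concrete (a sketch, assuming the usual ir3
 * encoding of num / 4 as the const vec4 slot and num % 4 as the component):
 * with offsets.immediate == 8 and a dedup hit at i == 5, reg->num becomes
 * 5 + 4 * 8 == 37, ie. the src is rewritten to c9.y.
 */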

static void
unuse(struct ir3_instruction *instr)
{
   assert(instr->use_count > 0);

   if (--instr->use_count == 0) {
      struct ir3_block *block = instr->block;

      instr->barrier_class = 0;
      instr->barrier_conflict = 0;

      /* we don't want to remove anything in keeps (which could
       * be things like array stores)
       */
      for (unsigned i = 0; i < block->keeps_count; i++) {
         assert(block->keeps[i] != instr);
      }
   }
}

/**
 * Handles the special case of the 2nd src (n == 1) to "normal" mad
 * instructions, which cannot reference a constant. See if it is
 * possible to swap the 1st and 2nd sources.
 */
static bool
try_swap_mad_two_srcs(struct ir3_instruction *instr, unsigned new_flags)
{
   if (!is_mad(instr->opc))
      return false;

   /* If we've already tried, nothing more to gain.. we will only
    * have previously swapped if the original 2nd src was const or
    * immed. So swapping back won't improve anything and could
    * result in an infinite "progress" loop.
    */
   if (instr->cat3.swapped)
      return false;

   /* cat3 doesn't encode immediate, but we can lower immediate
    * to const if that helps:
    */
   if (new_flags & IR3_REG_IMMED) {
      new_flags &= ~IR3_REG_IMMED;
      new_flags |= IR3_REG_CONST;
   }

   /* If the reason we couldn't fold without swapping is something
    * other than const source, then swapping won't help:
    */
   if (!(new_flags & IR3_REG_CONST))
      return false;

   instr->cat3.swapped = true;

   /* NOTE: pre-swap first two srcs before valid_flags(),
    * which might try to dereference the n'th src:
    */
   swap(instr->srcs[0], instr->srcs[1]);

   bool valid_swap =
      /* can we propagate mov if we move 2nd src to first? */
      ir3_valid_flags(instr, 0, new_flags) &&
      /* and does first src fit in second slot? */
      ir3_valid_flags(instr, 1, instr->srcs[1]->flags);

   if (!valid_swap) {
      /* put things back the way they were: */
      swap(instr->srcs[0], instr->srcs[1]);
   } /* otherwise leave things swapped */

   return valid_swap;
}
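
/* For example (illustrative sketch, not compiler output): folding a const
 * into the 2nd src of
 *
 *    mad.f32 r0.x, r1.x, r2.x, r3.x
 *
 * (where r2.x is really a mov from c0.x) is not encodable, but because the
 * two multiplicands of a plain mad are commutative, swapping gives the
 * encodable
 *
 *    mad.f32 r0.x, c0.x, r1.x, r3.x
 */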

/* Values that are uniform inside a loop can become divergent outside
 * it if the loop has a divergent trip count. This means that we can't
 * propagate a copy from a shared to a non-shared register if it would
 * make the shared reg's live range extend outside of its loop. Users
 * outside the loop would see the value for the thread(s) that last
 * exited the loop, rather than for their own thread.
 */
static bool
is_valid_shared_copy(struct ir3_instruction *dst_instr,
                     struct ir3_instruction *src_instr,
                     struct ir3_register *src_reg)
{
   return !(src_reg->flags & IR3_REG_SHARED) ||
          dst_instr->block->loop_id == src_instr->block->loop_id;
}

/**
 * Handle cp for a given src register. This additionally handles
 * the cases of collapsing immediate/const (which replace the src
 * register with a non-ssa src) or collapsing mov's from relative
 * src (which also needs to fix up the address src referenced by the
 * instruction).
 */
static bool
reg_cp(struct ir3_cp_ctx *ctx, struct ir3_instruction *instr,
       struct ir3_register *reg, unsigned n)
{
   struct ir3_instruction *src = ssa(reg);

   if (is_eligible_mov(src, instr, true)) {
      /* simple case, no immed/const/relativ, only mov's w/ ssa src: */
      struct ir3_register *src_reg = src->srcs[0];
      unsigned new_flags = reg->flags;

      if (!is_valid_shared_copy(instr, src, src_reg))
         return false;

      combine_flags(&new_flags, src);

      if (ir3_valid_flags(instr, n, new_flags)) {
         if (new_flags & IR3_REG_ARRAY) {
            assert(!(reg->flags & IR3_REG_ARRAY));
            reg->array = src_reg->array;
         }
         reg->flags = new_flags;
         reg->def = src_reg->def;

         instr->barrier_class |= src->barrier_class;
         instr->barrier_conflict |= src->barrier_conflict;

         unuse(src);
         reg->def->instr->use_count++;

         return true;
      }
   } else if ((is_same_type_mov(src) || is_const_mov(src)) &&
              /* cannot collapse const/immed/etc into control flow: */
              opc_cat(instr->opc) != 0) {
      /* immed/const/etc cases, which require some special handling: */
      struct ir3_register *src_reg = src->srcs[0];
      unsigned new_flags = reg->flags;

      if (!is_valid_shared_copy(instr, src, src_reg))
         return false;

      if (src_reg->flags & IR3_REG_ARRAY)
         return false;

      combine_flags(&new_flags, src);

      if (!ir3_valid_flags(instr, n, new_flags)) {
         /* See if lowering an immediate to const would help. */
         if (lower_immed(ctx, instr, n, src_reg, new_flags))
            return true;

         /* special case for "normal" mad instructions, we can
          * try swapping the first two args if that fits better.
          *
          * the "plain" MAD's (ie. the ones that don't shift first
          * src prior to multiply) can swap their first two srcs if
          * src[0] is !CONST and src[1] is CONST:
          */
         if ((n == 1) && try_swap_mad_two_srcs(instr, new_flags)) {
            return true;
         } else {
            return false;
         }
      }

      /* Here we handle the special case of mov from
       * CONST and/or RELATIV. These need to be handled
       * specially, because in the case of move from CONST
       * there is no src ir3_instruction so we need to
       * replace the ir3_register. And in the case of
       * RELATIV we need to handle the address register
       * dependency.
       */
      if (src_reg->flags & IR3_REG_CONST) {
         /* an instruction cannot reference two different
          * address registers:
          */
         if ((src_reg->flags & IR3_REG_RELATIV) &&
             conflicts(instr->address, reg->def->instr->address))
            return false;

         /* These macros expand to a mov in an if statement */
         if ((src_reg->flags & IR3_REG_RELATIV) &&
             is_subgroup_cond_mov_macro(instr))
            return false;

         /* This seems to be a hw bug, or something where the timings
          * just somehow don't work out. This restriction may only
          * apply if the first src is also CONST.
          */
         if ((opc_cat(instr->opc) == 3) && (n == 2) &&
             (src_reg->flags & IR3_REG_RELATIV) && (src_reg->array.offset == 0))
            return false;

         /* When narrowing a constant from 32b to 16b, it seems
          * to work only for float. So we should do this only with
          * float opcodes.
          */
         if (src->cat1.dst_type == TYPE_F16) {
            /* TODO: should we have a way to tell phi/collect to use a
             * float move so that this is legal?
             */
            if (is_meta(instr))
               return false;
            if (instr->opc == OPC_MOV && !type_float(instr->cat1.src_type))
               return false;
            if (!is_cat2_float(instr->opc) && !is_cat3_float(instr->opc))
               return false;
         } else if (src->cat1.dst_type == TYPE_U16) {
            /* Since we set CONSTANT_DEMOTION_ENABLE, a float reference of
             * what was a U16 value read from the constbuf would incorrectly
             * do a 32f->16f conversion, when we want to read a 16f value.
             */
            if (is_cat2_float(instr->opc) || is_cat3_float(instr->opc))
               return false;
            if (instr->opc == OPC_MOV && type_float(instr->cat1.src_type))
               return false;
         }

         src_reg = ir3_reg_clone(instr->block->shader, src_reg);
         src_reg->flags = new_flags;
         instr->srcs[n] = src_reg;

         if (src_reg->flags & IR3_REG_RELATIV)
            ir3_instr_set_address(instr, reg->def->instr->address->def->instr);

         return true;
      }

      if (src_reg->flags & IR3_REG_IMMED) {
         int32_t iim_val = src_reg->iim_val;

         assert((opc_cat(instr->opc) == 1) ||
                (opc_cat(instr->opc) == 2) ||
                (opc_cat(instr->opc) == 6) ||
                is_meta(instr) ||
                (is_mad(instr->opc) && (n == 0)));

         if ((opc_cat(instr->opc) == 2) && !ir3_cat2_int(instr->opc)) {
            /* cat2 float ops can only encode certain immediates (via the
             * float lookup table); ir3_flut() returns the flut index, or
             * a negative value if the immediate isn't representable:
             */
            iim_val = ir3_flut(src_reg);
            if (iim_val < 0) {
               /* Fall back to trying to load the immediate as a const: */
               return lower_immed(ctx, instr, n, src_reg, new_flags);
            }
         }

         if (new_flags & IR3_REG_SABS)
            iim_val = abs(iim_val);

         if (new_flags & IR3_REG_SNEG)
            iim_val = -iim_val;

         if (new_flags & IR3_REG_BNOT)
            iim_val = ~iim_val;

         if (ir3_valid_flags(instr, n, new_flags) &&
             ir3_valid_immediate(instr, iim_val)) {
            new_flags &= ~(IR3_REG_SABS | IR3_REG_SNEG | IR3_REG_BNOT);
            src_reg = ir3_reg_clone(instr->block->shader, src_reg);
            src_reg->flags = new_flags;
            src_reg->iim_val = iim_val;
            instr->srcs[n] = src_reg;

            return true;
         } else {
            /* Fall back to trying to load the immediate as a const: */
            return lower_immed(ctx, instr, n, src_reg, new_flags);
         }
      }
   }

   return false;
}
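
/* Illustrative example of the immed case above (a sketch, not compiler
 * output):
 *
 *    mov.s32s32 r0.x, 5
 *    add.s r1.x, r2.x, r0.x
 *
 * becomes
 *
 *    add.s r1.x, r2.x, 5
 *
 * while a (neg) on the folded src is evaluated at compile time, giving -5.
 */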

/* Handle special case of eliminating output mov, and similar cases where
 * there isn't a normal "consuming" instruction. In this case we cannot
 * collapse flags (ie. output mov from const, or w/ abs/neg flags, cannot
 * be eliminated)
 */
static struct ir3_instruction *
eliminate_output_mov(struct ir3_cp_ctx *ctx, struct ir3_instruction *instr)
{
   if (is_eligible_mov(instr, NULL, false)) {
      struct ir3_register *reg = instr->srcs[0];
      if (!(reg->flags & IR3_REG_ARRAY)) {
         struct ir3_instruction *src_instr = ssa(reg);
         assert(src_instr);
         ctx->progress = true;
         return src_instr;
      }
   }
   return instr;
}

/**
 * Find instruction srcs which are movs that can be collapsed, replacing
 * the mov dst with the mov src
 */
static void
instr_cp(struct ir3_cp_ctx *ctx, struct ir3_instruction *instr)
{
   if (instr->srcs_count == 0)
      return;

   if (ir3_instr_check_mark(instr))
      return;

   /* walk down the graph from each src: */
   bool progress;
   do {
      progress = false;
      foreach_src_n (reg, n, instr) {
         struct ir3_instruction *src = ssa(reg);

         if (!src)
            continue;

         instr_cp(ctx, src);

         /* TODO non-indirect access we could figure out which register
          * we actually want and allow cp..
          */
         if ((reg->flags & IR3_REG_ARRAY) && src->opc != OPC_META_PHI)
            continue;

         /* Don't CP absneg into meta instructions, that won't end well: */
         if (is_meta(instr) &&
             (src->opc == OPC_ABSNEG_F || src->opc == OPC_ABSNEG_S))
            continue;

         /* Don't CP mova and mova1 into their users */
         if (writes_addr0(src) || writes_addr1(src))
            continue;

         progress |= reg_cp(ctx, instr, reg, n);
         ctx->progress |= progress;
      }
   } while (progress);

   /* After folding a mov's source we may wind up with a type-converting mov
    * of an immediate. This happens e.g. with texture descriptors, since we
    * narrow the descriptor (which may be a constant) to a half-reg in ir3.
    * By converting the immediate in-place to the destination type, we can
    * turn the mov into a same-type mov so that it can be further propagated.
    */
   if (instr->opc == OPC_MOV && (instr->srcs[0]->flags & IR3_REG_IMMED) &&
       instr->cat1.src_type != instr->cat1.dst_type &&
       /* Only do uint types for now, until we generate other types of
        * mov's during instruction selection.
        */
       full_type(instr->cat1.src_type) == TYPE_U32 &&
       full_type(instr->cat1.dst_type) == TYPE_U32) {
      uint32_t uimm = instr->srcs[0]->uim_val;
      if (instr->cat1.dst_type == TYPE_U16)
         uimm &= 0xffff;
      instr->srcs[0]->uim_val = uimm;
      if (instr->dsts[0]->flags & IR3_REG_HALF)
         instr->srcs[0]->flags |= IR3_REG_HALF;
      else
         instr->srcs[0]->flags &= ~IR3_REG_HALF;
      instr->cat1.src_type = instr->cat1.dst_type;
      ctx->progress = true;
   }
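
   /* For example (illustrative sketch, not compiler output), the rewrite
    * below turns
    *
    *    cmps.f.lt r0.x, r1.x, r1.y
    *    cmps.s.ne p0.x, r0.x, 0
    *
    * into a single compare writing the predicate directly:
    *
    *    cmps.f.lt p0.x, r1.x, r1.y
    */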

   /* Re-write the instruction writing the predicate register to get rid
    * of the double cmps.
    */
   if ((instr->opc == OPC_CMPS_S) && is_foldable_double_cmp(instr)) {
      struct ir3_instruction *cond = ssa(instr->srcs[0]);
      switch (cond->opc) {
      case OPC_CMPS_S:
      case OPC_CMPS_F:
      case OPC_CMPS_U:
         instr->opc = cond->opc;
         instr->flags = cond->flags;
         instr->cat2 = cond->cat2;
         if (cond->address)
            ir3_instr_set_address(instr, cond->address->def->instr);
         instr->srcs[0] = ir3_reg_clone(ctx->shader, cond->srcs[0]);
         instr->srcs[1] = ir3_reg_clone(ctx->shader, cond->srcs[1]);
         instr->barrier_class |= cond->barrier_class;
         instr->barrier_conflict |= cond->barrier_conflict;
         unuse(cond);
         ctx->progress = true;
         break;
      default:
         break;
      }
   }

   /* Handle converting a sam.s2en (taking samp/tex idx params via register)
    * into a normal sam (encoding immediate samp/tex idx) if they are
    * immediate. This saves some instructions and regs in the common case
    * where we know samp/tex at compile time. This needs to be done in the
    * frontend for bindless tex, though, so don't replicate it here.
    */
   if (is_tex(instr) && (instr->flags & IR3_INSTR_S2EN) &&
       !(instr->flags & IR3_INSTR_B) &&
       !(ir3_shader_debug & IR3_DBG_FORCES2EN)) {
      /* The first src will be a collect; if both of its two sources
       * are movs from immediates, then we can encode the samp/tex idx
       * directly:
       */
      struct ir3_instruction *samp_tex = ssa(instr->srcs[0]);

      assert(samp_tex->opc == OPC_META_COLLECT);

      struct ir3_register *samp = samp_tex->srcs[0];
      struct ir3_register *tex = samp_tex->srcs[1];

      if ((samp->flags & IR3_REG_IMMED) && (tex->flags & IR3_REG_IMMED) &&
          (samp->iim_val < 16) && (tex->iim_val < 16)) {
         instr->flags &= ~IR3_INSTR_S2EN;
         instr->cat5.samp = samp->iim_val;
         instr->cat5.tex = tex->iim_val;

         /* shuffle around the regs to remove the first src: */
         instr->srcs_count--;
         for (unsigned i = 0; i < instr->srcs_count; i++) {
            instr->srcs[i] = instr->srcs[i + 1];
         }

         ctx->progress = true;
      }
   }
}

bool
ir3_cp(struct ir3 *ir, struct ir3_shader_variant *so)
{
   struct ir3_cp_ctx ctx = {
      .shader = ir,
      .so = so,
   };

   /* This is a bit annoying, and probably wouldn't be necessary if we
    * tracked a reverse link from producing instruction to consumer.
    * But we need to know when we've eliminated the last consumer of
    * a mov, so we need to do a pass to first count consumers of a
    * mov.
    */
   foreach_block (block, &ir->block_list) {
      foreach_instr (instr, &block->instr_list) {

         /* by the way, we don't account for false-dep's, so the CP
          * pass should always happen before false-dep's are inserted
          */
         assert(instr->deps_count == 0);

         foreach_ssa_src (src, instr) {
            src->use_count++;
         }
      }
   }

   ir3_clear_mark(ir);

   foreach_block (block, &ir->block_list) {
      if (block->condition) {
         instr_cp(&ctx, block->condition);
         block->condition = eliminate_output_mov(&ctx, block->condition);
      }

      for (unsigned i = 0; i < block->keeps_count; i++) {
         instr_cp(&ctx, block->keeps[i]);
         block->keeps[i] = eliminate_output_mov(&ctx, block->keeps[i]);
      }
   }

   return ctx.progress;
}