/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/** @file brw_fs_copy_propagation.cpp
 *
 * Support for global copy propagation in two passes: A local pass that does
 * intra-block copy (and constant) propagation, and a global pass that uses
 * dataflow analysis on the copies available at the end of each block to re-do
 * local copy propagation with more copies available.
 *
 * See Muchnick's Advanced Compiler Design and Implementation, section
 * 12.5 (p356).
 */

#define ACP_HASH_SIZE 64

#include "util/bitset.h"
#include "util/u_math.h"
#include "brw_fs.h"
#include "brw_fs_live_variables.h"
#include "brw_cfg.h"
#include "brw_eu.h"

using namespace brw;

namespace { /* avoid conflict with opt_copy_propagation_elements */
struct acp_entry : public exec_node {
   fs_reg dst;
   fs_reg src;
   unsigned global_idx;
   unsigned size_written;
   unsigned size_read;
   enum opcode opcode;
   bool saturate;
   bool is_partial_write;
};

struct block_data {
   /**
    * Which entries in the fs_copy_prop_dataflow acp table are live at the
    * start of this block. This is the useful output of the analysis, since
    * it lets us plug those into the local copy propagation on the second
    * pass.
    */
   BITSET_WORD *livein;

   /**
    * Which entries in the fs_copy_prop_dataflow acp table are live at the end
    * of this block. This is done in initial setup from the per-block acps
    * returned by the first local copy prop pass.
    */
   BITSET_WORD *liveout;

   /**
    * Which entries in the fs_copy_prop_dataflow acp table are generated by
    * instructions in this block which reach the end of the block without
    * being killed.
    */
   BITSET_WORD *copy;

   /**
    * Which entries in the fs_copy_prop_dataflow acp table are killed over the
    * course of this block.
    */
   BITSET_WORD *kill;

   /**
    * Which entries in the fs_copy_prop_dataflow acp table are guaranteed to
    * have a fully uninitialized destination at the end of this block.
    */
   BITSET_WORD *undef;
};

class fs_copy_prop_dataflow
{
public:
   fs_copy_prop_dataflow(void *mem_ctx, cfg_t *cfg,
                         const fs_live_variables &live,
                         exec_list *out_acp[ACP_HASH_SIZE]);

   void setup_initial_values();
   void run();

   void dump_block_data() const UNUSED;

   void *mem_ctx;
   cfg_t *cfg;
   const fs_live_variables &live;

   acp_entry **acp;
   int num_acp;
   int bitset_words;

   struct block_data *bd;
};
} /* anonymous namespace */

fs_copy_prop_dataflow::fs_copy_prop_dataflow(void *mem_ctx, cfg_t *cfg,
                                             const fs_live_variables &live,
                                             exec_list *out_acp[ACP_HASH_SIZE])
   : mem_ctx(mem_ctx), cfg(cfg), live(live)
{
   bd = rzalloc_array(mem_ctx, struct block_data, cfg->num_blocks);

   num_acp = 0;
   foreach_block (block, cfg) {
      for (int i = 0; i < ACP_HASH_SIZE; i++) {
         num_acp += out_acp[block->num][i].length();
      }
   }

   acp = rzalloc_array(mem_ctx, struct acp_entry *, num_acp);

   bitset_words = BITSET_WORDS(num_acp);

   int next_acp = 0;
   foreach_block (block, cfg) {
      bd[block->num].livein = rzalloc_array(bd, BITSET_WORD, bitset_words);
      bd[block->num].liveout = rzalloc_array(bd, BITSET_WORD, bitset_words);
      bd[block->num].copy = rzalloc_array(bd, BITSET_WORD, bitset_words);
      bd[block->num].kill = rzalloc_array(bd, BITSET_WORD, bitset_words);
      bd[block->num].undef = rzalloc_array(bd, BITSET_WORD, bitset_words);

      for (int i = 0; i < ACP_HASH_SIZE; i++) {
         foreach_in_list(acp_entry, entry, &out_acp[block->num][i]) {
            acp[next_acp] = entry;

            entry->global_idx = next_acp;

            /* opt_copy_propagation_local populates out_acp with copies created
             * in a block which are still live at the end of the block. This
             * is exactly what we want in the COPY set.
             */
            BITSET_SET(bd[block->num].copy, next_acp);

            next_acp++;
         }
      }
   }

   assert(next_acp == num_acp);

   setup_initial_values();
   run();
}

/**
 * Set up initial values for each of the data flow sets, prior to running
 * the fixed-point algorithm.
 */
void
fs_copy_prop_dataflow::setup_initial_values()
{
   /* Initialize the COPY and KILL sets. */
   {
      /* Create a temporary table of ACP entries which we'll use for efficient
       * look-up. Unfortunately, we have to do this in two steps because we
       * have to match both sources and destinations and an ACP entry can only
       * be in one list at a time.
       *
       * We choose to make the table size between num_acp/2 and num_acp/4 to
       * try and trade off between the time it takes to initialize the table
       * via exec_list constructors or make_empty() and the cost of
       * collisions. In practice, it doesn't appear to matter too much what
       * size we make the table as long as it's roughly the same order of
       * magnitude as num_acp. We get most of the benefit of the table
       * approach even if we use a table of size ACP_HASH_SIZE though a
       * full-sized table is 1-2% faster in practice.
       */
      unsigned acp_table_size = util_next_power_of_two(num_acp) / 4;
      acp_table_size = MAX2(acp_table_size, ACP_HASH_SIZE);
      exec_list *acp_table = new exec_list[acp_table_size];

      /* First, get all the KILLs for instructions which overwrite ACP
       * destinations.
       */
      for (int i = 0; i < num_acp; i++) {
         unsigned idx = reg_space(acp[i]->dst) & (acp_table_size - 1);
         acp_table[idx].push_tail(acp[i]);
      }

      foreach_block (block, cfg) {
         foreach_inst_in_block(fs_inst, inst, block) {
            if (inst->dst.file != VGRF)
               continue;

            unsigned idx = reg_space(inst->dst) & (acp_table_size - 1);
            foreach_in_list(acp_entry, entry, &acp_table[idx]) {
               if (regions_overlap(inst->dst, inst->size_written,
                                   entry->dst, entry->size_written))
                  BITSET_SET(bd[block->num].kill, entry->global_idx);
            }
         }
      }

      /* Clear the table for the second pass */
      for (unsigned i = 0; i < acp_table_size; i++)
         acp_table[i].make_empty();

      /* Next, get all the KILLs for instructions which overwrite ACP
       * sources.
       */
      for (int i = 0; i < num_acp; i++) {
         unsigned idx = reg_space(acp[i]->src) & (acp_table_size - 1);
         acp_table[idx].push_tail(acp[i]);
      }

      foreach_block (block, cfg) {
         foreach_inst_in_block(fs_inst, inst, block) {
            if (inst->dst.file != VGRF &&
                inst->dst.file != FIXED_GRF)
               continue;

            unsigned idx = reg_space(inst->dst) & (acp_table_size - 1);
            foreach_in_list(acp_entry, entry, &acp_table[idx]) {
               if (regions_overlap(inst->dst, inst->size_written,
                                   entry->src, entry->size_read))
                  BITSET_SET(bd[block->num].kill, entry->global_idx);
            }
         }
      }

      delete [] acp_table;
   }

   /* Populate the initial values for the livein and liveout sets. For the
    * block at the start of the program, livein = 0 and liveout = copy.
    * For the others, set liveout and livein to ~0 (the universal set).
    */
   foreach_block (block, cfg) {
      if (block->parents.is_empty()) {
         for (int i = 0; i < bitset_words; i++) {
            bd[block->num].livein[i] = 0u;
            bd[block->num].liveout[i] = bd[block->num].copy[i];
         }
      } else {
         for (int i = 0; i < bitset_words; i++) {
            bd[block->num].liveout[i] = ~0u;
            bd[block->num].livein[i] = ~0u;
         }
      }
   }

   /* Initialize the undef set. */
   foreach_block (block, cfg) {
      for (int i = 0; i < num_acp; i++) {
         BITSET_SET(bd[block->num].undef, i);
         for (unsigned off = 0; off < acp[i]->size_written; off += REG_SIZE) {
            if (BITSET_TEST(live.block_data[block->num].defout,
                            live.var_from_reg(byte_offset(acp[i]->dst, off))))
               BITSET_CLEAR(bd[block->num].undef, i);
         }
      }
   }
}
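
/* A sketch of the dataflow solved below -- the standard forward "available
 * copies" problem from Muchnick section 12.5.  Each iteration of the
 * fixed-point loop in run() computes, for every block B that has parents:
 *
 *    livein(B)  = AND over parents P of (liveout(P) | undef(P))
 *    liveout(B) = copy(B) | (livein(B) & ~kill(B))
 *
 * with livein(B) additionally masked to entries that are live out of at
 * least one parent, so copies whose destination is undefined on every
 * incoming path are not propagated.
 */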

/**
 * Run the fixed-point dataflow algorithm: iterate each block's livein and
 * liveout sets, using the COPY, KILL and UNDEF sets computed during setup,
 * until no liveout set changes.
 */
void
fs_copy_prop_dataflow::run()
{
   bool progress;

   do {
      progress = false;

      foreach_block (block, cfg) {
         if (block->parents.is_empty())
            continue;

         for (int i = 0; i < bitset_words; i++) {
            const BITSET_WORD old_liveout = bd[block->num].liveout[i];
            BITSET_WORD livein_from_any_block = 0;

            /* Update livein for this block. If a copy is live out of all
             * parent blocks, it's live coming in to this block.
             */
            bd[block->num].livein[i] = ~0u;
            foreach_list_typed(bblock_link, parent_link, link, &block->parents) {
               bblock_t *parent = parent_link->block;
               /* Consider ACP entries with a known-undefined destination to
                * be available from the parent. This is valid because we're
                * free to set the undefined variable equal to the source of
                * the ACP entry without breaking the application's
                * expectations, since the variable is undefined.
                */
               bd[block->num].livein[i] &= (bd[parent->num].liveout[i] |
                                            bd[parent->num].undef[i]);
               livein_from_any_block |= bd[parent->num].liveout[i];
            }

            /* Limit to the set of ACP entries that can possibly be available
             * at the start of the block, since propagating from a variable
             * which is guaranteed to be undefined (rather than potentially
             * undefined for some dynamic control-flow paths) doesn't seem
             * particularly useful.
             */
            bd[block->num].livein[i] &= livein_from_any_block;

            /* Update liveout for this block. */
            bd[block->num].liveout[i] =
               bd[block->num].copy[i] | (bd[block->num].livein[i] &
                                         ~bd[block->num].kill[i]);

            if (old_liveout != bd[block->num].liveout[i])
               progress = true;
         }
      }
   } while (progress);
}

void
fs_copy_prop_dataflow::dump_block_data() const
{
   foreach_block (block, cfg) {
      fprintf(stderr, "Block %d [%d, %d] (parents ", block->num,
              block->start_ip, block->end_ip);
      foreach_list_typed(bblock_link, link, link, &block->parents) {
         bblock_t *parent = link->block;
         fprintf(stderr, "%d ", parent->num);
      }
      fprintf(stderr, "):\n");
      fprintf(stderr, " livein = 0x");
      for (int i = 0; i < bitset_words; i++)
         fprintf(stderr, "%08x", bd[block->num].livein[i]);
      fprintf(stderr, ", liveout = 0x");
      for (int i = 0; i < bitset_words; i++)
         fprintf(stderr, "%08x", bd[block->num].liveout[i]);
      fprintf(stderr, ",\n copy = 0x");
      for (int i = 0; i < bitset_words; i++)
         fprintf(stderr, "%08x", bd[block->num].copy[i]);
      fprintf(stderr, ", kill = 0x");
      for (int i = 0; i < bitset_words; i++)
         fprintf(stderr, "%08x", bd[block->num].kill[i]);
      fprintf(stderr, "\n");
   }
}

static bool
is_logic_op(enum opcode opcode)
{
   return (opcode == BRW_OPCODE_AND ||
           opcode == BRW_OPCODE_OR ||
           opcode == BRW_OPCODE_XOR ||
           opcode == BRW_OPCODE_NOT);
}

static bool
can_take_stride(fs_inst *inst, brw_reg_type dst_type,
                unsigned arg, unsigned stride,
                const struct brw_compiler *compiler)
{
   const struct intel_device_info *devinfo = compiler->devinfo;

   if (stride > 4)
      return false;

   /* Bail if the channels of the source need to be aligned to the byte offset
    * of the corresponding channel of the destination, and the provided stride
    * would break this restriction.
    */
   if (has_dst_aligned_region_restriction(devinfo, inst, dst_type) &&
       !(type_sz(inst->src[arg].type) * stride ==
         type_sz(dst_type) * inst->dst.stride ||
         stride == 0))
      return false;

   /* 3-source instructions can only be Align16, which restricts what strides
    * they can take. They can only take a stride of 1 (the usual case), or 0
    * with a special "repctrl" bit. But the repctrl bit doesn't work for
    * 64-bit datatypes, so if the source type is 64-bit then only a stride of
    * 1 is allowed. From the Broadwell PRM, Volume 7 "3D Media GPGPU", page
    * 944:
    *
    *    This is applicable to 32b datatypes and 16b datatype. 64b datatypes
    *    cannot use the replicate control.
    */
   if (inst->is_3src(compiler)) {
      if (type_sz(inst->src[arg].type) > 4)
         return stride == 1;
      else
         return stride == 1 || stride == 0;
   }

   /* From the Broadwell PRM, Volume 2a "Command Reference - Instructions",
    * page 391 ("Extended Math Function"):
    *
    *    The following restrictions apply for align1 mode: Scalar source is
    *    supported. Source and destination horizontal stride must be the
    *    same.
    *
    * From the Haswell PRM Volume 2b "Command Reference - Instructions", page
    * 134 ("Extended Math Function"):
    *
    *    Scalar source is supported. Source and destination horizontal stride
    *    must be 1.
    *
    * and similar language exists for IVB and SNB. Pre-SNB, math instructions
    * are sends, so the sources are moved to MRF's and there are no
    * restrictions.
    */
   if (inst->is_math()) {
      if (devinfo->ver == 6 || devinfo->ver == 7) {
         assert(inst->dst.stride == 1);
         return stride == 1 || stride == 0;
      } else if (devinfo->ver >= 8) {
         return stride == inst->dst.stride || stride == 0;
      }
   }

   return true;
}

static bool
instruction_requires_packed_data(fs_inst *inst)
{
   switch (inst->opcode) {
   case FS_OPCODE_DDX_FINE:
   case FS_OPCODE_DDX_COARSE:
   case FS_OPCODE_DDY_FINE:
   case FS_OPCODE_DDY_COARSE:
   case SHADER_OPCODE_QUAD_SWIZZLE:
      return true;
   default:
      return false;
   }
}

bool
fs_visitor::try_copy_propagate(fs_inst *inst, int arg, acp_entry *entry)
{
   if (inst->src[arg].file != VGRF)
      return false;

   if (entry->src.file == IMM)
      return false;
   assert(entry->src.file == VGRF || entry->src.file == UNIFORM ||
          entry->src.file == ATTR || entry->src.file == FIXED_GRF);

   /* Avoid propagating a LOAD_PAYLOAD instruction into another if there is a
    * good chance that we'll be able to eliminate the latter through register
    * coalescing. If only part of the sources of the second LOAD_PAYLOAD can
    * be simplified through copy propagation we would be making register
    * coalescing impossible, ending up with unnecessary copies in the program.
    * This is also the case for is_multi_copy_payload() copies that can only
    * be coalesced when the instruction is lowered into a sequence of MOVs.
    *
    * Worse -- In cases where the ACP entry was the result of CSE combining
    * multiple LOAD_PAYLOAD subexpressions, propagating the first LOAD_PAYLOAD
    * into the second would undo the work of CSE, leading to an infinite
    * optimization loop. Avoid this by detecting LOAD_PAYLOAD copies from CSE
    * temporaries which should match is_coalescing_payload().
    */
   if (entry->opcode == SHADER_OPCODE_LOAD_PAYLOAD &&
       (is_coalescing_payload(alloc, inst) || is_multi_copy_payload(inst)))
      return false;

   assert(entry->dst.file == VGRF);
   if (inst->src[arg].nr != entry->dst.nr)
      return false;

   /* Bail if inst is reading a range that isn't contained in the range
    * that entry is writing.
    */
   if (!region_contained_in(inst->src[arg], inst->size_read(arg),
                            entry->dst, entry->size_written))
      return false;

   /* Send messages with EOT set are restricted to use g112-g127 (and we
    * sometimes need g127 for other purposes), so avoid copy propagating
    * anything that would make it impossible to satisfy that restriction.
    */
   if (inst->eot) {
      /* Avoid propagating a FIXED_GRF register, as that's already pinned. */
      if (entry->src.file == FIXED_GRF)
         return false;

      /* We might be propagating from a large register, while the SEND only
       * is reading a portion of it (say the .A channel in an RGBA value).
       * We need to pin both split SEND sources in g112-g126/127, so only
       * allow this if the registers aren't too large.
       */
      if (inst->opcode == SHADER_OPCODE_SEND && entry->src.file == VGRF) {
         int other_src = arg == 2 ? 3 : 2;
         unsigned other_size = inst->src[other_src].file == VGRF ?
                               alloc.sizes[inst->src[other_src].nr] :
                               inst->size_read(other_src);
         unsigned prop_src_size = alloc.sizes[entry->src.nr];
         if (other_size + prop_src_size > 15)
            return false;
      }
   }

   /* Avoid propagating odd-numbered FIXED_GRF registers into the first source
    * of a LINTERP instruction on platforms where the PLN instruction has
    * register alignment restrictions.
    */
   if (devinfo->has_pln && devinfo->ver <= 6 &&
       entry->src.file == FIXED_GRF && (entry->src.nr & 1) &&
       inst->opcode == FS_OPCODE_LINTERP && arg == 0)
      return false;

   /* we can't generally copy-propagate UD negations because we
    * can end up accessing the resulting values as signed integers
    * instead. See also resolve_ud_negate() and comment in
    * fs_generator::generate_code.
    */
   if (entry->src.type == BRW_REGISTER_TYPE_UD &&
       entry->src.negate)
      return false;

   bool has_source_modifiers = entry->src.abs || entry->src.negate;

   if (has_source_modifiers && !inst->can_do_source_mods(devinfo))
      return false;

   /* Reject cases that would violate register regioning restrictions. */
   if ((entry->src.file == UNIFORM || !entry->src.is_contiguous()) &&
       ((devinfo->ver == 6 && inst->is_math()) ||
        inst->is_send_from_grf() ||
        inst->uses_indirect_addressing())) {
      return false;
   }

   if (has_source_modifiers &&
       inst->opcode == SHADER_OPCODE_GFX4_SCRATCH_WRITE)
      return false;

   /* Some instructions implemented in the generator backend, such as
    * derivatives, assume that their operands are packed so we can't
    * generally propagate strided regions to them.
    */
   const unsigned entry_stride = (entry->src.file == FIXED_GRF ? 1 :
                                  entry->src.stride);
   if (instruction_requires_packed_data(inst) && entry_stride != 1)
      return false;

   const brw_reg_type dst_type = (has_source_modifiers &&
                                  entry->dst.type != inst->src[arg].type) ?
      entry->dst.type : inst->dst.type;

   /* Bail if the result of composing both strides would exceed the
    * hardware limit.
    */
   if (!can_take_stride(inst, dst_type, arg,
                        entry_stride * inst->src[arg].stride,
                        compiler))
      return false;

   /* From the Cherry Trail/Braswell PRMs, Volume 7: 3D Media GPGPU:
    *    EU Overview
    *       Register Region Restrictions
    *          Special Requirements for Handling Double Precision Data Types :
    *
    *    "When source or destination datatype is 64b or operation is integer
    *     DWord multiply, regioning in Align1 must follow these rules:
    *
    *     1. Source and Destination horizontal stride must be aligned to the
    *        same qword.
    *     2. Regioning must ensure Src.Vstride = Src.Width * Src.Hstride.
    *     3. Source and Destination offset must be the same, except the case
    *        of scalar source."
    *
    * Most of this is already checked in can_take_stride(), we're only left
    * with checking 3.
    */
   if (has_dst_aligned_region_restriction(devinfo, inst, dst_type) &&
       entry_stride != 0 &&
       (reg_offset(inst->dst) % REG_SIZE) != (reg_offset(entry->src) % REG_SIZE))
      return false;

   /* Bail if the source FIXED_GRF region of the copy cannot be trivially
    * composed with the source region of the instruction -- E.g. because the
    * copy uses some extended stride greater than 4 not supported natively by
    * the hardware as a horizontal stride, or because instruction compression
    * could require us to use a vertical stride shorter than a GRF.
    */
   if (entry->src.file == FIXED_GRF &&
       (inst->src[arg].stride > 4 ||
        inst->dst.component_size(inst->exec_size) >
        inst->src[arg].component_size(inst->exec_size)))
      return false;

   /* Bail if the instruction type is larger than the execution type of the
    * copy, which implies that each channel is reading multiple channels of
    * the destination of the copy, and simply replacing the sources would give
    * a program with different semantics.
    */
   if ((type_sz(entry->dst.type) < type_sz(inst->src[arg].type) ||
        entry->is_partial_write) &&
       inst->opcode != BRW_OPCODE_MOV) {
      return false;
   }

   /* Bail if the result of composing both strides cannot be expressed
    * as another stride. This avoids, for example, trying to transform
    * this:
    *
    *    MOV (8) rX<1>UD rY<0;1,0>UD
    *    FOO (8) ...     rX<8;8,1>UW
    *
    * into this:
    *
    *    FOO (8) ...     rY<0;1,0>UW
    *
    * Which would have different semantics.
    */
   if (entry_stride != 1 &&
       (inst->src[arg].stride *
        type_sz(inst->src[arg].type)) % type_sz(entry->src.type) != 0)
      return false;

   /* Since semantics of source modifiers are type-dependent we need to
    * ensure that the meaning of the instruction remains the same if we
    * change the type. If the sizes of the types are different the new
    * instruction will read a different amount of data than the original
    * and the semantics will always be different.
    */
   if (has_source_modifiers &&
       entry->dst.type != inst->src[arg].type &&
       (!inst->can_change_types() ||
        type_sz(entry->dst.type) != type_sz(inst->src[arg].type)))
      return false;

   if (devinfo->ver >= 8 && (entry->src.negate || entry->src.abs) &&
       is_logic_op(inst->opcode)) {
      return false;
   }

   if (entry->saturate) {
      switch (inst->opcode) {
      case BRW_OPCODE_SEL:
         if ((inst->conditional_mod != BRW_CONDITIONAL_GE &&
              inst->conditional_mod != BRW_CONDITIONAL_L) ||
             inst->src[1].file != IMM ||
             inst->src[1].f < 0.0 ||
             inst->src[1].f > 1.0) {
            return false;
         }
         break;
      default:
         return false;
      }
   }

   /* Save the offset of inst->src[arg] relative to entry->dst for it to be
    * applied later.
    */
   const unsigned rel_offset = inst->src[arg].offset - entry->dst.offset;

   /* Fold the copy into the instruction consuming it. */
   inst->src[arg].file = entry->src.file;
   inst->src[arg].nr = entry->src.nr;
   inst->src[arg].subnr = entry->src.subnr;
   inst->src[arg].offset = entry->src.offset;

   /* Compose the strides of both regions. */
   if (entry->src.file == FIXED_GRF) {
      if (inst->src[arg].stride) {
         const unsigned orig_width = 1 << entry->src.width;
         const unsigned reg_width = REG_SIZE / (type_sz(inst->src[arg].type) *
                                                inst->src[arg].stride);
         inst->src[arg].width = cvt(MIN2(orig_width, reg_width)) - 1;
         inst->src[arg].hstride = cvt(inst->src[arg].stride);
         inst->src[arg].vstride = inst->src[arg].hstride + inst->src[arg].width;
      } else {
         inst->src[arg].vstride = inst->src[arg].hstride =
            inst->src[arg].width = 0;
      }

      inst->src[arg].stride = 1;

      /* Hopefully no Align16 around here...
       */
      assert(entry->src.swizzle == BRW_SWIZZLE_XYZW);
      inst->src[arg].swizzle = entry->src.swizzle;
   } else {
      inst->src[arg].stride *= entry->src.stride;
   }

   /* Compose any saturate modifiers. */
   inst->saturate = inst->saturate || entry->saturate;

   /* Compute the first component of the copy that the instruction is
    * reading, and the base byte offset within that component.
    */
   assert((entry->dst.offset % REG_SIZE == 0 || inst->opcode == BRW_OPCODE_MOV) &&
          entry->dst.stride == 1);
   const unsigned component = rel_offset / type_sz(entry->dst.type);
   const unsigned suboffset = rel_offset % type_sz(entry->dst.type);

   /* Calculate the byte offset at the origin of the copy of the given
    * component and suboffset.
    */
   inst->src[arg] = byte_offset(inst->src[arg],
      component * entry_stride * type_sz(entry->src.type) + suboffset);

   if (has_source_modifiers) {
      if (entry->dst.type != inst->src[arg].type) {
         /* We are propagating source modifiers from a MOV with a different
          * type. If we got here, then we can just change the source and
          * destination types of the instruction and keep going.
          */
         assert(inst->can_change_types());
         for (int i = 0; i < inst->sources; i++) {
            inst->src[i].type = entry->dst.type;
         }
         inst->dst.type = entry->dst.type;
      }

      if (!inst->src[arg].abs) {
         inst->src[arg].abs = entry->src.abs;
         inst->src[arg].negate ^= entry->src.negate;
      }
   }

   return true;
}


bool
fs_visitor::try_constant_propagate(fs_inst *inst, acp_entry *entry)
{
   bool progress = false;

   if (entry->src.file != IMM)
      return false;
   if (type_sz(entry->src.type) > 4)
      return false;
   if (entry->saturate)
      return false;

   for (int i = inst->sources - 1; i >= 0; i--) {
      if (inst->src[i].file != VGRF)
         continue;

      assert(entry->dst.file == VGRF);
      if (inst->src[i].nr != entry->dst.nr)
         continue;

      /* Bail if inst is reading a range that isn't contained in the range
       * that entry is writing.
       */
      if (!region_contained_in(inst->src[i], inst->size_read(i),
                               entry->dst, entry->size_written))
         continue;

      /* If the type sizes don't match each channel of the instruction is
       * either extracting a portion of the constant (which could be handled
       * with some effort but the code below doesn't) or reading multiple
       * channels of the source at once.
       */
      if (type_sz(inst->src[i].type) != type_sz(entry->dst.type))
         continue;

      fs_reg val = entry->src;
      val.type = inst->src[i].type;

      if (inst->src[i].abs) {
         if ((devinfo->ver >= 8 && is_logic_op(inst->opcode)) ||
             !brw_abs_immediate(val.type, &val.as_brw_reg())) {
            continue;
         }
      }

      if (inst->src[i].negate) {
         if ((devinfo->ver >= 8 && is_logic_op(inst->opcode)) ||
             !brw_negate_immediate(val.type, &val.as_brw_reg())) {
            continue;
         }
      }

      switch (inst->opcode) {
      case BRW_OPCODE_MOV:
      case SHADER_OPCODE_LOAD_PAYLOAD:
      case FS_OPCODE_PACK:
         inst->src[i] = val;
         progress = true;
         break;

      case SHADER_OPCODE_INT_QUOTIENT:
      case SHADER_OPCODE_INT_REMAINDER:
         /* FINISHME: Promote non-float constants and remove this.
          */
         if (devinfo->ver < 8)
            break;
         FALLTHROUGH;
      case SHADER_OPCODE_POW:
         /* Allow constant propagation into src1 (except on Gen 6 which
          * doesn't support scalar source math), and let constant combining
          * promote the constant on Gen < 8.
          */
         if (devinfo->ver == 6)
            break;
         FALLTHROUGH;
      case BRW_OPCODE_BFI1:
      case BRW_OPCODE_ASR:
      case BRW_OPCODE_SHL:
      case BRW_OPCODE_SHR:
      case BRW_OPCODE_SUBB:
         if (i == 1) {
            inst->src[i] = val;
            progress = true;
         }
         break;

      case BRW_OPCODE_MACH:
      case BRW_OPCODE_MUL:
      case SHADER_OPCODE_MULH:
      case BRW_OPCODE_ADD:
      case BRW_OPCODE_OR:
      case BRW_OPCODE_AND:
      case BRW_OPCODE_XOR:
      case BRW_OPCODE_ADDC:
         if (i == 1) {
            inst->src[i] = val;
            progress = true;
         } else if (i == 0 && inst->src[1].file != IMM) {
            /* Don't copy propagate the constant in situations like
             *
             *    mov(8)  g8<1>D   0x7fffffffD
             *    mul(8)  g16<1>D  g8<8,8,1>D  g15<16,8,2>W
             *
             * On platforms that only have a 32x16 multiplier, this will
             * result in lowering the multiply to
             *
             *    mul(8)  g15<1>D  g14<8,8,1>D  0xffffUW
             *    mul(8)  g16<1>D  g14<8,8,1>D  0x7fffUW
             *    add(8)  g15.1<2>UW  g15.1<16,8,2>UW  g16<16,8,2>UW
             *
             * On Gfx8 and Gfx9, which have the full 32x32 multiplier, it
             * results in
             *
             *    mul(8)  g16<1>D  g15<16,8,2>W  0x7fffffffD
             *
             * Volume 2a of the Skylake PRM says:
             *
             *    When multiplying a DW and any lower precision integer, the
             *    DW operand must on src0.
             */
            if (inst->opcode == BRW_OPCODE_MUL &&
                type_sz(inst->src[1].type) < 4 &&
                type_sz(val.type) == 4)
               break;

            /* Fit this constant in by commuting the operands.
             * Exception: we can't do this for 32-bit integer MUL/MACH
             * because it's asymmetric.
             *
             * The BSpec says for Broadwell that
             *
             *    "When multiplying DW x DW, the dst cannot be accumulator."
             *
             * Integer MUL with a non-accumulator destination will be lowered
             * by lower_integer_multiplication(), so don't restrict it.
             */
            if (((inst->opcode == BRW_OPCODE_MUL &&
                  inst->dst.is_accumulator()) ||
                 inst->opcode == BRW_OPCODE_MACH) &&
                (inst->src[1].type == BRW_REGISTER_TYPE_D ||
                 inst->src[1].type == BRW_REGISTER_TYPE_UD))
               break;
            inst->src[0] = inst->src[1];
            inst->src[1] = val;
            progress = true;
         }
         break;

      case BRW_OPCODE_CMP:
      case BRW_OPCODE_IF:
         if (i == 1) {
            inst->src[i] = val;
            progress = true;
         } else if (i == 0 && inst->src[1].file != IMM) {
            enum brw_conditional_mod new_cmod;

            new_cmod = brw_swap_cmod(inst->conditional_mod);
            if (new_cmod != BRW_CONDITIONAL_NONE) {
               /* Fit this constant in by swapping the operands and
                * flipping the test
                */
               inst->src[0] = inst->src[1];
               inst->src[1] = val;
               inst->conditional_mod = new_cmod;
               progress = true;
            }
         }
         break;

      case BRW_OPCODE_SEL:
         if (i == 1) {
            inst->src[i] = val;
            progress = true;
         } else if (i == 0 && inst->src[1].file != IMM &&
                    (inst->conditional_mod == BRW_CONDITIONAL_NONE ||
                     /* Only GE and L are commutative. */
                     inst->conditional_mod == BRW_CONDITIONAL_GE ||
                     inst->conditional_mod == BRW_CONDITIONAL_L)) {
            inst->src[0] = inst->src[1];
            inst->src[1] = val;

            /* If this was predicated, flipping operands means
             * we also need to flip the predicate.
             */
            if (inst->conditional_mod == BRW_CONDITIONAL_NONE) {
               inst->predicate_inverse =
                  !inst->predicate_inverse;
            }
            progress = true;
         }
         break;

      case FS_OPCODE_FB_WRITE_LOGICAL:
         /* The stencil and omask sources of FS_OPCODE_FB_WRITE_LOGICAL are
          * bit-cast using a strided region so they cannot be immediates.
          */
         if (i != FB_WRITE_LOGICAL_SRC_SRC_STENCIL &&
             i != FB_WRITE_LOGICAL_SRC_OMASK) {
            inst->src[i] = val;
            progress = true;
         }
         break;

      case SHADER_OPCODE_TEX_LOGICAL:
      case SHADER_OPCODE_TXD_LOGICAL:
      case SHADER_OPCODE_TXF_LOGICAL:
      case SHADER_OPCODE_TXL_LOGICAL:
      case SHADER_OPCODE_TXS_LOGICAL:
      case FS_OPCODE_TXB_LOGICAL:
      case SHADER_OPCODE_TXF_CMS_LOGICAL:
      case SHADER_OPCODE_TXF_CMS_W_LOGICAL:
      case SHADER_OPCODE_TXF_CMS_W_GFX12_LOGICAL:
      case SHADER_OPCODE_TXF_UMS_LOGICAL:
      case SHADER_OPCODE_TXF_MCS_LOGICAL:
      case SHADER_OPCODE_LOD_LOGICAL:
      case SHADER_OPCODE_TG4_LOGICAL:
      case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
      case SHADER_OPCODE_SAMPLEINFO_LOGICAL:
      case SHADER_OPCODE_IMAGE_SIZE_LOGICAL:
      case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
      case SHADER_OPCODE_UNTYPED_ATOMIC_FLOAT_LOGICAL:
      case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
      case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
      case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
      case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
      case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
      case SHADER_OPCODE_BYTE_SCATTERED_WRITE_LOGICAL:
      case SHADER_OPCODE_BYTE_SCATTERED_READ_LOGICAL:
         inst->src[i] = val;
         progress = true;
         break;

      case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
      case SHADER_OPCODE_BROADCAST:
         inst->src[i] = val;
         progress = true;
         break;

      case BRW_OPCODE_MAD:
      case BRW_OPCODE_LRP:
         inst->src[i] = val;
         progress = true;
         break;

      case FS_OPCODE_PACK_HALF_2x16_SPLIT:
         inst->src[i] = val;
         progress = true;
         break;

      default:
         break;
      }
   }

   return progress;
}

static bool
can_propagate_from(fs_inst *inst)
{
   return (inst->opcode == BRW_OPCODE_MOV &&
           inst->dst.file == VGRF &&
           ((inst->src[0].file == VGRF &&
             !regions_overlap(inst->dst, inst->size_written,
                              inst->src[0], inst->size_read(0))) ||
            inst->src[0].file == ATTR ||
            inst->src[0].file == UNIFORM ||
            inst->src[0].file == IMM ||
            (inst->src[0].file == FIXED_GRF &&
             inst->src[0].is_contiguous())) &&
           inst->src[0].type == inst->dst.type &&
           /* Subset of !is_partial_write() conditions. */
           !((inst->predicate && inst->opcode != BRW_OPCODE_SEL) ||
             !inst->dst.is_contiguous())) ||
          is_identity_payload(FIXED_GRF, inst);
}

/* Walks a basic block and does copy propagation on it using the acp
 * list.
 */
bool
fs_visitor::opt_copy_propagation_local(void *copy_prop_ctx, bblock_t *block,
                                       exec_list *acp)
{
   bool progress = false;

   foreach_inst_in_block(fs_inst, inst, block) {
      /* Try propagating into this instruction.
       */
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file != VGRF)
            continue;

         foreach_in_list(acp_entry, entry, &acp[inst->src[i].nr % ACP_HASH_SIZE]) {
            if (try_constant_propagate(inst, entry))
               progress = true;
            else if (try_copy_propagate(inst, i, entry))
               progress = true;
         }
      }

      /* kill the destination from the ACP */
      if (inst->dst.file == VGRF || inst->dst.file == FIXED_GRF) {
         foreach_in_list_safe(acp_entry, entry, &acp[inst->dst.nr % ACP_HASH_SIZE]) {
            if (regions_overlap(entry->dst, entry->size_written,
                                inst->dst, inst->size_written))
               entry->remove();
         }

         /* Oops, we only have the chaining hash based on the destination, not
          * the source, so walk across the entire table.
          */
         for (int i = 0; i < ACP_HASH_SIZE; i++) {
            foreach_in_list_safe(acp_entry, entry, &acp[i]) {
               /* Make sure we kill the entry if this instruction overwrites
                * _any_ of the registers that it reads
                */
               if (regions_overlap(entry->src, entry->size_read,
                                   inst->dst, inst->size_written))
                  entry->remove();
            }
         }
      }

      /* If this instruction's source could potentially be folded into the
       * operand of another instruction, add it to the ACP.
       */
      if (can_propagate_from(inst)) {
         acp_entry *entry = rzalloc(copy_prop_ctx, acp_entry);
         entry->dst = inst->dst;
         entry->src = inst->src[0];
         entry->size_written = inst->size_written;
         for (unsigned i = 0; i < inst->sources; i++)
            entry->size_read += inst->size_read(i);
         entry->opcode = inst->opcode;
         entry->saturate = inst->saturate;
         entry->is_partial_write = inst->is_partial_write();
         acp[entry->dst.nr % ACP_HASH_SIZE].push_tail(entry);
      } else if (inst->opcode == SHADER_OPCODE_LOAD_PAYLOAD &&
                 inst->dst.file == VGRF) {
         int offset = 0;
         for (int i = 0; i < inst->sources; i++) {
            int effective_width = i < inst->header_size ? 8 : inst->exec_size;
            const unsigned size_written = effective_width *
                                          type_sz(inst->src[i].type);
            if (inst->src[i].file == VGRF ||
                (inst->src[i].file == FIXED_GRF &&
                 inst->src[i].is_contiguous())) {
               acp_entry *entry = rzalloc(copy_prop_ctx, acp_entry);
               entry->dst = byte_offset(inst->dst, offset);
               entry->src = inst->src[i];
               entry->size_written = size_written;
               entry->size_read = inst->size_read(i);
               entry->opcode = inst->opcode;
               if (!entry->dst.equals(inst->src[i])) {
                  acp[entry->dst.nr % ACP_HASH_SIZE].push_tail(entry);
               } else {
                  ralloc_free(entry);
               }
            }
            offset += size_written;
         }
      }
   }

   return progress;
}

bool
fs_visitor::opt_copy_propagation()
{
   bool progress = false;
   void *copy_prop_ctx = ralloc_context(NULL);
   exec_list *out_acp[cfg->num_blocks];

   for (int i = 0; i < cfg->num_blocks; i++)
      out_acp[i] = new exec_list [ACP_HASH_SIZE];

   const fs_live_variables &live = live_analysis.require();

   /* First, walk through each block doing local copy propagation and getting
    * the set of copies available at the end of the block.
    */
   foreach_block (block, cfg) {
      progress = opt_copy_propagation_local(copy_prop_ctx, block,
                                            out_acp[block->num]) || progress;

      /* If the destination of an ACP entry exists only within this block,
       * then there's no need to keep it for dataflow analysis. We can delete
       * it from the out_acp table and avoid growing the bitsets any bigger
       * than we absolutely have to.
       *
       * Because nothing in opt_copy_propagation_local touches the block
       * start/end IPs and opt_copy_propagation_local is incapable of
       * extending the live range of an ACP destination beyond the block,
       * it's safe to use the liveness information in this way.
       */
      for (unsigned a = 0; a < ACP_HASH_SIZE; a++) {
         foreach_in_list_safe(acp_entry, entry, &out_acp[block->num][a]) {
            assert(entry->dst.file == VGRF);
            if (block->start_ip <= live.vgrf_start[entry->dst.nr] &&
                live.vgrf_end[entry->dst.nr] <= block->end_ip)
               entry->remove();
         }
      }
   }

   /* Do dataflow analysis for those available copies. */
   fs_copy_prop_dataflow dataflow(copy_prop_ctx, cfg, live, out_acp);

   /* Next, re-run local copy propagation, this time with the set of copies
    * provided by the dataflow analysis available at the start of a block.
    */
   foreach_block (block, cfg) {
      exec_list in_acp[ACP_HASH_SIZE];

      for (int i = 0; i < dataflow.num_acp; i++) {
         if (BITSET_TEST(dataflow.bd[block->num].livein, i)) {
            struct acp_entry *entry = dataflow.acp[i];
            in_acp[entry->dst.nr % ACP_HASH_SIZE].push_tail(entry);
         }
      }

      progress = opt_copy_propagation_local(copy_prop_ctx, block, in_acp) ||
                 progress;
   }

   for (int i = 0; i < cfg->num_blocks; i++)
      delete [] out_acp[i];
   ralloc_free(copy_prop_ctx);

   if (progress)
      invalidate_analysis(DEPENDENCY_INSTRUCTION_DATA_FLOW |
                          DEPENDENCY_INSTRUCTION_DETAIL);

   return progress;
}