1/* 2 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org> 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 * SOFTWARE. 22 * 23 * Authors: 24 * Rob Clark <robclark@freedesktop.org> 25 */ 26 27#include "util/format/u_format.h" 28#include "util/u_atomic.h" 29#include "util/u_math.h" 30#include "util/u_memory.h" 31#include "util/u_string.h" 32 33#include "drm/freedreno_drmif.h" 34 35#include "ir3_assembler.h" 36#include "ir3_compiler.h" 37#include "ir3_nir.h" 38#include "ir3_parser.h" 39#include "ir3_shader.h" 40 41#include "isa/isa.h" 42 43#include "disasm.h" 44 45int 46ir3_glsl_type_size(const struct glsl_type *type, bool bindless) 47{ 48 return glsl_count_attribute_slots(type, false); 49} 50 51/* for vertex shader, the inputs are loaded into registers before the shader 52 * is executed, so max_regs from the shader instructions might not properly 53 * reflect the # of registers actually used, especially in case passthrough 54 * varyings. 55 * 56 * Likewise, for fragment shader, we can have some regs which are passed 57 * input values but never touched by the resulting shader (ie. as result 58 * of dead code elimination or simply because we don't know how to turn 59 * the reg off. 60 */ 61static void 62fixup_regfootprint(struct ir3_shader_variant *v) 63{ 64 unsigned i; 65 66 for (i = 0; i < v->inputs_count; i++) { 67 /* skip frag inputs fetch via bary.f since their reg's are 68 * not written by gpu before shader starts (and in fact the 69 * regid's might not even be valid) 70 */ 71 if (v->inputs[i].bary) 72 continue; 73 74 /* ignore high regs that are global to all threads in a warp 75 * (they exist by default) (a5xx+) 76 */ 77 if (v->inputs[i].regid >= regid(48, 0)) 78 continue; 79 80 if (v->inputs[i].compmask) { 81 unsigned n = util_last_bit(v->inputs[i].compmask) - 1; 82 int32_t regid = v->inputs[i].regid + n; 83 if (v->inputs[i].half) { 84 if (!v->mergedregs) { 85 v->info.max_half_reg = MAX2(v->info.max_half_reg, regid >> 2); 86 } else { 87 v->info.max_reg = MAX2(v->info.max_reg, regid >> 3); 88 } 89 } else { 90 v->info.max_reg = MAX2(v->info.max_reg, regid >> 2); 91 } 92 } 93 } 94 95 for (i = 0; i < v->outputs_count; i++) { 96 /* for ex, VS shaders with tess don't have normal varying outs: */ 97 if (!VALIDREG(v->outputs[i].regid)) 98 continue; 99 int32_t regid = v->outputs[i].regid + 3; 100 if (v->outputs[i].half) { 101 if (!v->mergedregs) { 102 v->info.max_half_reg = MAX2(v->info.max_half_reg, regid >> 2); 103 } else { 104 v->info.max_reg = MAX2(v->info.max_reg, regid >> 3); 105 } 106 } else { 107 v->info.max_reg = MAX2(v->info.max_reg, regid >> 2); 108 } 109 } 110 111 for (i = 0; i < v->num_sampler_prefetch; i++) { 112 unsigned n = util_last_bit(v->sampler_prefetch[i].wrmask) - 1; 113 int32_t regid = v->sampler_prefetch[i].dst + n; 114 if (v->sampler_prefetch[i].half_precision) { 115 if (!v->mergedregs) { 116 v->info.max_half_reg = MAX2(v->info.max_half_reg, regid >> 2); 117 } else { 118 v->info.max_reg = MAX2(v->info.max_reg, regid >> 3); 119 } 120 } else { 121 v->info.max_reg = MAX2(v->info.max_reg, regid >> 2); 122 } 123 } 124} 125 126/* wrapper for ir3_assemble() which does some info fixup based on 127 * shader state. Non-static since used by ir3_cmdline too. 128 */ 129void * 130ir3_shader_assemble(struct ir3_shader_variant *v) 131{ 132 const struct ir3_compiler *compiler = v->compiler; 133 struct ir3_info *info = &v->info; 134 uint32_t *bin; 135 136 ir3_collect_info(v); 137 138 if (v->constant_data_size) { 139 /* Make sure that where we're about to place the constant_data is safe 140 * to indirectly upload from. 141 */ 142 info->constant_data_offset = 143 align(info->size, v->compiler->const_upload_unit * 16); 144 info->size = info->constant_data_offset + v->constant_data_size; 145 } 146 147 /* Pad out the size so that when turnip uploads the shaders in 148 * sequence, the starting offset of the next one is properly aligned. 149 */ 150 info->size = align(info->size, compiler->instr_align * sizeof(uint64_t)); 151 152 bin = isa_assemble(v); 153 if (!bin) 154 return NULL; 155 156 /* Append the immediates after the end of the program. This lets us emit 157 * the immediates as an indirect load, while avoiding creating another BO. 158 */ 159 if (v->constant_data_size) 160 memcpy(&bin[info->constant_data_offset / 4], v->constant_data, 161 v->constant_data_size); 162 ralloc_free(v->constant_data); 163 v->constant_data = NULL; 164 165 /* NOTE: if relative addressing is used, we set constlen in 166 * the compiler (to worst-case value) since we don't know in 167 * the assembler what the max addr reg value can be: 168 */ 169 v->constlen = MAX2(v->constlen, info->max_const + 1); 170 171 if (v->constlen > ir3_const_state(v)->offsets.driver_param) 172 v->need_driver_params = true; 173 174 /* On a4xx and newer, constlen must be a multiple of 16 dwords even though 175 * uploads are in units of 4 dwords. Round it up here to make calculations 176 * regarding the shared constlen simpler. 177 */ 178 if (compiler->gen >= 4) 179 v->constlen = align(v->constlen, 4); 180 181 /* Use the per-wave layout by default on a6xx for compute shaders. It 182 * should result in better performance when loads/stores are to a uniform 183 * index. 184 */ 185 v->pvtmem_per_wave = compiler->gen >= 6 && !info->multi_dword_ldp_stp && 186 ((v->type == MESA_SHADER_COMPUTE) || 187 (v->type == MESA_SHADER_KERNEL)); 188 189 fixup_regfootprint(v); 190 191 return bin; 192} 193 194static bool 195try_override_shader_variant(struct ir3_shader_variant *v, 196 const char *identifier) 197{ 198 assert(ir3_shader_override_path); 199 200 char *name = 201 ralloc_asprintf(NULL, "%s/%s.asm", ir3_shader_override_path, identifier); 202 203 FILE *f = fopen(name, "r"); 204 205 if (!f) { 206 ralloc_free(name); 207 return false; 208 } 209 210 struct ir3_kernel_info info; 211 info.numwg = INVALID_REG; 212 v->ir = ir3_parse(v, &info, f); 213 214 fclose(f); 215 216 if (!v->ir) { 217 fprintf(stderr, "Failed to parse %s\n", name); 218 exit(1); 219 } 220 221 v->bin = ir3_shader_assemble(v); 222 if (!v->bin) { 223 fprintf(stderr, "Failed to assemble %s\n", name); 224 exit(1); 225 } 226 227 ralloc_free(name); 228 return true; 229} 230 231static void 232assemble_variant(struct ir3_shader_variant *v) 233{ 234 v->bin = ir3_shader_assemble(v); 235 236 bool dbg_enabled = shader_debug_enabled(v->type); 237 if (dbg_enabled || ir3_shader_override_path || v->disasm_info.write_disasm) { 238 unsigned char sha1[21]; 239 char sha1buf[41]; 240 241 _mesa_sha1_compute(v->bin, v->info.size, sha1); 242 _mesa_sha1_format(sha1buf, sha1); 243 244 bool shader_overridden = 245 ir3_shader_override_path && try_override_shader_variant(v, sha1buf); 246 247 if (v->disasm_info.write_disasm) { 248 char *stream_data = NULL; 249 size_t stream_size = 0; 250 FILE *stream = open_memstream(&stream_data, &stream_size); 251 252 fprintf(stream, 253 "Native code%s for unnamed %s shader %s with sha1 %s:\n", 254 shader_overridden ? " (overridden)" : "", ir3_shader_stage(v), 255 v->name, sha1buf); 256 ir3_shader_disasm(v, v->bin, stream); 257 258 fclose(stream); 259 260 v->disasm_info.disasm = ralloc_size(v, stream_size + 1); 261 memcpy(v->disasm_info.disasm, stream_data, stream_size); 262 v->disasm_info.disasm[stream_size] = 0; 263 free(stream_data); 264 } 265 266 if (dbg_enabled || shader_overridden) { 267 char *stream_data = NULL; 268 size_t stream_size = 0; 269 FILE *stream = open_memstream(&stream_data, &stream_size); 270 271 fprintf(stream, 272 "Native code%s for unnamed %s shader %s with sha1 %s:\n", 273 shader_overridden ? " (overridden)" : "", ir3_shader_stage(v), 274 v->name, sha1buf); 275 if (v->type == MESA_SHADER_FRAGMENT) 276 fprintf(stream, "SIMD0\n"); 277 ir3_shader_disasm(v, v->bin, stream); 278 fclose(stream); 279 280 mesa_log_multiline(MESA_LOG_INFO, stream_data); 281 free(stream_data); 282 } 283 } 284 285 /* no need to keep the ir around beyond this point: */ 286 ir3_destroy(v->ir); 287 v->ir = NULL; 288} 289 290static bool 291compile_variant(struct ir3_shader *shader, struct ir3_shader_variant *v) 292{ 293 int ret = ir3_compile_shader_nir(shader->compiler, shader, v); 294 if (ret) { 295 mesa_loge("compile failed! (%s:%s)", shader->nir->info.name, 296 shader->nir->info.label); 297 return false; 298 } 299 300 assemble_variant(v); 301 if (!v->bin) { 302 mesa_loge("assemble failed! (%s:%s)", shader->nir->info.name, 303 shader->nir->info.label); 304 return false; 305 } 306 307 return true; 308} 309 310/* 311 * For creating normal shader variants, 'nonbinning' is NULL. For 312 * creating binning pass shader, it is link to corresponding normal 313 * (non-binning) variant. 314 */ 315static struct ir3_shader_variant * 316alloc_variant(struct ir3_shader *shader, const struct ir3_shader_key *key, 317 struct ir3_shader_variant *nonbinning, void *mem_ctx) 318{ 319 /* hang the binning variant off it's non-binning counterpart instead 320 * of the shader, to simplify the error cleanup paths 321 */ 322 if (nonbinning) 323 mem_ctx = nonbinning; 324 struct ir3_shader_variant *v = rzalloc_size(mem_ctx, sizeof(*v)); 325 326 if (!v) 327 return NULL; 328 329 v->id = ++shader->variant_count; 330 v->shader_id = shader->id; 331 v->binning_pass = !!nonbinning; 332 v->nonbinning = nonbinning; 333 v->key = *key; 334 v->type = shader->type; 335 v->compiler = shader->compiler; 336 v->mergedregs = shader->compiler->gen >= 6; 337 v->stream_output = shader->stream_output; 338 339 v->name = ralloc_strdup(v, shader->nir->info.name); 340 341 struct shader_info *info = &shader->nir->info; 342 switch (v->type) { 343 case MESA_SHADER_TESS_CTRL: 344 case MESA_SHADER_TESS_EVAL: 345 v->tess.primitive_mode = info->tess._primitive_mode; 346 v->tess.tcs_vertices_out = info->tess.tcs_vertices_out; 347 v->tess.spacing = info->tess.spacing; 348 v->tess.ccw = info->tess.ccw; 349 v->tess.point_mode = info->tess.point_mode; 350 break; 351 352 case MESA_SHADER_GEOMETRY: 353 v->gs.output_primitive = info->gs.output_primitive; 354 v->gs.vertices_out = info->gs.vertices_out; 355 v->gs.invocations = info->gs.invocations; 356 v->gs.vertices_in = info->gs.vertices_in; 357 break; 358 359 case MESA_SHADER_FRAGMENT: 360 v->fs.early_fragment_tests = info->fs.early_fragment_tests; 361 v->fs.color_is_dual_source = info->fs.color_is_dual_source; 362 break; 363 364 case MESA_SHADER_COMPUTE: 365 case MESA_SHADER_KERNEL: 366 v->cs.req_input_mem = shader->cs.req_input_mem; 367 v->cs.req_local_mem = shader->cs.req_local_mem; 368 break; 369 370 default: 371 break; 372 } 373 374 v->num_ssbos = info->num_ssbos; 375 v->num_ibos = info->num_ssbos + info->num_images; 376 v->num_reserved_user_consts = shader->num_reserved_user_consts; 377 v->api_wavesize = shader->api_wavesize; 378 v->real_wavesize = shader->real_wavesize; 379 380 if (!v->binning_pass) { 381 v->const_state = rzalloc_size(v, sizeof(*v->const_state)); 382 v->const_state->shared_consts_enable = shader->shared_consts_enable; 383 } 384 385 return v; 386} 387 388static bool 389needs_binning_variant(struct ir3_shader_variant *v) 390{ 391 if ((v->type == MESA_SHADER_VERTEX) && ir3_has_binning_vs(&v->key)) 392 return true; 393 return false; 394} 395 396static struct ir3_shader_variant * 397create_variant(struct ir3_shader *shader, const struct ir3_shader_key *key, 398 bool write_disasm, void *mem_ctx) 399{ 400 struct ir3_shader_variant *v = alloc_variant(shader, key, NULL, mem_ctx); 401 402 if (!v) 403 goto fail; 404 405 v->disasm_info.write_disasm = write_disasm; 406 407 if (needs_binning_variant(v)) { 408 v->binning = alloc_variant(shader, key, v, mem_ctx); 409 if (!v->binning) 410 goto fail; 411 v->binning->disasm_info.write_disasm = write_disasm; 412 } 413 414 if (ir3_disk_cache_retrieve(shader, v)) 415 return v; 416 417 if (!shader->nir_finalized) { 418 ir3_nir_post_finalize(shader); 419 420 if (ir3_shader_debug & IR3_DBG_DISASM) { 421 mesa_logi("dump nir%d: type=%d", shader->id, shader->type); 422 nir_log_shaderi(shader->nir); 423 } 424 425 if (v->disasm_info.write_disasm) { 426 v->disasm_info.nir = nir_shader_as_str(shader->nir, v); 427 } 428 429 shader->nir_finalized = true; 430 } 431 432 if (!compile_variant(shader, v)) 433 goto fail; 434 435 if (needs_binning_variant(v) && !compile_variant(shader, v->binning)) 436 goto fail; 437 438 ir3_disk_cache_store(shader, v); 439 440 return v; 441 442fail: 443 ralloc_free(v); 444 return NULL; 445} 446 447struct ir3_shader_variant * 448ir3_shader_create_variant(struct ir3_shader *shader, 449 const struct ir3_shader_key *key, 450 bool keep_ir) 451{ 452 return create_variant(shader, key, keep_ir, NULL); 453} 454 455static inline struct ir3_shader_variant * 456shader_variant(struct ir3_shader *shader, const struct ir3_shader_key *key) 457{ 458 struct ir3_shader_variant *v; 459 460 for (v = shader->variants; v; v = v->next) 461 if (ir3_shader_key_equal(key, &v->key)) 462 return v; 463 464 return NULL; 465} 466 467struct ir3_shader_variant * 468ir3_shader_get_variant(struct ir3_shader *shader, 469 const struct ir3_shader_key *key, bool binning_pass, 470 bool write_disasm, bool *created) 471{ 472 mtx_lock(&shader->variants_lock); 473 struct ir3_shader_variant *v = shader_variant(shader, key); 474 475 if (!v) { 476 /* compile new variant if it doesn't exist already: */ 477 v = create_variant(shader, key, write_disasm, shader); 478 if (v) { 479 v->next = shader->variants; 480 shader->variants = v; 481 *created = true; 482 } 483 } 484 485 if (v && binning_pass) { 486 v = v->binning; 487 assert(v); 488 } 489 490 mtx_unlock(&shader->variants_lock); 491 492 return v; 493} 494 495void 496ir3_shader_destroy(struct ir3_shader *shader) 497{ 498 ralloc_free(shader->nir); 499 mtx_destroy(&shader->variants_lock); 500 ralloc_free(shader); 501} 502 503/** 504 * Creates a bitmask of the used bits of the shader key by this particular 505 * shader. Used by the gallium driver to skip state-dependent recompiles when 506 * possible. 507 */ 508static void 509ir3_setup_used_key(struct ir3_shader *shader) 510{ 511 nir_shader *nir = shader->nir; 512 struct shader_info *info = &nir->info; 513 struct ir3_shader_key *key = &shader->key_mask; 514 515 /* This key flag is just used to make for a cheaper ir3_shader_key_equal 516 * check in the common case. 517 */ 518 key->has_per_samp = true; 519 520 key->safe_constlen = true; 521 522 /* When clip/cull distances are natively supported, we only use 523 * ucp_enables to determine whether to lower legacy clip planes to 524 * gl_ClipDistance. 525 */ 526 if (info->stage != MESA_SHADER_COMPUTE && (info->stage != MESA_SHADER_FRAGMENT || !shader->compiler->has_clip_cull)) 527 key->ucp_enables = 0xff; 528 529 if (info->stage == MESA_SHADER_FRAGMENT) { 530 key->fastc_srgb = ~0; 531 key->fsamples = ~0; 532 memset(key->fsampler_swizzles, 0xff, sizeof(key->fsampler_swizzles)); 533 534 if (info->inputs_read & VARYING_BITS_COLOR) { 535 key->rasterflat = true; 536 } 537 538 if (info->inputs_read & VARYING_BIT_LAYER) { 539 key->layer_zero = true; 540 } 541 542 if (info->inputs_read & VARYING_BIT_VIEWPORT) { 543 key->view_zero = true; 544 } 545 546 /* Only used for deciding on behavior of 547 * nir_intrinsic_load_barycentric_sample, or the centroid demotion 548 * on older HW. 549 */ 550 key->msaa = info->fs.uses_sample_qualifier || 551 (shader->compiler->gen < 6 && 552 (BITSET_TEST(info->system_values_read, 553 SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID) || 554 BITSET_TEST(info->system_values_read, 555 SYSTEM_VALUE_BARYCENTRIC_LINEAR_CENTROID))); 556 } else if (info->stage == MESA_SHADER_COMPUTE) { 557 key->fastc_srgb = ~0; 558 key->fsamples = ~0; 559 memset(key->fsampler_swizzles, 0xff, sizeof(key->fsampler_swizzles)); 560 } else { 561 key->tessellation = ~0; 562 key->has_gs = true; 563 564 if (info->stage == MESA_SHADER_VERTEX) { 565 key->vastc_srgb = ~0; 566 key->vsamples = ~0; 567 memset(key->vsampler_swizzles, 0xff, sizeof(key->vsampler_swizzles)); 568 } 569 570 if (info->stage == MESA_SHADER_TESS_CTRL) 571 key->tcs_store_primid = true; 572 } 573} 574 575/* Given an array of constlen's, decrease some of them so that the sum stays 576 * within "combined_limit" while trying to fairly share the reduction. Returns 577 * a bitfield of which stages should be trimmed. 578 */ 579static uint32_t 580trim_constlens(unsigned *constlens, unsigned first_stage, unsigned last_stage, 581 unsigned combined_limit, unsigned safe_limit) 582{ 583 unsigned cur_total = 0; 584 for (unsigned i = first_stage; i <= last_stage; i++) { 585 cur_total += constlens[i]; 586 } 587 588 unsigned max_stage = 0; 589 unsigned max_const = 0; 590 uint32_t trimmed = 0; 591 592 while (cur_total > combined_limit) { 593 for (unsigned i = first_stage; i <= last_stage; i++) { 594 if (constlens[i] >= max_const) { 595 max_stage = i; 596 max_const = constlens[i]; 597 } 598 } 599 600 assert(max_const > safe_limit); 601 trimmed |= 1 << max_stage; 602 cur_total = cur_total - max_const + safe_limit; 603 constlens[max_stage] = safe_limit; 604 } 605 606 return trimmed; 607} 608 609/* Figures out which stages in the pipeline to use the "safe" constlen for, in 610 * order to satisfy all shared constlen limits. 611 */ 612uint32_t 613ir3_trim_constlen(struct ir3_shader_variant **variants, 614 const struct ir3_compiler *compiler) 615{ 616 unsigned constlens[MESA_SHADER_STAGES] = {}; 617 618 for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) { 619 if (variants[i]) 620 constlens[i] = variants[i]->constlen; 621 } 622 623 uint32_t trimmed = 0; 624 STATIC_ASSERT(MESA_SHADER_STAGES <= 8 * sizeof(trimmed)); 625 626 bool shared_consts_enable = 627 ir3_const_state(variants[MESA_SHADER_VERTEX])->shared_consts_enable; 628 629 /* Use a hw quirk for geometry shared consts, not matched with actual 630 * shared consts size (on a6xx). 631 */ 632 uint32_t shared_consts_size_geom = shared_consts_enable ? 633 compiler->geom_shared_consts_size_quirk : 0; 634 635 uint32_t shared_consts_size = shared_consts_enable ? 636 compiler->shared_consts_size : 0; 637 638 uint32_t safe_shared_consts_size = shared_consts_enable ? 639 ALIGN_POT(MAX2(DIV_ROUND_UP(shared_consts_size_geom, 4), 640 DIV_ROUND_UP(shared_consts_size, 5)), 4) : 0; 641 642 /* There are two shared limits to take into account, the geometry limit on 643 * a6xx and the total limit. The frag limit on a6xx only matters for a 644 * single stage, so it's always satisfied with the first variant. 645 */ 646 if (compiler->gen >= 6) { 647 trimmed |= 648 trim_constlens(constlens, MESA_SHADER_VERTEX, MESA_SHADER_GEOMETRY, 649 compiler->max_const_geom - shared_consts_size_geom, 650 compiler->max_const_safe - safe_shared_consts_size); 651 } 652 trimmed |= 653 trim_constlens(constlens, MESA_SHADER_VERTEX, MESA_SHADER_FRAGMENT, 654 compiler->max_const_pipeline - shared_consts_size, 655 compiler->max_const_safe - safe_shared_consts_size); 656 657 return trimmed; 658} 659 660struct ir3_shader * 661ir3_shader_from_nir(struct ir3_compiler *compiler, nir_shader *nir, 662 const struct ir3_shader_options *options, 663 struct ir3_stream_output_info *stream_output) 664{ 665 struct ir3_shader *shader = rzalloc_size(NULL, sizeof(*shader)); 666 667 mtx_init(&shader->variants_lock, mtx_plain); 668 shader->compiler = compiler; 669 shader->id = p_atomic_inc_return(&shader->compiler->shader_count); 670 shader->type = nir->info.stage; 671 if (stream_output) 672 memcpy(&shader->stream_output, stream_output, 673 sizeof(shader->stream_output)); 674 shader->num_reserved_user_consts = options->reserved_user_consts; 675 shader->api_wavesize = options->api_wavesize; 676 shader->real_wavesize = options->real_wavesize; 677 shader->shared_consts_enable = options->shared_consts_enable; 678 shader->nir = nir; 679 680 ir3_disk_cache_init_shader_key(compiler, shader); 681 682 ir3_setup_used_key(shader); 683 684 return shader; 685} 686 687static void 688dump_reg(FILE *out, const char *name, uint32_t r) 689{ 690 if (r != regid(63, 0)) { 691 const char *reg_type = (r & HALF_REG_ID) ? "hr" : "r"; 692 fprintf(out, "; %s: %s%d.%c\n", name, reg_type, (r & ~HALF_REG_ID) >> 2, 693 "xyzw"[r & 0x3]); 694 } 695} 696 697static void 698dump_output(FILE *out, struct ir3_shader_variant *so, unsigned slot, 699 const char *name) 700{ 701 uint32_t regid; 702 regid = ir3_find_output_regid(so, slot); 703 dump_reg(out, name, regid); 704} 705 706static const char * 707input_name(struct ir3_shader_variant *so, int i) 708{ 709 if (so->inputs[i].sysval) { 710 return gl_system_value_name(so->inputs[i].slot); 711 } else if (so->type == MESA_SHADER_VERTEX) { 712 return gl_vert_attrib_name(so->inputs[i].slot); 713 } else { 714 return gl_varying_slot_name_for_stage(so->inputs[i].slot, so->type); 715 } 716} 717 718static const char * 719output_name(struct ir3_shader_variant *so, int i) 720{ 721 if (so->type == MESA_SHADER_FRAGMENT) { 722 return gl_frag_result_name(so->outputs[i].slot); 723 } else { 724 switch (so->outputs[i].slot) { 725 case VARYING_SLOT_GS_HEADER_IR3: 726 return "GS_HEADER"; 727 case VARYING_SLOT_GS_VERTEX_FLAGS_IR3: 728 return "GS_VERTEX_FLAGS"; 729 case VARYING_SLOT_TCS_HEADER_IR3: 730 return "TCS_HEADER"; 731 default: 732 return gl_varying_slot_name_for_stage(so->outputs[i].slot, so->type); 733 } 734 } 735} 736 737static void 738dump_const_state(struct ir3_shader_variant *so, FILE *out) 739{ 740 const struct ir3_const_state *cs = ir3_const_state(so); 741 const struct ir3_ubo_analysis_state *us = &cs->ubo_state; 742 743 fprintf(out, "; num_ubos: %u\n", cs->num_ubos); 744 fprintf(out, "; num_driver_params: %u\n", cs->num_driver_params); 745 fprintf(out, "; offsets:\n"); 746 if (cs->offsets.ubo != ~0) 747 fprintf(out, "; ubo: c%u.x\n", cs->offsets.ubo); 748 if (cs->offsets.image_dims != ~0) 749 fprintf(out, "; image_dims: c%u.x\n", cs->offsets.image_dims); 750 if (cs->offsets.kernel_params != ~0) 751 fprintf(out, "; kernel_params: c%u.x\n", cs->offsets.kernel_params); 752 if (cs->offsets.driver_param != ~0) 753 fprintf(out, "; driver_param: c%u.x\n", cs->offsets.driver_param); 754 if (cs->offsets.tfbo != ~0) 755 fprintf(out, "; tfbo: c%u.x\n", cs->offsets.tfbo); 756 if (cs->offsets.primitive_param != ~0) 757 fprintf(out, "; primitive_params: c%u.x\n", cs->offsets.primitive_param); 758 if (cs->offsets.primitive_map != ~0) 759 fprintf(out, "; primitive_map: c%u.x\n", cs->offsets.primitive_map); 760 fprintf(out, "; ubo_state:\n"); 761 fprintf(out, "; num_enabled: %u\n", us->num_enabled); 762 for (unsigned i = 0; i < us->num_enabled; i++) { 763 const struct ir3_ubo_range *r = &us->range[i]; 764 765 assert((r->offset % 16) == 0); 766 767 fprintf(out, "; range[%u]:\n", i); 768 fprintf(out, "; block: %u\n", r->ubo.block); 769 if (r->ubo.bindless) 770 fprintf(out, "; bindless_base: %u\n", r->ubo.bindless_base); 771 fprintf(out, "; offset: c%u.x\n", r->offset/16); 772 773 unsigned size = r->end - r->start; 774 assert((size % 16) == 0); 775 776 fprintf(out, "; size: %u vec4 (%ub -> %ub)\n", (size/16), r->start, r->end); 777 } 778} 779 780void 781ir3_shader_disasm(struct ir3_shader_variant *so, uint32_t *bin, FILE *out) 782{ 783 struct ir3 *ir = so->ir; 784 struct ir3_register *reg; 785 const char *type = ir3_shader_stage(so); 786 uint8_t regid; 787 unsigned i; 788 789 dump_const_state(so, out); 790 791 foreach_input_n (instr, i, ir) { 792 reg = instr->dsts[0]; 793 regid = reg->num; 794 fprintf(out, "@in(%sr%d.%c)\tin%d", 795 (reg->flags & IR3_REG_HALF) ? "h" : "", (regid >> 2), 796 "xyzw"[regid & 0x3], i); 797 798 if (reg->wrmask > 0x1) 799 fprintf(out, " (wrmask=0x%x)", reg->wrmask); 800 fprintf(out, "\n"); 801 } 802 803 /* print pre-dispatch texture fetches: */ 804 for (i = 0; i < so->num_sampler_prefetch; i++) { 805 const struct ir3_sampler_prefetch *fetch = &so->sampler_prefetch[i]; 806 fprintf(out, 807 "@tex(%sr%d.%c)\tsrc=%u, samp=%u, tex=%u, wrmask=0x%x, cmd=%u\n", 808 fetch->half_precision ? "h" : "", fetch->dst >> 2, 809 "xyzw"[fetch->dst & 0x3], fetch -> src, fetch -> samp_id, 810 fetch -> tex_id, fetch -> wrmask, fetch -> cmd); 811 } 812 813 const struct ir3_const_state *const_state = ir3_const_state(so); 814 for (i = 0; i < DIV_ROUND_UP(const_state->immediates_count, 4); i++) { 815 fprintf(out, "@const(c%d.x)\t", const_state->offsets.immediate + i); 816 fprintf(out, "0x%08x, 0x%08x, 0x%08x, 0x%08x\n", 817 const_state->immediates[i * 4 + 0], 818 const_state->immediates[i * 4 + 1], 819 const_state->immediates[i * 4 + 2], 820 const_state->immediates[i * 4 + 3]); 821 } 822 823 isa_decode(bin, so->info.sizedwords * 4, out, 824 &(struct isa_decode_options){ 825 .gpu_id = fd_dev_gpu_id(ir->compiler->dev_id), 826 .show_errors = true, 827 .branch_labels = true, 828 }); 829 830 fprintf(out, "; %s: outputs:", type); 831 for (i = 0; i < so->outputs_count; i++) { 832 uint8_t regid = so->outputs[i].regid; 833 const char *reg_type = so->outputs[i].half ? "hr" : "r"; 834 fprintf(out, " %s%d.%c (%s)", reg_type, (regid >> 2), "xyzw"[regid & 0x3], 835 output_name(so, i)); 836 } 837 fprintf(out, "\n"); 838 839 fprintf(out, "; %s: inputs:", type); 840 for (i = 0; i < so->inputs_count; i++) { 841 uint8_t regid = so->inputs[i].regid; 842 fprintf(out, " r%d.%c (%s slot=%d cm=%x,il=%u,b=%u)", (regid >> 2), 843 "xyzw"[regid & 0x3], input_name(so, i), so -> inputs[i].slot, 844 so->inputs[i].compmask, so->inputs[i].inloc, so->inputs[i].bary); 845 } 846 fprintf(out, "\n"); 847 848 /* print generic shader info: */ 849 fprintf( 850 out, 851 "; %s prog %d/%d: %u instr, %u nops, %u non-nops, %u mov, %u cov, %u dwords\n", 852 type, so->shader_id, so->id, so->info.instrs_count, so->info.nops_count, 853 so->info.instrs_count - so->info.nops_count, so->info.mov_count, 854 so->info.cov_count, so->info.sizedwords); 855 856 fprintf(out, 857 "; %s prog %d/%d: %u last-baryf, %d half, %d full, %u constlen\n", 858 type, so->shader_id, so->id, so->info.last_baryf, 859 so->info.max_half_reg + 1, so->info.max_reg + 1, so->constlen); 860 861 fprintf( 862 out, 863 "; %s prog %d/%d: %u cat0, %u cat1, %u cat2, %u cat3, %u cat4, %u cat5, %u cat6, %u cat7, \n", 864 type, so->shader_id, so->id, so->info.instrs_per_cat[0], 865 so->info.instrs_per_cat[1], so->info.instrs_per_cat[2], 866 so->info.instrs_per_cat[3], so->info.instrs_per_cat[4], 867 so->info.instrs_per_cat[5], so->info.instrs_per_cat[6], 868 so->info.instrs_per_cat[7]); 869 870 fprintf( 871 out, 872 "; %s prog %d/%d: %u sstall, %u (ss), %u systall, %u (sy), %d loops\n", 873 type, so->shader_id, so->id, so->info.sstall, so->info.ss, 874 so->info.systall, so->info.sy, so->loops); 875 876 /* print shader type specific info: */ 877 switch (so->type) { 878 case MESA_SHADER_VERTEX: 879 dump_output(out, so, VARYING_SLOT_POS, "pos"); 880 dump_output(out, so, VARYING_SLOT_PSIZ, "psize"); 881 break; 882 case MESA_SHADER_FRAGMENT: 883 dump_reg(out, "pos (ij_pixel)", 884 ir3_find_sysval_regid(so, SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL)); 885 dump_reg( 886 out, "pos (ij_centroid)", 887 ir3_find_sysval_regid(so, SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID)); 888 dump_reg(out, "pos (center_rhw)", 889 ir3_find_sysval_regid(so, SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTER_RHW)); 890 dump_output(out, so, FRAG_RESULT_DEPTH, "posz"); 891 if (so->color0_mrt) { 892 dump_output(out, so, FRAG_RESULT_COLOR, "color"); 893 } else { 894 dump_output(out, so, FRAG_RESULT_DATA0, "data0"); 895 dump_output(out, so, FRAG_RESULT_DATA1, "data1"); 896 dump_output(out, so, FRAG_RESULT_DATA2, "data2"); 897 dump_output(out, so, FRAG_RESULT_DATA3, "data3"); 898 dump_output(out, so, FRAG_RESULT_DATA4, "data4"); 899 dump_output(out, so, FRAG_RESULT_DATA5, "data5"); 900 dump_output(out, so, FRAG_RESULT_DATA6, "data6"); 901 dump_output(out, so, FRAG_RESULT_DATA7, "data7"); 902 } 903 dump_reg(out, "fragcoord", 904 ir3_find_sysval_regid(so, SYSTEM_VALUE_FRAG_COORD)); 905 dump_reg(out, "fragface", 906 ir3_find_sysval_regid(so, SYSTEM_VALUE_FRONT_FACE)); 907 break; 908 default: 909 /* TODO */ 910 break; 911 } 912 913 fprintf(out, "\n"); 914} 915 916uint64_t 917ir3_shader_outputs(const struct ir3_shader *so) 918{ 919 return so->nir->info.outputs_written; 920} 921 922/* Add any missing varyings needed for stream-out. Otherwise varyings not 923 * used by fragment shader will be stripped out. 924 */ 925void 926ir3_link_stream_out(struct ir3_shader_linkage *l, 927 const struct ir3_shader_variant *v) 928{ 929 const struct ir3_stream_output_info *strmout = &v->stream_output; 930 931 /* 932 * First, any stream-out varyings not already in linkage map (ie. also 933 * consumed by frag shader) need to be added: 934 */ 935 for (unsigned i = 0; i < strmout->num_outputs; i++) { 936 const struct ir3_stream_output *out = &strmout->output[i]; 937 unsigned k = out->register_index; 938 unsigned compmask = 939 (1 << (out->num_components + out->start_component)) - 1; 940 unsigned idx, nextloc = 0; 941 942 /* psize/pos need to be the last entries in linkage map, and will 943 * get added link_stream_out, so skip over them: 944 */ 945 if ((v->outputs[k].slot == VARYING_SLOT_PSIZ) || 946 (v->outputs[k].slot == VARYING_SLOT_POS)) 947 continue; 948 949 for (idx = 0; idx < l->cnt; idx++) { 950 if (l->var[idx].slot == v->outputs[k].slot) 951 break; 952 nextloc = MAX2(nextloc, l->var[idx].loc + 4); 953 } 954 955 /* add if not already in linkage map: */ 956 if (idx == l->cnt) { 957 ir3_link_add(l, v->outputs[k].slot, v->outputs[k].regid, 958 compmask, nextloc); 959 } 960 961 /* expand component-mask if needed, ie streaming out all components 962 * but frag shader doesn't consume all components: 963 */ 964 if (compmask & ~l->var[idx].compmask) { 965 l->var[idx].compmask |= compmask; 966 l->max_loc = MAX2( 967 l->max_loc, l->var[idx].loc + util_last_bit(l->var[idx].compmask)); 968 } 969 } 970} 971