1/* 2 * Copyright © 2015 Red Hat 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 */ 23 24#include "nir.h" 25#include "nir_control_flow.h" 26#include "nir_xfb_info.h" 27 28/* Secret Decoder Ring: 29 * clone_foo(): 30 * Allocate and clone a foo. 31 * __clone_foo(): 32 * Clone body of foo (ie. parent class, embedded struct, etc) 33 */ 34 35typedef struct { 36 /* True if we are cloning an entire shader. */ 37 bool global_clone; 38 39 /* If true allows the clone operation to fall back to the original pointer 40 * if no clone pointer is found in the remap table. This allows us to 41 * clone a loop body without having to add srcs from outside the loop to 42 * the remap table. This is useful for loop unrolling. 43 */ 44 bool allow_remap_fallback; 45 46 /* maps orig ptr -> cloned ptr: */ 47 struct hash_table *remap_table; 48 49 /* List of phi sources. */ 50 struct list_head phi_srcs; 51 52 /* new shader object, used as memctx for just about everything else: */ 53 nir_shader *ns; 54} clone_state; 55 56static void 57init_clone_state(clone_state *state, struct hash_table *remap_table, 58 bool global, bool allow_remap_fallback) 59{ 60 state->global_clone = global; 61 state->allow_remap_fallback = allow_remap_fallback; 62 63 if (remap_table) { 64 state->remap_table = remap_table; 65 } else { 66 state->remap_table = _mesa_pointer_hash_table_create(NULL); 67 } 68 69 list_inithead(&state->phi_srcs); 70} 71 72static void 73free_clone_state(clone_state *state) 74{ 75 _mesa_hash_table_destroy(state->remap_table, NULL); 76} 77 78static inline void * 79_lookup_ptr(clone_state *state, const void *ptr, bool global) 80{ 81 struct hash_entry *entry; 82 83 if (!ptr) 84 return NULL; 85 86 if (!state->global_clone && global) 87 return (void *)ptr; 88 89 if (unlikely(!state->remap_table)) { 90 assert(state->allow_remap_fallback); 91 return (void *)ptr; 92 } 93 94 entry = _mesa_hash_table_search(state->remap_table, ptr); 95 if (!entry) { 96 assert(state->allow_remap_fallback); 97 return (void *)ptr; 98 } 99 100 return entry->data; 101} 102 103static void 104add_remap(clone_state *state, void *nptr, const void *ptr) 105{ 106 _mesa_hash_table_insert(state->remap_table, ptr, nptr); 107} 108 109static void * 110remap_local(clone_state *state, const void *ptr) 111{ 112 return _lookup_ptr(state, ptr, false); 113} 114 115static void * 116remap_global(clone_state *state, const void *ptr) 117{ 118 return _lookup_ptr(state, ptr, true); 119} 120 121static nir_register * 122remap_reg(clone_state *state, const nir_register *reg) 123{ 124 return _lookup_ptr(state, reg, false); 125} 126 127static nir_variable * 128remap_var(clone_state *state, const nir_variable *var) 129{ 130 return _lookup_ptr(state, var, nir_variable_is_global(var)); 131} 132 133nir_constant * 134nir_constant_clone(const nir_constant *c, nir_variable *nvar) 135{ 136 nir_constant *nc = ralloc(nvar, nir_constant); 137 138 memcpy(nc->values, c->values, sizeof(nc->values)); 139 nc->num_elements = c->num_elements; 140 nc->elements = ralloc_array(nvar, nir_constant *, c->num_elements); 141 for (unsigned i = 0; i < c->num_elements; i++) { 142 nc->elements[i] = nir_constant_clone(c->elements[i], nvar); 143 } 144 145 return nc; 146} 147 148/* NOTE: for cloning nir_variables, bypass nir_variable_create to avoid 149 * having to deal with locals and globals separately: 150 */ 151nir_variable * 152nir_variable_clone(const nir_variable *var, nir_shader *shader) 153{ 154 nir_variable *nvar = rzalloc(shader, nir_variable); 155 156 nvar->type = var->type; 157 nvar->name = ralloc_strdup(nvar, var->name); 158 nvar->data = var->data; 159 nvar->num_state_slots = var->num_state_slots; 160 if (var->num_state_slots) { 161 nvar->state_slots = ralloc_array(nvar, nir_state_slot, var->num_state_slots); 162 memcpy(nvar->state_slots, var->state_slots, 163 var->num_state_slots * sizeof(nir_state_slot)); 164 } 165 if (var->constant_initializer) { 166 nvar->constant_initializer = 167 nir_constant_clone(var->constant_initializer, nvar); 168 } 169 nvar->interface_type = var->interface_type; 170 171 nvar->num_members = var->num_members; 172 if (var->num_members) { 173 nvar->members = ralloc_array(nvar, struct nir_variable_data, 174 var->num_members); 175 memcpy(nvar->members, var->members, 176 var->num_members * sizeof(*var->members)); 177 } 178 179 return nvar; 180} 181 182static nir_variable * 183clone_variable(clone_state *state, const nir_variable *var) 184{ 185 nir_variable *nvar = nir_variable_clone(var, state->ns); 186 add_remap(state, nvar, var); 187 188 return nvar; 189} 190 191/* clone list of nir_variable: */ 192static void 193clone_var_list(clone_state *state, struct exec_list *dst, 194 const struct exec_list *list) 195{ 196 exec_list_make_empty(dst); 197 foreach_list_typed(nir_variable, var, node, list) { 198 nir_variable *nvar = clone_variable(state, var); 199 exec_list_push_tail(dst, &nvar->node); 200 } 201} 202 203/* NOTE: for cloning nir_registers, bypass nir_global/local_reg_create() 204 * to avoid having to deal with locals and globals separately: 205 */ 206static nir_register * 207clone_register(clone_state *state, const nir_register *reg) 208{ 209 nir_register *nreg = rzalloc(state->ns, nir_register); 210 add_remap(state, nreg, reg); 211 212 nreg->num_components = reg->num_components; 213 nreg->bit_size = reg->bit_size; 214 nreg->num_array_elems = reg->num_array_elems; 215 nreg->index = reg->index; 216 217 /* reconstructing uses/defs/if_uses handled by nir_instr_insert() */ 218 list_inithead(&nreg->uses); 219 list_inithead(&nreg->defs); 220 list_inithead(&nreg->if_uses); 221 222 return nreg; 223} 224 225/* clone list of nir_register: */ 226static void 227clone_reg_list(clone_state *state, struct exec_list *dst, 228 const struct exec_list *list) 229{ 230 exec_list_make_empty(dst); 231 foreach_list_typed(nir_register, reg, node, list) { 232 nir_register *nreg = clone_register(state, reg); 233 exec_list_push_tail(dst, &nreg->node); 234 } 235} 236 237static void 238__clone_src(clone_state *state, void *ninstr_or_if, 239 nir_src *nsrc, const nir_src *src) 240{ 241 nsrc->is_ssa = src->is_ssa; 242 if (src->is_ssa) { 243 nsrc->ssa = remap_local(state, src->ssa); 244 } else { 245 nsrc->reg.reg = remap_reg(state, src->reg.reg); 246 if (src->reg.indirect) { 247 nsrc->reg.indirect = malloc(sizeof(nir_src)); 248 __clone_src(state, ninstr_or_if, nsrc->reg.indirect, src->reg.indirect); 249 } 250 nsrc->reg.base_offset = src->reg.base_offset; 251 } 252} 253 254static void 255__clone_dst(clone_state *state, nir_instr *ninstr, 256 nir_dest *ndst, const nir_dest *dst) 257{ 258 ndst->is_ssa = dst->is_ssa; 259 if (dst->is_ssa) { 260 nir_ssa_dest_init(ninstr, ndst, dst->ssa.num_components, 261 dst->ssa.bit_size, NULL); 262 if (likely(state->remap_table)) 263 add_remap(state, &ndst->ssa, &dst->ssa); 264 } else { 265 ndst->reg.reg = remap_reg(state, dst->reg.reg); 266 if (dst->reg.indirect) { 267 ndst->reg.indirect = malloc(sizeof(nir_src)); 268 __clone_src(state, ninstr, ndst->reg.indirect, dst->reg.indirect); 269 } 270 ndst->reg.base_offset = dst->reg.base_offset; 271 } 272} 273 274static nir_alu_instr * 275clone_alu(clone_state *state, const nir_alu_instr *alu) 276{ 277 nir_alu_instr *nalu = nir_alu_instr_create(state->ns, alu->op); 278 nalu->exact = alu->exact; 279 nalu->no_signed_wrap = alu->no_signed_wrap; 280 nalu->no_unsigned_wrap = alu->no_unsigned_wrap; 281 282 __clone_dst(state, &nalu->instr, &nalu->dest.dest, &alu->dest.dest); 283 nalu->dest.saturate = alu->dest.saturate; 284 nalu->dest.write_mask = alu->dest.write_mask; 285 286 for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) { 287 __clone_src(state, &nalu->instr, &nalu->src[i].src, &alu->src[i].src); 288 nalu->src[i].negate = alu->src[i].negate; 289 nalu->src[i].abs = alu->src[i].abs; 290 memcpy(nalu->src[i].swizzle, alu->src[i].swizzle, 291 sizeof(nalu->src[i].swizzle)); 292 } 293 294 return nalu; 295} 296 297nir_alu_instr * 298nir_alu_instr_clone(nir_shader *shader, const nir_alu_instr *orig) 299{ 300 clone_state state = { 301 .allow_remap_fallback = true, 302 .ns = shader, 303 }; 304 return clone_alu(&state, orig); 305} 306 307static nir_deref_instr * 308clone_deref_instr(clone_state *state, const nir_deref_instr *deref) 309{ 310 nir_deref_instr *nderef = 311 nir_deref_instr_create(state->ns, deref->deref_type); 312 313 __clone_dst(state, &nderef->instr, &nderef->dest, &deref->dest); 314 315 nderef->modes = deref->modes; 316 nderef->type = deref->type; 317 318 if (deref->deref_type == nir_deref_type_var) { 319 nderef->var = remap_var(state, deref->var); 320 return nderef; 321 } 322 323 __clone_src(state, &nderef->instr, &nderef->parent, &deref->parent); 324 325 switch (deref->deref_type) { 326 case nir_deref_type_struct: 327 nderef->strct.index = deref->strct.index; 328 break; 329 330 case nir_deref_type_array: 331 case nir_deref_type_ptr_as_array: 332 __clone_src(state, &nderef->instr, 333 &nderef->arr.index, &deref->arr.index); 334 nderef->arr.in_bounds = deref->arr.in_bounds; 335 break; 336 337 case nir_deref_type_array_wildcard: 338 /* Nothing to do */ 339 break; 340 341 case nir_deref_type_cast: 342 nderef->cast.ptr_stride = deref->cast.ptr_stride; 343 nderef->cast.align_mul = deref->cast.align_mul; 344 nderef->cast.align_offset = deref->cast.align_offset; 345 break; 346 347 default: 348 unreachable("Invalid instruction deref type"); 349 } 350 351 return nderef; 352} 353 354static nir_intrinsic_instr * 355clone_intrinsic(clone_state *state, const nir_intrinsic_instr *itr) 356{ 357 nir_intrinsic_instr *nitr = 358 nir_intrinsic_instr_create(state->ns, itr->intrinsic); 359 360 unsigned num_srcs = nir_intrinsic_infos[itr->intrinsic].num_srcs; 361 362 if (nir_intrinsic_infos[itr->intrinsic].has_dest) 363 __clone_dst(state, &nitr->instr, &nitr->dest, &itr->dest); 364 365 nitr->num_components = itr->num_components; 366 memcpy(nitr->const_index, itr->const_index, sizeof(nitr->const_index)); 367 368 for (unsigned i = 0; i < num_srcs; i++) 369 __clone_src(state, &nitr->instr, &nitr->src[i], &itr->src[i]); 370 371 return nitr; 372} 373 374static nir_load_const_instr * 375clone_load_const(clone_state *state, const nir_load_const_instr *lc) 376{ 377 nir_load_const_instr *nlc = 378 nir_load_const_instr_create(state->ns, lc->def.num_components, 379 lc->def.bit_size); 380 381 memcpy(&nlc->value, &lc->value, sizeof(*nlc->value) * lc->def.num_components); 382 383 add_remap(state, &nlc->def, &lc->def); 384 385 return nlc; 386} 387 388static nir_ssa_undef_instr * 389clone_ssa_undef(clone_state *state, const nir_ssa_undef_instr *sa) 390{ 391 nir_ssa_undef_instr *nsa = 392 nir_ssa_undef_instr_create(state->ns, sa->def.num_components, 393 sa->def.bit_size); 394 395 add_remap(state, &nsa->def, &sa->def); 396 397 return nsa; 398} 399 400static nir_tex_instr * 401clone_tex(clone_state *state, const nir_tex_instr *tex) 402{ 403 nir_tex_instr *ntex = nir_tex_instr_create(state->ns, tex->num_srcs); 404 405 ntex->sampler_dim = tex->sampler_dim; 406 ntex->dest_type = tex->dest_type; 407 ntex->op = tex->op; 408 __clone_dst(state, &ntex->instr, &ntex->dest, &tex->dest); 409 for (unsigned i = 0; i < ntex->num_srcs; i++) { 410 ntex->src[i].src_type = tex->src[i].src_type; 411 __clone_src(state, &ntex->instr, &ntex->src[i].src, &tex->src[i].src); 412 } 413 ntex->coord_components = tex->coord_components; 414 ntex->is_array = tex->is_array; 415 ntex->array_is_lowered_cube = tex->array_is_lowered_cube; 416 ntex->is_shadow = tex->is_shadow; 417 ntex->is_new_style_shadow = tex->is_new_style_shadow; 418 ntex->is_sparse = tex->is_sparse; 419 ntex->component = tex->component; 420 memcpy(ntex->tg4_offsets, tex->tg4_offsets, sizeof(tex->tg4_offsets)); 421 422 ntex->texture_index = tex->texture_index; 423 ntex->sampler_index = tex->sampler_index; 424 425 ntex->texture_non_uniform = tex->texture_non_uniform; 426 ntex->sampler_non_uniform = tex->sampler_non_uniform; 427 428 return ntex; 429} 430 431static nir_phi_instr * 432clone_phi(clone_state *state, const nir_phi_instr *phi, nir_block *nblk) 433{ 434 nir_phi_instr *nphi = nir_phi_instr_create(state->ns); 435 436 __clone_dst(state, &nphi->instr, &nphi->dest, &phi->dest); 437 438 /* Cloning a phi node is a bit different from other instructions. The 439 * sources of phi instructions are the only time where we can use an SSA 440 * def before it is defined. In order to handle this, we just copy over 441 * the sources from the old phi instruction directly and then fix them up 442 * in a second pass once all the instrutions in the function have been 443 * properly cloned. 444 * 445 * In order to ensure that the copied sources (which are the same as the 446 * old phi instruction's sources for now) don't get inserted into the old 447 * shader's use-def lists, we have to add the phi instruction *before* we 448 * set up its sources. 449 */ 450 nir_instr_insert_after_block(nblk, &nphi->instr); 451 452 nir_foreach_phi_src(src, phi) { 453 nir_phi_src *nsrc = nir_phi_instr_add_src(nphi, src->pred, src->src); 454 455 /* Stash it in the list of phi sources. We'll walk this list and fix up 456 * sources at the very end of clone_function_impl. 457 */ 458 list_add(&nsrc->src.use_link, &state->phi_srcs); 459 } 460 461 return nphi; 462} 463 464static nir_jump_instr * 465clone_jump(clone_state *state, const nir_jump_instr *jmp) 466{ 467 /* These aren't handled because they require special block linking */ 468 assert(jmp->type != nir_jump_goto && jmp->type != nir_jump_goto_if); 469 470 nir_jump_instr *njmp = nir_jump_instr_create(state->ns, jmp->type); 471 472 return njmp; 473} 474 475static nir_call_instr * 476clone_call(clone_state *state, const nir_call_instr *call) 477{ 478 nir_function *ncallee = remap_global(state, call->callee); 479 nir_call_instr *ncall = nir_call_instr_create(state->ns, ncallee); 480 481 for (unsigned i = 0; i < ncall->num_params; i++) 482 __clone_src(state, ncall, &ncall->params[i], &call->params[i]); 483 484 return ncall; 485} 486 487static nir_instr * 488clone_instr(clone_state *state, const nir_instr *instr) 489{ 490 switch (instr->type) { 491 case nir_instr_type_alu: 492 return &clone_alu(state, nir_instr_as_alu(instr))->instr; 493 case nir_instr_type_deref: 494 return &clone_deref_instr(state, nir_instr_as_deref(instr))->instr; 495 case nir_instr_type_intrinsic: 496 return &clone_intrinsic(state, nir_instr_as_intrinsic(instr))->instr; 497 case nir_instr_type_load_const: 498 return &clone_load_const(state, nir_instr_as_load_const(instr))->instr; 499 case nir_instr_type_ssa_undef: 500 return &clone_ssa_undef(state, nir_instr_as_ssa_undef(instr))->instr; 501 case nir_instr_type_tex: 502 return &clone_tex(state, nir_instr_as_tex(instr))->instr; 503 case nir_instr_type_phi: 504 unreachable("Cannot clone phis with clone_instr"); 505 case nir_instr_type_jump: 506 return &clone_jump(state, nir_instr_as_jump(instr))->instr; 507 case nir_instr_type_call: 508 return &clone_call(state, nir_instr_as_call(instr))->instr; 509 case nir_instr_type_parallel_copy: 510 unreachable("Cannot clone parallel copies"); 511 default: 512 unreachable("bad instr type"); 513 return NULL; 514 } 515} 516 517nir_instr * 518nir_instr_clone(nir_shader *shader, const nir_instr *orig) 519{ 520 clone_state state = { 521 .allow_remap_fallback = true, 522 .ns = shader, 523 }; 524 return clone_instr(&state, orig); 525} 526 527nir_instr * 528nir_instr_clone_deep(nir_shader *shader, const nir_instr *orig, 529 struct hash_table *remap_table) 530{ 531 clone_state state = { 532 .allow_remap_fallback = true, 533 .ns = shader, 534 .remap_table = remap_table, 535 }; 536 return clone_instr(&state, orig); 537} 538 539static nir_block * 540clone_block(clone_state *state, struct exec_list *cf_list, const nir_block *blk) 541{ 542 /* Don't actually create a new block. Just use the one from the tail of 543 * the list. NIR guarantees that the tail of the list is a block and that 544 * no two blocks are side-by-side in the IR; It should be empty. 545 */ 546 nir_block *nblk = 547 exec_node_data(nir_block, exec_list_get_tail(cf_list), cf_node.node); 548 assert(nblk->cf_node.type == nir_cf_node_block); 549 assert(exec_list_is_empty(&nblk->instr_list)); 550 551 /* We need this for phi sources */ 552 add_remap(state, nblk, blk); 553 554 nir_foreach_instr(instr, blk) { 555 if (instr->type == nir_instr_type_phi) { 556 /* Phi instructions are a bit of a special case when cloning because 557 * we don't want inserting the instruction to automatically handle 558 * use/defs for us. Instead, we need to wait until all the 559 * blocks/instructions are in so that we can set their sources up. 560 */ 561 clone_phi(state, nir_instr_as_phi(instr), nblk); 562 } else { 563 nir_instr *ninstr = clone_instr(state, instr); 564 nir_instr_insert_after_block(nblk, ninstr); 565 } 566 } 567 568 return nblk; 569} 570 571static void 572clone_cf_list(clone_state *state, struct exec_list *dst, 573 const struct exec_list *list); 574 575static nir_if * 576clone_if(clone_state *state, struct exec_list *cf_list, const nir_if *i) 577{ 578 nir_if *ni = nir_if_create(state->ns); 579 ni->control = i->control; 580 581 __clone_src(state, ni, &ni->condition, &i->condition); 582 583 nir_cf_node_insert_end(cf_list, &ni->cf_node); 584 585 clone_cf_list(state, &ni->then_list, &i->then_list); 586 clone_cf_list(state, &ni->else_list, &i->else_list); 587 588 return ni; 589} 590 591static nir_loop * 592clone_loop(clone_state *state, struct exec_list *cf_list, const nir_loop *loop) 593{ 594 nir_loop *nloop = nir_loop_create(state->ns); 595 nloop->control = loop->control; 596 nloop->partially_unrolled = loop->partially_unrolled; 597 598 nir_cf_node_insert_end(cf_list, &nloop->cf_node); 599 600 clone_cf_list(state, &nloop->body, &loop->body); 601 602 return nloop; 603} 604 605/* clone list of nir_cf_node: */ 606static void 607clone_cf_list(clone_state *state, struct exec_list *dst, 608 const struct exec_list *list) 609{ 610 foreach_list_typed(nir_cf_node, cf, node, list) { 611 switch (cf->type) { 612 case nir_cf_node_block: 613 clone_block(state, dst, nir_cf_node_as_block(cf)); 614 break; 615 case nir_cf_node_if: 616 clone_if(state, dst, nir_cf_node_as_if(cf)); 617 break; 618 case nir_cf_node_loop: 619 clone_loop(state, dst, nir_cf_node_as_loop(cf)); 620 break; 621 default: 622 unreachable("bad cf type"); 623 } 624 } 625} 626 627/* After we've cloned almost everything, we have to walk the list of phi 628 * sources and fix them up. Thanks to loops, the block and SSA value for a 629 * phi source may not be defined when we first encounter it. Instead, we 630 * add it to the phi_srcs list and we fix it up here. 631 */ 632static void 633fixup_phi_srcs(clone_state *state) 634{ 635 list_for_each_entry_safe(nir_phi_src, src, &state->phi_srcs, src.use_link) { 636 src->pred = remap_local(state, src->pred); 637 638 /* Remove from this list */ 639 list_del(&src->src.use_link); 640 641 if (src->src.is_ssa) { 642 src->src.ssa = remap_local(state, src->src.ssa); 643 list_addtail(&src->src.use_link, &src->src.ssa->uses); 644 } else { 645 src->src.reg.reg = remap_reg(state, src->src.reg.reg); 646 list_addtail(&src->src.use_link, &src->src.reg.reg->uses); 647 } 648 } 649 assert(list_is_empty(&state->phi_srcs)); 650} 651 652void 653nir_cf_list_clone(nir_cf_list *dst, nir_cf_list *src, nir_cf_node *parent, 654 struct hash_table *remap_table) 655{ 656 exec_list_make_empty(&dst->list); 657 dst->impl = src->impl; 658 659 if (exec_list_is_empty(&src->list)) 660 return; 661 662 clone_state state; 663 init_clone_state(&state, remap_table, false, true); 664 665 /* We use the same shader */ 666 state.ns = src->impl->function->shader; 667 668 /* The control-flow code assumes that the list of cf_nodes always starts 669 * and ends with a block. We start by adding an empty block. 670 */ 671 nir_block *nblk = nir_block_create(state.ns); 672 nblk->cf_node.parent = parent; 673 exec_list_push_tail(&dst->list, &nblk->cf_node.node); 674 675 clone_cf_list(&state, &dst->list, &src->list); 676 677 fixup_phi_srcs(&state); 678 679 if (!remap_table) 680 free_clone_state(&state); 681} 682 683static nir_function_impl * 684clone_function_impl(clone_state *state, const nir_function_impl *fi) 685{ 686 nir_function_impl *nfi = nir_function_impl_create_bare(state->ns); 687 688 if (fi->preamble) 689 nfi->preamble = remap_global(state, fi->preamble); 690 691 clone_var_list(state, &nfi->locals, &fi->locals); 692 clone_reg_list(state, &nfi->registers, &fi->registers); 693 nfi->reg_alloc = fi->reg_alloc; 694 695 assert(list_is_empty(&state->phi_srcs)); 696 697 clone_cf_list(state, &nfi->body, &fi->body); 698 699 fixup_phi_srcs(state); 700 701 /* All metadata is invalidated in the cloning process */ 702 nfi->valid_metadata = 0; 703 704 return nfi; 705} 706 707nir_function_impl * 708nir_function_impl_clone(nir_shader *shader, const nir_function_impl *fi) 709{ 710 clone_state state; 711 init_clone_state(&state, NULL, false, false); 712 713 state.ns = shader; 714 715 nir_function_impl *nfi = clone_function_impl(&state, fi); 716 717 free_clone_state(&state); 718 719 return nfi; 720} 721 722static nir_function * 723clone_function(clone_state *state, const nir_function *fxn, nir_shader *ns) 724{ 725 assert(ns == state->ns); 726 nir_function *nfxn = nir_function_create(ns, fxn->name); 727 728 /* Needed for call instructions */ 729 add_remap(state, nfxn, fxn); 730 731 nfxn->num_params = fxn->num_params; 732 if (fxn->num_params) { 733 nfxn->params = ralloc_array(state->ns, nir_parameter, fxn->num_params); 734 memcpy(nfxn->params, fxn->params, sizeof(nir_parameter) * fxn->num_params); 735 } 736 nfxn->is_entrypoint = fxn->is_entrypoint; 737 nfxn->is_preamble = fxn->is_preamble; 738 739 /* At first glance, it looks like we should clone the function_impl here. 740 * However, call instructions need to be able to reference at least the 741 * function and those will get processed as we clone the function_impls. 742 * We stop here and do function_impls as a second pass. 743 */ 744 745 return nfxn; 746} 747 748nir_shader * 749nir_shader_clone(void *mem_ctx, const nir_shader *s) 750{ 751 clone_state state; 752 init_clone_state(&state, NULL, true, false); 753 754 nir_shader *ns = nir_shader_create(mem_ctx, s->info.stage, s->options, NULL); 755 state.ns = ns; 756 757 clone_var_list(&state, &ns->variables, &s->variables); 758 759 /* Go through and clone functions */ 760 foreach_list_typed(nir_function, fxn, node, &s->functions) 761 clone_function(&state, fxn, ns); 762 763 /* Only after all functions are cloned can we clone the actual function 764 * implementations. This is because nir_call_instrs and preambles need to 765 * reference the functions of other functions and we don't know what order 766 * the functions will have in the list. 767 */ 768 nir_foreach_function(fxn, s) { 769 nir_function *nfxn = remap_global(&state, fxn); 770 nfxn->impl = clone_function_impl(&state, fxn->impl); 771 nfxn->impl->function = nfxn; 772 } 773 774 ns->info = s->info; 775 ns->info.name = ralloc_strdup(ns, ns->info.name); 776 if (ns->info.label) 777 ns->info.label = ralloc_strdup(ns, ns->info.label); 778 779 ns->num_inputs = s->num_inputs; 780 ns->num_uniforms = s->num_uniforms; 781 ns->num_outputs = s->num_outputs; 782 ns->scratch_size = s->scratch_size; 783 784 ns->constant_data_size = s->constant_data_size; 785 if (s->constant_data_size > 0) { 786 ns->constant_data = ralloc_size(ns, s->constant_data_size); 787 memcpy(ns->constant_data, s->constant_data, s->constant_data_size); 788 } 789 790 if (s->xfb_info) { 791 size_t size = nir_xfb_info_size(s->xfb_info->output_count); 792 ns->xfb_info = ralloc_size(ns, size); 793 memcpy(ns->xfb_info, s->xfb_info, size); 794 } 795 796 free_clone_state(&state); 797 798 return ns; 799} 800 801/** Overwrites dst and replaces its contents with src 802 * 803 * Everything ralloc parented to dst and src itself (but not its children) 804 * will be freed. 805 * 806 * This should only be used by test code which needs to swap out shaders with 807 * a cloned or deserialized version. 808 */ 809void 810nir_shader_replace(nir_shader *dst, nir_shader *src) 811{ 812 /* Delete all of dest's ralloc children */ 813 void *dead_ctx = ralloc_context(NULL); 814 ralloc_adopt(dead_ctx, dst); 815 ralloc_free(dead_ctx); 816 817 list_for_each_entry_safe(nir_instr, instr, &dst->gc_list, gc_node) { 818 nir_instr_free(instr); 819 } 820 821 /* Re-parent all of src's ralloc children to dst */ 822 ralloc_adopt(dst, src); 823 824 memcpy(dst, src, sizeof(*dst)); 825 826 /* We have to move all the linked lists over separately because we need the 827 * pointers in the list elements to point to the lists in dst and not src. 828 */ 829 list_replace(&src->gc_list, &dst->gc_list); 830 list_inithead(&src->gc_list); 831 exec_list_move_nodes_to(&src->variables, &dst->variables); 832 833 /* Now move the functions over. This takes a tiny bit more work */ 834 exec_list_move_nodes_to(&src->functions, &dst->functions); 835 nir_foreach_function(function, dst) 836 function->shader = dst; 837 838 ralloc_free(src); 839} 840