/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "aco_instruction_selection.h"

#include "common/ac_nir.h"
#include "common/sid.h"
#include "vulkan/radv_descriptor_set.h"

#include "nir_control_flow.h"

#include <vector>

namespace aco {

namespace {

bool
is_loop_header_block(nir_block* block)
{
   return block->cf_node.parent->type == nir_cf_node_loop &&
          block == nir_loop_first_block(nir_cf_node_as_loop(block->cf_node.parent));
}

/* similar to nir_block_is_unreachable(), but does not require dominance information */
bool
is_block_reachable(nir_function_impl* impl, nir_block* known_reachable, nir_block* block)
{
   if (block == nir_start_block(impl) || block == known_reachable)
      return true;

   /* skip loop back-edges */
   if (is_loop_header_block(block)) {
      nir_loop* loop = nir_cf_node_as_loop(block->cf_node.parent);
      nir_block* preheader = nir_block_cf_tree_prev(nir_loop_first_block(loop));
      return is_block_reachable(impl, known_reachable, preheader);
   }

   set_foreach (block->predecessors, entry) {
      if (is_block_reachable(impl, known_reachable, (nir_block*)entry->key))
         return true;
   }

   return false;
}

/* Check whether the given SSA def is only used by cross-lane instructions. */
bool
only_used_by_cross_lane_instrs(nir_ssa_def* ssa, bool follow_phis = true)
{
   nir_foreach_use (src, ssa) {
      switch (src->parent_instr->type) {
      case nir_instr_type_alu: {
         nir_alu_instr* alu = nir_instr_as_alu(src->parent_instr);
         if (alu->op != nir_op_unpack_64_2x32_split_x && alu->op != nir_op_unpack_64_2x32_split_y)
            return false;
         if (!only_used_by_cross_lane_instrs(&alu->dest.dest.ssa, follow_phis))
            return false;

         continue;
      }
      case nir_instr_type_intrinsic: {
         nir_intrinsic_instr* intrin = nir_instr_as_intrinsic(src->parent_instr);
         if (intrin->intrinsic != nir_intrinsic_read_invocation &&
             intrin->intrinsic != nir_intrinsic_read_first_invocation &&
             intrin->intrinsic != nir_intrinsic_lane_permute_16_amd)
            return false;

         continue;
      }
      case nir_instr_type_phi: {
         /* Don't follow more than one phi; this avoids infinite loops. */
         if (!follow_phis)
            return false;

         nir_phi_instr* phi = nir_instr_as_phi(src->parent_instr);
         if (!only_used_by_cross_lane_instrs(&phi->dest.ssa, false))
            return false;

         continue;
      }
      default: return false;
      }
   }

   return true;
}

/* If one side of a divergent IF ends in a branch and the other doesn't, we
 * might have to emit the contents of the side without the branch at the merge
 * block instead. This is so that any SGPR that is live-out of the branch-less
 * side can be used without creating a linear phi in the invert or merge block. */
bool
sanitize_if(nir_function_impl* impl, nir_if* nif)
{
   // TODO: skip this if the condition is uniform and there are no divergent breaks/continues?

   nir_block* then_block = nir_if_last_then_block(nif);
   nir_block* else_block = nir_if_last_else_block(nif);
   bool then_jump = nir_block_ends_in_jump(then_block) ||
                    !is_block_reachable(impl, nir_if_first_then_block(nif), then_block);
   bool else_jump = nir_block_ends_in_jump(else_block) ||
                    !is_block_reachable(impl, nir_if_first_else_block(nif), else_block);
   if (then_jump == else_jump)
      return false;

   /* If the continue-from block is empty, return early: there is nothing to
    * move.
    */
   if (nir_cf_list_is_empty_block(else_jump ? &nif->then_list : &nif->else_list))
      return false;

   /* Even though this if statement has a jump on one side, we may still have
    * phis afterwards.  Single-source phis can be produced by loop unrolling
    * or dead control-flow passes and are perfectly legal.  Run a quick phi
    * removal on the block after the if to clean up any such phis.
    */
   nir_opt_remove_phis_block(nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node)));

   /* Finally, move the continue-from branch after the if-statement. */
   nir_block* last_continue_from_blk = else_jump ? then_block : else_block;
   nir_block* first_continue_from_blk =
      else_jump ? nir_if_first_then_block(nif) : nir_if_first_else_block(nif);

   nir_cf_list tmp;
   nir_cf_extract(&tmp, nir_before_block(first_continue_from_blk),
                  nir_after_block(last_continue_from_blk));
   nir_cf_reinsert(&tmp, nir_after_cf_node(&nif->cf_node));

   return true;
}

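/* Recursively sanitize every if-statement in the given CF list, innermost
 * first, so that outer if-statements see already-sanitized bodies. */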
bool
sanitize_cf_list(nir_function_impl* impl, struct exec_list* cf_list)
{
   bool progress = false;
   foreach_list_typed (nir_cf_node, cf_node, node, cf_list) {
      switch (cf_node->type) {
      case nir_cf_node_block: break;
      case nir_cf_node_if: {
         nir_if* nif = nir_cf_node_as_if(cf_node);
         progress |= sanitize_cf_list(impl, &nif->then_list);
         progress |= sanitize_cf_list(impl, &nif->else_list);
         progress |= sanitize_if(impl, nif);
         break;
      }
      case nir_cf_node_loop: {
         nir_loop* loop = nir_cf_node_as_loop(cf_node);
         progress |= sanitize_cf_list(impl, &loop->body);
         break;
      }
      case nir_cf_node_function: unreachable("Invalid cf type");
      }
   }

   return progress;
}

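/* If the given offset is an iadd whose result provably cannot wrap around
 * 32 bits (according to NIR's unsigned range analysis), mark it
 * no_unsigned_wrap. Instruction selection can then assume the addition does
 * not overflow when turning it into addressing calculations. */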
void
apply_nuw_to_ssa(isel_context* ctx, nir_ssa_def* ssa)
{
   nir_ssa_scalar scalar;
   scalar.def = ssa;
   scalar.comp = 0;

   if (!nir_ssa_scalar_is_alu(scalar) || nir_ssa_scalar_alu_op(scalar) != nir_op_iadd)
      return;

   nir_alu_instr* add = nir_instr_as_alu(ssa->parent_instr);

   if (add->no_unsigned_wrap)
      return;

   nir_ssa_scalar src0 = nir_ssa_scalar_chase_alu_src(scalar, 0);
   nir_ssa_scalar src1 = nir_ssa_scalar_chase_alu_src(scalar, 1);

   if (nir_ssa_scalar_is_const(src0)) {
      nir_ssa_scalar tmp = src0;
      src0 = src1;
      src1 = tmp;
   }

   uint32_t src1_ub = nir_unsigned_upper_bound(ctx->shader, ctx->range_ht, src1, &ctx->ub_config);
   add->no_unsigned_wrap =
      !nir_addition_might_overflow(ctx->shader, ctx->range_ht, src0, src1_ub, &ctx->ub_config);
}

void
apply_nuw_to_offsets(isel_context* ctx, nir_function_impl* impl)
{
   nir_foreach_block (block, impl) {
      nir_foreach_instr (instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;
         nir_intrinsic_instr* intrin = nir_instr_as_intrinsic(instr);

         switch (intrin->intrinsic) {
         case nir_intrinsic_load_constant:
         case nir_intrinsic_load_uniform:
         case nir_intrinsic_load_push_constant:
            if (!nir_src_is_divergent(intrin->src[0]))
               apply_nuw_to_ssa(ctx, intrin->src[0].ssa);
            break;
         case nir_intrinsic_load_ubo:
         case nir_intrinsic_load_ssbo:
            if (!nir_src_is_divergent(intrin->src[1]))
               apply_nuw_to_ssa(ctx, intrin->src[1].ssa);
            break;
         case nir_intrinsic_store_ssbo:
            if (!nir_src_is_divergent(intrin->src[2]))
               apply_nuw_to_ssa(ctx, intrin->src[2].ssa);
            break;
         default: break;
         }
      }
   }
}

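/* Booleans are represented as lane masks kept in SGPRs (their dword count
 * follows the wave size), so they get a dedicated register class; everything
 * else is sized from its component count and bit width. */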
RegClass
get_reg_class(isel_context* ctx, RegType type, unsigned components, unsigned bitsize)
{
   if (bitsize == 1)
      return RegClass(RegType::sgpr, ctx->program->lane_mask.size() * components);
   else
      return RegClass::get(type, components * bitsize / 8u);
}

void
setup_vs_output_info(isel_context* ctx, nir_shader* nir,
                     const aco_vp_output_info* outinfo)
{
   ctx->export_clip_dists = outinfo->export_clip_dists;
   ctx->num_clip_distances = util_bitcount(outinfo->clip_dist_mask);
   ctx->num_cull_distances = util_bitcount(outinfo->cull_dist_mask);

   assert(ctx->num_clip_distances + ctx->num_cull_distances <= 8);

   /* GFX10+ early rasterization:
    * When there are no param exports in an NGG (or legacy VS) shader,
    * RADV sets NO_PC_EXPORT=1, which means the HW will start clipping and rasterization
    * as soon as it encounters a DONE pos export. When this happens, PS waves can launch
    * before the NGG (or VS) waves finish.
    */
   ctx->program->early_rast = ctx->program->gfx_level >= GFX10 && outinfo->param_exports == 0;
}

void
setup_vs_variables(isel_context* ctx, nir_shader* nir)
{
   if (ctx->stage == vertex_vs || ctx->stage == vertex_ngg) {
      setup_vs_output_info(ctx, nir, &ctx->program->info.vs.outinfo);

      /* TODO: NGG streamout */
      if (ctx->stage.hw == HWStage::NGG)
         assert(!ctx->program->info.so.num_outputs);
   }

   if (ctx->stage == vertex_ngg) {
      ctx->program->config->lds_size =
         DIV_ROUND_UP(nir->info.shared_size, ctx->program->dev.lds_encoding_granule);
      assert((ctx->program->config->lds_size * ctx->program->dev.lds_encoding_granule) <
             (32 * 1024));
   }
}

void
setup_gs_variables(isel_context* ctx, nir_shader* nir)
{
   if (ctx->stage == vertex_geometry_gs || ctx->stage == tess_eval_geometry_gs) {
      ctx->program->config->lds_size =
         ctx->program->info.gfx9_gs_ring_lds_size; /* Already in units of the alloc granularity */
   } else if (ctx->stage == vertex_geometry_ngg || ctx->stage == tess_eval_geometry_ngg) {
      setup_vs_output_info(ctx, nir, &ctx->program->info.vs.outinfo);

      ctx->program->config->lds_size =
         DIV_ROUND_UP(nir->info.shared_size, ctx->program->dev.lds_encoding_granule);
   }
}

void
setup_tcs_info(isel_context* ctx, nir_shader* nir, nir_shader* vs)
{
   ctx->tcs_in_out_eq = ctx->program->info.vs.tcs_in_out_eq;
   ctx->tcs_temp_only_inputs = ctx->program->info.vs.tcs_temp_only_input_mask;
   ctx->tcs_num_patches = ctx->program->info.num_tess_patches;
   ctx->program->config->lds_size = ctx->program->info.tcs.num_lds_blocks;
}

void
setup_tes_variables(isel_context* ctx, nir_shader* nir)
{
   ctx->tcs_num_patches = ctx->program->info.num_tess_patches;

   if (ctx->stage == tess_eval_vs || ctx->stage == tess_eval_ngg) {
      setup_vs_output_info(ctx, nir, &ctx->program->info.tes.outinfo);

      /* TODO: NGG streamout */
      if (ctx->stage.hw == HWStage::NGG)
         assert(!ctx->program->info.so.num_outputs);
   }

   if (ctx->stage == tess_eval_ngg) {
      ctx->program->config->lds_size =
         DIV_ROUND_UP(nir->info.shared_size, ctx->program->dev.lds_encoding_granule);
      assert((ctx->program->config->lds_size * ctx->program->dev.lds_encoding_granule) <
             (32 * 1024));
   }
}

void
setup_ms_variables(isel_context* ctx, nir_shader* nir)
{
   setup_vs_output_info(ctx, nir, &ctx->program->info.ms.outinfo);

   ctx->program->config->lds_size =
      DIV_ROUND_UP(nir->info.shared_size, ctx->program->dev.lds_encoding_granule);
   assert((ctx->program->config->lds_size * ctx->program->dev.lds_encoding_granule) < (32 * 1024));
}

void
setup_variables(isel_context* ctx, nir_shader* nir)
{
   switch (nir->info.stage) {
   case MESA_SHADER_FRAGMENT: {
      break;
   }
   case MESA_SHADER_COMPUTE:
   case MESA_SHADER_TASK: {
      ctx->program->config->lds_size =
         DIV_ROUND_UP(nir->info.shared_size, ctx->program->dev.lds_encoding_granule);
      break;
   }
   case MESA_SHADER_VERTEX: {
      setup_vs_variables(ctx, nir);
      break;
   }
   case MESA_SHADER_GEOMETRY: {
      setup_gs_variables(ctx, nir);
      break;
   }
   case MESA_SHADER_TESS_CTRL: {
      break;
   }
   case MESA_SHADER_TESS_EVAL: {
      setup_tes_variables(ctx, nir);
      break;
   }
   case MESA_SHADER_MESH: {
      setup_ms_variables(ctx, nir);
      break;
   }
   default: unreachable("Unhandled shader stage.");
   }

   /* Make sure we fit the available LDS space. */
   assert((ctx->program->config->lds_size * ctx->program->dev.lds_encoding_granule) <=
          ctx->program->dev.lds_limit);
}

void
setup_nir(isel_context* ctx, nir_shader* nir)
{
   /* the variable setup has to be done before lower_io / CSE */
   setup_variables(ctx, nir);

   nir_convert_to_lcssa(nir, true, false);
   nir_lower_phis_to_scalar(nir, true);

   nir_function_impl* func = nir_shader_get_entrypoint(nir);
   nir_index_ssa_defs(func);
}

} /* end namespace */

void
init_context(isel_context* ctx, nir_shader* shader)
{
   nir_function_impl* impl = nir_shader_get_entrypoint(shader);
   ctx->shader = shader;

   /* Init NIR range analysis. */
   ctx->range_ht = _mesa_pointer_hash_table_create(NULL);
   ctx->ub_config.min_subgroup_size = 64;
   ctx->ub_config.max_subgroup_size = 64;
   if (ctx->shader->info.stage == MESA_SHADER_COMPUTE && ctx->program->info.cs.subgroup_size) {
      ctx->ub_config.min_subgroup_size = ctx->program->info.cs.subgroup_size;
      ctx->ub_config.max_subgroup_size = ctx->program->info.cs.subgroup_size;
   }
   ctx->ub_config.max_workgroup_invocations = 2048;
   ctx->ub_config.max_workgroup_count[0] = 65535;
   ctx->ub_config.max_workgroup_count[1] = 65535;
   ctx->ub_config.max_workgroup_count[2] = 65535;
   ctx->ub_config.max_workgroup_size[0] = 2048;
   ctx->ub_config.max_workgroup_size[1] = 2048;
   ctx->ub_config.max_workgroup_size[2] = 2048;
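   /* Upper bounds for vertex attribute loads, used by the range analysis:
    * UNORM results are at most 1.0, UINT results are bounded by the largest
    * integer the data format can hold, and USCALED results by an upper bound
    * on the corresponding float. Other formats get no useful bound. */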
   for (unsigned i = 0; i < MAX_VERTEX_ATTRIBS; i++) {
      unsigned attrib_format = ctx->options->key.vs.vertex_attribute_formats[i];
      unsigned dfmt = attrib_format & 0xf;
      unsigned nfmt = (attrib_format >> 4) & 0x7;

      uint32_t max = UINT32_MAX;
      if (nfmt == V_008F0C_BUF_NUM_FORMAT_UNORM) {
         max = 0x3f800000u;
      } else if (nfmt == V_008F0C_BUF_NUM_FORMAT_UINT || nfmt == V_008F0C_BUF_NUM_FORMAT_USCALED) {
         bool uscaled = nfmt == V_008F0C_BUF_NUM_FORMAT_USCALED;
         switch (dfmt) {
         case V_008F0C_BUF_DATA_FORMAT_8:
         case V_008F0C_BUF_DATA_FORMAT_8_8:
         case V_008F0C_BUF_DATA_FORMAT_8_8_8_8: max = uscaled ? 0x437f0000u : UINT8_MAX; break;
         case V_008F0C_BUF_DATA_FORMAT_10_10_10_2:
         case V_008F0C_BUF_DATA_FORMAT_2_10_10_10: max = uscaled ? 0x447fc000u : 1023; break;
         case V_008F0C_BUF_DATA_FORMAT_10_11_11:
         case V_008F0C_BUF_DATA_FORMAT_11_11_10: max = uscaled ? 0x44ffe000u : 2047; break;
         case V_008F0C_BUF_DATA_FORMAT_16:
         case V_008F0C_BUF_DATA_FORMAT_16_16:
         case V_008F0C_BUF_DATA_FORMAT_16_16_16_16: max = uscaled ? 0x477fff00u : UINT16_MAX; break;
         case V_008F0C_BUF_DATA_FORMAT_32:
         case V_008F0C_BUF_DATA_FORMAT_32_32:
         case V_008F0C_BUF_DATA_FORMAT_32_32_32:
         case V_008F0C_BUF_DATA_FORMAT_32_32_32_32: max = uscaled ? 0x4f800000u : UINT32_MAX; break;
         }
      }
      ctx->ub_config.vertex_attrib_max[i] = max;
   }

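   /* nir_opt_uniform_atomics() depends on up-to-date divergence information,
    * so run divergence analysis first. */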
   nir_divergence_analysis(shader);
   nir_opt_uniform_atomics(shader);

   apply_nuw_to_offsets(ctx, impl);

   /* sanitize control flow */
   sanitize_cf_list(impl, &impl->body);
   nir_metadata_preserve(impl, nir_metadata_none);

   /* we'll need these for isel */
   nir_metadata_require(impl, nir_metadata_block_index);

   if (!ctx->stage.has(SWStage::GSCopy) && ctx->options->dump_preoptir) {
      fprintf(stderr, "NIR shader before instruction selection:\n");
      nir_print_shader(shader, stderr);
   }

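   /* Reserve one ACO temporary per NIR SSA def so that NIR SSA indices map
    * 1:1 onto a contiguous range of ACO temp ids starting at first_temp_id.
    * regclasses[] below is indexed by the NIR SSA index. */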
   ctx->first_temp_id = ctx->program->peekAllocationId();
   ctx->program->allocateRange(impl->ssa_alloc);
   RegClass* regclasses = ctx->program->temp_rc.data() + ctx->first_temp_id;

   std::unique_ptr<unsigned[]> nir_to_aco{new unsigned[impl->num_blocks]()};

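   /* Infer a register class (SGPR vs. VGPR and size) for every SSA def.
    * Phis can use values defined later via loop back-edges, so repeat the
    * walk until no phi's register class changes anymore. */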
   /* TODO: make this recursive to improve compile times */
   bool done = false;
   while (!done) {
      done = true;
      nir_foreach_block (block, impl) {
         nir_foreach_instr (instr, block) {
            switch (instr->type) {
            case nir_instr_type_alu: {
               nir_alu_instr* alu_instr = nir_instr_as_alu(instr);
               RegType type =
                  nir_dest_is_divergent(alu_instr->dest.dest) ? RegType::vgpr : RegType::sgpr;
               switch (alu_instr->op) {
               case nir_op_fmul:
               case nir_op_fmulz:
               case nir_op_fadd:
               case nir_op_fsub:
               case nir_op_ffma:
               case nir_op_ffmaz:
               case nir_op_fmax:
               case nir_op_fmin:
               case nir_op_fneg:
               case nir_op_fabs:
               case nir_op_fsat:
               case nir_op_fsign:
               case nir_op_frcp:
               case nir_op_frsq:
               case nir_op_fsqrt:
               case nir_op_fexp2:
               case nir_op_flog2:
               case nir_op_ffract:
               case nir_op_ffloor:
               case nir_op_fceil:
               case nir_op_ftrunc:
               case nir_op_fround_even:
               case nir_op_fsin_amd:
               case nir_op_fcos_amd:
               case nir_op_f2f16:
               case nir_op_f2f16_rtz:
               case nir_op_f2f16_rtne:
               case nir_op_f2f32:
               case nir_op_f2f64:
               case nir_op_u2f16:
               case nir_op_u2f32:
               case nir_op_u2f64:
               case nir_op_i2f16:
               case nir_op_i2f32:
               case nir_op_i2f64:
               case nir_op_pack_half_2x16_split:
               case nir_op_pack_unorm_2x16:
               case nir_op_pack_snorm_2x16:
               case nir_op_pack_uint_2x16:
               case nir_op_pack_sint_2x16:
               case nir_op_unpack_half_2x16_split_x:
               case nir_op_unpack_half_2x16_split_y:
               case nir_op_fddx:
               case nir_op_fddy:
               case nir_op_fddx_fine:
               case nir_op_fddy_fine:
               case nir_op_fddx_coarse:
               case nir_op_fddy_coarse:
               case nir_op_fquantize2f16:
               case nir_op_ldexp:
               case nir_op_frexp_sig:
               case nir_op_frexp_exp:
               case nir_op_cube_face_index_amd:
               case nir_op_cube_face_coord_amd:
               case nir_op_sad_u8x4:
               case nir_op_udot_4x8_uadd:
               case nir_op_sdot_4x8_iadd:
               case nir_op_udot_4x8_uadd_sat:
               case nir_op_sdot_4x8_iadd_sat:
               case nir_op_udot_2x16_uadd:
               case nir_op_sdot_2x16_iadd:
               case nir_op_udot_2x16_uadd_sat:
               case nir_op_sdot_2x16_iadd_sat: type = RegType::vgpr; break;
               case nir_op_f2i16:
               case nir_op_f2u16:
               case nir_op_f2i32:
               case nir_op_f2u32:
               case nir_op_f2i64:
               case nir_op_f2u64:
               case nir_op_b2i8:
               case nir_op_b2i16:
               case nir_op_b2i32:
               case nir_op_b2i64:
               case nir_op_b2b32:
               case nir_op_b2f16:
               case nir_op_b2f32:
               case nir_op_mov: break;
               case nir_op_iabs:
               case nir_op_iadd:
               case nir_op_iadd_sat:
               case nir_op_uadd_sat:
               case nir_op_isub:
               case nir_op_isub_sat:
               case nir_op_usub_sat:
               case nir_op_imul:
               case nir_op_imin:
               case nir_op_imax:
               case nir_op_umin:
               case nir_op_umax:
               case nir_op_ishl:
               case nir_op_ishr:
               case nir_op_ushr:
                  /* packed 16bit instructions have to be VGPR */
                  type = alu_instr->dest.dest.ssa.num_components == 2 ? RegType::vgpr : type;
                  FALLTHROUGH;
               default:
                  for (unsigned i = 0; i < nir_op_infos[alu_instr->op].num_inputs; i++) {
                     if (regclasses[alu_instr->src[i].src.ssa->index].type() == RegType::vgpr)
                        type = RegType::vgpr;
                  }
                  break;
               }

               RegClass rc = get_reg_class(ctx, type, alu_instr->dest.dest.ssa.num_components,
                                           alu_instr->dest.dest.ssa.bit_size);
               regclasses[alu_instr->dest.dest.ssa.index] = rc;
               break;
            }
            case nir_instr_type_load_const: {
               unsigned num_components = nir_instr_as_load_const(instr)->def.num_components;
               unsigned bit_size = nir_instr_as_load_const(instr)->def.bit_size;
               RegClass rc = get_reg_class(ctx, RegType::sgpr, num_components, bit_size);
               regclasses[nir_instr_as_load_const(instr)->def.index] = rc;
               break;
            }
            case nir_instr_type_intrinsic: {
               nir_intrinsic_instr* intrinsic = nir_instr_as_intrinsic(instr);
               if (!nir_intrinsic_infos[intrinsic->intrinsic].has_dest)
                  break;
               RegType type = RegType::sgpr;
               switch (intrinsic->intrinsic) {
               case nir_intrinsic_load_push_constant:
               case nir_intrinsic_load_workgroup_id:
               case nir_intrinsic_load_num_workgroups:
               case nir_intrinsic_load_ray_launch_size_addr_amd:
               case nir_intrinsic_load_sbt_base_amd:
               case nir_intrinsic_load_subgroup_id:
               case nir_intrinsic_load_num_subgroups:
               case nir_intrinsic_load_first_vertex:
               case nir_intrinsic_load_base_instance:
               case nir_intrinsic_vote_all:
               case nir_intrinsic_vote_any:
               case nir_intrinsic_read_first_invocation:
               case nir_intrinsic_read_invocation:
               case nir_intrinsic_first_invocation:
               case nir_intrinsic_ballot:
               case nir_intrinsic_bindless_image_samples:
               case nir_intrinsic_has_input_vertex_amd:
               case nir_intrinsic_has_input_primitive_amd:
               case nir_intrinsic_load_force_vrs_rates_amd:
               case nir_intrinsic_load_scalar_arg_amd:
               case nir_intrinsic_load_smem_amd: type = RegType::sgpr; break;
               case nir_intrinsic_load_sample_id:
               case nir_intrinsic_load_input:
               case nir_intrinsic_load_output:
               case nir_intrinsic_load_input_vertex:
               case nir_intrinsic_load_per_vertex_input:
               case nir_intrinsic_load_per_vertex_output:
               case nir_intrinsic_load_vertex_id_zero_base:
               case nir_intrinsic_load_barycentric_sample:
               case nir_intrinsic_load_barycentric_pixel:
               case nir_intrinsic_load_barycentric_model:
               case nir_intrinsic_load_barycentric_centroid:
               case nir_intrinsic_load_barycentric_at_sample:
               case nir_intrinsic_load_barycentric_at_offset:
               case nir_intrinsic_load_interpolated_input:
               case nir_intrinsic_load_frag_coord:
               case nir_intrinsic_load_frag_shading_rate:
               case nir_intrinsic_load_sample_pos:
               case nir_intrinsic_load_local_invocation_id:
               case nir_intrinsic_load_local_invocation_index:
               case nir_intrinsic_load_subgroup_invocation:
               case nir_intrinsic_load_tess_coord:
               case nir_intrinsic_write_invocation_amd:
               case nir_intrinsic_mbcnt_amd:
               case nir_intrinsic_byte_permute_amd:
               case nir_intrinsic_lane_permute_16_amd:
               case nir_intrinsic_load_instance_id:
               case nir_intrinsic_ssbo_atomic_add:
               case nir_intrinsic_ssbo_atomic_imin:
               case nir_intrinsic_ssbo_atomic_umin:
               case nir_intrinsic_ssbo_atomic_imax:
               case nir_intrinsic_ssbo_atomic_umax:
               case nir_intrinsic_ssbo_atomic_and:
               case nir_intrinsic_ssbo_atomic_or:
               case nir_intrinsic_ssbo_atomic_xor:
               case nir_intrinsic_ssbo_atomic_exchange:
               case nir_intrinsic_ssbo_atomic_comp_swap:
               case nir_intrinsic_ssbo_atomic_fmin:
               case nir_intrinsic_ssbo_atomic_fmax:
               case nir_intrinsic_global_atomic_add_amd:
               case nir_intrinsic_global_atomic_imin_amd:
               case nir_intrinsic_global_atomic_umin_amd:
               case nir_intrinsic_global_atomic_imax_amd:
               case nir_intrinsic_global_atomic_umax_amd:
               case nir_intrinsic_global_atomic_and_amd:
               case nir_intrinsic_global_atomic_or_amd:
               case nir_intrinsic_global_atomic_xor_amd:
               case nir_intrinsic_global_atomic_exchange_amd:
               case nir_intrinsic_global_atomic_comp_swap_amd:
               case nir_intrinsic_global_atomic_fmin_amd:
               case nir_intrinsic_global_atomic_fmax_amd:
               case nir_intrinsic_bindless_image_atomic_add:
               case nir_intrinsic_bindless_image_atomic_umin:
               case nir_intrinsic_bindless_image_atomic_imin:
               case nir_intrinsic_bindless_image_atomic_umax:
               case nir_intrinsic_bindless_image_atomic_imax:
               case nir_intrinsic_bindless_image_atomic_and:
               case nir_intrinsic_bindless_image_atomic_or:
               case nir_intrinsic_bindless_image_atomic_xor:
               case nir_intrinsic_bindless_image_atomic_exchange:
               case nir_intrinsic_bindless_image_atomic_comp_swap:
               case nir_intrinsic_bindless_image_atomic_fmin:
               case nir_intrinsic_bindless_image_atomic_fmax:
               case nir_intrinsic_bindless_image_size:
               case nir_intrinsic_shared_atomic_add:
               case nir_intrinsic_shared_atomic_imin:
               case nir_intrinsic_shared_atomic_umin:
               case nir_intrinsic_shared_atomic_imax:
               case nir_intrinsic_shared_atomic_umax:
               case nir_intrinsic_shared_atomic_and:
               case nir_intrinsic_shared_atomic_or:
               case nir_intrinsic_shared_atomic_xor:
               case nir_intrinsic_shared_atomic_exchange:
               case nir_intrinsic_shared_atomic_comp_swap:
               case nir_intrinsic_shared_atomic_fadd:
               case nir_intrinsic_shared_atomic_fmin:
               case nir_intrinsic_shared_atomic_fmax:
               case nir_intrinsic_load_scratch:
               case nir_intrinsic_load_invocation_id:
               case nir_intrinsic_load_primitive_id:
               case nir_intrinsic_load_buffer_amd:
               case nir_intrinsic_load_initial_edgeflags_amd:
               case nir_intrinsic_gds_atomic_add_amd:
               case nir_intrinsic_bvh64_intersect_ray_amd:
               case nir_intrinsic_load_vector_arg_amd: type = RegType::vgpr; break;
               case nir_intrinsic_load_shared:
               case nir_intrinsic_load_shared2_amd:
                  /* When the result of these loads is only used by cross-lane instructions,
                   * it is beneficial to use a VGPR destination, because that allows the
                   * s_waitcnt to be placed further down, which decreases latency.
                   */
                  if (only_used_by_cross_lane_instrs(&intrinsic->dest.ssa)) {
                     type = RegType::vgpr;
                     break;
                  }
                  FALLTHROUGH;
               case nir_intrinsic_shuffle:
               case nir_intrinsic_quad_broadcast:
               case nir_intrinsic_quad_swap_horizontal:
               case nir_intrinsic_quad_swap_vertical:
               case nir_intrinsic_quad_swap_diagonal:
               case nir_intrinsic_quad_swizzle_amd:
               case nir_intrinsic_masked_swizzle_amd:
               case nir_intrinsic_inclusive_scan:
               case nir_intrinsic_exclusive_scan:
               case nir_intrinsic_reduce:
               case nir_intrinsic_load_ubo:
               case nir_intrinsic_load_ssbo:
               case nir_intrinsic_load_global_amd:
                  type = nir_dest_is_divergent(intrinsic->dest) ? RegType::vgpr : RegType::sgpr;
                  break;
               case nir_intrinsic_load_view_index:
                  type = ctx->stage == fragment_fs ? RegType::vgpr : RegType::sgpr;
                  break;
               default:
                  for (unsigned i = 0; i < nir_intrinsic_infos[intrinsic->intrinsic].num_srcs;
                       i++) {
                     if (regclasses[intrinsic->src[i].ssa->index].type() == RegType::vgpr)
                        type = RegType::vgpr;
                  }
                  break;
               }
               RegClass rc = get_reg_class(ctx, type, intrinsic->dest.ssa.num_components,
                                           intrinsic->dest.ssa.bit_size);
               regclasses[intrinsic->dest.ssa.index] = rc;
               break;
            }
            case nir_instr_type_tex: {
               nir_tex_instr* tex = nir_instr_as_tex(instr);
               RegType type = nir_dest_is_divergent(tex->dest) ? RegType::vgpr : RegType::sgpr;

               if (tex->op == nir_texop_texture_samples) {
                  assert(!tex->dest.ssa.divergent);
               }

               RegClass rc =
                  get_reg_class(ctx, type, tex->dest.ssa.num_components, tex->dest.ssa.bit_size);
               regclasses[tex->dest.ssa.index] = rc;
               break;
            }
            case nir_instr_type_parallel_copy: {
               nir_foreach_parallel_copy_entry (entry, nir_instr_as_parallel_copy(instr)) {
                  regclasses[entry->dest.ssa.index] = regclasses[entry->src.ssa->index];
               }
               break;
            }
            case nir_instr_type_ssa_undef: {
               unsigned num_components = nir_instr_as_ssa_undef(instr)->def.num_components;
               unsigned bit_size = nir_instr_as_ssa_undef(instr)->def.bit_size;
               RegClass rc = get_reg_class(ctx, RegType::sgpr, num_components, bit_size);
               regclasses[nir_instr_as_ssa_undef(instr)->def.index] = rc;
               break;
            }
            case nir_instr_type_phi: {
               nir_phi_instr* phi = nir_instr_as_phi(instr);
               RegType type = RegType::sgpr;
               unsigned num_components = phi->dest.ssa.num_components;
               assert((phi->dest.ssa.bit_size != 1 || num_components == 1) &&
                      "Multiple components not supported on boolean phis.");

               if (nir_dest_is_divergent(phi->dest)) {
                  type = RegType::vgpr;
               } else {
                  nir_foreach_phi_src (src, phi) {
                     if (regclasses[src->src.ssa->index].type() == RegType::vgpr)
                        type = RegType::vgpr;
                  }
               }

               RegClass rc = get_reg_class(ctx, type, num_components, phi->dest.ssa.bit_size);
               if (rc != regclasses[phi->dest.ssa.index])
                  done = false;
               regclasses[phi->dest.ssa.index] = rc;
               break;
            }
            default: break;
            }
         }
      }
   }

   ctx->program->config->spi_ps_input_ena = ctx->program->info.ps.spi_ps_input;
   ctx->program->config->spi_ps_input_addr = ctx->program->info.ps.spi_ps_input;

   ctx->cf_info.nir_to_aco = std::move(nir_to_aco);

   /* align and copy constant data */
   while (ctx->program->constant_data.size() % 4u)
      ctx->program->constant_data.push_back(0);
   ctx->constant_data_offset = ctx->program->constant_data.size();
   ctx->program->constant_data.insert(ctx->program->constant_data.end(),
                                      (uint8_t*)shader->constant_data,
                                      (uint8_t*)shader->constant_data + shader->constant_data_size);
}

void
cleanup_context(isel_context* ctx)
{
   _mesa_hash_table_destroy(ctx->range_ht, NULL);
}

isel_context
setup_isel_context(Program* program, unsigned shader_count, struct nir_shader* const* shaders,
                   ac_shader_config* config, const struct aco_compiler_options* options,
                   const struct aco_shader_info* info,
                   const struct radv_shader_args* args, bool is_gs_copy_shader,
                   bool is_ps_epilog)
{
   SWStage sw_stage = SWStage::None;
   for (unsigned i = 0; i < shader_count; i++) {
      switch (shaders[i]->info.stage) {
      case MESA_SHADER_VERTEX: sw_stage = sw_stage | SWStage::VS; break;
      case MESA_SHADER_TESS_CTRL: sw_stage = sw_stage | SWStage::TCS; break;
      case MESA_SHADER_TESS_EVAL: sw_stage = sw_stage | SWStage::TES; break;
      case MESA_SHADER_GEOMETRY:
         sw_stage = sw_stage | (is_gs_copy_shader ? SWStage::GSCopy : SWStage::GS);
         break;
      case MESA_SHADER_FRAGMENT: sw_stage = sw_stage | SWStage::FS; break;
      case MESA_SHADER_COMPUTE: sw_stage = sw_stage | SWStage::CS; break;
      case MESA_SHADER_TASK: sw_stage = sw_stage | SWStage::TS; break;
      case MESA_SHADER_MESH: sw_stage = sw_stage | SWStage::MS; break;
      default: unreachable("Shader stage not implemented");
      }
   }

   if (is_ps_epilog) {
      assert(shader_count == 0 && !shaders);
      sw_stage = SWStage::FS;
   }

   bool gfx9_plus = options->gfx_level >= GFX9;
   bool ngg = info->is_ngg && options->gfx_level >= GFX10;
   HWStage hw_stage{};
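   /* Map the combination of SW stages onto the HW stage that executes them. */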
   if (sw_stage == SWStage::VS && info->vs.as_es && !ngg)
      hw_stage = HWStage::ES;
   else if (sw_stage == SWStage::VS && !info->vs.as_ls && !ngg)
      hw_stage = HWStage::VS;
   else if (sw_stage == SWStage::VS && ngg)
      hw_stage = HWStage::NGG; /* GFX10/NGG: VS without GS uses the HW GS stage */
   else if (sw_stage == SWStage::GS)
      hw_stage = HWStage::GS;
   else if (sw_stage == SWStage::FS)
      hw_stage = HWStage::FS;
   else if (sw_stage == SWStage::CS)
      hw_stage = HWStage::CS;
   else if (sw_stage == SWStage::GSCopy)
      hw_stage = HWStage::VS;
   else if (sw_stage == SWStage::TS)
      hw_stage = HWStage::CS; /* Task shaders are implemented with compute shaders. */
   else if (sw_stage == SWStage::MS)
      hw_stage = HWStage::NGG; /* Mesh shaders only work on NGG and on GFX10.3+. */
   else if (sw_stage == SWStage::VS_GS && gfx9_plus && !ngg)
      hw_stage = HWStage::GS; /* GFX6-9: VS+GS merged into a GS (and GFX10/legacy) */
   else if (sw_stage == SWStage::VS_GS && ngg)
      hw_stage = HWStage::NGG; /* GFX10+: VS+GS merged into an NGG GS */
   else if (sw_stage == SWStage::VS && info->vs.as_ls)
      hw_stage = HWStage::LS; /* GFX6-8: VS is a Local Shader, when tessellation is used */
   else if (sw_stage == SWStage::TCS)
      hw_stage = HWStage::HS; /* GFX6-8: TCS is a Hull Shader */
   else if (sw_stage == SWStage::VS_TCS)
      hw_stage = HWStage::HS; /* GFX9-10: VS+TCS merged into a Hull Shader */
   else if (sw_stage == SWStage::TES && !info->tes.as_es && !ngg)
      hw_stage = HWStage::VS; /* GFX6-9: TES without GS uses the HW VS stage (and GFX10/legacy) */
   else if (sw_stage == SWStage::TES && !info->tes.as_es && ngg)
      hw_stage = HWStage::NGG; /* GFX10/NGG: TES without GS */
   else if (sw_stage == SWStage::TES && info->tes.as_es && !ngg)
      hw_stage = HWStage::ES; /* GFX6-8: TES is an Export Shader */
   else if (sw_stage == SWStage::TES_GS && gfx9_plus && !ngg)
      hw_stage = HWStage::GS; /* GFX9: TES+GS merged into a GS (and GFX10/legacy) */
   else if (sw_stage == SWStage::TES_GS && ngg)
      hw_stage = HWStage::NGG; /* GFX10+: TES+GS merged into an NGG GS */
   else
      unreachable("Shader stage not implemented");

   init_program(program, Stage{hw_stage, sw_stage}, info, options->gfx_level, options->family,
                options->wgp_mode, config);

   isel_context ctx = {};
   ctx.program = program;
   ctx.args = args;
   ctx.options = options;
   ctx.stage = program->stage;

   program->workgroup_size = program->info.workgroup_size;
   assert(program->workgroup_size);

   /* Mesh shading only works on GFX10.3+. */
   ASSERTED bool mesh_shading = ctx.stage.has(SWStage::TS) || ctx.stage.has(SWStage::MS);
   assert(!mesh_shading || ctx.program->gfx_level >= GFX10_3);

   if (ctx.stage == tess_control_hs)
      setup_tcs_info(&ctx, shaders[0], NULL);
   else if (ctx.stage == vertex_tess_control_hs)
      setup_tcs_info(&ctx, shaders[1], shaders[0]);

   calc_min_waves(program);

   unsigned scratch_size = 0;
   if (program->stage == gs_copy_vs) {
      assert(shader_count == 1);
      setup_vs_output_info(&ctx, shaders[0], &program->info.vs.outinfo);
   } else {
      for (unsigned i = 0; i < shader_count; i++) {
         nir_shader* nir = shaders[i];
         setup_nir(&ctx, nir);
      }

      for (unsigned i = 0; i < shader_count; i++)
         scratch_size = std::max(scratch_size, shaders[i]->scratch_size);
   }

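   /* nir_shader::scratch_size is per invocation; the hardware is programmed
    * with a per-wave amount, rounded up to a 1 KiB granularity. */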
   ctx.program->config->scratch_bytes_per_wave = align(scratch_size * ctx.program->wave_size, 1024);

   ctx.block = ctx.program->create_and_insert_block();
   ctx.block->kind = block_kind_top_level;

   return ctx;
}

} // namespace aco