/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "ac_nir.h"
#include "ac_nir_to_llvm.h"
#include "ac_rtld.h"
#include "si_pipe.h"
#include "si_shader_internal.h"
#include "sid.h"
#include "tgsi/tgsi_from_mesa.h"
#include "util/u_memory.h"

struct si_llvm_diagnostics {
   struct util_debug_callback *debug;
   unsigned retval;
};

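/**
 * LLVM diagnostic handler: forwards warnings and errors to the debug
 * callback and records errors in si_llvm_diagnostics::retval, so that
 * si_compile_llvm can fail the build. Remarks and notes are dropped.
 */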
static void si_diagnostic_handler(LLVMDiagnosticInfoRef di, void *context)
{
   struct si_llvm_diagnostics *diag = (struct si_llvm_diagnostics *)context;
   LLVMDiagnosticSeverity severity = LLVMGetDiagInfoSeverity(di);
   const char *severity_str = NULL;

   switch (severity) {
   case LLVMDSError:
      severity_str = "error";
      break;
   case LLVMDSWarning:
      severity_str = "warning";
      break;
   case LLVMDSRemark:
   case LLVMDSNote:
   default:
      return;
   }

   char *description = LLVMGetDiagInfoDescription(di);

   util_debug_message(diag->debug, SHADER_INFO, "LLVM diagnostic (%s): %s", severity_str,
                      description);

   if (severity == LLVMDSError) {
      diag->retval = 1;
      fprintf(stderr, "LLVM triggered Diagnostic Handler: %s\n", description);
   }

   LLVMDisposeMessage(description);
}

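/**
 * Compile ac->module to an ELF shader binary.
 *
 * The module is run through the compiler passes (or the low-opt passes for
 * "less_optimized" shaders), and the resulting ELF is opened with ac_rtld
 * only to read the shader config (register, LDS and scratch usage) into
 * *conf. Returns false on LLVM diagnostic errors or if the config can't
 * be read.
 */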
bool si_compile_llvm(struct si_screen *sscreen, struct si_shader_binary *binary,
                     struct ac_shader_config *conf, struct ac_llvm_compiler *compiler,
                     struct ac_llvm_context *ac, struct util_debug_callback *debug,
                     gl_shader_stage stage, const char *name, bool less_optimized)
{
   unsigned count = p_atomic_inc_return(&sscreen->num_compilations);

   if (si_can_dump_shader(sscreen, stage)) {
      fprintf(stderr, "radeonsi: Compiling shader %d\n", count);

      if (!(sscreen->debug_flags & (DBG(NO_IR) | DBG(PREOPT_IR)))) {
         fprintf(stderr, "%s LLVM IR:\n\n", name);
         ac_dump_module(ac->module);
         fprintf(stderr, "\n");
      }
   }

   if (sscreen->record_llvm_ir) {
      char *ir = LLVMPrintModuleToString(ac->module);
      binary->llvm_ir_string = strdup(ir);
      LLVMDisposeMessage(ir);
   }

   if (!si_replace_shader(count, binary)) {
      struct ac_compiler_passes *passes = compiler->passes;

      if (less_optimized && compiler->low_opt_passes)
         passes = compiler->low_opt_passes;

      struct si_llvm_diagnostics diag = {debug};
      LLVMContextSetDiagnosticHandler(ac->context, si_diagnostic_handler, &diag);

      if (!ac_compile_module_to_elf(passes, ac->module, (char **)&binary->elf_buffer,
                                    &binary->elf_size))
         diag.retval = 1;

      if (diag.retval != 0) {
         util_debug_message(debug, SHADER_INFO, "LLVM compilation failed");
         return false;
      }
   }

   struct ac_rtld_binary rtld;
   if (!ac_rtld_open(&rtld, (struct ac_rtld_open_info){
                               .info = &sscreen->info,
                               .shader_type = stage,
                               .wave_size = ac->wave_size,
                               .num_parts = 1,
                               .elf_ptrs = &binary->elf_buffer,
                               .elf_sizes = &binary->elf_size}))
      return false;

   bool ok = ac_rtld_read_config(&sscreen->info, &rtld, conf);
   ac_rtld_close(&rtld);
   return ok;
}

void si_llvm_context_init(struct si_shader_context *ctx, struct si_screen *sscreen,
                          struct ac_llvm_compiler *compiler, unsigned wave_size)
{
   memset(ctx, 0, sizeof(*ctx));
   ctx->screen = sscreen;
   ctx->compiler = compiler;

   ac_llvm_context_init(&ctx->ac, compiler, sscreen->info.gfx_level, sscreen->info.family,
                        sscreen->info.has_3d_cube_border_color_mipmap, AC_FLOAT_MODE_DEFAULT_OPENGL, wave_size, 64);
}

void si_llvm_create_func(struct si_shader_context *ctx, const char *name, LLVMTypeRef *return_types,
                         unsigned num_return_elems, unsigned max_workgroup_size)
{
   LLVMTypeRef ret_type;
   enum ac_llvm_calling_convention call_conv;

   if (num_return_elems)
      ret_type = LLVMStructTypeInContext(ctx->ac.context, return_types, num_return_elems, true);
   else
      ret_type = ctx->ac.voidt;

   gl_shader_stage real_stage = ctx->stage;

   /* LS is merged into HS (TCS), and ES is merged into GS. */
   if (ctx->screen->info.gfx_level >= GFX9 && ctx->stage <= MESA_SHADER_GEOMETRY) {
      if (ctx->shader->key.ge.as_ls)
         real_stage = MESA_SHADER_TESS_CTRL;
      else if (ctx->shader->key.ge.as_es || ctx->shader->key.ge.as_ngg)
         real_stage = MESA_SHADER_GEOMETRY;
   }

   switch (real_stage) {
   case MESA_SHADER_VERTEX:
   case MESA_SHADER_TESS_EVAL:
      call_conv = AC_LLVM_AMDGPU_VS;
      break;
   case MESA_SHADER_TESS_CTRL:
      call_conv = AC_LLVM_AMDGPU_HS;
      break;
   case MESA_SHADER_GEOMETRY:
      call_conv = AC_LLVM_AMDGPU_GS;
      break;
   case MESA_SHADER_FRAGMENT:
      call_conv = AC_LLVM_AMDGPU_PS;
      break;
   case MESA_SHADER_COMPUTE:
      call_conv = AC_LLVM_AMDGPU_CS;
      break;
   default:
      unreachable("Unhandled shader type");
   }

   /* Set up the function. */
   ctx->return_type = ret_type;
   ctx->main_fn = ac_build_main(&ctx->args, &ctx->ac, call_conv, name, ret_type, ctx->ac.module);
   ctx->return_value = LLVMGetUndef(ctx->return_type);

   if (ctx->screen->info.address32_hi) {
      ac_llvm_add_target_dep_function_attr(ctx->main_fn, "amdgpu-32bit-address-high-bits",
                                           ctx->screen->info.address32_hi);
   }

   if (ctx->stage <= MESA_SHADER_GEOMETRY && ctx->shader->key.ge.as_ngg &&
       si_shader_uses_streamout(ctx->shader))
      ac_llvm_add_target_dep_function_attr(ctx->main_fn, "amdgpu-gds-size", 256);

   ac_llvm_set_workgroup_size(ctx->main_fn, max_workgroup_size);
   ac_llvm_set_target_features(ctx->main_fn, &ctx->ac);
}

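/**
 * Create the main function for a shader part.
 *
 * Returned SGPRs are declared as i32 and returned VGPRs as f32; this is
 * the convention si_build_wrapper_function relies on when it feeds one
 * part's return values into the next part's inputs.
 */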
void si_llvm_create_main_func(struct si_shader_context *ctx, bool ngg_cull_shader)
{
   struct si_shader *shader = ctx->shader;
   LLVMTypeRef returns[AC_MAX_ARGS];
   unsigned i;

   si_init_shader_args(ctx, ngg_cull_shader);

   for (i = 0; i < ctx->args.num_sgprs_returned; i++)
      returns[i] = ctx->ac.i32; /* SGPR */
   for (; i < ctx->args.return_count; i++)
      returns[i] = ctx->ac.f32; /* VGPR */

   si_llvm_create_func(ctx, ngg_cull_shader ? "ngg_cull_main" : "main", returns,
                       ctx->args.return_count, si_get_max_workgroup_size(shader));

   /* Reserve register locations for VGPR inputs the PS prolog may need. */
   if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->shader->is_monolithic) {
      ac_llvm_add_target_dep_function_attr(
         ctx->main_fn, "InitialPSInputAddr",
         S_0286D0_PERSP_SAMPLE_ENA(1) | S_0286D0_PERSP_CENTER_ENA(1) |
            S_0286D0_PERSP_CENTROID_ENA(1) | S_0286D0_LINEAR_SAMPLE_ENA(1) |
            S_0286D0_LINEAR_CENTER_ENA(1) | S_0286D0_LINEAR_CENTROID_ENA(1) |
            S_0286D0_FRONT_FACE_ENA(1) | S_0286D0_ANCILLARY_ENA(1) |
            S_0286D0_SAMPLE_COVERAGE_ENA(1) | S_0286D0_POS_FIXED_PT_ENA(1));
   }

   if (ctx->stage <= MESA_SHADER_GEOMETRY &&
       (shader->key.ge.as_ls || ctx->stage == MESA_SHADER_TESS_CTRL)) {
      if (USE_LDS_SYMBOLS) {
         /* The LSHS size is not known until draw time, so we append it
          * at the end of whatever LDS use there may be in the rest of
          * the shader (currently none, unless LLVM decides to do its
          * own LDS-based lowering).
          */
         ctx->ac.lds = LLVMAddGlobalInAddressSpace(ctx->ac.module, LLVMArrayType(ctx->ac.i32, 0),
                                                   "__lds_end", AC_ADDR_SPACE_LDS);
         LLVMSetAlignment(ctx->ac.lds, 256);
      } else {
         ac_declare_lds_as_pointer(&ctx->ac);
      }
   }

   /* Unlike radv, we override these arguments in the prolog, so to the
    * API shader they appear as normal arguments.
    */
   if (ctx->stage == MESA_SHADER_VERTEX) {
      ctx->abi.vertex_id = ac_get_arg(&ctx->ac, ctx->args.vertex_id);
      ctx->abi.instance_id = ac_get_arg(&ctx->ac, ctx->args.instance_id);
   } else if (ctx->stage == MESA_SHADER_FRAGMENT) {
      ctx->abi.persp_centroid = ac_get_arg(&ctx->ac, ctx->args.persp_centroid);
      ctx->abi.linear_centroid = ac_get_arg(&ctx->ac, ctx->args.linear_centroid);
   }
}

void si_llvm_optimize_module(struct si_shader_context *ctx)
{
   /* Dump LLVM IR before any optimization passes */
   if (ctx->screen->debug_flags & DBG(PREOPT_IR) && si_can_dump_shader(ctx->screen, ctx->stage))
      LLVMDumpModule(ctx->ac.module);

   /* Run the pass */
   LLVMRunPassManager(ctx->compiler->passmgr, ctx->ac.module);
   LLVMDisposeBuilder(ctx->ac.builder);
}

void si_llvm_dispose(struct si_shader_context *ctx)
{
   LLVMDisposeModule(ctx->ac.module);
   LLVMContextDispose(ctx->ac.context);
   ac_llvm_context_dispose(&ctx->ac);
}

/**
 * Load a dword from a constant buffer.
 */
LLVMValueRef si_buffer_load_const(struct si_shader_context *ctx, LLVMValueRef resource,
                                  LLVMValueRef offset)
{
   return ac_build_buffer_load(&ctx->ac, resource, 1, NULL, offset, NULL, ctx->ac.f32,
                               0, true, true);
}

void si_llvm_build_ret(struct si_shader_context *ctx, LLVMValueRef ret)
{
   if (LLVMGetTypeKind(LLVMTypeOf(ret)) == LLVMVoidTypeKind)
      LLVMBuildRetVoid(ctx->ac.builder);
   else
      LLVMBuildRet(ctx->ac.builder, ret);
}

LLVMValueRef si_insert_input_ret(struct si_shader_context *ctx, LLVMValueRef ret,
                                 struct ac_arg param, unsigned return_index)
{
   return LLVMBuildInsertValue(ctx->ac.builder, ret, ac_get_arg(&ctx->ac, param), return_index, "");
}

LLVMValueRef si_insert_input_ret_float(struct si_shader_context *ctx, LLVMValueRef ret,
                                       struct ac_arg param, unsigned return_index)
{
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef p = ac_get_arg(&ctx->ac, param);

   return LLVMBuildInsertValue(builder, ret, ac_to_float(&ctx->ac, p), return_index, "");
}

LLVMValueRef si_insert_input_ptr(struct si_shader_context *ctx, LLVMValueRef ret,
                                 struct ac_arg param, unsigned return_index)
{
   LLVMBuilderRef builder = ctx->ac.builder;
   LLVMValueRef ptr = ac_get_arg(&ctx->ac, param);
   ptr = LLVMBuildPtrToInt(builder, ptr, ctx->ac.i32, "");
   return LLVMBuildInsertValue(builder, ret, ptr, return_index, "");
}

LLVMValueRef si_prolog_get_internal_bindings(struct si_shader_context *ctx)
{
   LLVMValueRef ptr[2], list;
   bool merged_shader = si_is_merged_shader(ctx->shader);

   ptr[0] = LLVMGetParam(ctx->main_fn, (merged_shader ? 8 : 0) + SI_SGPR_INTERNAL_BINDINGS);
   list =
      LLVMBuildIntToPtr(ctx->ac.builder, ptr[0], ac_array_in_const32_addr_space(ctx->ac.v4i32), "");
   return list;
}

/* Ensure that the esgs ring is declared.
 *
 * We declare it with 64KB alignment as a hint that the
 * pointer value will always be 0.
 */
void si_llvm_declare_esgs_ring(struct si_shader_context *ctx)
{
   if (ctx->esgs_ring)
      return;

   assert(!LLVMGetNamedGlobal(ctx->ac.module, "esgs_ring"));

   ctx->esgs_ring = LLVMAddGlobalInAddressSpace(ctx->ac.module, LLVMArrayType(ctx->ac.i32, 0),
                                                "esgs_ring", AC_ADDR_SPACE_LDS);
   LLVMSetLinkage(ctx->esgs_ring, LLVMExternalLinkage);
   LLVMSetAlignment(ctx->esgs_ring, 64 * 1024);
}

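/* Set EXEC from a thread count packed into an input SGPR, starting at the
 * given bit offset. Used for the first half of gfx9+ merged shaders, whose
 * wave may contain more threads than the first stage has enabled.
 */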
static void si_init_exec_from_input(struct si_shader_context *ctx, struct ac_arg param,
                                    unsigned bitoffset)
{
   LLVMValueRef args[] = {
      ac_get_arg(&ctx->ac, param),
      LLVMConstInt(ctx->ac.i32, bitoffset, 0),
   };
   ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.init.exec.from.input", ctx->ac.voidt, args, 2,
                      AC_FUNC_ATTR_CONVERGENT);
}

/**
 * Get the value of a shader input parameter and extract a bitfield.
 */
static LLVMValueRef unpack_llvm_param(struct si_shader_context *ctx, LLVMValueRef value,
                                      unsigned rshift, unsigned bitwidth)
{
   if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMFloatTypeKind)
      value = ac_to_integer(&ctx->ac, value);

   if (rshift)
      value = LLVMBuildLShr(ctx->ac.builder, value, LLVMConstInt(ctx->ac.i32, rshift, 0), "");

   if (rshift + bitwidth < 32) {
      unsigned mask = (1 << bitwidth) - 1;
      value = LLVMBuildAnd(ctx->ac.builder, value, LLVMConstInt(ctx->ac.i32, mask, 0), "");
   }

   return value;
}

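/**
 * Like unpack_llvm_param, but takes the shader argument directly.
 *
 * E.g. the 6-bit patch-vertex count stored at bits [18:13] of
 * tcs_out_lds_layout is read with:
 *    si_unpack_param(ctx, ctx->tcs_out_lds_layout, 13, 6);
 * (see the load_patch_vertices_in case further below).
 */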
LLVMValueRef si_unpack_param(struct si_shader_context *ctx, struct ac_arg param, unsigned rshift,
                             unsigned bitwidth)
{
   LLVMValueRef value = ac_get_arg(&ctx->ac, param);

   return unpack_llvm_param(ctx, value, rshift, bitwidth);
}

LLVMValueRef si_get_primitive_id(struct si_shader_context *ctx, unsigned swizzle)
{
   if (swizzle > 0)
      return ctx->ac.i32_0;

   switch (ctx->stage) {
   case MESA_SHADER_VERTEX:
      return ac_get_arg(&ctx->ac, ctx->args.vs_prim_id);
   case MESA_SHADER_TESS_CTRL:
      return ac_get_arg(&ctx->ac, ctx->args.tcs_patch_id);
   case MESA_SHADER_TESS_EVAL:
      return ac_get_arg(&ctx->ac, ctx->args.tes_patch_id);
   case MESA_SHADER_GEOMETRY:
      return ac_get_arg(&ctx->ac, ctx->args.gs_prim_id);
   default:
      assert(0);
      return ctx->ac.i32_0;
   }
}

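/* Declare the LDS block used as compute shared memory. Like the esgs ring
 * above, it's aligned to 64KB as a hint that its address will be 0.
 */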
static void si_llvm_declare_compute_memory(struct si_shader_context *ctx)
{
   struct si_shader_selector *sel = ctx->shader->selector;
   unsigned lds_size = sel->info.base.shared_size;

   LLVMTypeRef i8p = LLVMPointerType(ctx->ac.i8, AC_ADDR_SPACE_LDS);
   LLVMValueRef var;

   assert(!ctx->ac.lds);

   var = LLVMAddGlobalInAddressSpace(ctx->ac.module, LLVMArrayType(ctx->ac.i8, lds_size),
                                     "compute_lds", AC_ADDR_SPACE_LDS);
   LLVMSetAlignment(var, 64 * 1024);

   ctx->ac.lds = LLVMBuildBitCast(ctx->ac.builder, var, i8p, "");
}

/**
 * Given a list of shader part functions, build a wrapper function that
 * runs them in sequence to form a monolithic shader.
 */
void si_build_wrapper_function(struct si_shader_context *ctx, LLVMValueRef *parts,
                               unsigned num_parts, unsigned main_part,
                               unsigned next_shader_first_part, bool same_thread_count)
{
   LLVMBuilderRef builder = ctx->ac.builder;
   /* PS epilog has one arg per color component; gfx9 merged shader
    * prologs need to forward 40 SGPRs.
    */
   LLVMValueRef initial[AC_MAX_ARGS], out[AC_MAX_ARGS];
   LLVMTypeRef function_type;
   unsigned num_first_params;
   unsigned num_out, initial_num_out;
   ASSERTED unsigned num_out_sgpr;         /* used in debug checks */
   ASSERTED unsigned initial_num_out_sgpr; /* used in debug checks */
   unsigned num_sgprs, num_vgprs;
   unsigned gprs;

   memset(&ctx->args, 0, sizeof(ctx->args));

   for (unsigned i = 0; i < num_parts; ++i) {
      ac_add_function_attr(ctx->ac.context, parts[i], -1, AC_FUNC_ATTR_ALWAYSINLINE);
      LLVMSetLinkage(parts[i], LLVMPrivateLinkage);
   }

   /* The parameters of the wrapper function correspond to those of the
    * first part in terms of SGPRs and VGPRs, but we use the types of the
    * main part to get the right types. This is relevant for the
    * dereferenceable attribute on descriptor table pointers.
    */
   num_sgprs = 0;
   num_vgprs = 0;

   function_type = LLVMGetElementType(LLVMTypeOf(parts[0]));
   num_first_params = LLVMCountParamTypes(function_type);

   for (unsigned i = 0; i < num_first_params; ++i) {
      LLVMValueRef param = LLVMGetParam(parts[0], i);

      if (ac_is_sgpr_param(param)) {
         assert(num_vgprs == 0);
         num_sgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
      } else {
         num_vgprs += ac_get_type_size(LLVMTypeOf(param)) / 4;
      }
   }

   gprs = 0;
   while (gprs < num_sgprs + num_vgprs) {
      LLVMValueRef param = LLVMGetParam(parts[main_part], ctx->args.arg_count);
      LLVMTypeRef type = LLVMTypeOf(param);
      unsigned size = ac_get_type_size(type) / 4;

      /* This is going to get cast anyway, so we don't have to
       * have the exact same type. But we do have to preserve the
       * pointer-ness so that LLVM knows about it.
       */
      enum ac_arg_type arg_type = AC_ARG_INT;
      if (LLVMGetTypeKind(type) == LLVMPointerTypeKind) {
         type = LLVMGetElementType(type);

         if (LLVMGetTypeKind(type) == LLVMVectorTypeKind) {
            if (LLVMGetVectorSize(type) == 4)
               arg_type = AC_ARG_CONST_DESC_PTR;
            else if (LLVMGetVectorSize(type) == 8)
               arg_type = AC_ARG_CONST_IMAGE_PTR;
            else
               assert(0);
         } else if (type == ctx->ac.f32) {
            arg_type = AC_ARG_CONST_FLOAT_PTR;
         } else {
            assert(0);
         }
      }

      ac_add_arg(&ctx->args, gprs < num_sgprs ? AC_ARG_SGPR : AC_ARG_VGPR, size, arg_type, NULL);

      assert(ac_is_sgpr_param(param) == (gprs < num_sgprs));
      assert(gprs + size <= num_sgprs + num_vgprs &&
             (gprs >= num_sgprs || gprs + size <= num_sgprs));

      gprs += size;
   }

   /* Prepare the return type. */
   unsigned num_returns = 0;
   LLVMTypeRef returns[AC_MAX_ARGS], last_func_type, return_type;

   last_func_type = LLVMGetElementType(LLVMTypeOf(parts[num_parts - 1]));
   return_type = LLVMGetReturnType(last_func_type);

   switch (LLVMGetTypeKind(return_type)) {
   case LLVMStructTypeKind:
      num_returns = LLVMCountStructElementTypes(return_type);
      assert(num_returns <= ARRAY_SIZE(returns));
      LLVMGetStructElementTypes(return_type, returns);
      break;
   case LLVMVoidTypeKind:
      break;
   default:
      unreachable("unexpected type");
   }

   si_llvm_create_func(ctx, "wrapper", returns, num_returns,
                       si_get_max_workgroup_size(ctx->shader));

   if (si_is_merged_shader(ctx->shader) && !same_thread_count)
      ac_init_exec_full_mask(&ctx->ac);

   /* Record the arguments of the function as if they were an output of
    * a previous part.
    */
   num_out = 0;
   num_out_sgpr = 0;

   for (unsigned i = 0; i < ctx->args.arg_count; ++i) {
      LLVMValueRef param = LLVMGetParam(ctx->main_fn, i);
      LLVMTypeRef param_type = LLVMTypeOf(param);
      LLVMTypeRef out_type = ctx->args.args[i].file == AC_ARG_SGPR ? ctx->ac.i32 : ctx->ac.f32;
      unsigned size = ac_get_type_size(param_type) / 4;

      if (size == 1) {
         if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
            param = LLVMBuildPtrToInt(builder, param, ctx->ac.i32, "");
            param_type = ctx->ac.i32;
         }

         if (param_type != out_type)
            param = LLVMBuildBitCast(builder, param, out_type, "");
         out[num_out++] = param;
      } else {
         LLVMTypeRef vector_type = LLVMVectorType(out_type, size);

         if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
            param = LLVMBuildPtrToInt(builder, param, ctx->ac.i64, "");
            param_type = ctx->ac.i64;
         }

         if (param_type != vector_type)
            param = LLVMBuildBitCast(builder, param, vector_type, "");

         for (unsigned j = 0; j < size; ++j)
            out[num_out++] =
               LLVMBuildExtractElement(builder, param, LLVMConstInt(ctx->ac.i32, j, 0), "");
      }

      if (ctx->args.args[i].file == AC_ARG_SGPR)
         num_out_sgpr = num_out;
   }

   memcpy(initial, out, sizeof(out));
   initial_num_out = num_out;
   initial_num_out_sgpr = num_out_sgpr;

   /* Now chain the parts. */
   LLVMValueRef ret = NULL;
   for (unsigned part = 0; part < num_parts; ++part) {
      LLVMValueRef in[AC_MAX_ARGS];
      LLVMTypeRef ret_type;
      unsigned out_idx = 0;
      unsigned num_params = LLVMCountParams(parts[part]);

      /* Merged shaders are executed conditionally depending
       * on the number of enabled threads passed in the input SGPRs. */
      if (si_is_multi_part_shader(ctx->shader) && part == 0) {
         if (same_thread_count) {
            struct ac_arg arg;
            arg.arg_index = 3;
            arg.used = true;

            si_init_exec_from_input(ctx, arg, 0);
         } else {
            LLVMValueRef ena, count = initial[3];

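            /* Bits [6:0] of the wave info SGPR hold the enabled thread
             * count of the first-half shader; the second half's count is
             * taken from bits [14:8] further down. */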
            count = LLVMBuildAnd(builder, count, LLVMConstInt(ctx->ac.i32, 0x7f, 0), "");
            ena = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), count, "");
            ac_build_ifcc(&ctx->ac, ena, 6506);
         }
      }

      /* Derive arguments for the next part from outputs of the
       * previous one.
       */
      for (unsigned param_idx = 0; param_idx < num_params; ++param_idx) {
         LLVMValueRef param;
         LLVMTypeRef param_type;
         bool is_sgpr;
         unsigned param_size;
         LLVMValueRef arg = NULL;

         param = LLVMGetParam(parts[part], param_idx);
         param_type = LLVMTypeOf(param);
         param_size = ac_get_type_size(param_type) / 4;
         is_sgpr = ac_is_sgpr_param(param);

         if (is_sgpr) {
            ac_add_function_attr(ctx->ac.context, parts[part], param_idx + 1, AC_FUNC_ATTR_INREG);
         } else if (out_idx < num_out_sgpr) {
            /* Skip returned SGPRs the current part doesn't
             * declare on the input. */
            out_idx = num_out_sgpr;
         }

         assert(out_idx + param_size <= (is_sgpr ? num_out_sgpr : num_out));

         if (param_size == 1)
            arg = out[out_idx];
         else
            arg = ac_build_gather_values(&ctx->ac, &out[out_idx], param_size);

         if (LLVMTypeOf(arg) != param_type) {
            if (LLVMGetTypeKind(param_type) == LLVMPointerTypeKind) {
               if (LLVMGetPointerAddressSpace(param_type) == AC_ADDR_SPACE_CONST_32BIT) {
                  arg = LLVMBuildBitCast(builder, arg, ctx->ac.i32, "");
                  arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
               } else {
                  arg = LLVMBuildBitCast(builder, arg, ctx->ac.i64, "");
                  arg = LLVMBuildIntToPtr(builder, arg, param_type, "");
               }
            } else {
               arg = LLVMBuildBitCast(builder, arg, param_type, "");
            }
         }

         in[param_idx] = arg;
         out_idx += param_size;
      }

      ret = ac_build_call(&ctx->ac, parts[part], in, num_params);

      if (!same_thread_count &&
          si_is_multi_part_shader(ctx->shader) && part + 1 == next_shader_first_part) {
         ac_build_endif(&ctx->ac, 6506);

         /* The second half of the merged shader should use
          * the inputs from the toplevel (wrapper) function,
          * not the return value from the last call.
          *
          * That's because the last call was executed
          * conditionally, so we can't consume it in the
          * main block.
          */
         memcpy(out, initial, sizeof(initial));
         num_out = initial_num_out;
         num_out_sgpr = initial_num_out_sgpr;

         /* Execute the second shader conditionally based on the number of
          * enabled threads there.
          */
         if (ctx->stage == MESA_SHADER_TESS_CTRL) {
            LLVMValueRef ena, count = initial[3];

            count = LLVMBuildLShr(builder, count, LLVMConstInt(ctx->ac.i32, 8, 0), "");
            count = LLVMBuildAnd(builder, count, LLVMConstInt(ctx->ac.i32, 0x7f, 0), "");
            ena = LLVMBuildICmp(builder, LLVMIntULT, ac_get_thread_id(&ctx->ac), count, "");
            ac_build_ifcc(&ctx->ac, ena, 6507);
         }
         continue;
      }

      /* Extract the returned GPRs. */
      ret_type = LLVMTypeOf(ret);
      num_out = 0;
      num_out_sgpr = 0;

      if (LLVMGetTypeKind(ret_type) != LLVMVoidTypeKind) {
         assert(LLVMGetTypeKind(ret_type) == LLVMStructTypeKind);

         unsigned ret_size = LLVMCountStructElementTypes(ret_type);

         for (unsigned i = 0; i < ret_size; ++i) {
            LLVMValueRef val = LLVMBuildExtractValue(builder, ret, i, "");

            assert(num_out < ARRAY_SIZE(out));
            out[num_out++] = val;

            if (LLVMTypeOf(val) == ctx->ac.i32) {
               assert(num_out_sgpr + 1 == num_out);
               num_out_sgpr = num_out;
            }
         }
      }
   }

   /* Close the conditional wrapping the second shader. */
   if (ctx->stage == MESA_SHADER_TESS_CTRL &&
       !same_thread_count && si_is_multi_part_shader(ctx->shader))
      ac_build_endif(&ctx->ac, 6507);

   if (LLVMGetTypeKind(LLVMTypeOf(ret)) == LLVMVoidTypeKind)
      LLVMBuildRetVoid(builder);
   else
      LLVMBuildRet(builder, ret);
}

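/**
 * ABI callback for loading "system value"-style NIR intrinsics, mostly by
 * unpacking shader arguments. Returning NULL means the intrinsic is not
 * handled here and is left to the common NIR-to-LLVM code.
 */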
static LLVMValueRef si_llvm_load_intrinsic(struct ac_shader_abi *abi, nir_intrinsic_op op)
{
   struct si_shader_context *ctx = si_shader_context_from_abi(abi);

   switch (op) {
   case nir_intrinsic_load_first_vertex:
      return ac_get_arg(&ctx->ac, ctx->args.base_vertex);

   case nir_intrinsic_load_base_vertex: {
      /* For non-indexed draws, the base vertex set by the driver
       * (for direct draws) or the CP (for indirect draws) is the
       * first vertex ID, but GLSL expects 0 to be returned.
       */
      LLVMValueRef indexed = GET_FIELD(ctx, VS_STATE_INDEXED);
      indexed = LLVMBuildTrunc(ctx->ac.builder, indexed, ctx->ac.i1, "");
      return LLVMBuildSelect(ctx->ac.builder, indexed, ac_get_arg(&ctx->ac, ctx->args.base_vertex),
                             ctx->ac.i32_0, "");
   }

   case nir_intrinsic_load_workgroup_size: {
      assert(ctx->shader->selector->info.base.workgroup_size_variable &&
             ctx->shader->selector->info.uses_variable_block_size);
      LLVMValueRef chan[3] = {
         si_unpack_param(ctx, ctx->block_size, 0, 10),
         si_unpack_param(ctx, ctx->block_size, 10, 10),
         si_unpack_param(ctx, ctx->block_size, 20, 10),
      };
      return ac_build_gather_values(&ctx->ac, chan, 3);
   }

   case nir_intrinsic_load_tess_level_outer_default:
   case nir_intrinsic_load_tess_level_inner_default: {
      LLVMValueRef slot = LLVMConstInt(ctx->ac.i32, SI_HS_CONST_DEFAULT_TESS_LEVELS, 0);
      LLVMValueRef buf = ac_get_arg(&ctx->ac, ctx->internal_bindings);
      buf = ac_build_load_to_sgpr(&ctx->ac, buf, slot);
      int offset = op == nir_intrinsic_load_tess_level_inner_default ? 4 : 0;
      LLVMValueRef val[4];

      for (int i = 0; i < 4; i++)
         val[i] = si_buffer_load_const(ctx, buf, LLVMConstInt(ctx->ac.i32, (offset + i) * 4, 0));
      return ac_build_gather_values(&ctx->ac, val, 4);
   }

   case nir_intrinsic_load_patch_vertices_in:
      if (ctx->stage == MESA_SHADER_TESS_CTRL)
         return si_unpack_param(ctx, ctx->tcs_out_lds_layout, 13, 6);
      else if (ctx->stage == MESA_SHADER_TESS_EVAL)
         return si_get_num_tcs_out_vertices(ctx);
      else
         return NULL;

   case nir_intrinsic_load_sample_mask_in:
      return ac_to_integer(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args.sample_coverage));

   case nir_intrinsic_load_lshs_vertex_stride_amd:
      return LLVMBuildShl(ctx->ac.builder, si_get_tcs_in_vertex_dw_stride(ctx),
                          LLVMConstInt(ctx->ac.i32, 2, 0), "");

   case nir_intrinsic_load_tcs_num_patches_amd:
      return LLVMBuildAdd(ctx->ac.builder,
                          si_unpack_param(ctx, ctx->tcs_offchip_layout, 0, 6),
                          ctx->ac.i32_1, "");

   case nir_intrinsic_load_hs_out_patch_data_offset_amd:
      return si_unpack_param(ctx, ctx->tcs_offchip_layout, 11, 21);

   case nir_intrinsic_load_ring_tess_offchip_amd:
      return ctx->tess_offchip_ring;

   case nir_intrinsic_load_ring_tess_offchip_offset_amd:
      return ac_get_arg(&ctx->ac, ctx->args.tess_offchip_offset);

   case nir_intrinsic_load_tess_rel_patch_id_amd:
      return si_get_rel_patch_id(ctx);

   case nir_intrinsic_load_ring_esgs_amd:
      return ctx->esgs_ring;

   case nir_intrinsic_load_ring_es2gs_offset_amd:
      return ac_get_arg(&ctx->ac, ctx->args.es2gs_offset);

   default:
      return NULL;
   }
}

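/**
 * Translate the NIR of one shader part into LLVM IR.
 *
 * This sets up the per-stage ABI callbacks, ring and LDS declarations and
 * the gfx9+ merged-shader EXEC/barrier handling described inline below,
 * then runs ac_nir_translate and appends the stage-specific build_end
 * code.
 */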
bool si_llvm_translate_nir(struct si_shader_context *ctx, struct si_shader *shader,
                           struct nir_shader *nir, bool free_nir, bool ngg_cull_shader)
{
   struct si_shader_selector *sel = shader->selector;
   const struct si_shader_info *info = &sel->info;

   ctx->shader = shader;
   ctx->stage = sel->stage;

   ctx->num_const_buffers = info->base.num_ubos;
   ctx->num_shader_buffers = info->base.num_ssbos;

   ctx->num_samplers = BITSET_LAST_BIT(info->base.textures_used);
   ctx->num_images = info->base.num_images;

   ctx->abi.intrinsic_load = si_llvm_load_intrinsic;

   si_llvm_init_resource_callbacks(ctx);
   si_llvm_create_main_func(ctx, ngg_cull_shader);

   if (ctx->stage <= MESA_SHADER_GEOMETRY &&
       (ctx->shader->key.ge.as_es || ctx->stage == MESA_SHADER_GEOMETRY))
      si_preload_esgs_ring(ctx);

   switch (ctx->stage) {
   case MESA_SHADER_VERTEX:
      si_llvm_init_vs_callbacks(ctx, ngg_cull_shader);
      break;

   case MESA_SHADER_TESS_CTRL:
      si_llvm_init_tcs_callbacks(ctx);
      si_llvm_preload_tess_rings(ctx);
      break;

   case MESA_SHADER_TESS_EVAL:
      si_llvm_preload_tess_rings(ctx);
      break;

   case MESA_SHADER_GEOMETRY:
      si_llvm_init_gs_callbacks(ctx);

      if (!ctx->shader->key.ge.as_ngg)
         si_preload_gs_rings(ctx);

      for (unsigned i = 0; i < 4; i++)
         ctx->gs_next_vertex[i] = ac_build_alloca(&ctx->ac, ctx->ac.i32, "");

      if (shader->key.ge.as_ngg) {
         for (unsigned i = 0; i < 4; ++i) {
            ctx->gs_curprim_verts[i] = ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
            ctx->gs_generated_prims[i] = ac_build_alloca(&ctx->ac, ctx->ac.i32, "");
         }

         assert(!ctx->gs_ngg_scratch);
         LLVMTypeRef ai32 = LLVMArrayType(ctx->ac.i32, gfx10_ngg_get_scratch_dw_size(shader));
         ctx->gs_ngg_scratch =
            LLVMAddGlobalInAddressSpace(ctx->ac.module, ai32, "ngg_scratch", AC_ADDR_SPACE_LDS);
         LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(ai32));
         LLVMSetAlignment(ctx->gs_ngg_scratch, 4);

         ctx->gs_ngg_emit = LLVMAddGlobalInAddressSpace(
            ctx->ac.module, LLVMArrayType(ctx->ac.i32, 0), "ngg_emit", AC_ADDR_SPACE_LDS);
         LLVMSetLinkage(ctx->gs_ngg_emit, LLVMExternalLinkage);
         LLVMSetAlignment(ctx->gs_ngg_emit, 4);
      } else {
         ctx->gs_emitted_vertices = LLVMConstInt(ctx->ac.i32, 0, false);
      }
      break;

   case MESA_SHADER_FRAGMENT: {
      si_llvm_init_ps_callbacks(ctx);

      unsigned colors_read = ctx->shader->selector->info.colors_read;
      LLVMValueRef main_fn = ctx->main_fn;

      LLVMValueRef undef = LLVMGetUndef(ctx->ac.f32);

      unsigned offset = SI_PARAM_POS_FIXED_PT + 1;

      if (colors_read & 0x0f) {
         unsigned mask = colors_read & 0x0f;
         LLVMValueRef values[4];
         values[0] = mask & 0x1 ? LLVMGetParam(main_fn, offset++) : undef;
         values[1] = mask & 0x2 ? LLVMGetParam(main_fn, offset++) : undef;
         values[2] = mask & 0x4 ? LLVMGetParam(main_fn, offset++) : undef;
         values[3] = mask & 0x8 ? LLVMGetParam(main_fn, offset++) : undef;
         ctx->abi.color0 = ac_to_integer(&ctx->ac, ac_build_gather_values(&ctx->ac, values, 4));
      }
      if (colors_read & 0xf0) {
         unsigned mask = (colors_read & 0xf0) >> 4;
         LLVMValueRef values[4];
         values[0] = mask & 0x1 ? LLVMGetParam(main_fn, offset++) : undef;
         values[1] = mask & 0x2 ? LLVMGetParam(main_fn, offset++) : undef;
         values[2] = mask & 0x4 ? LLVMGetParam(main_fn, offset++) : undef;
         values[3] = mask & 0x8 ? LLVMGetParam(main_fn, offset++) : undef;
         ctx->abi.color1 = ac_to_integer(&ctx->ac, ac_build_gather_values(&ctx->ac, values, 4));
      }

      ctx->abi.num_interp = si_get_ps_num_interp(shader);
      ctx->abi.interp_at_sample_force_center =
         ctx->shader->key.ps.mono.interpolate_at_sample_force_center;

      ctx->abi.kill_ps_if_inf_interp =
         ctx->screen->options.no_infinite_interp &&
         (ctx->shader->selector->info.uses_persp_center ||
          ctx->shader->selector->info.uses_persp_centroid ||
          ctx->shader->selector->info.uses_persp_sample);
      break;
   }

   case MESA_SHADER_COMPUTE:
      if (nir->info.cs.user_data_components_amd) {
         ctx->abi.user_data = ac_get_arg(&ctx->ac, ctx->cs_user_data);
         ctx->abi.user_data = ac_build_expand_to_vec4(&ctx->ac, ctx->abi.user_data,
                                                      nir->info.cs.user_data_components_amd);
      }

      if (ctx->shader->selector->info.base.shared_size)
         si_llvm_declare_compute_memory(ctx);
      break;

   default:
      break;
   }

   if ((ctx->stage == MESA_SHADER_VERTEX || ctx->stage == MESA_SHADER_TESS_EVAL) &&
       shader->key.ge.as_ngg && !shader->key.ge.as_es) {
      /* Unconditionally declare scratch space base for streamout and
       * vertex compaction. Whether space is actually allocated is
       * determined during linking / PM4 creation.
       */
      si_llvm_declare_esgs_ring(ctx);

      /* This is really only needed when streamout and / or vertex
       * compaction is enabled.
       */
      if (!ctx->gs_ngg_scratch && (ctx->so.num_outputs || shader->key.ge.opt.ngg_culling)) {
         LLVMTypeRef asi32 = LLVMArrayType(ctx->ac.i32, gfx10_ngg_get_scratch_dw_size(shader));
         ctx->gs_ngg_scratch =
            LLVMAddGlobalInAddressSpace(ctx->ac.module, asi32, "ngg_scratch", AC_ADDR_SPACE_LDS);
         LLVMSetInitializer(ctx->gs_ngg_scratch, LLVMGetUndef(asi32));
         LLVMSetAlignment(ctx->gs_ngg_scratch, 4);
      }
   }

   /* For merged shaders (VS-TCS, VS-GS, TES-GS): */
   if (ctx->screen->info.gfx_level >= GFX9 && si_is_merged_shader(shader)) {
      /* TES is special because it has only 1 shader part if NGG shader culling is disabled,
       * and therefore it doesn't use the wrapper function.
       */
      bool no_wrapper_func = ctx->stage == MESA_SHADER_TESS_EVAL && !shader->key.ge.as_es &&
                             !shader->key.ge.opt.ngg_culling;

      /* Set EXEC = ~0 before the first shader. If the prolog is present, EXEC is set there
       * instead. For monolithic shaders, the wrapper function does this.
       */
      if ((!shader->is_monolithic || no_wrapper_func) &&
          (ctx->stage == MESA_SHADER_TESS_EVAL ||
           (ctx->stage == MESA_SHADER_VERTEX &&
            !si_vs_needs_prolog(sel, &shader->key.ge.part.vs.prolog, &shader->key, ngg_cull_shader,
                                false))))
         ac_init_exec_full_mask(&ctx->ac);

      /* NGG VS and NGG TES: Send gs_alloc_req and the prim export at the beginning to decrease
       * register usage.
       */
      if ((ctx->stage == MESA_SHADER_VERTEX || ctx->stage == MESA_SHADER_TESS_EVAL) &&
          shader->key.ge.as_ngg && !shader->key.ge.as_es && !shader->key.ge.opt.ngg_culling) {
         /* GFX10 requires a barrier before gs_alloc_req due to a hw bug. */
         if (ctx->screen->info.gfx_level == GFX10)
            ac_build_s_barrier(&ctx->ac, ctx->stage);

         gfx10_ngg_build_sendmsg_gs_alloc_req(ctx);

         /* Build the primitive export at the beginning
          * of the shader if possible.
          */
         if (gfx10_ngg_export_prim_early(shader))
            gfx10_ngg_build_export_prim(ctx, NULL, NULL);
      }

      /* NGG GS: Initialize LDS and insert s_barrier, which must not be inside the if statement. */
      if (ctx->stage == MESA_SHADER_GEOMETRY && shader->key.ge.as_ngg)
         gfx10_ngg_gs_emit_begin(ctx);

      LLVMValueRef thread_enabled = NULL;

      if (ctx->stage == MESA_SHADER_GEOMETRY ||
          (ctx->stage == MESA_SHADER_TESS_CTRL && !shader->is_monolithic)) {
         /* Wrap both shaders in an if statement according to the number of enabled threads
          * there. For monolithic TCS, the if statement is inserted by the wrapper function,
          * not here.
          */
         thread_enabled = si_is_gs_thread(ctx); /* 2nd shader: thread enabled bool */
      } else if (((shader->key.ge.as_ls || shader->key.ge.as_es) && !shader->is_monolithic) ||
                 (shader->key.ge.as_ngg && !shader->key.ge.as_es)) {
         /* This is NGG VS or NGG TES or VS before GS or TES before GS or VS before TCS.
          * For monolithic LS (VS before TCS) and ES (VS before GS and TES before GS),
          * the if statement is inserted by the wrapper function.
          */
         thread_enabled = si_is_es_thread(ctx); /* 1st shader: thread enabled bool */
      }

      if (thread_enabled) {
         ctx->merged_wrap_if_entry_block = LLVMGetInsertBlock(ctx->ac.builder);
         ctx->merged_wrap_if_label = 11500;
         ac_build_ifcc(&ctx->ac, thread_enabled, ctx->merged_wrap_if_label);
      }

      /* Execute a barrier before the second shader in
       * a merged shader.
       *
       * Execute the barrier inside the conditional block,
       * so that empty waves can jump directly to s_endpgm,
       * which will also signal the barrier.
       * This is possible on gfx9 because an empty wave for the second shader has nothing left
       * to execute. With NGG, empty waves may still be required to export data (e.g. GS output
       * vertices), so we cannot let them exit early.
       *
       * If the shader is TCS and the TCS epilog is present
       * and contains a barrier, it will wait there and then
       * reach s_endpgm.
       */
      if (ctx->stage == MESA_SHADER_TESS_CTRL) {
         /* We need the barrier only if TCS inputs are read from LDS. */
         if (!shader->key.ge.opt.same_patch_vertices ||
             shader->selector->info.base.inputs_read &
             ~shader->selector->info.tcs_vgpr_only_inputs) {
            ac_build_waitcnt(&ctx->ac, AC_WAIT_LGKM);

            /* If both input and output patches are wholly in one wave, we don't need a barrier.
             * That's true when both VS and TCS have the same number of patch vertices and
             * the wave size is a multiple of the number of patch vertices.
             */
            if (!shader->key.ge.opt.same_patch_vertices ||
                ctx->ac.wave_size % sel->info.base.tess.tcs_vertices_out != 0)
               ac_build_s_barrier(&ctx->ac, ctx->stage);
         }
      } else if (ctx->stage == MESA_SHADER_GEOMETRY && !shader->key.ge.as_ngg) {
         /* gfx10_ngg_gs_emit_begin inserts the barrier for NGG. */
         ac_build_waitcnt(&ctx->ac, AC_WAIT_LGKM);
         ac_build_s_barrier(&ctx->ac, ctx->stage);
      }
   }

   ctx->abi.clamp_shadow_reference = true;
   ctx->abi.robust_buffer_access = true;
   ctx->abi.convert_undef_to_zero = true;
   ctx->abi.load_grid_size_from_user_sgpr = true;
   ctx->abi.clamp_div_by_zero = ctx->screen->options.clamp_div_by_zero ||
                                info->options & SI_PROFILE_CLAMP_DIV_BY_ZERO;
   ctx->abi.use_waterfall_for_divergent_tex_samplers = true;

   for (unsigned i = 0; i < info->num_outputs; i++) {
      LLVMTypeRef type = ctx->ac.f32;

      /* Only FS uses unpacked f16. Other stages pack 16-bit outputs into low and high bits of f32. */
      if (nir->info.stage == MESA_SHADER_FRAGMENT &&
          nir_alu_type_get_type_size(ctx->shader->selector->info.output_type[i]) == 16)
         type = ctx->ac.f16;

      for (unsigned j = 0; j < 4; j++) {
         ctx->abi.outputs[i * 4 + j] = ac_build_alloca_undef(&ctx->ac, type, "");
         ctx->abi.is_16bit[i * 4 + j] = type == ctx->ac.f16;
      }
   }

   ac_nir_translate(&ctx->ac, &ctx->abi, &ctx->args, nir);

   switch (sel->stage) {
   case MESA_SHADER_VERTEX:
      if (shader->key.ge.as_ls)
         si_llvm_ls_build_end(ctx);
      else if (shader->key.ge.as_es)
         si_llvm_es_build_end(ctx);
      else if (ngg_cull_shader)
         gfx10_ngg_culling_build_end(ctx);
      else if (shader->key.ge.as_ngg)
         gfx10_ngg_build_end(ctx);
      else
         si_llvm_vs_build_end(ctx);
      break;

   case MESA_SHADER_TESS_CTRL:
      si_llvm_tcs_build_end(ctx);
      break;

   case MESA_SHADER_TESS_EVAL:
      if (ctx->shader->key.ge.as_es)
         si_llvm_es_build_end(ctx);
      else if (ngg_cull_shader)
         gfx10_ngg_culling_build_end(ctx);
      else if (ctx->shader->key.ge.as_ngg)
         gfx10_ngg_build_end(ctx);
      else
         si_llvm_vs_build_end(ctx);
      break;

   case MESA_SHADER_GEOMETRY:
      if (ctx->shader->key.ge.as_ngg)
         gfx10_ngg_gs_build_end(ctx);
      else
         si_llvm_gs_build_end(ctx);
      break;

   case MESA_SHADER_FRAGMENT:
      si_llvm_ps_build_end(ctx);
      break;

   default:
      break;
   }

   si_llvm_build_ret(ctx, ctx->return_value);

   if (free_nir)
      ralloc_free(nir);
   return true;
}

static bool si_should_optimize_less(struct ac_llvm_compiler *compiler,
                                    struct si_shader_selector *sel)
{
   if (!compiler->low_opt_passes)
      return false;

   /* Assume a slow CPU. */
   assert(!sel->screen->info.has_dedicated_vram && sel->screen->info.gfx_level <= GFX8);

   /* For a crazy dEQP test containing 2597 memory opcodes, mostly
    * buffer stores. */
   return sel->stage == MESA_SHADER_COMPUTE && sel->info.num_memory_stores > 1000;
}

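/**
 * Compile a shader from NIR to the final binary, building and linking all
 * LLVM shader parts (prologs, main parts, epilogs) as needed.
 *
 * The overall flow is roughly:
 *    si_llvm_translate_nir()     - LLVM IR for each part
 *    si_build_wrapper_function() - chain the parts (monolithic shaders)
 *    si_llvm_optimize_module()   - run the pass manager
 *    si_compile_llvm()           - emit the ELF binary and config
 */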
bool si_llvm_compile_shader(struct si_screen *sscreen, struct ac_llvm_compiler *compiler,
                            struct si_shader *shader, const struct pipe_stream_output_info *so,
                            struct util_debug_callback *debug, struct nir_shader *nir,
                            bool free_nir)
{
   struct si_shader_selector *sel = shader->selector;
   struct si_shader_context ctx;

   si_llvm_context_init(&ctx, sscreen, compiler, shader->wave_size);
   ctx.so = *so;

   LLVMValueRef ngg_cull_main_fn = NULL;
   if (sel->stage <= MESA_SHADER_TESS_EVAL && shader->key.ge.opt.ngg_culling) {
      if (!si_llvm_translate_nir(&ctx, shader, nir, false, true)) {
         si_llvm_dispose(&ctx);
         return false;
      }
      ngg_cull_main_fn = ctx.main_fn;
      ctx.main_fn = NULL;
   }

   if (!si_llvm_translate_nir(&ctx, shader, nir, free_nir, false)) {
      si_llvm_dispose(&ctx);
      return false;
   }

   if (shader->is_monolithic && sel->stage == MESA_SHADER_VERTEX) {
      LLVMValueRef parts[4];
      unsigned num_parts = 0;
      bool first_is_prolog = false;
      LLVMValueRef main_fn = ctx.main_fn;

      if (ngg_cull_main_fn) {
         if (si_vs_needs_prolog(sel, &shader->key.ge.part.vs.prolog, &shader->key, true, false)) {
            union si_shader_part_key prolog_key;
            si_get_vs_prolog_key(&sel->info, shader->info.num_input_sgprs, true,
                                 &shader->key.ge.part.vs.prolog, shader, &prolog_key);
            prolog_key.vs_prolog.is_monolithic = true;
            si_llvm_build_vs_prolog(&ctx, &prolog_key);
            parts[num_parts++] = ctx.main_fn;
            first_is_prolog = true;
         }
         parts[num_parts++] = ngg_cull_main_fn;
      }

      if (si_vs_needs_prolog(sel, &shader->key.ge.part.vs.prolog, &shader->key, false, false)) {
         union si_shader_part_key prolog_key;
         si_get_vs_prolog_key(&sel->info, shader->info.num_input_sgprs, false,
                              &shader->key.ge.part.vs.prolog, shader, &prolog_key);
         prolog_key.vs_prolog.is_monolithic = true;
         si_llvm_build_vs_prolog(&ctx, &prolog_key);
         parts[num_parts++] = ctx.main_fn;
         if (num_parts == 1)
            first_is_prolog = true;
      }
      parts[num_parts++] = main_fn;

      si_build_wrapper_function(&ctx, parts, num_parts, first_is_prolog ? 1 : 0, 0, false);
   } else if (shader->is_monolithic && sel->stage == MESA_SHADER_TESS_EVAL && ngg_cull_main_fn) {
      LLVMValueRef parts[3], prolog, main_fn = ctx.main_fn;

      /* We reuse the VS prolog code for TES just to load the input VGPRs from LDS. */
      union si_shader_part_key prolog_key;
      memset(&prolog_key, 0, sizeof(prolog_key));
      prolog_key.vs_prolog.num_input_sgprs = shader->info.num_input_sgprs;
      prolog_key.vs_prolog.num_merged_next_stage_vgprs = 5;
      prolog_key.vs_prolog.as_ngg = 1;
      prolog_key.vs_prolog.load_vgprs_after_culling = 1;
      prolog_key.vs_prolog.is_monolithic = true;
      si_llvm_build_vs_prolog(&ctx, &prolog_key);
      prolog = ctx.main_fn;

      parts[0] = ngg_cull_main_fn;
      parts[1] = prolog;
      parts[2] = main_fn;

      si_build_wrapper_function(&ctx, parts, 3, 0, 0, false);
   } else if (shader->is_monolithic && sel->stage == MESA_SHADER_TESS_CTRL) {
      if (sscreen->info.gfx_level >= GFX9) {
         struct si_shader_selector *ls = shader->key.ge.part.tcs.ls;
         LLVMValueRef parts[4];
         bool vs_needs_prolog =
            si_vs_needs_prolog(ls, &shader->key.ge.part.tcs.ls_prolog, &shader->key, false, false);

         /* TCS main part */
         parts[2] = ctx.main_fn;

         /* TCS epilog */
         union si_shader_part_key tcs_epilog_key;
         si_get_tcs_epilog_key(shader, &tcs_epilog_key);
         si_llvm_build_tcs_epilog(&ctx, &tcs_epilog_key);
         parts[3] = ctx.main_fn;

         struct si_shader shader_ls = {};
         shader_ls.selector = ls;
         shader_ls.key.ge.part.vs.prolog = shader->key.ge.part.tcs.ls_prolog;
         shader_ls.key.ge.as_ls = 1;
         shader_ls.key.ge.mono = shader->key.ge.mono;
         shader_ls.key.ge.opt = shader->key.ge.opt;
         shader_ls.key.ge.opt.inline_uniforms = false; /* only TCS can inline uniforms */
         shader_ls.is_monolithic = true;

         nir = si_get_nir_shader(&shader_ls, &free_nir, sel->info.tcs_vgpr_only_inputs);
         si_update_shader_binary_info(shader, nir);

         if (!si_llvm_translate_nir(&ctx, &shader_ls, nir, free_nir, false)) {
            si_llvm_dispose(&ctx);
            return false;
         }
         shader->info.uses_instanceid |= ls->info.uses_instanceid;
         parts[1] = ctx.main_fn;

         /* LS prolog */
         if (vs_needs_prolog) {
            union si_shader_part_key vs_prolog_key;
            si_get_vs_prolog_key(&ls->info, shader_ls.info.num_input_sgprs, false,
                                 &shader->key.ge.part.tcs.ls_prolog, shader, &vs_prolog_key);
            vs_prolog_key.vs_prolog.is_monolithic = true;
            si_llvm_build_vs_prolog(&ctx, &vs_prolog_key);
            parts[0] = ctx.main_fn;
         }

         /* Reset the shader context. */
         ctx.shader = shader;
         ctx.stage = MESA_SHADER_TESS_CTRL;

         si_build_wrapper_function(&ctx, parts + !vs_needs_prolog, 4 - !vs_needs_prolog,
                                   vs_needs_prolog, vs_needs_prolog ? 2 : 1,
                                   shader->key.ge.opt.same_patch_vertices);
      } else {
         LLVMValueRef parts[2];
         union si_shader_part_key epilog_key;

         parts[0] = ctx.main_fn;

         memset(&epilog_key, 0, sizeof(epilog_key));
         epilog_key.tcs_epilog.states = shader->key.ge.part.tcs.epilog;
         si_llvm_build_tcs_epilog(&ctx, &epilog_key);
         parts[1] = ctx.main_fn;

         si_build_wrapper_function(&ctx, parts, 2, 0, 0, false);
      }
   } else if (shader->is_monolithic && sel->stage == MESA_SHADER_GEOMETRY) {
      if (ctx.screen->info.gfx_level >= GFX9) {
         struct si_shader_selector *es = shader->key.ge.part.gs.es;
         LLVMValueRef es_prolog = NULL;
         LLVMValueRef es_main = NULL;
         LLVMValueRef gs_main = ctx.main_fn;

         /* ES main part */
         struct si_shader shader_es = {};
         shader_es.selector = es;
         shader_es.key.ge.part.vs.prolog = shader->key.ge.part.gs.vs_prolog;
         shader_es.key.ge.as_es = 1;
         shader_es.key.ge.as_ngg = shader->key.ge.as_ngg;
         shader_es.key.ge.mono = shader->key.ge.mono;
         shader_es.key.ge.opt = shader->key.ge.opt;
         shader_es.key.ge.opt.inline_uniforms = false; /* only GS can inline uniforms */
         /* kill_outputs was computed based on GS outputs so we can't use it to kill VS outputs */
         shader_es.key.ge.opt.kill_outputs = 0;
         shader_es.is_monolithic = true;

         nir = si_get_nir_shader(&shader_es, &free_nir, 0);
         si_update_shader_binary_info(shader, nir);

         if (!si_llvm_translate_nir(&ctx, &shader_es, nir, free_nir, false)) {
            si_llvm_dispose(&ctx);
            return false;
         }
         shader->info.uses_instanceid |= es->info.uses_instanceid;
         es_main = ctx.main_fn;

         /* ES prolog */
         if (es->stage == MESA_SHADER_VERTEX &&
             si_vs_needs_prolog(es, &shader->key.ge.part.gs.vs_prolog, &shader->key, false, true)) {
            union si_shader_part_key vs_prolog_key;
            si_get_vs_prolog_key(&es->info, shader_es.info.num_input_sgprs, false,
                                 &shader->key.ge.part.gs.vs_prolog, shader, &vs_prolog_key);
            vs_prolog_key.vs_prolog.is_monolithic = true;
            si_llvm_build_vs_prolog(&ctx, &vs_prolog_key);
            es_prolog = ctx.main_fn;
         }

         /* Reset the shader context. */
         ctx.shader = shader;
         ctx.stage = MESA_SHADER_GEOMETRY;

         /* Prepare the array of shader parts. */
         LLVMValueRef parts[4];
         unsigned num_parts = 0, main_part;

         if (es_prolog)
            parts[num_parts++] = es_prolog;

         parts[main_part = num_parts++] = es_main;
         parts[num_parts++] = gs_main;

         si_build_wrapper_function(&ctx, parts, num_parts, main_part, main_part + 1, false);
      } else {
         /* Nothing to do for gfx6-8. The shader has only 1 part and it's ctx.main_fn. */
      }
   } else if (shader->is_monolithic && sel->stage == MESA_SHADER_FRAGMENT) {
      si_llvm_build_monolithic_ps(&ctx, shader);
   }

   si_llvm_optimize_module(&ctx);

   /* Make sure the input is a pointer and not an integer followed by inttoptr. */
   assert(LLVMGetTypeKind(LLVMTypeOf(LLVMGetParam(ctx.main_fn, 0))) == LLVMPointerTypeKind);

   /* Compile to bytecode. */
   if (!si_compile_llvm(sscreen, &shader->binary, &shader->config, compiler, &ctx.ac, debug,
                        sel->stage, si_get_shader_name(shader),
                        si_should_optimize_less(compiler, shader->selector))) {
      si_llvm_dispose(&ctx);
      fprintf(stderr, "LLVM failed to compile shader\n");
      return false;
   }

   si_llvm_dispose(&ctx);
   return true;
}