/*
 * Copyright © 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/* These passes enable converting uniforms to literals when it's profitable,
 * effectively inlining uniform values in the IR. The main benefit is a
 * decrease in register usage, which leads to better SMT (hyperthreading)
 * occupancy. This is accomplished by targeting uniforms that determine
 * whether a conditional branch is taken or a loop can be unrolled.
 *
 * Only uniforms used in these places are analyzed:
 *   1. if conditions
 *   2. loop terminators
 *   3. the init and update values of induction variables used in loop
 *      terminators
 *
 * nir_find_inlinable_uniforms finds uniforms that can be inlined and stores
 * that information in shader_info.
 *
 * nir_inline_uniforms inlines uniform values.
 *
 * (Uniforms must be lowered to load_ubo before calling this.)
 */
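
/* A rough sketch of how a driver might use these passes (driver specifics
 * vary; "ubo0" and "cloned_nir" below are illustrative names, not real
 * API):
 *
 *     nir_find_inlinable_uniforms(nir);
 *
 *     // later, at draw time, once the contents of UBO 0 are known:
 *     uint32_t values[MAX_INLINABLE_UNIFORMS];
 *     for (unsigned i = 0; i < nir->info.num_inlinable_uniforms; i++)
 *        values[i] = ubo0[nir->info.inlinable_uniform_dw_offsets[i]];
 *
 *     nir_inline_uniforms(cloned_nir, nir->info.num_inlinable_uniforms,
 *                         values, nir->info.inlinable_uniform_dw_offsets);
 *
 * Follow-up passes (constant folding, dead control flow elimination, loop
 * unrolling) then benefit from the now-constant branch conditions.
 */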

#include "nir_builder.h"
#include "nir_loop_analyze.h"

/* Maximum byte offset of an inlinable uniform. The offsets are stored in
 * shader_info::inlinable_uniform_dw_offsets[] as uint16_t dword offsets,
 * hence the limit of UINT16_MAX * 4 bytes.
 */
#define MAX_OFFSET (UINT16_MAX * 4)

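/* Recursively check that the given component of the source is computed
 * only from constants and 32-bit loads from UBO 0 at constant offsets,
 * recording the byte offset of every such uniform load. For example,
 * "uniform0 * 2 + uniform1" qualifies, while an expression that reaches a
 * shader input, SSBO, or texture does not.
 */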
static bool
src_only_uses_uniforms(const nir_src *src, int component,
                       uint32_t *uni_offsets, unsigned *num_offsets)
{
   if (!src->is_ssa)
      return false;

   assert(component < src->ssa->num_components);

   nir_instr *instr = src->ssa->parent_instr;

   switch (instr->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *alu = nir_instr_as_alu(instr);

      /* Vector ops only need to check the corresponding component. */
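      /* For example, for b = vec4(a0.x, a1.y, a2.z, a3.w), component 2 of
       * b depends only on a2, so only that source (through its swizzle)
       * is followed.
       */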
      if (nir_op_is_vec(alu->op)) {
         nir_alu_src *alu_src = alu->src + component;
         return src_only_uses_uniforms(&alu_src->src, alu_src->swizzle[0],
                                       uni_offsets, num_offsets);
      }

      /* Return true only if all sources return true. */
      for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
         nir_alu_src *alu_src = alu->src + i;
         unsigned input_sizes = nir_op_infos[alu->op].input_sizes[i];

         if (input_sizes == 0) {
            /* For ops with no fixed input size, each component of the dest
             * is determined only by the same component of the srcs.
             */
            if (!src_only_uses_uniforms(&alu_src->src, alu_src->swizzle[component],
                                        uni_offsets, num_offsets))
               return false;
         } else {
            /* For ops with a fixed input size, all components of the dest
             * are determined by all components of the srcs (vec ops were
             * handled above).
             */
            for (unsigned j = 0; j < input_sizes; j++) {
               if (!src_only_uses_uniforms(&alu_src->src, alu_src->swizzle[j],
                                           uni_offsets, num_offsets))
                  return false;
            }
         }
      }
      return true;
   }

   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
      /* Return true if the intrinsic loads from UBO 0 with a constant
       * offset.
       */
      if (intr->intrinsic == nir_intrinsic_load_ubo &&
          nir_src_is_const(intr->src[0]) &&
          nir_src_as_uint(intr->src[0]) == 0 &&
          nir_src_is_const(intr->src[1]) &&
          /* Make sure the byte offset of the selected component fits. */
          nir_src_as_uint(intr->src[1]) + component * 4 <= MAX_OFFSET &&
          /* TODO: Can't handle other bit sizes for now. */
          intr->dest.ssa.bit_size == 32) {
         uint32_t offset = nir_src_as_uint(intr->src[1]) + component * 4;
         assert(offset <= MAX_OFFSET);

         /* Already recorded by another use? */
         for (unsigned i = 0; i < *num_offsets; i++) {
            if (uni_offsets[i] == offset)
               return true;
         }

         /* Would exceed the inlinable uniform limit. */
         if (*num_offsets == MAX_INLINABLE_UNIFORMS)
            return false;

         /* Record the uniform offset. */
         uni_offsets[(*num_offsets)++] = offset;
         return true;
      }
      return false;
   }

   case nir_instr_type_load_const:
      /* Always return true for constants. */
      return true;

   default:
      return false;
   }
}

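/* Check whether the source is one of the loop's induction variables, and
 * if so, make sure the variable's init and update values use only
 * constants and inlinable uniforms (recording those uniforms).
 */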
static bool
is_induction_variable(const nir_src *src, int component, nir_loop_info *info,
                      uint32_t *uni_offsets, unsigned *num_offsets)
{
   if (!src->is_ssa)
      return false;

   assert(component < src->ssa->num_components);

   /* Return true for induction variables (i.e. i in a for loop). */
   for (unsigned i = 0; i < info->num_induction_vars; i++) {
      nir_loop_induction_variable *var = info->induction_vars + i;
      if (var->def == src->ssa) {
         /* An induction variable must have a constant initial value
          * (i.e. i = 0), a constant update value (i.e. i++) and a constant
          * end condition (i.e. i < 10), so that the exact trip count is
          * known for unrolling the loop.
          *
          * Record the uniforms that need to be inlined for this induction
          * variable's initial and update values to become constant, for
          * example:
          *
          *     for (i = init; i < count; i += step)
          *
          * collects the uniforms "init" and "step" here.
          */
         if (var->init_src) {
            if (!src_only_uses_uniforms(var->init_src, component,
                                        uni_offsets, num_offsets))
               return false;
         }

         if (var->update_src) {
            nir_alu_src *alu_src = var->update_src;
            if (!src_only_uses_uniforms(&alu_src->src,
                                        alu_src->swizzle[component],
                                        uni_offsets, num_offsets))
               return false;
         }

         return true;
      }
   }

   return false;
}

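/* Analyze an if or loop terminator condition and record the uniforms it
 * depends on, but only if inlining all of them would make the whole
 * condition (or trip count) constant.
 */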
static void
add_inlinable_uniforms(const nir_src *cond, nir_loop_info *info,
                       uint32_t *uni_offsets, unsigned *num_offsets)
{
   unsigned new_num = *num_offsets;
   /* An if condition is always a scalar SSA value, so the component is 0. */
   unsigned component = 0;

   /* If loop info is given, this is a loop terminator, so allow an
    * induction variable on one side of the comparison.
    */
   if (info) {
      nir_ssa_scalar cond_scalar = {cond->ssa, 0};

      /* Limit the terminator condition to what loop unrolling supports:
       * a simple comparison (i.e. "i < count" is supported, but
       * "i + 1 < count" is not).
       */
      if (nir_is_supported_terminator_condition(cond_scalar)) {
         nir_alu_instr *alu = nir_instr_as_alu(cond->ssa->parent_instr);

         /* One side of the comparison must be the induction variable; the
          * other side must use only uniforms, so analyze that side below
          * in place of the original condition.
          */
         for (int i = 0; i < 2; i++) {
            if (is_induction_variable(&alu->src[i].src, alu->src[i].swizzle[0],
                                      info, uni_offsets, &new_num)) {
               cond = &alu->src[1 - i].src;
               component = alu->src[1 - i].swizzle[0];
               break;
            }
         }
      }
   }

   /* Only commit the new uniform count when all uniforms in the expression
    * can be inlined; partially inlined uniforms can't eliminate the
    * if/loop.
    *
    * For example, if the per-shader limit is 4 inlinable uniforms and 3
    * have already been added, then for
    *
    *     if (uniform0 + uniform1 == 10)
    *
    * only uniform0 could be inlined before hitting the limit, but the if
    * statement can only be eliminated when both uniform0 and uniform1 are
    * inlined.
    *
    * The same applies to loops whose induction variable init and update
    * values also contain uniforms, like
    *
    *    for (i = uniform0; i < uniform1; i += uniform2)
    *
    * which can only be unrolled when uniform0, uniform1 and uniform2 are
    * all inlined at once.
    */
   if (src_only_uses_uniforms(cond, component, uni_offsets, &new_num))
      *num_offsets = new_num;
}

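/* Recurse through the control flow tree, analyzing every if condition and
 * every loop terminator condition.
 */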
static void
process_node(nir_cf_node *node, nir_loop_info *info,
             uint32_t *uni_offsets, unsigned *num_offsets)
{
   switch (node->type) {
   case nir_cf_node_if: {
      nir_if *if_node = nir_cf_node_as_if(node);
      const nir_src *cond = &if_node->condition;
      add_inlinable_uniforms(cond, info, uni_offsets, num_offsets);

      /* Do not pass the loop info down, so that induction variables are
       * only allowed in the loop terminator "if":
       *
       *     for (i = 0; true; i++)
       *         if (i == count)
       *             if (i == num)
       *                 <no break>
       *             break
       *
       * Here "num" won't be inlined because the inner "if" is not a
       * terminator.
       */
      info = NULL;

      foreach_list_typed(nir_cf_node, nested_node, node, &if_node->then_list)
         process_node(nested_node, info, uni_offsets, num_offsets);
      foreach_list_typed(nir_cf_node, nested_node, node, &if_node->else_list)
         process_node(nested_node, info, uni_offsets, num_offsets);
      break;
   }

   case nir_cf_node_loop: {
      nir_loop *loop = nir_cf_node_as_loop(node);

      /* Replace the loop info; there is no nested loop info currently:
       *
       *     for (i = 0; i < count0; i++)
       *         for (j = 0; j < count1; j++)
       *             if (i == num)
       *
       * Here "num" won't be inlined because "i" is an induction variable
       * of the outer loop.
       */
      info = loop->info;

      foreach_list_typed(nir_cf_node, nested_node, node, &loop->body) {
         bool is_terminator = false;
         list_for_each_entry(nir_loop_terminator, terminator,
                             &info->loop_terminator_list,
                             loop_terminator_link) {
            if (nested_node == &terminator->nif->cf_node) {
               is_terminator = true;
               break;
            }
         }

         /* Allow induction variables for the terminator "if" only:
          *
          *     for (i = 0; i < count; i++)
          *         if (i == num)
          *             <no break>
          *
          * Here "num" won't be inlined because the "if" is not a
          * terminator.
          */
         nir_loop_info *use_info = is_terminator ? info : NULL;
         process_node(nested_node, use_info, uni_offsets, num_offsets);
      }
      break;
   }

   default:
      break;
   }
}

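/* Find uniforms that are profitable to inline and record their dword
 * offsets in shader_info::inlinable_uniform_dw_offsets[] for the driver
 * to consume.
 */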
void
nir_find_inlinable_uniforms(nir_shader *shader)
{
   uint32_t uni_offsets[MAX_INLINABLE_UNIFORMS];
   unsigned num_offsets = 0;

   nir_foreach_function(function, shader) {
      if (function->impl) {
         nir_metadata_require(function->impl, nir_metadata_loop_analysis,
                              nir_var_all, false);

         foreach_list_typed(nir_cf_node, node, node, &function->impl->body)
            process_node(node, NULL, uni_offsets, &num_offsets);
      }
   }

   /* Convert the collected byte offsets to dword offsets. */
   for (unsigned i = 0; i < num_offsets; i++)
      shader->info.inlinable_uniform_dw_offsets[i] = uni_offsets[i] / 4;
   shader->info.num_inlinable_uniforms = num_offsets;
}

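/* Replace the recorded uniform loads with the given constant values.
 * uniform_dw_offsets contains dword offsets, as produced by
 * nir_find_inlinable_uniforms.
 */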
void
nir_inline_uniforms(nir_shader *shader, unsigned num_uniforms,
                    const uint32_t *uniform_values,
                    const uint16_t *uniform_dw_offsets)
{
   if (!num_uniforms)
      return;

   nir_foreach_function(function, shader) {
      if (function->impl) {
         nir_builder b;
         nir_builder_init(&b, function->impl);
         nir_foreach_block(block, function->impl) {
            nir_foreach_instr_safe(instr, block) {
               if (instr->type != nir_instr_type_intrinsic)
                  continue;

               nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

               /* Only replace loads from UBO 0 with constant offsets. */
               if (intr->intrinsic == nir_intrinsic_load_ubo &&
                   nir_src_is_const(intr->src[0]) &&
                   nir_src_as_uint(intr->src[0]) == 0 &&
                   nir_src_is_const(intr->src[1]) &&
                   /* TODO: Can't handle other bit sizes for now. */
                   intr->dest.ssa.bit_size == 32) {
                  unsigned num_components = intr->dest.ssa.num_components;
                  uint32_t offset = nir_src_as_uint(intr->src[1]) / 4;

                  if (num_components == 1) {
                     /* Just replace the uniform load with a constant load. */
                     for (unsigned i = 0; i < num_uniforms; i++) {
                        if (offset == uniform_dw_offsets[i]) {
                           b.cursor = nir_before_instr(&intr->instr);
                           nir_ssa_def *def = nir_imm_int(&b, uniform_values[i]);
                           nir_ssa_def_rewrite_uses(&intr->dest.ssa, def);
                           nir_instr_remove(&intr->instr);
                           break;
                        }
                     }
                  } else {
                     /* Lower the vector uniform load to scalar loads and
                      * replace each component that was found with a
                      * constant load.
                      */
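                     /* For example, a vec4 load at byte offset 16 whose
                      * dword offset 5 (byte 20) is inlinable becomes:
                      *
                      *    vec4(load_ubo(0, 16), imm(value),
                      *         load_ubo(0, 24), load_ubo(0, 28))
                      */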
                     uint32_t max_offset = offset + num_components;
                     nir_ssa_def *components[NIR_MAX_VEC_COMPONENTS] = {0};
                     bool found = false;

                     b.cursor = nir_before_instr(&intr->instr);

                     /* Find the components to replace. */
                     for (unsigned i = 0; i < num_uniforms; i++) {
                        uint32_t uni_offset = uniform_dw_offsets[i];
                        if (uni_offset >= offset && uni_offset < max_offset) {
                           int index = uni_offset - offset;
                           components[index] = nir_imm_int(&b, uniform_values[i]);
                           found = true;
                        }
                     }

                     if (!found)
                        continue;

                     /* Create scalar uniform loads for the remaining
                      * components.
                      */
                     for (unsigned i = 0; i < num_components; i++) {
                        if (!components[i]) {
                           uint32_t scalar_offset = (offset + i) * 4;
                           components[i] = nir_load_ubo(&b, 1, intr->dest.ssa.bit_size,
                                                        intr->src[0].ssa,
                                                        nir_imm_int(&b, scalar_offset));
                           nir_intrinsic_instr *load =
                              nir_instr_as_intrinsic(components[i]->parent_instr);
                           nir_intrinsic_set_align(load, NIR_ALIGN_MUL_MAX, scalar_offset);
                           nir_intrinsic_set_range_base(load, scalar_offset);
                           nir_intrinsic_set_range(load, 4);
                        }
                     }

                     /* Replace the original uniform load with a vector of
                      * the inlined constants and the scalar loads.
                      */
                     nir_ssa_def_rewrite_uses(&intr->dest.ssa,
                                              nir_vec(&b, components, num_components));
                     nir_instr_remove(&intr->instr);
                  }
               }
            }
         }

         nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                               nir_metadata_dominance);
      }
   }
}