/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"

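/* Recursively walk a nir_constant alongside the matching deref type, emitting
 * a load_const followed by a store_deref for each vector or scalar leaf.
 */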
static void
build_constant_load(nir_builder *b, nir_deref_instr *deref, nir_constant *c)
{
   if (glsl_type_is_vector_or_scalar(deref->type)) {
      nir_load_const_instr *load =
         nir_load_const_instr_create(b->shader,
                                     glsl_get_vector_elements(deref->type),
                                     glsl_get_bit_size(deref->type));
      memcpy(load->value, c->values, sizeof(*load->value) * load->def.num_components);
      nir_builder_instr_insert(b, &load->instr);
      nir_store_deref(b, deref, &load->def, ~0);
   } else if (glsl_type_is_struct_or_ifc(deref->type)) {
      unsigned len = glsl_get_length(deref->type);
      for (unsigned i = 0; i < len; i++) {
         build_constant_load(b, nir_build_deref_struct(b, deref, i),
                             c->elements[i]);
      }
   } else {
      assert(glsl_type_is_array(deref->type) ||
             glsl_type_is_matrix(deref->type));
      unsigned len = glsl_get_length(deref->type);
      for (unsigned i = 0; i < len; i++) {
         build_constant_load(b,
                             nir_build_deref_array_imm(b, deref, i),
                             c->elements[i]);
      }
   }
}

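/* Lower every constant or pointer initializer in var_list whose mode matches
 * modes into explicit stores at the builder's cursor.  Returns true if any
 * initializer was lowered.
 */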
static bool
lower_const_initializer(struct nir_builder *b, struct exec_list *var_list,
                        nir_variable_mode modes)
{
   bool progress = false;

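   /* Emit all initializer stores at the very top of the function so they
    * execute before anything that might read the variables.
    */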
   b->cursor = nir_before_cf_list(&b->impl->body);

   nir_foreach_variable_in_list(var, var_list) {
      if (!(var->data.mode & modes))
         continue;

      if (var->constant_initializer) {
         build_constant_load(b, nir_build_deref_var(b, var),
                             var->constant_initializer);

         progress = true;
         var->constant_initializer = NULL;
      } else if (var->pointer_initializer) {
         nir_deref_instr *src_deref = nir_build_deref_var(b, var->pointer_initializer);
         nir_deref_instr *dst_deref = nir_build_deref_var(b, var);

         /* Note that this stores a pointer to src into dst */
         nir_store_deref(b, dst_deref, &src_deref->dest.ssa, ~0);

         progress = true;
         var->pointer_initializer = NULL;
      }
   }

   return progress;
}

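/* A typical caller would run this as a whole-shader pass, for example
 * (illustrative only, not taken from any particular driver):
 *
 *    NIR_PASS(progress, shader, nir_lower_variable_initializers, nir_var_all);
 */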
bool
nir_lower_variable_initializers(nir_shader *shader, nir_variable_mode modes)
{
   bool progress = false;

   /* Only some variables have initializers that we want to lower.  Others,
    * such as uniforms, have initializers that are useful later during
    * linking, so we want to skip over those.  Restrict to only the variable
    * modes where initializers make sense so that callers can pass
    * nir_var_all.
    */
   modes &= nir_var_shader_out |
            nir_var_shader_temp |
            nir_var_function_temp |
            nir_var_system_value;

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      bool impl_progress = false;

      nir_builder builder;
      nir_builder_init(&builder, function->impl);

      if ((modes & ~nir_var_function_temp) && function->is_entrypoint) {
         impl_progress |= lower_const_initializer(&builder,
                                                  &shader->variables,
                                                  modes);
      }

      if (modes & nir_var_function_temp) {
         impl_progress |= lower_const_initializer(&builder,
                                                  &function->impl->locals,
                                                  nir_var_function_temp);
      }

      if (impl_progress) {
         progress = true;
         nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                               nir_metadata_dominance |
                                               nir_metadata_live_ssa_defs);
      } else {
         nir_metadata_preserve(function->impl, nir_metadata_all);
      }
   }

   return progress;
}

/* Zero-initialize shared_size bytes of shared memory by splitting the work
 * into writes of chunk_size bytes distributed among the invocations of the
 * workgroup.
 *
 * Used for implementing VK_KHR_zero_initialize_workgroup_memory.
 */
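/* For example, a Vulkan driver implementing the extension might call this
 * along the lines of (illustrative sketch; the chunk size of 16 bytes is an
 * arbitrary choice, not taken from any particular driver):
 *
 *    if (shader->info.shared_size > 0)
 *       nir_zero_initialize_shared_memory(shader, shader->info.shared_size, 16);
 */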
bool
nir_zero_initialize_shared_memory(nir_shader *shader,
                                  const unsigned shared_size,
                                  const unsigned chunk_size)
{
   assert(shared_size > 0);
   assert(chunk_size > 0);
   assert(chunk_size % 4 == 0);

   nir_builder b;
   nir_builder_init(&b, nir_shader_get_entrypoint(shader));
   b.cursor = nir_before_cf_list(&b.impl->body);

   assert(!shader->info.workgroup_size_variable);
   const unsigned local_count = shader->info.workgroup_size[0] *
                                shader->info.workgroup_size[1] *
                                shader->info.workgroup_size[2];

   /* The initialization logic is simplified if we can always split the memory
    * in full chunk_size units.
    */
   assert(shared_size % chunk_size == 0);

   const unsigned chunk_comps = chunk_size / 4;

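   /* Each invocation tracks its current byte offset into shared memory in a
    * function-temp iterator, starting at local_index * chunk_size.
    */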
   nir_variable *it = nir_local_variable_create(b.impl, glsl_uint_type(),
                                                "zero_init_iterator");
   nir_ssa_def *local_index = nir_load_local_invocation_index(&b);
   nir_ssa_def *first_offset = nir_imul_imm(&b, local_index, chunk_size);
   nir_store_var(&b, it, first_offset, 0x1);

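   /* Loop until the offset runs past the end of shared memory.  Invocation i
    * clears the chunks at byte offsets i * chunk_size,
    * (i + local_count) * chunk_size, and so on, so together the invocations
    * cover all shared_size bytes.
    */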
   nir_loop *loop = nir_push_loop(&b);
   {
      nir_ssa_def *offset = nir_load_var(&b, it);

      nir_push_if(&b, nir_uge(&b, offset, nir_imm_int(&b, shared_size)));
      {
         nir_jump(&b, nir_jump_break);
      }
      nir_pop_if(&b, NULL);

      nir_store_shared(&b, nir_imm_zero(&b, chunk_comps, 32), offset,
                       .align_mul=chunk_size,
                       .write_mask=((1 << chunk_comps) - 1));

      nir_ssa_def *new_offset = nir_iadd_imm(&b, offset, chunk_size * local_count);
      nir_store_var(&b, it, new_offset, 0x1);
   }
   nir_pop_loop(&b, loop);

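   /* Make the zeroed memory visible to every invocation in the workgroup
    * before any of the shader's own shared memory accesses execute.
    */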
   nir_scoped_barrier(&b, NIR_SCOPE_WORKGROUP, NIR_SCOPE_WORKGROUP,
                      NIR_MEMORY_ACQ_REL, nir_var_mem_shared);

   nir_metadata_preserve(nir_shader_get_entrypoint(shader), nir_metadata_none);

   return true;
}