/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_gl_types.h"
#include "nir_deref.h"
#include "gl_nir_linker.h"
#include "compiler/glsl/ir_uniform.h" /* for gl_uniform_storage */
#include "linker_util.h"
#include "util/u_dynarray.h"
#include "main/consts_exts.h"
#include "main/shader_types.h"
/**
 * This file does the common linking of GLSL uniforms using NIR instead of
 * GLSL IR, as a counterpart to glsl/link_uniforms.cpp.
 */

#define UNMAPPED_UNIFORM_LOC ~0u

struct uniform_array_info {
   /** List of dereferences of the uniform array. */
   struct util_dynarray *deref_list;

   /** Set of bit-flags to note which array elements have been accessed. */
   BITSET_WORD *indices;
};

/**
 * Built-in / reserved GL variable names start with "gl_"
 */
static inline bool
is_gl_identifier(const char *s)
{
   return s && s[0] == 'g' && s[1] == 'l' && s[2] == '_';
}

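/* Count how many gl_uniform_storage entries a type will need: structs and
 * interfaces contribute one entry per leaf member, while an array of basic
 * types collapses into a single entry. For example, a hypothetical
 * "struct { float a; vec4 b[4]; }" needs two entries.
 */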
static unsigned
uniform_storage_size(const struct glsl_type *type)
{
   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_STRUCT:
   case GLSL_TYPE_INTERFACE: {
      unsigned size = 0;
      for (unsigned i = 0; i < glsl_get_length(type); i++)
         size += uniform_storage_size(glsl_get_struct_field(type, i));
      return size;
   }
   case GLSL_TYPE_ARRAY: {
      const struct glsl_type *e_type = glsl_get_array_element(type);
      enum glsl_base_type e_base_type = glsl_get_base_type(e_type);
      if (e_base_type == GLSL_TYPE_STRUCT ||
          e_base_type == GLSL_TYPE_INTERFACE ||
          e_base_type == GLSL_TYPE_ARRAY) {
         unsigned length = !glsl_type_is_unsized_array(type) ?
            glsl_get_length(type) : 1;
         return length * uniform_storage_size(e_type);
      } else
         return 1;
   }
   default:
      return 1;
   }
}

/**
 * Update the sizes of linked shader uniform arrays to the maximum
 * array index used.
 *
 * From page 81 (page 95 of the PDF) of the OpenGL 2.1 spec:
 *
 *     If one or more elements of an array are active,
 *     GetActiveUniform will return the name of the array in name,
 *     subject to the restrictions listed above. The type of the array
 *     is returned in type. The size parameter contains the highest
 *     array element index used, plus one. The compiler or linker
 *     determines the highest index used. There will be only one
 *     active uniform reported by the GL per uniform array.
 */
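/* For example, given a hypothetical "uniform float u[8]" where only
 * u[0]..u[2] are ever referenced in any stage, the array is resized to
 * float[3] and the recorded dereferences are retyped to match.
 */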
static void
update_array_sizes(struct gl_shader_program *prog, nir_variable *var,
                   struct hash_table **referenced_uniforms,
                   unsigned current_var_stage)
{
   /* For now we only resize 1D arrays.
    * TODO: add support for resizing more complex array types ??
    */
   if (!glsl_type_is_array(var->type) ||
       glsl_type_is_array(glsl_get_array_element(var->type)))
      return;

   /* GL_ARB_uniform_buffer_object says that std140 uniforms
    * will not be eliminated. Since we always do std140, just
    * don't resize arrays in UBOs.
    *
    * Atomic counters are supposed to get deterministic
    * locations assigned based on the declaration ordering and
    * sizes, array compaction would mess that up.
    *
    * Subroutine uniforms are not removed.
    */
   if (nir_variable_is_in_block(var) || glsl_contains_atomic(var->type) ||
       glsl_get_base_type(glsl_without_array(var->type)) == GLSL_TYPE_SUBROUTINE ||
       var->constant_initializer)
      return;

   struct uniform_array_info *ainfo = NULL;
   int words = BITSET_WORDS(glsl_array_size(var->type));
   int max_array_size = 0;
   for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
      struct gl_linked_shader *sh = prog->_LinkedShaders[stage];
      if (!sh)
         continue;

      struct hash_entry *entry =
         _mesa_hash_table_search(referenced_uniforms[stage], var->name);
      if (entry) {
         ainfo = (struct uniform_array_info *) entry->data;
         max_array_size = MAX2(BITSET_LAST_BIT_SIZED(ainfo->indices, words),
                               max_array_size);
      }

      if (max_array_size == glsl_array_size(var->type))
         return;
   }

   if (max_array_size != glsl_array_size(var->type)) {
      /* If this is a built-in uniform (i.e., it's backed by some
       * fixed-function state), adjust the number of state slots to
       * match the new array size. The number of slots per array entry
       * is not known. It seems safe to assume that the total number of
       * slots is an integer multiple of the number of array elements.
       * Determine the number of slots per array element by dividing by
       * the old (total) size.
       */
      const unsigned num_slots = var->num_state_slots;
      if (num_slots > 0) {
         var->num_state_slots =
            (max_array_size * (num_slots / glsl_array_size(var->type)));
      }

      var->type = glsl_array_type(glsl_get_array_element(var->type),
                                  max_array_size, 0);

      /* Update the types of dereferences in case we changed any. */
      struct hash_entry *entry =
         _mesa_hash_table_search(referenced_uniforms[current_var_stage], var->name);
      if (entry) {
         struct uniform_array_info *ainfo =
            (struct uniform_array_info *) entry->data;
         util_dynarray_foreach(ainfo->deref_list, nir_deref_instr *, deref) {
            (*deref)->type = var->type;
         }
      }
   }
}

static void
nir_setup_uniform_remap_tables(const struct gl_constants *consts,
                               struct gl_shader_program *prog)
{
   unsigned total_entries = prog->NumExplicitUniformLocations;

   /* For glsl this may have been allocated by reserve_explicit_locations() so
    * that we can keep track of unused uniforms with explicit locations.
    */
   assert(!prog->data->spirv ||
          (prog->data->spirv && !prog->UniformRemapTable));
   if (!prog->UniformRemapTable) {
      prog->UniformRemapTable = rzalloc_array(prog,
                                              struct gl_uniform_storage *,
                                              prog->NumUniformRemapTable);
   }

   union gl_constant_value *data =
      rzalloc_array(prog->data,
                    union gl_constant_value, prog->data->NumUniformDataSlots);
   if (!prog->UniformRemapTable || !data) {
      linker_error(prog, "Out of memory during linking.\n");
      return;
   }
   prog->data->UniformDataSlots = data;

   prog->data->UniformDataDefaults =
      rzalloc_array(prog->data->UniformDataSlots,
                    union gl_constant_value, prog->data->NumUniformDataSlots);

   unsigned data_pos = 0;

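   /* Each remap-table slot corresponds to one GL uniform location and points
    * at the gl_uniform_storage backing it; an array occupies consecutive
    * slots. For example, a hypothetical "vec4 u[3]" with explicit location 5
    * fills slots 5..7, all pointing at the same storage entry.
    */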
   /* Reserve all the explicit locations of the active uniforms. */
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

      if (uniform->hidden)
         continue;

      if (uniform->is_shader_storage ||
          glsl_get_base_type(uniform->type) == GLSL_TYPE_SUBROUTINE)
         continue;

      if (prog->data->UniformStorage[i].remap_location == UNMAPPED_UNIFORM_LOC)
         continue;

      /* How many new entries for this uniform? */
      const unsigned entries = MAX2(1, uniform->array_elements);
      unsigned num_slots = glsl_get_component_slots(uniform->type);

      uniform->storage = &data[data_pos];

      /* Set remap table entries to point to the correct gl_uniform_storage. */
      for (unsigned j = 0; j < entries; j++) {
         unsigned element_loc = uniform->remap_location + j;
         prog->UniformRemapTable[element_loc] = uniform;

         data_pos += num_slots;
      }
   }

   /* Reserve locations for the rest of the uniforms. */
   if (prog->data->spirv)
      link_util_update_empty_uniform_locations(prog);

   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

      if (uniform->hidden)
         continue;

      if (uniform->is_shader_storage ||
          glsl_get_base_type(uniform->type) == GLSL_TYPE_SUBROUTINE)
         continue;

      /* Built-in uniforms should not get any location. */
      if (uniform->builtin)
         continue;

      /* Explicit ones have been set already. */
      if (uniform->remap_location != UNMAPPED_UNIFORM_LOC)
         continue;

      /* How many entries for this uniform? */
      const unsigned entries = MAX2(1, uniform->array_elements);

      /* Add new entries to the total amount for checking against
       * MAX_UNIFORM_LOCATIONS. This only applies to the default uniform
       * block (-1), because locations of uniform block entries are not
       * assignable.
       */
      if (prog->data->UniformStorage[i].block_index == -1)
         total_entries += entries;

      unsigned location =
         link_util_find_empty_block(prog, &prog->data->UniformStorage[i]);

      if (location == -1) {
         location = prog->NumUniformRemapTable;

         /* resize remap table to fit new entries */
         prog->UniformRemapTable =
            reralloc(prog,
                     prog->UniformRemapTable,
                     struct gl_uniform_storage *,
                     prog->NumUniformRemapTable + entries);
         prog->NumUniformRemapTable += entries;
      }

      /* set the base location in remap table for the uniform */
      uniform->remap_location = location;

      unsigned num_slots = glsl_get_component_slots(uniform->type);

      if (uniform->block_index == -1)
         uniform->storage = &data[data_pos];

      /* Set remap table entries to point to the correct gl_uniform_storage. */
      for (unsigned j = 0; j < entries; j++) {
         unsigned element_loc = uniform->remap_location + j;
         prog->UniformRemapTable[element_loc] = uniform;

         if (uniform->block_index == -1)
            data_pos += num_slots;
      }
   }

   /* Verify that the total number of entries for explicit and implicit
    * locations is less than MAX_UNIFORM_LOCATIONS.
    */
   if (total_entries > consts->MaxUserAssignableUniformLocations) {
      linker_error(prog, "count of uniform locations > MAX_UNIFORM_LOCATIONS "
                   "(%u > %u)", total_entries,
                   consts->MaxUserAssignableUniformLocations);
   }

   /* Reserve all the explicit locations of the active subroutine uniforms. */
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

      if (glsl_get_base_type(uniform->type) != GLSL_TYPE_SUBROUTINE)
         continue;

      if (prog->data->UniformStorage[i].remap_location == UNMAPPED_UNIFORM_LOC)
         continue;

      /* How many new entries for this uniform? */
      const unsigned entries =
         MAX2(1, prog->data->UniformStorage[i].array_elements);

      uniform->storage = &data[data_pos];

      unsigned num_slots = glsl_get_component_slots(uniform->type);
      unsigned mask = prog->data->linked_stages;
      while (mask) {
         const int j = u_bit_scan(&mask);
         struct gl_program *p = prog->_LinkedShaders[j]->Program;

         if (!prog->data->UniformStorage[i].opaque[j].active)
            continue;

         /* Set remap table entries to point to the correct gl_uniform_storage. */
         for (unsigned k = 0; k < entries; k++) {
            unsigned element_loc =
               prog->data->UniformStorage[i].remap_location + k;
            p->sh.SubroutineUniformRemapTable[element_loc] =
               &prog->data->UniformStorage[i];

            data_pos += num_slots;
         }
      }
   }

   /* reserve subroutine locations */
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

      if (glsl_get_base_type(uniform->type) != GLSL_TYPE_SUBROUTINE)
         continue;

      if (prog->data->UniformStorage[i].remap_location !=
          UNMAPPED_UNIFORM_LOC)
         continue;

      const unsigned entries =
         MAX2(1, prog->data->UniformStorage[i].array_elements);

      uniform->storage = &data[data_pos];

      unsigned num_slots = glsl_get_component_slots(uniform->type);
      unsigned mask = prog->data->linked_stages;
      while (mask) {
         const int j = u_bit_scan(&mask);
         struct gl_program *p = prog->_LinkedShaders[j]->Program;

         if (!prog->data->UniformStorage[i].opaque[j].active)
            continue;

         p->sh.SubroutineUniformRemapTable =
            reralloc(p,
                     p->sh.SubroutineUniformRemapTable,
                     struct gl_uniform_storage *,
                     p->sh.NumSubroutineUniformRemapTable + entries);

         for (unsigned k = 0; k < entries; k++) {
            p->sh.SubroutineUniformRemapTable[p->sh.NumSubroutineUniformRemapTable + k] =
               &prog->data->UniformStorage[i];

            data_pos += num_slots;
         }
         prog->data->UniformStorage[i].remap_location =
            p->sh.NumSubroutineUniformRemapTable;
         p->sh.NumSubroutineUniformRemapTable += entries;
      }
   }

   /* assign storage to hidden uniforms */
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

      if (!uniform->hidden ||
          glsl_get_base_type(uniform->type) == GLSL_TYPE_SUBROUTINE)
         continue;

      const unsigned entries =
         MAX2(1, prog->data->UniformStorage[i].array_elements);

      uniform->storage = &data[data_pos];

      unsigned num_slots = glsl_get_component_slots(uniform->type);
      for (unsigned k = 0; k < entries; k++)
         data_pos += num_slots;
   }
}

static void
add_var_use_deref(nir_deref_instr *deref, struct hash_table *live,
                  struct array_deref_range **derefs, unsigned *derefs_size)
{
   nir_deref_path path;
   nir_deref_path_init(&path, deref, NULL);

   deref = path.path[0];
   if (deref->deref_type != nir_deref_type_var ||
       !nir_deref_mode_is_one_of(deref, nir_var_uniform |
                                        nir_var_mem_ubo |
                                        nir_var_mem_ssbo |
                                        nir_var_image)) {
      nir_deref_path_finish(&path);
      return;
   }

   /* Number of derefs used in current processing. */
   unsigned num_derefs = 0;

   const struct glsl_type *deref_type = deref->var->type;
   nir_deref_instr **p = &path.path[1];
   for (; *p; p++) {
      if ((*p)->deref_type == nir_deref_type_array) {

         /* Skip matrix dereferences */
         if (!glsl_type_is_array(deref_type))
            break;

         if ((num_derefs + 1) * sizeof(struct array_deref_range) > *derefs_size) {
            void *ptr = reralloc_size(NULL, *derefs, *derefs_size + 4096);

            if (ptr == NULL) {
               nir_deref_path_finish(&path);
               return;
            }

            *derefs_size += 4096;
            *derefs = (struct array_deref_range *)ptr;
         }

         struct array_deref_range *dr = &(*derefs)[num_derefs];
         num_derefs++;

         dr->size = glsl_get_length(deref_type);

         if (nir_src_is_const((*p)->arr.index)) {
            dr->index = nir_src_as_uint((*p)->arr.index);
         } else {
            /* An unsized array can occur at the end of an SSBO. We can't track
             * accesses to such an array, so bail.
             */
            if (dr->size == 0) {
               nir_deref_path_finish(&path);
               return;
            }

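            /* A dynamic index is flagged by setting index == size;
             * link_util_mark_array_elements_referenced() is expected to treat
             * that as a reference to every element of this dimension.
             */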
            dr->index = dr->size;
         }

         deref_type = glsl_get_array_element(deref_type);
      } else if ((*p)->deref_type == nir_deref_type_struct) {
         /* We have reached the end of the array. */
         break;
      }
   }

   nir_deref_path_finish(&path);

   struct uniform_array_info *ainfo = NULL;

   struct hash_entry *entry =
      _mesa_hash_table_search(live, deref->var->name);
   if (!entry && glsl_type_is_array(deref->var->type)) {
      ainfo = ralloc(live, struct uniform_array_info);

      unsigned num_bits = MAX2(1, glsl_get_aoa_size(deref->var->type));
      ainfo->indices = rzalloc_array(live, BITSET_WORD, BITSET_WORDS(num_bits));

      ainfo->deref_list = ralloc(live, struct util_dynarray);
      util_dynarray_init(ainfo->deref_list, live);
   }

   if (entry)
      ainfo = (struct uniform_array_info *) entry->data;

   if (glsl_type_is_array(deref->var->type)) {
      /* Count the "depth" of the arrays-of-arrays. */
      unsigned array_depth = 0;
      for (const struct glsl_type *type = deref->var->type;
           glsl_type_is_array(type);
           type = glsl_get_array_element(type)) {
         array_depth++;
      }

      link_util_mark_array_elements_referenced(*derefs, num_derefs, array_depth,
                                               ainfo->indices);

      util_dynarray_append(ainfo->deref_list, nir_deref_instr *, deref);
   }

   assert(deref->modes == deref->var->data.mode);
   _mesa_hash_table_insert(live, deref->var->name, ainfo);
}

/* Iterate over the shader and collect information about uniform use */
static void
add_var_use_shader(nir_shader *shader, struct hash_table *live)
{
   /* Currently allocated buffer block of derefs. */
   struct array_deref_range *derefs = NULL;

   /* Size of the derefs buffer in bytes. */
   unsigned derefs_size = 0;

   nir_foreach_function(function, shader) {
      if (function->impl) {
         nir_foreach_block(block, function->impl) {
            nir_foreach_instr(instr, block) {
               if (instr->type == nir_instr_type_intrinsic) {
                  nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
                  switch (intr->intrinsic) {
                  case nir_intrinsic_atomic_counter_read_deref:
                  case nir_intrinsic_atomic_counter_inc_deref:
                  case nir_intrinsic_atomic_counter_pre_dec_deref:
                  case nir_intrinsic_atomic_counter_post_dec_deref:
                  case nir_intrinsic_atomic_counter_add_deref:
                  case nir_intrinsic_atomic_counter_min_deref:
                  case nir_intrinsic_atomic_counter_max_deref:
                  case nir_intrinsic_atomic_counter_and_deref:
                  case nir_intrinsic_atomic_counter_or_deref:
                  case nir_intrinsic_atomic_counter_xor_deref:
                  case nir_intrinsic_atomic_counter_exchange_deref:
                  case nir_intrinsic_atomic_counter_comp_swap_deref:
                  case nir_intrinsic_image_deref_load:
                  case nir_intrinsic_image_deref_store:
                  case nir_intrinsic_image_deref_atomic_add:
                  case nir_intrinsic_image_deref_atomic_umin:
                  case nir_intrinsic_image_deref_atomic_imin:
                  case nir_intrinsic_image_deref_atomic_umax:
                  case nir_intrinsic_image_deref_atomic_imax:
                  case nir_intrinsic_image_deref_atomic_and:
                  case nir_intrinsic_image_deref_atomic_or:
                  case nir_intrinsic_image_deref_atomic_xor:
                  case nir_intrinsic_image_deref_atomic_exchange:
                  case nir_intrinsic_image_deref_atomic_comp_swap:
                  case nir_intrinsic_image_deref_size:
                  case nir_intrinsic_image_deref_samples:
                  case nir_intrinsic_load_deref:
                  case nir_intrinsic_store_deref:
                     add_var_use_deref(nir_src_as_deref(intr->src[0]), live,
                                       &derefs, &derefs_size);
                     break;

                  default:
                     /* Nothing to do */
                     break;
                  }
               } else if (instr->type == nir_instr_type_tex) {
                  nir_tex_instr *tex_instr = nir_instr_as_tex(instr);
                  int sampler_idx =
                     nir_tex_instr_src_index(tex_instr,
                                             nir_tex_src_sampler_deref);
                  int texture_idx =
                     nir_tex_instr_src_index(tex_instr,
                                             nir_tex_src_texture_deref);

                  if (sampler_idx >= 0) {
                     nir_deref_instr *deref =
                        nir_src_as_deref(tex_instr->src[sampler_idx].src);
                     add_var_use_deref(deref, live, &derefs, &derefs_size);
                  }

                  if (texture_idx >= 0) {
                     nir_deref_instr *deref =
                        nir_src_as_deref(tex_instr->src[texture_idx].src);
                     add_var_use_deref(deref, live, &derefs, &derefs_size);
                  }
               }
            }
         }
      }
   }

   ralloc_free(derefs);
}

static void
mark_stage_as_active(struct gl_uniform_storage *uniform,
                     unsigned stage)
{
   uniform->active_shader_mask |= 1 << stage;
}

/* Used to build a tree representing the glsl_type so that we can have a place
 * to store the next index for opaque types. Array types are expanded so that
 * they have a single child which is used for all elements of the array.
 * Struct types have a child for each member. The tree is walked while
 * processing a uniform so that we can recognise when an opaque type is
 * encountered a second time in order to reuse the same range of indices that
 * was reserved the first time. That way the sampler indices can be arranged
 * so that members of an array are placed sequentially even if the array is an
 * array of structs containing other opaque members.
 */
struct type_tree_entry {
   /* For opaque types, this will be the next index to use. If we haven’t
    * encountered this member yet, it will be UINT_MAX.
    */
   unsigned next_index;
   unsigned array_size;
   struct type_tree_entry *parent;
   struct type_tree_entry *next_sibling;
   struct type_tree_entry *children;
};
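
/* As an illustration, a hypothetical "uniform struct { sampler2D t; float f; }
 * s[2];" builds a root entry with array_size == 2 whose single child is the
 * struct entry with two leaf children. The sampler leaf's next_index is set
 * when s[0].t is processed and reused for s[1].t, so both array elements get
 * consecutive sampler indices.
 */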

struct nir_link_uniforms_state {
   /* per-whole program */
   unsigned num_hidden_uniforms;
   unsigned num_values;
   unsigned max_uniform_location;

   /* per-shader stage */
   unsigned next_bindless_image_index;
   unsigned next_bindless_sampler_index;
   unsigned next_image_index;
   unsigned next_sampler_index;
   unsigned next_subroutine;
   unsigned num_shader_samplers;
   unsigned num_shader_images;
   unsigned num_shader_uniform_components;
   unsigned shader_samplers_used;
   unsigned shader_shadow_samplers;
   unsigned shader_storage_blocks_write_access;
   struct gl_program_parameter_list *params;

   /* per-variable */
   nir_variable *current_var;
   const struct glsl_type *current_ifc_type;
   int offset;
   bool var_is_in_block;
   bool set_top_level_array;
   int top_level_array_size;
   int top_level_array_stride;

   struct type_tree_entry *current_type;
   struct hash_table *referenced_uniforms[MESA_SHADER_STAGES];
   struct hash_table *uniform_hash;
};

static void
add_parameter(struct gl_uniform_storage *uniform,
              const struct gl_constants *consts,
              struct gl_shader_program *prog,
              const struct glsl_type *type,
              struct nir_link_uniforms_state *state)
{
   /* Builtin uniforms are backed by PROGRAM_STATE_VAR, so don't add them as
    * uniforms.
    */
   if (uniform->builtin)
      return;

   if (!state->params || uniform->is_shader_storage ||
       (glsl_contains_opaque(type) && !state->current_var->data.bindless))
      return;

   unsigned num_params = glsl_get_aoa_size(type);
   num_params = MAX2(num_params, 1);
   num_params *= glsl_get_matrix_columns(glsl_without_array(type));

   bool is_dual_slot = glsl_type_is_dual_slot(glsl_without_array(type));
   if (is_dual_slot)
      num_params *= 2;

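   /* A dual-slot type occupies two parameter slots per vector; with packed
    * driver storage the emission loop below splits the components across the
    * pair, so e.g. a dvec3's six components become a 4-component parameter
    * followed by a 2-component one.
    */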
   struct gl_program_parameter_list *params = state->params;
   int base_index = params->NumParameters;
   _mesa_reserve_parameter_storage(params, num_params, num_params);

   if (consts->PackedDriverUniformStorage) {
      for (unsigned i = 0; i < num_params; i++) {
         unsigned dmul = glsl_type_is_64bit(glsl_without_array(type)) ? 2 : 1;
         unsigned comps = glsl_get_vector_elements(glsl_without_array(type)) * dmul;
         if (is_dual_slot) {
            if (i & 0x1)
               comps -= 4;
            else
               comps = 4;
         }

         /* TODO: This will waste space with 1 and 3 16-bit components. */
         if (glsl_type_is_16bit(glsl_without_array(type)))
            comps = DIV_ROUND_UP(comps, 2);

         _mesa_add_parameter(params, PROGRAM_UNIFORM, uniform->name.string, comps,
                             glsl_get_gl_type(type), NULL, NULL, false);
      }
   } else {
      for (unsigned i = 0; i < num_params; i++) {
         _mesa_add_parameter(params, PROGRAM_UNIFORM, uniform->name.string, 4,
                             glsl_get_gl_type(type), NULL, NULL, true);
      }
   }

   /* Each Parameter will hold the index to the backing uniform storage.
    * This avoids relying on names to match parameters and uniform
    * storages.
    */
   for (unsigned i = 0; i < num_params; i++) {
      struct gl_program_parameter *param = &params->Parameters[base_index + i];
      param->UniformStorageIndex = uniform - prog->data->UniformStorage;
      param->MainUniformStorageIndex = state->current_var->data.location;
   }
}

static unsigned
get_next_index(struct nir_link_uniforms_state *state,
               const struct gl_uniform_storage *uniform,
               unsigned *next_index, bool *initialised)
{
   /* If we’ve already calculated an index for this member then we can just
    * offset from there.
    */
   if (state->current_type->next_index == UINT_MAX) {
      /* Otherwise we need to reserve enough indices for all of the arrays
       * enclosing this member.
       */

      unsigned array_size = 1;

      for (const struct type_tree_entry *p = state->current_type;
           p;
           p = p->parent) {
         array_size *= p->array_size;
      }

      state->current_type->next_index = *next_index;
      *next_index += array_size;
      *initialised = true;
   } else
      *initialised = false;

   unsigned index = state->current_type->next_index;

   state->current_type->next_index += MAX2(1, uniform->array_elements);

   return index;
}

/* Update the uniforms info for the current shader stage */
static void
update_uniforms_shader_info(struct gl_shader_program *prog,
                            struct nir_link_uniforms_state *state,
                            struct gl_uniform_storage *uniform,
                            const struct glsl_type *type,
                            unsigned stage)
{
   unsigned values = glsl_get_component_slots(type);
   const struct glsl_type *type_no_array = glsl_without_array(type);

   if (glsl_type_is_sampler(type_no_array)) {
      bool init_idx;
      /* ARB_bindless_texture spec says:
       *
       *    "When used as shader inputs, outputs, uniform block members,
       *     or temporaries, the value of the sampler is a 64-bit unsigned
       *     integer handle and never refers to a texture image unit."
       */
      bool is_bindless = state->current_var->data.bindless || state->var_is_in_block;
      unsigned *next_index = is_bindless ?
         &state->next_bindless_sampler_index :
         &state->next_sampler_index;
      int sampler_index = get_next_index(state, uniform, next_index, &init_idx);
      struct gl_linked_shader *sh = prog->_LinkedShaders[stage];

      if (is_bindless) {
         if (init_idx) {
            sh->Program->sh.BindlessSamplers =
               rerzalloc(sh->Program, sh->Program->sh.BindlessSamplers,
                         struct gl_bindless_sampler,
                         sh->Program->sh.NumBindlessSamplers,
                         state->next_bindless_sampler_index);

            for (unsigned j = sh->Program->sh.NumBindlessSamplers;
                 j < state->next_bindless_sampler_index; j++) {
               sh->Program->sh.BindlessSamplers[j].target =
                  glsl_get_sampler_target(type_no_array);
            }

            sh->Program->sh.NumBindlessSamplers =
               state->next_bindless_sampler_index;
         }

         if (!state->var_is_in_block)
            state->num_shader_uniform_components += values;
      } else {
         /* Samplers (bound or bindless) are counted as two components
          * as specified by ARB_bindless_texture.
          */
         state->num_shader_samplers += values / 2;

         if (init_idx) {
            const unsigned shadow = glsl_sampler_type_is_shadow(type_no_array);
            for (unsigned i = sampler_index;
                 i < MIN2(state->next_sampler_index, MAX_SAMPLERS); i++) {
               sh->Program->sh.SamplerTargets[i] =
                  glsl_get_sampler_target(type_no_array);
               state->shader_samplers_used |= 1U << i;
               state->shader_shadow_samplers |= shadow << i;
            }
         }
      }

      uniform->opaque[stage].active = true;
      uniform->opaque[stage].index = sampler_index;
   } else if (glsl_type_is_image(type_no_array)) {
      struct gl_linked_shader *sh = prog->_LinkedShaders[stage];

      /* Set image access qualifiers */
      enum gl_access_qualifier image_access =
         state->current_var->data.access;
      const GLenum access =
         (image_access & ACCESS_NON_WRITEABLE) ?
         ((image_access & ACCESS_NON_READABLE) ? GL_NONE :
                                                 GL_READ_ONLY) :
         ((image_access & ACCESS_NON_READABLE) ? GL_WRITE_ONLY :
                                                 GL_READ_WRITE);

      int image_index;
      if (state->current_var->data.bindless) {
         image_index = state->next_bindless_image_index;
         state->next_bindless_image_index += MAX2(1, uniform->array_elements);

         sh->Program->sh.BindlessImages =
            rerzalloc(sh->Program, sh->Program->sh.BindlessImages,
                      struct gl_bindless_image,
                      sh->Program->sh.NumBindlessImages,
                      state->next_bindless_image_index);

         for (unsigned j = sh->Program->sh.NumBindlessImages;
              j < state->next_bindless_image_index; j++) {
            sh->Program->sh.BindlessImages[j].access = access;
         }

         sh->Program->sh.NumBindlessImages = state->next_bindless_image_index;

      } else {
         image_index = state->next_image_index;
         state->next_image_index += MAX2(1, uniform->array_elements);

         /* Images (bound or bindless) are counted as two components as
          * specified by ARB_bindless_texture.
          */
         state->num_shader_images += values / 2;

         for (unsigned i = image_index;
              i < MIN2(state->next_image_index, MAX_IMAGE_UNIFORMS); i++) {
            sh->Program->sh.ImageAccess[i] = access;
         }
      }

      uniform->opaque[stage].active = true;
      uniform->opaque[stage].index = image_index;

      if (!uniform->is_shader_storage)
         state->num_shader_uniform_components += values;
   } else {
      if (glsl_get_base_type(type_no_array) == GLSL_TYPE_SUBROUTINE) {
         struct gl_linked_shader *sh = prog->_LinkedShaders[stage];

         uniform->opaque[stage].index = state->next_subroutine;
         uniform->opaque[stage].active = true;

         sh->Program->sh.NumSubroutineUniforms++;

         /* Increment the subroutine index by 1 for non-arrays and by the
          * number of array elements for arrays.
          */
         state->next_subroutine += MAX2(1, uniform->array_elements);
      }

      if (!state->var_is_in_block)
         state->num_shader_uniform_components += values;
   }
}

static bool
find_and_update_named_uniform_storage(const struct gl_constants *consts,
                                      struct gl_shader_program *prog,
                                      struct nir_link_uniforms_state *state,
                                      nir_variable *var, char **name,
                                      size_t name_length,
                                      const struct glsl_type *type,
                                      unsigned stage, bool *first_element)
{
   /* gl_uniform_storage can cope with one level of array, so if the type is a
    * composite type or an array where each element occupies more than one
    * location then we need to recursively process it.
    */
   if (glsl_type_is_struct_or_ifc(type) ||
       (glsl_type_is_array(type) &&
        (glsl_type_is_array(glsl_get_array_element(type)) ||
         glsl_type_is_struct_or_ifc(glsl_get_array_element(type))))) {

      struct type_tree_entry *old_type = state->current_type;
      state->current_type = old_type->children;

      /* Shader storage block unsized arrays: add subscript [0] to variable
       * names.
       */
      unsigned length = glsl_get_length(type);
      if (glsl_type_is_unsized_array(type))
         length = 1;

      bool result = false;
      for (unsigned i = 0; i < length; i++) {
         const struct glsl_type *field_type;
         size_t new_length = name_length;

         if (glsl_type_is_struct_or_ifc(type)) {
            field_type = glsl_get_struct_field(type, i);

            /* Append '.field' to the current variable name. */
            if (name) {
               ralloc_asprintf_rewrite_tail(name, &new_length, ".%s",
                                            glsl_get_struct_elem_name(type, i));
            }
         } else {
            field_type = glsl_get_array_element(type);

            /* Append the subscript to the current variable name */
            if (name)
               ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
         }

         result = find_and_update_named_uniform_storage(consts, prog, state,
                                                        var, name, new_length,
                                                        field_type, stage,
                                                        first_element);

         if (glsl_type_is_struct_or_ifc(type))
            state->current_type = state->current_type->next_sibling;

         if (!result) {
            state->current_type = old_type;
            return false;
         }
      }

      state->current_type = old_type;

      return result;
   } else {
      struct hash_entry *entry =
         _mesa_hash_table_search(state->uniform_hash, *name);
      if (entry) {
         unsigned i = (unsigned) (intptr_t) entry->data;
         struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];

         if (*first_element && !state->var_is_in_block) {
            *first_element = false;
            var->data.location = uniform - prog->data->UniformStorage;
         }

         update_uniforms_shader_info(prog, state, uniform, type, stage);

         const struct glsl_type *type_no_array = glsl_without_array(type);
         struct hash_entry *entry = prog->data->spirv ? NULL :
            _mesa_hash_table_search(state->referenced_uniforms[stage],
                                    state->current_var->name);
         if (entry != NULL ||
             glsl_get_base_type(type_no_array) == GLSL_TYPE_SUBROUTINE ||
             prog->data->spirv)
            uniform->active_shader_mask |= 1 << stage;

         if (!state->var_is_in_block)
            add_parameter(uniform, consts, prog, type, state);

         return true;
      }
   }

   return false;
}

/**
 * Finds, returns, and updates the stage info for any uniform in UniformStorage
 * defined by @var. For GLSL this is done using the name; for SPIR-V it is in
 * general done using the explicit location, except:
 *
 * * UBOs/SSBOs: as they lack an explicit location, the binding is used to
 *   locate them. That means that more than one entry in the uniform storage
 *   can be found. In that case all of them are updated, and the first entry
 *   is returned, in order to update the location of the nir variable.
 *
 * * Special uniforms, like atomic counters: they lack an explicit location,
 *   so they are skipped here. They will be handled and assigned a location
 *   later.
 */
static bool
find_and_update_previous_uniform_storage(const struct gl_constants *consts,
                                         struct gl_shader_program *prog,
                                         struct nir_link_uniforms_state *state,
                                         nir_variable *var, char *name,
                                         const struct glsl_type *type,
                                         unsigned stage)
{
   if (!prog->data->spirv) {
      bool first_element = true;
      char *name_tmp = ralloc_strdup(NULL, name);
      bool r = find_and_update_named_uniform_storage(consts, prog, state, var,
                                                     &name_tmp,
                                                     strlen(name_tmp), type,
                                                     stage, &first_element);
      ralloc_free(name_tmp);

      return r;
   }

   if (nir_variable_is_in_block(var)) {
      struct gl_uniform_storage *uniform = NULL;

      ASSERTED unsigned num_blks = nir_variable_is_in_ubo(var) ?
         prog->data->NumUniformBlocks :
         prog->data->NumShaderStorageBlocks;

      struct gl_uniform_block *blks = nir_variable_is_in_ubo(var) ?
         prog->data->UniformBlocks : prog->data->ShaderStorageBlocks;

      bool result = false;
      for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
         /* UniformStorage contains both variables from ubos and ssbos */
         if (prog->data->UniformStorage[i].is_shader_storage !=
             nir_variable_is_in_ssbo(var))
            continue;

         int block_index = prog->data->UniformStorage[i].block_index;
         if (block_index != -1) {
            assert(block_index < num_blks);

            if (var->data.binding == blks[block_index].Binding) {
               if (!uniform)
                  uniform = &prog->data->UniformStorage[i];
               mark_stage_as_active(&prog->data->UniformStorage[i],
                                    stage);
               result = true;
            }
         }
      }

      if (result)
         var->data.location = uniform - prog->data->UniformStorage;
      return result;
   }

   /* Beyond blocks, there are still some corner cases of uniforms without a
    * location (ie: atomic counters) that would have an initial location equal
    * to -1. We just return in that case. Those uniforms will be handled
    * later.
    */
   if (var->data.location == -1)
      return false;

   /* TODO: the following search can be problematic with shaders with a lot of
    * uniforms. Would it be better to use some type of hash?
    */
   for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
      if (prog->data->UniformStorage[i].remap_location == var->data.location) {
         mark_stage_as_active(&prog->data->UniformStorage[i], stage);

         struct gl_uniform_storage *uniform = &prog->data->UniformStorage[i];
         var->data.location = uniform - prog->data->UniformStorage;
         add_parameter(uniform, consts, prog, var->type, state);
         return true;
      }
   }

   return false;
}

static struct type_tree_entry *
build_type_tree_for_type(const struct glsl_type *type)
{
   struct type_tree_entry *entry = malloc(sizeof *entry);

   entry->array_size = 1;
   entry->next_index = UINT_MAX;
   entry->children = NULL;
   entry->next_sibling = NULL;
   entry->parent = NULL;

   if (glsl_type_is_array(type)) {
      entry->array_size = glsl_get_length(type);
      entry->children = build_type_tree_for_type(glsl_get_array_element(type));
      entry->children->parent = entry;
   } else if (glsl_type_is_struct_or_ifc(type)) {
      struct type_tree_entry *last = NULL;

      for (unsigned i = 0; i < glsl_get_length(type); i++) {
         const struct glsl_type *field_type = glsl_get_struct_field(type, i);
         struct type_tree_entry *field_entry =
            build_type_tree_for_type(field_type);

         if (last == NULL)
            entry->children = field_entry;
         else
            last->next_sibling = field_entry;

         field_entry->parent = entry;

         last = field_entry;
      }
   }

   return entry;
}

static void
free_type_tree(struct type_tree_entry *entry)
{
   struct type_tree_entry *p, *next;

   for (p = entry->children; p; p = next) {
      next = p->next_sibling;
      free_type_tree(p);
   }

   free(entry);
}

static void
hash_free_uniform_name(struct hash_entry *entry)
{
   free((void*)entry->key);
}

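/* In std140 layout the base alignment of a struct is rounded up to that of a
 * vec4, while std430 drops that rounding; enter_record()/leave_record() apply
 * the chosen packing's alignment before and after laying out the members of a
 * struct-typed block member.
 */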
static void
enter_record(struct nir_link_uniforms_state *state,
             const struct gl_constants *consts,
             const struct glsl_type *type,
             bool row_major)
{
   assert(glsl_type_is_struct(type));
   if (!state->var_is_in_block)
      return;

   bool use_std430 = consts->UseSTD430AsDefaultPacking;
   const enum glsl_interface_packing packing =
      glsl_get_internal_ifc_packing(state->current_var->interface_type,
                                    use_std430);

   if (packing == GLSL_INTERFACE_PACKING_STD430)
      state->offset = glsl_align(
         state->offset, glsl_get_std430_base_alignment(type, row_major));
   else
      state->offset = glsl_align(
         state->offset, glsl_get_std140_base_alignment(type, row_major));
}

static void
leave_record(struct nir_link_uniforms_state *state,
             const struct gl_constants *consts,
             const struct glsl_type *type,
             bool row_major)
{
   assert(glsl_type_is_struct(type));
   if (!state->var_is_in_block)
      return;

   bool use_std430 = consts->UseSTD430AsDefaultPacking;
   const enum glsl_interface_packing packing =
      glsl_get_internal_ifc_packing(state->current_var->interface_type,
                                    use_std430);

   if (packing == GLSL_INTERFACE_PACKING_STD430)
      state->offset = glsl_align(
         state->offset, glsl_get_std430_base_alignment(type, row_major));
   else
      state->offset = glsl_align(
         state->offset, glsl_get_std140_base_alignment(type, row_major));
}

/**
 * Creates the necessary entries in UniformStorage for the uniform. Returns
 * the number of locations used or -1 on failure.
 */
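/* For instance, a hypothetical "uniform struct { float a; vec2 b[3]; } s[2]"
 * recurses into four leaf entries, "s[0].a", "s[0].b", "s[1].a" and "s[1].b";
 * an array of basic types stays a single entry covering all of its elements.
 */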
static int
nir_link_uniform(const struct gl_constants *consts,
                 struct gl_shader_program *prog,
                 struct gl_program *stage_program,
                 gl_shader_stage stage,
                 const struct glsl_type *type,
                 unsigned index_in_parent,
                 int location,
                 struct nir_link_uniforms_state *state,
                 char **name, size_t name_length, bool row_major)
{
   struct gl_uniform_storage *uniform = NULL;

   if (state->set_top_level_array &&
       nir_variable_is_in_ssbo(state->current_var)) {
      /* Type is the top level SSBO member */
      if (glsl_type_is_array(type) &&
          (glsl_type_is_array(glsl_get_array_element(type)) ||
           glsl_type_is_struct_or_ifc(glsl_get_array_element(type)))) {
         /* Type is a top-level array (array of aggregate types) */
         state->top_level_array_size = glsl_get_length(type);
         state->top_level_array_stride = glsl_get_explicit_stride(type);
      } else {
         state->top_level_array_size = 1;
         state->top_level_array_stride = 0;
      }

      state->set_top_level_array = false;
   }

   /* gl_uniform_storage can cope with one level of array, so if the type is a
    * composite type or an array where each element occupies more than one
    * location then we need to recursively process it.
    */
   if (glsl_type_is_struct_or_ifc(type) ||
       (glsl_type_is_array(type) &&
        (glsl_type_is_array(glsl_get_array_element(type)) ||
         glsl_type_is_struct_or_ifc(glsl_get_array_element(type))))) {
      int location_count = 0;
      struct type_tree_entry *old_type = state->current_type;
      unsigned int struct_base_offset = state->offset;

      state->current_type = old_type->children;

      /* Shader storage block unsized arrays: add subscript [0] to variable
       * names.
       */
      unsigned length = glsl_get_length(type);
      if (glsl_type_is_unsized_array(type))
         length = 1;

      if (glsl_type_is_struct(type) && !prog->data->spirv)
         enter_record(state, consts, type, row_major);

      for (unsigned i = 0; i < length; i++) {
         const struct glsl_type *field_type;
         size_t new_length = name_length;
         bool field_row_major = row_major;

         if (glsl_type_is_struct_or_ifc(type)) {
            field_type = glsl_get_struct_field(type, i);
            /* Use the offset inside the struct only for variables backed by
             * a buffer object. For variables not backed by a buffer object,
             * offset is -1.
             */
            if (state->var_is_in_block) {
               if (prog->data->spirv) {
                  state->offset =
                     struct_base_offset + glsl_get_struct_field_offset(type, i);
               } else if (glsl_get_struct_field_offset(type, i) != -1 &&
                          type == state->current_ifc_type) {
                  state->offset = glsl_get_struct_field_offset(type, i);
               }

               if (glsl_type_is_interface(type))
                  state->set_top_level_array = true;
            }

            /* Append '.field' to the current variable name. */
            if (name) {
               ralloc_asprintf_rewrite_tail(name, &new_length, ".%s",
                                            glsl_get_struct_elem_name(type, i));
            }

            /* The layout of structures at the top level of the block is set
             * during parsing. For matrices contained in multiple levels of
             * structures in the block, the inner structures have no layout.
             * These cases must potentially inherit the layout from the outer
             * levels.
             */
            const enum glsl_matrix_layout matrix_layout =
               glsl_get_struct_field_data(type, i)->matrix_layout;
            if (matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR) {
               field_row_major = true;
            } else if (matrix_layout == GLSL_MATRIX_LAYOUT_COLUMN_MAJOR) {
               field_row_major = false;
            }
         } else {
            field_type = glsl_get_array_element(type);

            /* Append the subscript to the current variable name */
            if (name)
               ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
         }

         int entries = nir_link_uniform(consts, prog, stage_program, stage,
                                        field_type, i, location,
                                        state, name, new_length,
                                        field_row_major);

         if (entries == -1)
            return -1;

         if (location != -1)
            location += entries;
         location_count += entries;

         if (glsl_type_is_struct_or_ifc(type))
            state->current_type = state->current_type->next_sibling;
      }

      if (glsl_type_is_struct(type) && !prog->data->spirv)
         leave_record(state, consts, type, row_major);

      state->current_type = old_type;

      return location_count;
   } else {
      /* TODO: reallocating storage is slow, we should figure out a way to
       * allocate storage up front for spirv like we do for GLSL.
       */
      if (prog->data->spirv) {
         /* Create a new uniform storage entry */
         prog->data->UniformStorage =
            reralloc(prog->data,
                     prog->data->UniformStorage,
                     struct gl_uniform_storage,
                     prog->data->NumUniformStorage + 1);
         if (!prog->data->UniformStorage) {
            linker_error(prog, "Out of memory during linking.\n");
            return -1;
         }
      }

      uniform = &prog->data->UniformStorage[prog->data->NumUniformStorage];
      prog->data->NumUniformStorage++;

      /* Initialize its members */
      memset(uniform, 0x00, sizeof(struct gl_uniform_storage));

      uniform->name.string =
         name ? ralloc_strdup(prog->data->UniformStorage, *name) : NULL;
      resource_name_updated(&uniform->name);

      const struct glsl_type *type_no_array = glsl_without_array(type);
      if (glsl_type_is_array(type)) {
         uniform->type = type_no_array;
         uniform->array_elements = glsl_get_length(type);
      } else {
         uniform->type = type;
         uniform->array_elements = 0;
      }
      uniform->top_level_array_size = state->top_level_array_size;
      uniform->top_level_array_stride = state->top_level_array_stride;

      struct hash_entry *entry = prog->data->spirv ? NULL :
         _mesa_hash_table_search(state->referenced_uniforms[stage],
                                 state->current_var->name);
      if (entry != NULL ||
          glsl_get_base_type(type_no_array) == GLSL_TYPE_SUBROUTINE ||
          prog->data->spirv)
         uniform->active_shader_mask |= 1 << stage;

      if (location >= 0) {
         /* Uniform has an explicit location */
         uniform->remap_location = location;
      } else {
         uniform->remap_location = UNMAPPED_UNIFORM_LOC;
      }

      uniform->hidden = state->current_var->data.how_declared == nir_var_hidden;
      if (uniform->hidden)
         state->num_hidden_uniforms++;

      uniform->is_shader_storage = nir_variable_is_in_ssbo(state->current_var);
      uniform->is_bindless = state->current_var->data.bindless;

      /* Set fields whose default value depends on the variable being inside
       * a block.
       *
       * From the OpenGL 4.6 spec, 7.3 Program objects:
       *
       * "For the property ARRAY_STRIDE, ... For active variables not declared
       * as an array of basic types, zero is written to params. For active
       * variables not backed by a buffer object, -1 is written to params,
       * regardless of the variable type."
       *
       * "For the property MATRIX_STRIDE, ... For active variables not declared
       * as a matrix or array of matrices, zero is written to params. For active
       * variables not backed by a buffer object, -1 is written to params,
       * regardless of the variable type."
       *
       * "For the property IS_ROW_MAJOR, ... For active variables backed by a
       * buffer object, declared as a single matrix or array of matrices, and
       * stored in row-major order, one is written to params. For all other
       * active variables, zero is written to params."
       */
      uniform->array_stride = -1;
      uniform->matrix_stride = -1;
      uniform->row_major = false;

      if (state->var_is_in_block) {
         uniform->array_stride = glsl_type_is_array(type) ?
            glsl_get_explicit_stride(type) : 0;

         if (glsl_type_is_matrix(uniform->type)) {
            uniform->matrix_stride = glsl_get_explicit_stride(uniform->type);
            uniform->row_major = glsl_matrix_type_is_row_major(uniform->type);
         } else {
            uniform->matrix_stride = 0;
         }

         if (!prog->data->spirv) {
            bool use_std430 = consts->UseSTD430AsDefaultPacking;
            const enum glsl_interface_packing packing =
               glsl_get_internal_ifc_packing(state->current_var->interface_type,
                                             use_std430);

            unsigned alignment =
               glsl_get_std140_base_alignment(type, uniform->row_major);
            if (packing == GLSL_INTERFACE_PACKING_STD430) {
               alignment =
                  glsl_get_std430_base_alignment(type, uniform->row_major);
            }
            state->offset = glsl_align(state->offset, alignment);
         }
      }

      uniform->offset = state->var_is_in_block ? state->offset : -1;

      int buffer_block_index = -1;
      /* If the uniform is inside a uniform block determine its block index by
       * comparing the bindings; we can not use names.
       */
      if (state->var_is_in_block) {
         struct gl_uniform_block *blocks = nir_variable_is_in_ssbo(state->current_var) ?
            prog->data->ShaderStorageBlocks : prog->data->UniformBlocks;

         int num_blocks = nir_variable_is_in_ssbo(state->current_var) ?
            prog->data->NumShaderStorageBlocks : prog->data->NumUniformBlocks;

         if (!prog->data->spirv) {
            bool is_interface_array =
               glsl_without_array(state->current_var->type) == state->current_var->interface_type &&
               glsl_type_is_array(state->current_var->type);

            const char *ifc_name =
               glsl_get_type_name(state->current_var->interface_type);
            if (is_interface_array) {
               unsigned l = strlen(ifc_name);
               for (unsigned i = 0; i < num_blocks; i++) {
                  if (strncmp(ifc_name, blocks[i].name.string, l) == 0 &&
                      blocks[i].name.string[l] == '[') {
                     buffer_block_index = i;
                     break;
                  }
               }
            } else {
               for (unsigned i = 0; i < num_blocks; i++) {
                  if (strcmp(ifc_name, blocks[i].name.string) == 0) {
                     buffer_block_index = i;
                     break;
                  }
               }
            }

            /* Compute the next offset. */
            bool use_std430 = consts->UseSTD430AsDefaultPacking;
            const enum glsl_interface_packing packing =
               glsl_get_internal_ifc_packing(state->current_var->interface_type,
                                             use_std430);
            if (packing == GLSL_INTERFACE_PACKING_STD430)
               state->offset += glsl_get_std430_size(type, uniform->row_major);
            else
               state->offset += glsl_get_std140_size(type, uniform->row_major);
         } else {
            for (unsigned i = 0; i < num_blocks; i++) {
               if (state->current_var->data.binding == blocks[i].Binding) {
                  buffer_block_index = i;
                  break;
               }
            }

            /* Compute the next offset. */
            state->offset += glsl_get_explicit_size(type, true);
         }
         assert(buffer_block_index >= 0);
      }

      uniform->block_index = buffer_block_index;
      uniform->builtin = is_gl_identifier(uniform->name.string);
      uniform->atomic_buffer_index = -1;

      /* The following is for features not supported by ARB_gl_spirv. */
      uniform->num_compatible_subroutines = 0;

      unsigned entries = MAX2(1, uniform->array_elements);
      unsigned values = glsl_get_component_slots(type);

      update_uniforms_shader_info(prog, state, uniform, type, stage);

      if (uniform->remap_location != UNMAPPED_UNIFORM_LOC &&
          state->max_uniform_location < uniform->remap_location + entries)
         state->max_uniform_location = uniform->remap_location + entries;

      if (!state->var_is_in_block)
         add_parameter(uniform, consts, prog, type, state);

      if (name) {
         _mesa_hash_table_insert(state->uniform_hash, strdup(*name),
                                 (void *) (intptr_t)
                                    (prog->data->NumUniformStorage - 1));
      }

      if (!is_gl_identifier(uniform->name.string) && !uniform->is_shader_storage &&
          !state->var_is_in_block)
         state->num_values += values;

      return MAX2(uniform->array_elements, 1);
   }
}
1530
1531 bool
gl_nir_link_uniforms(const struct gl_constants *consts, struct gl_shader_program *prog, bool fill_parameters)1532 gl_nir_link_uniforms(const struct gl_constants *consts,
1533 struct gl_shader_program *prog,
1534 bool fill_parameters)
1535 {
1536 /* First free up any previous UniformStorage items */
1537 ralloc_free(prog->data->UniformStorage);
1538 prog->data->UniformStorage = NULL;
1539 prog->data->NumUniformStorage = 0;
1540
1541 /* Iterate through all linked shaders */
1542 struct nir_link_uniforms_state state = {0,};
1543
1544 if (!prog->data->spirv) {
1545 /* Gather information on uniform use */
1546 for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
1547 struct gl_linked_shader *sh = prog->_LinkedShaders[stage];
1548 if (!sh)
1549 continue;
1550
1551 state.referenced_uniforms[stage] =
1552 _mesa_hash_table_create(NULL, _mesa_hash_string,
1553 _mesa_key_string_equal);
1554
1555 nir_shader *nir = sh->Program->nir;
1556 add_var_use_shader(nir, state.referenced_uniforms[stage]);
1557 }
1558
1559 /* Resize uniform arrays based on the maximum array index */
1560 for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
1561 struct gl_linked_shader *sh = prog->_LinkedShaders[stage];
1562 if (!sh)
1563 continue;
1564
1565 nir_foreach_gl_uniform_variable(var, sh->Program->nir)
1566 update_array_sizes(prog, var, state.referenced_uniforms, stage);
1567 }
1568 }
1569
1570 /* Count total number of uniforms and allocate storage */
1571 unsigned storage_size = 0;
1572 if (!prog->data->spirv) {
1573 struct set *storage_counted =
1574 _mesa_set_create(NULL, _mesa_hash_string, _mesa_key_string_equal);
1575 for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
1576 struct gl_linked_shader *sh = prog->_LinkedShaders[stage];
1577 if (!sh)
1578 continue;
1579
1580 nir_foreach_gl_uniform_variable(var, sh->Program->nir) {
1581 const struct glsl_type *type = var->type;
1582 const char *name = var->name;
1583 if (nir_variable_is_in_block(var) &&
1584 glsl_without_array(type) == var->interface_type) {
1585 type = glsl_without_array(var->type);
1586 name = glsl_get_type_name(type);
1587 }
1588
1589 struct set_entry *entry = _mesa_set_search(storage_counted, name);
1590 if (!entry) {
1591 storage_size += uniform_storage_size(type);
1592 _mesa_set_add(storage_counted, name);
1593 }
1594 }
1595 }
1596 _mesa_set_destroy(storage_counted, NULL);
1597
1598 prog->data->UniformStorage = rzalloc_array(prog->data,
1599 struct gl_uniform_storage,
1600 storage_size);
1601 if (!prog->data->UniformStorage) {
1602 linker_error(prog, "Out of memory while linking uniforms.\n");
1603 return false;
1604 }
1605 }
1606
1607 /* Iterate through all linked shaders */
1608 state.uniform_hash = _mesa_hash_table_create(NULL, _mesa_hash_string,
1609 _mesa_key_string_equal);
1610
1611 for (unsigned shader_type = 0; shader_type < MESA_SHADER_STAGES; shader_type++) {
1612 struct gl_linked_shader *sh = prog->_LinkedShaders[shader_type];
1613 if (!sh)
1614 continue;
1615
1616 nir_shader *nir = sh->Program->nir;
1617 assert(nir);
1618
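      /* Reset the per-stage counters; their totals are stored into
       * sh->Program at the bottom of this loop. */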
1619 state.next_bindless_image_index = 0;
1620 state.next_bindless_sampler_index = 0;
1621 state.next_image_index = 0;
1622 state.next_sampler_index = 0;
1623 state.num_shader_samplers = 0;
1624 state.num_shader_images = 0;
1625 state.num_shader_uniform_components = 0;
1626 state.shader_storage_blocks_write_access = 0;
1627 state.shader_samplers_used = 0;
1628 state.shader_shadow_samplers = 0;
1629 state.params = fill_parameters ? sh->Program->Parameters : NULL;
1630
1631 nir_foreach_gl_uniform_variable(var, nir) {
1632 state.current_var = var;
1633 state.current_ifc_type = NULL;
1634 state.offset = 0;
1635 state.var_is_in_block = nir_variable_is_in_block(var);
1636 state.set_top_level_array = false;
1637 state.top_level_array_size = 0;
1638 state.top_level_array_stride = 0;
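         /* For SSBO members, top_level_array_size/stride back the
          * GL_TOP_LEVEL_ARRAY_SIZE/_STRIDE introspection queries. */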
1639
1640 /*
1641 * From ARB_program_interface spec, issue (16):
1642 *
1643 * "RESOLVED: We will follow the default rule for enumerating block
1644 * members in the OpenGL API, which is:
1645 *
1646 * * If a variable is a member of an interface block without an
1647 * instance name, it is enumerated using just the variable name.
1648 *
1649 * * If a variable is a member of an interface block with an
1650 * instance name, it is enumerated as "BlockName.Member", where
1651 * "BlockName" is the name of the interface block (not the
1652 * instance name) and "Member" is the name of the variable.
1653 *
1654 * For example, in the following code:
1655 *
1656 * uniform Block1 {
1657 * int member1;
1658 * };
1659 * uniform Block2 {
1660 * int member2;
1661 * } instance2;
1662 * uniform Block3 {
1663 * int member3;
1664 * } instance3[2]; // uses two separate buffer bindings
1665 *
1666 * the three uniforms (if active) are enumerated as "member1",
1667 * "Block2.member2", and "Block3.member3"."
1668 *
1669              * Note that in the last example, with an array of UBOs, only one
1670              * uniform is generated. For that reason, while unrolling the
1671              * uniforms of a UBO, or the variables of an SSBO, we need to
1672              * treat arrays of instances as a single block.
1673 */
1674 char *name;
1675 const struct glsl_type *type = var->type;
1676 if (state.var_is_in_block &&
1677 ((!prog->data->spirv && glsl_without_array(type) == var->interface_type) ||
1678 (prog->data->spirv && type == var->interface_type))) {
1679 type = glsl_without_array(var->type);
1680 state.current_ifc_type = type;
1681 name = ralloc_strdup(NULL, glsl_get_type_name(type));
1682 } else {
1683 state.set_top_level_array = true;
1684 name = ralloc_strdup(NULL, var->name);
1685 }
1686
1687 struct type_tree_entry *type_tree =
1688 build_type_tree_for_type(type);
1689 state.current_type = type_tree;
1690
1691 int location = var->data.location;
1692
1693 struct gl_uniform_block *blocks = NULL;
1694 int num_blocks = 0;
1695 int buffer_block_index = -1;
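         /* buffer_block_index stays -1 for uniforms in the default (non-block)
          * uniform block; it is only set for variables inside a named block. */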
1696 bool is_interface_array = false;
1697 if (state.var_is_in_block) {
1698             /* Determine the index of the uniform's block: by block name for
1699              * GLSL, or by binding for SPIR-V, where names may be unavailable.
1700              */
1701 blocks = nir_variable_is_in_ssbo(state.current_var) ?
1702 prog->data->ShaderStorageBlocks : prog->data->UniformBlocks;
1703 num_blocks = nir_variable_is_in_ssbo(state.current_var) ?
1704 prog->data->NumShaderStorageBlocks : prog->data->NumUniformBlocks;
1705
1706 is_interface_array =
1707 glsl_without_array(state.current_var->type) == state.current_var->interface_type &&
1708 glsl_type_is_array(state.current_var->type);
1709
1710 const char *ifc_name =
1711 glsl_get_type_name(state.current_var->interface_type);
1712
1713 if (is_interface_array && !prog->data->spirv) {
1714 unsigned l = strlen(ifc_name);
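               /* e.g. an instance array "uniform B { ... } inst[2];" yields
                * blocks named "B[0]" and "B[1]", so match the block name as a
                * prefix followed by '['. */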
1715
1716 /* Even when a match is found, do not "break" here. As this is
1717 * an array of instances, all elements of the array need to be
1718 * marked as referenced.
1719 */
1720 for (unsigned i = 0; i < num_blocks; i++) {
1721 if (strncmp(ifc_name, blocks[i].name.string, l) == 0 &&
1722 blocks[i].name.string[l] == '[') {
1723 if (buffer_block_index == -1)
1724 buffer_block_index = i;
1725
1726 struct hash_entry *entry =
1727 _mesa_hash_table_search(state.referenced_uniforms[shader_type],
1728 var->name);
1729 if (entry) {
1730 struct uniform_array_info *ainfo =
1731 (struct uniform_array_info *) entry->data;
1732 if (BITSET_TEST(ainfo->indices, blocks[i].linearized_array_index))
1733 blocks[i].stageref |= 1U << shader_type;
1734 }
1735 }
1736 }
1737 } else {
1738 for (unsigned i = 0; i < num_blocks; i++) {
1739 bool match = false;
1740 if (!prog->data->spirv) {
1741 match = strcmp(ifc_name, blocks[i].name.string) == 0;
1742 } else {
1743 match = var->data.binding == blocks[i].Binding;
1744 }
1745 if (match) {
1746 buffer_block_index = i;
1747
1748 if (!prog->data->spirv) {
1749 struct hash_entry *entry =
1750 _mesa_hash_table_search(state.referenced_uniforms[shader_type],
1751 var->name);
1752 if (entry)
1753 blocks[i].stageref |= 1U << shader_type;
1754 }
1755
1756 break;
1757 }
1758 }
1759 }
1760 }
1761
1762 if (nir_variable_is_in_ssbo(var) &&
1763 !(var->data.access & ACCESS_NON_WRITEABLE)) {
1764 unsigned array_size = is_interface_array ?
1765 glsl_get_length(var->type) : 1;
1766
1767 STATIC_ASSERT(MAX_SHADER_STORAGE_BUFFERS <= 32);
1768
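            /* shader_storage_blocks_write_access is a 32-bit mask with one bit
             * per SSBO binding point, hence the assertion above. */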
1769             /* Each stage's block list holds pointers into the list stored in
1770              * the program. We need to account for that offset before computing
1771              * the mask below, otherwise the mask will be incorrect.
1772              * prog->data->SSBlocks        : [a][b][c][d][e][f]
1773              * VS sh->Program->sh.SSBlocks : [a][b][c]
1774              * FS sh->Program->sh.SSBlocks : [d][e][f]
1775              * e.g. for FS buffer 1, buffer_block_index will be 4 but sh_block_index will be 1.
1776              */
1777             int base =
1778                sh->Program->sh.ShaderStorageBlocks[0] - prog->data->ShaderStorageBlocks;
1779
1780 assert(base >= 0);
1781
1782 int sh_block_index = buffer_block_index - base;
1783             /* Shaders that use too many SSBOs fail to compile anyway, so we
1784              * don't care about them here.
1785              *
1786              * The check below always holds for shaders within the SSBO limit:
1787              */
1788 if (sh_block_index + array_size <= 32) {
1789 state.shader_storage_blocks_write_access |=
1790 u_bit_consecutive(sh_block_index, array_size);
1791 }
1792 }
1793
1794 if (blocks && !prog->data->spirv && state.var_is_in_block) {
1795 if (glsl_without_array(state.current_var->type) != state.current_var->interface_type) {
1796 /* this is nested at some offset inside the block */
1797 bool found = false;
1798 char sentinel = '\0';
1799
1800 if (glsl_type_is_struct(state.current_var->type)) {
1801 sentinel = '.';
1802 } else if (glsl_type_is_array(state.current_var->type) &&
1803 (glsl_type_is_array(glsl_get_array_element(state.current_var->type))
1804 || glsl_type_is_struct(glsl_without_array(state.current_var->type)))) {
1805 sentinel = '[';
1806 }
1807
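               /* Example (hypothetical shader): given
                *    uniform B { struct { float f; } s; };
                * the block's member list stores the name "s.f", so the
                * variable "s" is matched by comparing only up to the '.'
                * sentinel rather than the whole name. */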
1808 const unsigned l = strlen(state.current_var->name);
1809 for (unsigned i = 0; i < num_blocks; i++) {
1810 for (unsigned j = 0; j < blocks[i].NumUniforms; j++) {
1811 if (sentinel) {
1812 const char *begin = blocks[i].Uniforms[j].Name;
1813 const char *end = strchr(begin, sentinel);
1814
1815 if (end == NULL)
1816 continue;
1817
1818 if ((ptrdiff_t) l != (end - begin))
1819 continue;
1820 found = strncmp(state.current_var->name, begin, l) == 0;
1821 } else {
1822 found = strcmp(state.current_var->name, blocks[i].Uniforms[j].Name) == 0;
1823 }
1824
1825 if (found) {
1826 location = j;
1827
1828 struct hash_entry *entry =
1829 _mesa_hash_table_search(state.referenced_uniforms[shader_type], var->name);
1830 if (entry)
1831 blocks[i].stageref |= 1U << shader_type;
1832
1833 break;
1834 }
1835 }
1836
1837 if (found)
1838 break;
1839 }
1840 assert(found);
1841 var->data.location = location;
1842 } else {
1843 /* this is the base block offset */
1844 var->data.location = buffer_block_index;
1845 location = 0;
1846 }
1847 assert(buffer_block_index >= 0);
1848 const struct gl_uniform_block *const block =
1849 &blocks[buffer_block_index];
1850 assert(location >= 0 && location < block->NumUniforms);
1851
1852 const struct gl_uniform_buffer_variable *const ubo_var =
1853 &block->Uniforms[location];
1854
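            /* Unrolling of this variable starts at its byte offset within the
             * block's data store. */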
1855 state.offset = ubo_var->Offset;
1856 }
1857
1858          /* Check if the uniform has already been processed for another
1859           * stage. If so, validate that the definitions are compatible and
1860           * update the active stage mask.
1861           */
1862 if (find_and_update_previous_uniform_storage(consts, prog, &state, var,
1863 name, type, shader_type)) {
1864 ralloc_free(name);
1865 free_type_tree(type_tree);
1866 continue;
1867 }
1868
1869          /* From now on the variable's location will be its uniform index. */
1870 if (!state.var_is_in_block)
1871 var->data.location = prog->data->NumUniformStorage;
1872 else
1873 location = -1;
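            /* Members of named blocks get no uniform location of their own;
             * they are addressed through their block instead. */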
1874
1875 bool row_major =
1876 var->data.matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR;
1877 int res = nir_link_uniform(consts, prog, sh->Program, shader_type, type,
1878 0, location,
1879 &state,
1880 !prog->data->spirv ? &name : NULL,
1881 !prog->data->spirv ? strlen(name) : 0,
1882 row_major);
1883
1884 free_type_tree(type_tree);
1885 ralloc_free(name);
1886
1887 if (res == -1)
1888 return false;
1889 }
1890
1891 if (!prog->data->spirv) {
1892 _mesa_hash_table_destroy(state.referenced_uniforms[shader_type],
1893 NULL);
1894 }
1895
1896 if (state.num_shader_samplers >
1897 consts->Program[shader_type].MaxTextureImageUnits) {
1898 linker_error(prog, "Too many %s shader texture samplers\n",
1899 _mesa_shader_stage_to_string(shader_type));
1900 continue;
1901 }
1902
1903 if (state.num_shader_images >
1904 consts->Program[shader_type].MaxImageUniforms) {
1905 linker_error(prog, "Too many %s shader image uniforms (%u > %u)\n",
1906 _mesa_shader_stage_to_string(shader_type),
1907 state.num_shader_images,
1908 consts->Program[shader_type].MaxImageUniforms);
1909 continue;
1910 }
1911
1912 sh->Program->SamplersUsed = state.shader_samplers_used;
1913 sh->Program->sh.ShaderStorageBlocksWriteAccess =
1914 state.shader_storage_blocks_write_access;
1915 sh->shadow_samplers = state.shader_shadow_samplers;
1916 sh->Program->info.num_textures = state.num_shader_samplers;
1917 sh->Program->info.num_images = state.num_shader_images;
1918 sh->num_uniform_components = state.num_shader_uniform_components;
1919 sh->num_combined_uniform_components = sh->num_uniform_components;
1920 }
1921
1922 prog->data->NumHiddenUniforms = state.num_hidden_uniforms;
1923 prog->data->NumUniformDataSlots = state.num_values;
1924
1925 assert(prog->data->spirv || prog->data->NumUniformStorage == storage_size);
1926
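   /* For SPIR-V the remap table must span up to the highest explicit
    * location recorded while walking the uniforms. */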
1927 if (prog->data->spirv)
1928 prog->NumUniformRemapTable = state.max_uniform_location;
1929
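   /* Build the location -> storage remap tables, then apply any initializers
    * declared in the shaders. */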
1930 nir_setup_uniform_remap_tables(consts, prog);
1931 gl_nir_set_uniform_initializers(consts, prog);
1932
1933 _mesa_hash_table_destroy(state.uniform_hash, hash_free_uniform_name);
1934
1935 return true;
1936 }
1937