/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

#include "nir.h"
#include "nir_xfb_info.h"
#include "c11/threads.h"
#include <assert.h>

/*
 * This file checks for invalid IR indicating a bug somewhere in the compiler.
 */

/* Since this file is just a pile of asserts, don't bother compiling it if
 * we're not building a debug build.
 */
#ifndef NDEBUG

/*
 * Per-register validation state.
 */

typedef struct {
   /*
    * equivalent to the uses and defs in nir_register, but built up by the
    * validator. At the end, we verify that the sets have the same entries.
    */
   struct set *uses, *if_uses, *defs;
   nir_function_impl *where_defined; /* NULL for global registers */
} reg_validate_state;

typedef struct {
   void *mem_ctx;

   /* map of register -> validation state (struct above) */
   struct hash_table *regs;

   /* the current shader being validated */
   nir_shader *shader;

   /* the current instruction being validated */
   nir_instr *instr;

   /* the current variable being validated */
   nir_variable *var;

   /* the current basic block being validated */
   nir_block *block;

   /* the current if statement being validated */
   nir_if *if_stmt;

   /* the current loop being visited */
   nir_loop *loop;

   /* the parent of the current cf node being visited */
   nir_cf_node *parent_node;

   /* the current function implementation being validated */
   nir_function_impl *impl;

   /* Set of all blocks in the list */
   struct set *blocks;

   /* Set of seen SSA sources */
   struct set *ssa_srcs;

   /* bitset of ssa definitions we have found; used to check uniqueness */
   BITSET_WORD *ssa_defs_found;

   /* bitset of registers we have found; used to check uniqueness */
   BITSET_WORD *regs_found;

   /* map of variable -> function implementation where it is defined or NULL
    * if it is a global variable
    */
   struct hash_table *var_defs;

   /* map of instruction/var/etc to failed assert string */
   struct hash_table *errors;

   struct set *shader_gc_list;
} validate_state;

static void
log_error(validate_state *state, const char *cond, const char *file, int line)
{
   const void *obj;

   if (state->instr)
      obj = state->instr;
   else if (state->var)
      obj = state->var;
   else
      obj = cond;

   char *msg = ralloc_asprintf(state->errors, "error: %s (%s:%d)",
                               cond, file, line);

   _mesa_hash_table_insert(state->errors, obj, msg);
}

static bool
validate_assert_impl(validate_state *state, bool cond, const char *str,
                     const char *file, unsigned line)
{
   if (!cond)
      log_error(state, str, file, line);
   return cond;
}

#define validate_assert(state, cond) \
   validate_assert_impl(state, (cond), #cond, __FILE__, __LINE__)
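
/* validate_assert() evaluates to the condition itself, so callers can guard
 * dependent checks on it, e.g.:
 *
 *    if (!validate_assert(state, deref))
 *       break;
 *
 * as validate_tex_instr() does below.  Failures are recorded against the
 * current instruction or variable instead of aborting, so one validation
 * pass can report every error in the shader.
 */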
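
/* For validate_src() and validate_dest(), bit_sizes is a bitmask of
 * acceptable bit sizes (e.g. 16 | 32 | 64) and num_components is the
 * required component count; zero for either one means "don't check".
 */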
static void validate_src(nir_src *src, validate_state *state,
                         unsigned bit_sizes, unsigned num_components);

static void
validate_num_components(validate_state *state, unsigned num_components)
{
   validate_assert(state, nir_num_components_valid(num_components));
}

static void
validate_reg_src(nir_src *src, validate_state *state,
                 unsigned bit_sizes, unsigned num_components)
{
   validate_assert(state, src->reg.reg != NULL);

   struct hash_entry *entry;
   entry = _mesa_hash_table_search(state->regs, src->reg.reg);
   validate_assert(state, entry);

   reg_validate_state *reg_state = (reg_validate_state *) entry->data;

   if (state->instr) {
      _mesa_set_add(reg_state->uses, src);
   } else {
      validate_assert(state, state->if_stmt);
      _mesa_set_add(reg_state->if_uses, src);
   }

   validate_assert(state, reg_state->where_defined == state->impl &&
          "using a register declared in a different function");

   if (bit_sizes)
      validate_assert(state, src->reg.reg->bit_size & bit_sizes);
   if (num_components)
      validate_assert(state, src->reg.reg->num_components == num_components);

   validate_assert(state, (src->reg.reg->num_array_elems == 0 ||
          src->reg.base_offset < src->reg.reg->num_array_elems) &&
          "definitely out-of-bounds array access");

   if (src->reg.indirect) {
      validate_assert(state, src->reg.reg->num_array_elems != 0);
      validate_assert(state, (src->reg.indirect->is_ssa ||
              src->reg.indirect->reg.indirect == NULL) &&
             "only one level of indirection allowed");
      validate_src(src->reg.indirect, state, 32, 1);
   }
}

#define SET_PTR_BIT(ptr, bit) \
   (void *)(((uintptr_t)(ptr)) | (((uintptr_t)1) << bit))
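
/* The same nir_src pointer may be recorded either as an instruction use or
 * as an if-condition use, so if-uses are entered into state->ssa_srcs with
 * bit 0 of the pointer set to keep the two kinds of entries distinct.  This
 * relies on nir_src pointers having at least 2-byte alignment.
 */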

static void
validate_ssa_src(nir_src *src, validate_state *state,
                 unsigned bit_sizes, unsigned num_components)
{
   validate_assert(state, src->ssa != NULL);

   /* As we walk SSA defs, we add every use to this set.  We need to make sure
    * our use is seen in a use list.
    */
   struct set_entry *entry;
   if (state->instr) {
      entry = _mesa_set_search(state->ssa_srcs, src);
   } else {
      entry = _mesa_set_search(state->ssa_srcs, SET_PTR_BIT(src, 0));
   }
   validate_assert(state, entry);

   /* This will let us prove that we've seen all the sources: anything left
    * in the set after the walk is a use-list entry whose source never
    * appeared in the IR.
    */
   if (entry)
      _mesa_set_remove(state->ssa_srcs, entry);

   if (bit_sizes)
      validate_assert(state, src->ssa->bit_size & bit_sizes);
   if (num_components)
      validate_assert(state, src->ssa->num_components == num_components);

   /* TODO validate that the use is dominated by the definition */
}

static void
validate_src(nir_src *src, validate_state *state,
             unsigned bit_sizes, unsigned num_components)
{
   if (state->instr)
      validate_assert(state, src->parent_instr == state->instr);
   else
      validate_assert(state, src->parent_if == state->if_stmt);

   if (src->is_ssa)
      validate_ssa_src(src, state, bit_sizes, num_components);
   else
      validate_reg_src(src, state, bit_sizes, num_components);
}

static void
validate_alu_src(nir_alu_instr *instr, unsigned index, validate_state *state)
{
   nir_alu_src *src = &instr->src[index];

   if (instr->op == nir_op_mov)
      assert(!src->abs && !src->negate);

   unsigned num_components = nir_src_num_components(src->src);
   for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
      validate_assert(state, src->swizzle[i] < NIR_MAX_VEC_COMPONENTS);

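      /* Only the channels this instruction actually reads need an in-range
       * swizzle; unused swizzle entries may hold any value below
       * NIR_MAX_VEC_COMPONENTS.
       */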
      if (nir_alu_instr_channel_used(instr, index, i))
         validate_assert(state, src->swizzle[i] < num_components);
   }

   validate_src(&src->src, state, 0, 0);
}

static void
validate_reg_dest(nir_reg_dest *dest, validate_state *state,
                  unsigned bit_sizes, unsigned num_components)
{
   validate_assert(state, dest->reg != NULL);

   validate_assert(state, dest->parent_instr == state->instr);

   struct hash_entry *entry2;
   entry2 = _mesa_hash_table_search(state->regs, dest->reg);

   validate_assert(state, entry2);

   reg_validate_state *reg_state = (reg_validate_state *) entry2->data;
   _mesa_set_add(reg_state->defs, dest);

   validate_assert(state, reg_state->where_defined == state->impl &&
          "writing to a register declared in a different function");

   if (bit_sizes)
      validate_assert(state, dest->reg->bit_size & bit_sizes);
   if (num_components)
      validate_assert(state, dest->reg->num_components == num_components);

   validate_assert(state, (dest->reg->num_array_elems == 0 ||
          dest->base_offset < dest->reg->num_array_elems) &&
          "definitely out-of-bounds array access");

   if (dest->indirect) {
      validate_assert(state, dest->reg->num_array_elems != 0);
      validate_assert(state, (dest->indirect->is_ssa || dest->indirect->reg.indirect == NULL) &&
             "only one level of indirection allowed");
      validate_src(dest->indirect, state, 32, 1);
   }
}

static void
validate_ssa_def(nir_ssa_def *def, validate_state *state)
{
   validate_assert(state, def->index < state->impl->ssa_alloc);
   validate_assert(state, !BITSET_TEST(state->ssa_defs_found, def->index));
   BITSET_SET(state->ssa_defs_found, def->index);

   validate_assert(state, def->parent_instr == state->instr);
   validate_num_components(state, def->num_components);

   list_validate(&def->uses);
   nir_foreach_use(src, def) {
      validate_assert(state, src->is_ssa);
      validate_assert(state, src->ssa == def);
      bool already_seen = false;
      _mesa_set_search_and_add(state->ssa_srcs, src, &already_seen);
      /* A nir_src should only appear once and only in one SSA def use list */
      validate_assert(state, !already_seen);
   }

   list_validate(&def->if_uses);
   nir_foreach_if_use(src, def) {
      validate_assert(state, src->is_ssa);
      validate_assert(state, src->ssa == def);
      bool already_seen = false;
      _mesa_set_search_and_add(state->ssa_srcs, SET_PTR_BIT(src, 0),
                               &already_seen);
      /* A nir_src should only appear once and only in one SSA def use list */
      validate_assert(state, !already_seen);
   }
}

static void
validate_dest(nir_dest *dest, validate_state *state,
              unsigned bit_sizes, unsigned num_components)
{
   if (dest->is_ssa) {
      if (bit_sizes)
         validate_assert(state, dest->ssa.bit_size & bit_sizes);
      if (num_components)
         validate_assert(state, dest->ssa.num_components == num_components);
      validate_ssa_def(&dest->ssa, state);
   } else {
      validate_reg_dest(&dest->reg, state, bit_sizes, num_components);
   }
}

static void
validate_alu_dest(nir_alu_instr *instr, validate_state *state)
{
   nir_alu_dest *dest = &instr->dest;

   if (instr->op == nir_op_mov)
      assert(!dest->saturate);

   unsigned dest_size = nir_dest_num_components(dest->dest);
   /*
    * validate that the instruction doesn't write to components not in the
    * register/SSA value
    */
   validate_assert(state, !(dest->write_mask & ~((1 << dest_size) - 1)));

   /* validate that saturate is only ever used on instructions with
    * destinations of type float
    */
   nir_alu_instr *alu = nir_instr_as_alu(state->instr);
   validate_assert(state,
          (nir_alu_type_get_base_type(nir_op_infos[alu->op].output_type) ==
           nir_type_float) ||
          !dest->saturate);

   validate_dest(&dest->dest, state, 0, 0);
}

static void
validate_alu_instr(nir_alu_instr *instr, validate_state *state)
{
   validate_assert(state, instr->op < nir_num_opcodes);

   unsigned instr_bit_size = 0;
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      nir_alu_type src_type = nir_op_infos[instr->op].input_types[i];
      unsigned src_bit_size = nir_src_bit_size(instr->src[i].src);
      if (nir_alu_type_get_type_size(src_type)) {
         validate_assert(state, src_bit_size == nir_alu_type_get_type_size(src_type));
      } else if (instr_bit_size) {
         validate_assert(state, src_bit_size == instr_bit_size);
      } else {
         instr_bit_size = src_bit_size;
      }

      if (nir_alu_type_get_base_type(src_type) == nir_type_float) {
         /* 8-bit float isn't a thing */
         validate_assert(state, src_bit_size == 16 || src_bit_size == 32 ||
                                src_bit_size == 64);
      }

      validate_alu_src(instr, i, state);
   }

   nir_alu_type dest_type = nir_op_infos[instr->op].output_type;
   unsigned dest_bit_size = nir_dest_bit_size(instr->dest.dest);
   if (nir_alu_type_get_type_size(dest_type)) {
      validate_assert(state, dest_bit_size == nir_alu_type_get_type_size(dest_type));
   } else if (instr_bit_size) {
      validate_assert(state, dest_bit_size == instr_bit_size);
   } else {
      /* The only unsized thing is the destination so it's vacuously valid */
   }

   if (nir_alu_type_get_base_type(dest_type) == nir_type_float) {
      /* 8-bit float isn't a thing */
      validate_assert(state, dest_bit_size == 16 || dest_bit_size == 32 ||
                             dest_bit_size == 64);
   }

   validate_alu_dest(instr, state);
}

static void
validate_var_use(nir_variable *var, validate_state *state)
{
   struct hash_entry *entry = _mesa_hash_table_search(state->var_defs, var);
   validate_assert(state, entry);
   if (entry && var->data.mode == nir_var_function_temp)
      validate_assert(state, (nir_function_impl *) entry->data == state->impl);
}

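/* Deref chains are validated structurally: var derefs and casts may start a
 * chain, while struct/array/ptr_as_array derefs must have an SSA parent that
 * is itself a deref with a compatible type and the same modes.
 */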
static void
validate_deref_instr(nir_deref_instr *instr, validate_state *state)
{
   if (instr->deref_type == nir_deref_type_var) {
      /* Variable dereferences are stupid simple. */
      validate_assert(state, instr->modes == instr->var->data.mode);
      validate_assert(state, instr->type == instr->var->type);
      validate_var_use(instr->var, state);
   } else if (instr->deref_type == nir_deref_type_cast) {
      /* For cast, we simply have to trust the instruction.  It's up to
       * lowering passes and front/back-ends to make them sane.
       */
      validate_src(&instr->parent, state, 0, 0);

      /* Most variable modes in NIR can only exist by themselves. */
      if (instr->modes & ~nir_var_mem_generic)
         validate_assert(state, util_bitcount(instr->modes) == 1);

      nir_deref_instr *parent = nir_src_as_deref(instr->parent);
      if (parent) {
         /* Casts can change the mode but it can't change completely.  The new
          * mode must have some bits in common with the old.
          */
         validate_assert(state, instr->modes & parent->modes);
      } else {
         /* If our parent isn't a deref, just assert the mode is there */
         validate_assert(state, instr->modes != 0);
      }

      /* We just validate that the type is there */
      validate_assert(state, instr->type);
      if (instr->cast.align_mul > 0) {
         validate_assert(state, util_is_power_of_two_nonzero(instr->cast.align_mul));
         validate_assert(state, instr->cast.align_offset < instr->cast.align_mul);
      } else {
         validate_assert(state, instr->cast.align_offset == 0);
      }
   } else {
      /* We require the parent to be SSA.  This may be lifted in the future */
      validate_assert(state, instr->parent.is_ssa);

      /* The parent pointer value must have the same number of components
       * as the destination.
       */
      validate_src(&instr->parent, state, nir_dest_bit_size(instr->dest),
                   nir_dest_num_components(instr->dest));

      nir_instr *parent_instr = instr->parent.ssa->parent_instr;

      /* The parent must come from another deref instruction */
      validate_assert(state, parent_instr->type == nir_instr_type_deref);

      nir_deref_instr *parent = nir_instr_as_deref(parent_instr);

      validate_assert(state, instr->modes == parent->modes);

      switch (instr->deref_type) {
      case nir_deref_type_struct:
         validate_assert(state, glsl_type_is_struct_or_ifc(parent->type));
         validate_assert(state,
            instr->strct.index < glsl_get_length(parent->type));
         validate_assert(state, instr->type ==
            glsl_get_struct_field(parent->type, instr->strct.index));
         break;

      case nir_deref_type_array:
      case nir_deref_type_array_wildcard:
         if (instr->modes & nir_var_vec_indexable_modes) {
            /* Shared variables and UBO/SSBOs have a bit more relaxed rules
             * because we need to be able to handle array derefs on vectors.
             * Fortunately, nir_lower_io handles these just fine.
             */
            validate_assert(state, glsl_type_is_array(parent->type) ||
                                   glsl_type_is_matrix(parent->type) ||
                                   glsl_type_is_vector(parent->type));
         } else {
            /* Most of NIR cannot handle array derefs on vectors */
            validate_assert(state, glsl_type_is_array(parent->type) ||
                                   glsl_type_is_matrix(parent->type));
         }
         validate_assert(state,
            instr->type == glsl_get_array_element(parent->type));

         if (instr->deref_type == nir_deref_type_array) {
            validate_src(&instr->arr.index, state,
                         nir_dest_bit_size(instr->dest), 1);
         }
         break;

      case nir_deref_type_ptr_as_array:
         /* ptr_as_array derefs must have a parent that is either an array,
          * ptr_as_array, or cast.  If the parent is a cast, we get the stride
          * information (if any) from the cast deref.
          */
         validate_assert(state,
                         parent->deref_type == nir_deref_type_array ||
                         parent->deref_type == nir_deref_type_ptr_as_array ||
                         parent->deref_type == nir_deref_type_cast);
         validate_src(&instr->arr.index, state,
                      nir_dest_bit_size(instr->dest), 1);
         break;

      default:
         unreachable("Invalid deref instruction type");
      }
   }

   /* We intentionally don't validate the size of the destination because we
    * want to let other compiler components such as SPIR-V decide how big
    * pointers should be.
    */
   validate_dest(&instr->dest, state, 0, 0);

   /* Deref instructions as if conditions don't make sense because if
    * conditions expect well-formed Booleans.  If you want to compare with
    * NULL, an explicit comparison operation should be used.
    */
   validate_assert(state, list_is_empty(&instr->dest.ssa.if_uses));

   /* Certain modes cannot be used as sources for phi instructions because
    * way too many passes assume that they can always chase deref chains.
    */
   nir_foreach_use(use, &instr->dest.ssa) {
      if (use->parent_instr->type == nir_instr_type_phi) {
         validate_assert(state, !(instr->modes & (nir_var_shader_in |
                                                  nir_var_shader_out |
                                                  nir_var_uniform)));
      }
   }
}
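
/* Returns true if the intrinsic is vectorized, i.e. its destination or any
 * of its sources has a variable per-instruction component count (recorded
 * as zero in nir_intrinsic_info).  Fixed-size intrinsics must leave
 * instr->num_components at zero; see the check near the end of
 * validate_intrinsic_instr().
 */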
static bool
vectorized_intrinsic(nir_intrinsic_instr *intr)
{
   const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];

   if (info->dest_components == 0)
      return true;

   for (unsigned i = 0; i < info->num_srcs; i++)
      if (info->src_components[i] == 0)
         return true;

   return false;
}

/** Returns the image format or PIPE_FORMAT_COUNT for incomplete derefs
 *
 * We use PIPE_FORMAT_COUNT for incomplete derefs because PIPE_FORMAT_NONE
 * indicates that we found the variable but it has no format specified.
 */
static enum pipe_format
image_intrin_format(nir_intrinsic_instr *instr)
{
   if (nir_intrinsic_format(instr) != PIPE_FORMAT_NONE)
      return nir_intrinsic_format(instr);

   /* If this is not a deref intrinsic, PIPE_FORMAT_NONE is the best we can do */
   if (nir_intrinsic_infos[instr->intrinsic].src_components[0] != -1)
      return PIPE_FORMAT_NONE;

   nir_variable *var = nir_intrinsic_get_var(instr, 0);
   if (var == NULL)
      return PIPE_FORMAT_COUNT;

   return var->data.image.format;
}

static void
validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state)
{
   unsigned dest_bit_size = 0;
   unsigned src_bit_sizes[NIR_INTRINSIC_MAX_INPUTS] = { 0, };
   switch (instr->intrinsic) {
   case nir_intrinsic_convert_alu_types: {
      nir_alu_type src_type = nir_intrinsic_src_type(instr);
      nir_alu_type dest_type = nir_intrinsic_dest_type(instr);
      dest_bit_size = nir_alu_type_get_type_size(dest_type);
      src_bit_sizes[0] = nir_alu_type_get_type_size(src_type);
      validate_assert(state, dest_bit_size != 0);
      validate_assert(state, src_bit_sizes[0] != 0);
      break;
   }

   case nir_intrinsic_load_param: {
      unsigned param_idx = nir_intrinsic_param_idx(instr);
      validate_assert(state, param_idx < state->impl->function->num_params);
      nir_parameter *param = &state->impl->function->params[param_idx];
      validate_assert(state, instr->num_components == param->num_components);
      dest_bit_size = param->bit_size;
      break;
   }

   case nir_intrinsic_load_deref: {
      nir_deref_instr *src = nir_src_as_deref(instr->src[0]);
      assert(src);
      validate_assert(state, glsl_type_is_vector_or_scalar(src->type) ||
                      (src->modes == nir_var_uniform &&
                       glsl_get_base_type(src->type) == GLSL_TYPE_SUBROUTINE));
      validate_assert(state, instr->num_components ==
                             glsl_get_vector_elements(src->type));
      dest_bit_size = glsl_get_bit_size(src->type);
      /* Also allow 32-bit boolean load operations */
      if (glsl_type_is_boolean(src->type))
         dest_bit_size |= 32;
      break;
   }

   case nir_intrinsic_store_deref: {
      nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
      assert(dst);
      validate_assert(state, glsl_type_is_vector_or_scalar(dst->type));
      validate_assert(state, instr->num_components ==
                             glsl_get_vector_elements(dst->type));
      src_bit_sizes[1] = glsl_get_bit_size(dst->type);
      /* Also allow 32-bit boolean store operations */
      if (glsl_type_is_boolean(dst->type))
         src_bit_sizes[1] |= 32;
      validate_assert(state, !nir_deref_mode_may_be(dst, nir_var_read_only_modes));
      break;
   }

   case nir_intrinsic_copy_deref: {
      nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
      nir_deref_instr *src = nir_src_as_deref(instr->src[1]);
      validate_assert(state, glsl_get_bare_type(dst->type) ==
                             glsl_get_bare_type(src->type));
      validate_assert(state, !nir_deref_mode_may_be(dst, nir_var_read_only_modes));
      break;
   }

   case nir_intrinsic_load_ubo_vec4: {
      int bit_size = nir_dest_bit_size(instr->dest);
      validate_assert(state, bit_size >= 8);
      validate_assert(state, (nir_intrinsic_component(instr) +
                              instr->num_components) * (bit_size / 8) <= 16);
      break;
   }

   case nir_intrinsic_load_ubo:
      /* Make sure that the creator didn't forget to set the range_base+range. */
      validate_assert(state, nir_intrinsic_range(instr) != 0);
      FALLTHROUGH;
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_load_shared:
   case nir_intrinsic_load_global:
   case nir_intrinsic_load_global_constant:
   case nir_intrinsic_load_scratch:
   case nir_intrinsic_load_constant:
      /* These memory load operations must have alignments */
      validate_assert(state,
         util_is_power_of_two_nonzero(nir_intrinsic_align_mul(instr)));
      validate_assert(state, nir_intrinsic_align_offset(instr) <
                             nir_intrinsic_align_mul(instr));
      FALLTHROUGH;

   case nir_intrinsic_load_uniform:
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_output:
   case nir_intrinsic_load_per_vertex_output:
   case nir_intrinsic_load_per_primitive_output:
   case nir_intrinsic_load_push_constant:
      /* All memory load operations must load at least a byte */
      validate_assert(state, nir_dest_bit_size(instr->dest) >= 8);
      break;

   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_store_shared:
   case nir_intrinsic_store_global:
   case nir_intrinsic_store_scratch:
      /* These memory store operations must also have alignments */
      validate_assert(state,
         util_is_power_of_two_nonzero(nir_intrinsic_align_mul(instr)));
      validate_assert(state, nir_intrinsic_align_offset(instr) <
                             nir_intrinsic_align_mul(instr));
      FALLTHROUGH;

   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output:
      /* All memory store operations must store at least a byte */
      validate_assert(state, nir_src_bit_size(instr->src[0]) >= 8);
      break;

   case nir_intrinsic_deref_mode_is:
   case nir_intrinsic_addr_mode_is:
      validate_assert(state,
         util_bitcount(nir_intrinsic_memory_modes(instr)) == 1);
      break;

   case nir_intrinsic_image_deref_atomic_add:
   case nir_intrinsic_image_deref_atomic_imin:
   case nir_intrinsic_image_deref_atomic_umin:
   case nir_intrinsic_image_deref_atomic_imax:
   case nir_intrinsic_image_deref_atomic_umax:
   case nir_intrinsic_image_deref_atomic_and:
   case nir_intrinsic_image_deref_atomic_or:
   case nir_intrinsic_image_deref_atomic_xor:
   case nir_intrinsic_image_deref_atomic_comp_swap:
   case nir_intrinsic_image_atomic_add:
   case nir_intrinsic_image_atomic_imin:
   case nir_intrinsic_image_atomic_umin:
   case nir_intrinsic_image_atomic_imax:
   case nir_intrinsic_image_atomic_umax:
   case nir_intrinsic_image_atomic_and:
   case nir_intrinsic_image_atomic_or:
   case nir_intrinsic_image_atomic_xor:
   case nir_intrinsic_image_atomic_comp_swap:
   case nir_intrinsic_bindless_image_atomic_add:
   case nir_intrinsic_bindless_image_atomic_imin:
   case nir_intrinsic_bindless_image_atomic_umin:
   case nir_intrinsic_bindless_image_atomic_imax:
   case nir_intrinsic_bindless_image_atomic_umax:
   case nir_intrinsic_bindless_image_atomic_and:
   case nir_intrinsic_bindless_image_atomic_or:
   case nir_intrinsic_bindless_image_atomic_xor:
   case nir_intrinsic_bindless_image_atomic_comp_swap: {
      enum pipe_format format = image_intrin_format(instr);
      if (format != PIPE_FORMAT_COUNT) {
         validate_assert(state, format == PIPE_FORMAT_R32_UINT ||
                                format == PIPE_FORMAT_R32_SINT ||
                                format == PIPE_FORMAT_R64_UINT ||
                                format == PIPE_FORMAT_R64_SINT);
         validate_assert(state, nir_dest_bit_size(instr->dest) ==
                                util_format_get_blocksizebits(format));
      }
      break;
   }

   case nir_intrinsic_image_deref_atomic_exchange:
   case nir_intrinsic_image_atomic_exchange:
   case nir_intrinsic_bindless_image_atomic_exchange: {
      enum pipe_format format = image_intrin_format(instr);
      if (format != PIPE_FORMAT_COUNT) {
         validate_assert(state, format == PIPE_FORMAT_R32_UINT ||
                                format == PIPE_FORMAT_R32_SINT ||
                                format == PIPE_FORMAT_R32_FLOAT ||
                                format == PIPE_FORMAT_R64_UINT ||
                                format == PIPE_FORMAT_R64_SINT);
         validate_assert(state, nir_dest_bit_size(instr->dest) ==
                                util_format_get_blocksizebits(format));
      }
      break;
   }

   case nir_intrinsic_image_deref_atomic_fadd:
   case nir_intrinsic_image_atomic_fadd:
   case nir_intrinsic_bindless_image_atomic_fadd: {
      enum pipe_format format = image_intrin_format(instr);
      validate_assert(state, format == PIPE_FORMAT_COUNT ||
                             format == PIPE_FORMAT_R32_FLOAT);
      validate_assert(state, nir_dest_bit_size(instr->dest) == 32);
      break;
   }

   case nir_intrinsic_image_deref_atomic_fmin:
   case nir_intrinsic_image_deref_atomic_fmax:
   case nir_intrinsic_image_atomic_fmin:
   case nir_intrinsic_image_atomic_fmax:
   case nir_intrinsic_bindless_image_atomic_fmin:
   case nir_intrinsic_bindless_image_atomic_fmax: {
      enum pipe_format format = image_intrin_format(instr);
      validate_assert(state, format == PIPE_FORMAT_COUNT ||
                             format == PIPE_FORMAT_R16_FLOAT ||
                             format == PIPE_FORMAT_R32_FLOAT ||
                             format == PIPE_FORMAT_R64_FLOAT);
      validate_assert(state, nir_dest_bit_size(instr->dest) ==
                             util_format_get_blocksizebits(format));
      break;
   }

   default:
      break;
   }

   if (instr->num_components > 0)
      validate_num_components(state, instr->num_components);

   const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
   unsigned num_srcs = info->num_srcs;
   for (unsigned i = 0; i < num_srcs; i++) {
      unsigned components_read = nir_intrinsic_src_components(instr, i);

      validate_num_components(state, components_read);

      validate_src(&instr->src[i], state, src_bit_sizes[i], components_read);
   }

   if (nir_intrinsic_infos[instr->intrinsic].has_dest) {
      unsigned components_written = nir_intrinsic_dest_components(instr);
      unsigned bit_sizes = info->dest_bit_sizes;
      if (!bit_sizes && info->bit_size_src >= 0)
         bit_sizes = nir_src_bit_size(instr->src[info->bit_size_src]);

      validate_num_components(state, components_written);
      if (dest_bit_size && bit_sizes)
         validate_assert(state, dest_bit_size & bit_sizes);
      else
         dest_bit_size = dest_bit_size ? dest_bit_size : bit_sizes;

      validate_dest(&instr->dest, state, dest_bit_size, components_written);
   }

   if (!vectorized_intrinsic(instr))
      validate_assert(state, instr->num_components == 0);

   if (nir_intrinsic_has_write_mask(instr)) {
      unsigned component_mask = BITFIELD_MASK(instr->num_components);
      validate_assert(state, (nir_intrinsic_write_mask(instr) & ~component_mask) == 0);
   }

   if (nir_intrinsic_has_io_xfb(instr)) {
      unsigned used_mask = 0;

      for (unsigned i = 0; i < 4; i++) {
         nir_io_xfb xfb = i < 2 ? nir_intrinsic_io_xfb(instr) :
                                  nir_intrinsic_io_xfb2(instr);
         unsigned xfb_mask = BITFIELD_RANGE(i, xfb.out[i % 2].num_components);

         /* Each component can be used only once by transform feedback info. */
         validate_assert(state, (xfb_mask & used_mask) == 0);
         used_mask |= xfb_mask;
      }
   }

   if (nir_intrinsic_has_io_semantics(instr) &&
       !nir_intrinsic_infos[instr->intrinsic].has_dest) {
      nir_io_semantics sem = nir_intrinsic_io_semantics(instr);

      /* An output that has no effect shouldn't be present in the IR. */
      validate_assert(state,
                      (nir_slot_is_sysval_output(sem.location) &&
                       !sem.no_sysval_output) ||
                      (nir_slot_is_varying(sem.location) && !sem.no_varying) ||
                      nir_instr_xfb_write_mask(instr));
   }
}

static void
validate_tex_instr(nir_tex_instr *instr, validate_state *state)
{
   bool src_type_seen[nir_num_tex_src_types];
   for (unsigned i = 0; i < nir_num_tex_src_types; i++)
      src_type_seen[i] = false;

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      validate_assert(state, !src_type_seen[instr->src[i].src_type]);
      src_type_seen[instr->src[i].src_type] = true;
      validate_src(&instr->src[i].src, state,
                   0, nir_tex_instr_src_size(instr, i));

      switch (instr->src[i].src_type) {

      case nir_tex_src_comparator:
         validate_assert(state, instr->is_shadow);
         break;

      case nir_tex_src_bias:
         validate_assert(state, instr->op == nir_texop_txb ||
                                instr->op == nir_texop_tg4);
         break;

      case nir_tex_src_lod:
         validate_assert(state, instr->op != nir_texop_tex &&
                                instr->op != nir_texop_txb &&
                                instr->op != nir_texop_txd &&
                                instr->op != nir_texop_lod);
         break;

      case nir_tex_src_ddx:
      case nir_tex_src_ddy:
         validate_assert(state, instr->op == nir_texop_txd);
         break;

      case nir_tex_src_texture_deref: {
         nir_deref_instr *deref = nir_src_as_deref(instr->src[i].src);
         if (!validate_assert(state, deref))
            break;

         validate_assert(state, glsl_type_is_image(deref->type) ||
                                glsl_type_is_texture(deref->type) ||
                                glsl_type_is_sampler(deref->type));
         break;
      }

      case nir_tex_src_sampler_deref: {
         nir_deref_instr *deref = nir_src_as_deref(instr->src[i].src);
         if (!validate_assert(state, deref))
            break;

         validate_assert(state, glsl_type_is_sampler(deref->type));
         break;
      }

      case nir_tex_src_coord:
      case nir_tex_src_projector:
      case nir_tex_src_offset:
      case nir_tex_src_min_lod:
      case nir_tex_src_ms_index:
      case nir_tex_src_texture_offset:
      case nir_tex_src_sampler_offset:
      case nir_tex_src_plane:
      case nir_tex_src_texture_handle:
      case nir_tex_src_sampler_handle:
         break;

      default:
         break;
      }
   }

   if (instr->op != nir_texop_tg4)
      validate_assert(state, instr->component == 0);

   if (nir_tex_instr_has_explicit_tg4_offsets(instr)) {
      validate_assert(state, instr->op == nir_texop_tg4);
      validate_assert(state, !src_type_seen[nir_tex_src_offset]);
   }

   validate_dest(&instr->dest, state, 0, nir_tex_instr_dest_size(instr));

   unsigned bit_size = nir_alu_type_get_type_size(instr->dest_type);
   validate_assert(state,
                   (bit_size ? bit_size : 32) ==
                   nir_dest_bit_size(instr->dest));
}

static void
validate_call_instr(nir_call_instr *instr, validate_state *state)
{
   validate_assert(state, instr->num_params == instr->callee->num_params);

   for (unsigned i = 0; i < instr->num_params; i++) {
      validate_src(&instr->params[i], state,
                   instr->callee->params[i].bit_size,
                   instr->callee->params[i].num_components);
   }
}

static void
validate_const_value(nir_const_value *val, unsigned bit_size,
                     validate_state *state)
{
   /* In order for block copies to work properly for things like instruction
    * comparisons and [de]serialization, we require the unused bits of the
    * nir_const_value to be zero.
    */
   nir_const_value cmp_val;
   memset(&cmp_val, 0, sizeof(cmp_val));
   switch (bit_size) {
   case 1:
      cmp_val.b = val->b;
      break;
   case 8:
      cmp_val.u8 = val->u8;
      break;
   case 16:
      cmp_val.u16 = val->u16;
      break;
   case 32:
      cmp_val.u32 = val->u32;
      break;
   case 64:
      cmp_val.u64 = val->u64;
      break;
   default:
      validate_assert(state, !"Invalid load_const bit size");
   }
   validate_assert(state, memcmp(val, &cmp_val, sizeof(cmp_val)) == 0);
}

static void
validate_load_const_instr(nir_load_const_instr *instr, validate_state *state)
{
   validate_ssa_def(&instr->def, state);

   for (unsigned i = 0; i < instr->def.num_components; i++)
      validate_const_value(&instr->value[i], instr->def.bit_size, state);
}

static void
validate_ssa_undef_instr(nir_ssa_undef_instr *instr, validate_state *state)
{
   validate_ssa_def(&instr->def, state);
}

static void
validate_phi_instr(nir_phi_instr *instr, validate_state *state)
{
   /*
    * don't validate the sources until we get to them from their predecessor
    * basic blocks, to avoid validating an SSA use before its definition.
    */

   validate_dest(&instr->dest, state, 0, 0);

   exec_list_validate(&instr->srcs);
   validate_assert(state, exec_list_length(&instr->srcs) ==
          state->block->predecessors->entries);
}

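/* A jump must be the last instruction in its block, and the block's
 * successors must agree with the jump kind: return, halt, break, continue,
 * and goto all target successors[0] with successors[1] NULL, while goto_if
 * puts the taken target in successors[1] and the fallthrough target in
 * successors[0].
 */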
static void
validate_jump_instr(nir_jump_instr *instr, validate_state *state)
{
   nir_block *block = state->block;
   validate_assert(state, &instr->instr == nir_block_last_instr(block));

   switch (instr->type) {
   case nir_jump_return:
   case nir_jump_halt:
      validate_assert(state, block->successors[0] == state->impl->end_block);
      validate_assert(state, block->successors[1] == NULL);
      validate_assert(state, instr->target == NULL);
      validate_assert(state, instr->else_target == NULL);
      break;

   case nir_jump_break:
      validate_assert(state, state->impl->structured);
      validate_assert(state, state->loop != NULL);
      if (state->loop) {
         nir_block *after =
            nir_cf_node_as_block(nir_cf_node_next(&state->loop->cf_node));
         validate_assert(state, block->successors[0] == after);
      }
      validate_assert(state, block->successors[1] == NULL);
      validate_assert(state, instr->target == NULL);
      validate_assert(state, instr->else_target == NULL);
      break;

   case nir_jump_continue:
      validate_assert(state, state->impl->structured);
      validate_assert(state, state->loop != NULL);
      if (state->loop) {
         nir_block *first = nir_loop_first_block(state->loop);
         validate_assert(state, block->successors[0] == first);
      }
      validate_assert(state, block->successors[1] == NULL);
      validate_assert(state, instr->target == NULL);
      validate_assert(state, instr->else_target == NULL);
      break;

   case nir_jump_goto:
      validate_assert(state, !state->impl->structured);
      validate_assert(state, instr->target == block->successors[0]);
      validate_assert(state, instr->target != NULL);
      validate_assert(state, instr->else_target == NULL);
      break;

   case nir_jump_goto_if:
      validate_assert(state, !state->impl->structured);
      validate_assert(state, instr->target == block->successors[1]);
      validate_assert(state, instr->else_target == block->successors[0]);
      validate_src(&instr->condition, state, 0, 1);
      validate_assert(state, instr->target != NULL);
      validate_assert(state, instr->else_target != NULL);
      break;

   default:
      validate_assert(state, !"Invalid jump instruction type");
      break;
   }
}

static void
validate_instr(nir_instr *instr, validate_state *state)
{
   validate_assert(state, instr->block == state->block);

   state->instr = instr;

   if (state->shader_gc_list)
      validate_assert(state, _mesa_set_search(state->shader_gc_list, instr));

   switch (instr->type) {
   case nir_instr_type_alu:
      validate_alu_instr(nir_instr_as_alu(instr), state);
      break;

   case nir_instr_type_deref:
      validate_deref_instr(nir_instr_as_deref(instr), state);
      break;

   case nir_instr_type_call:
      validate_call_instr(nir_instr_as_call(instr), state);
      break;

   case nir_instr_type_intrinsic:
      validate_intrinsic_instr(nir_instr_as_intrinsic(instr), state);
      break;

   case nir_instr_type_tex:
      validate_tex_instr(nir_instr_as_tex(instr), state);
      break;

   case nir_instr_type_load_const:
      validate_load_const_instr(nir_instr_as_load_const(instr), state);
      break;

   case nir_instr_type_phi:
      validate_phi_instr(nir_instr_as_phi(instr), state);
      break;

   case nir_instr_type_ssa_undef:
      validate_ssa_undef_instr(nir_instr_as_ssa_undef(instr), state);
      break;

   case nir_instr_type_jump:
      validate_jump_instr(nir_instr_as_jump(instr), state);
      break;

   default:
      validate_assert(state, !"Invalid instruction type");
      break;
   }

   state->instr = NULL;
}

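/* Phi sources are validated from the predecessor block, via
 * validate_phi_srcs() below, rather than in validate_phi_instr(), so that a
 * use is never checked before the walk has reached its definition.
 */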
static void
validate_phi_src(nir_phi_instr *instr, nir_block *pred, validate_state *state)
{
   state->instr = &instr->instr;

   validate_assert(state, instr->dest.is_ssa);

   exec_list_validate(&instr->srcs);
   nir_foreach_phi_src(src, instr) {
      if (src->pred == pred) {
         validate_assert(state, src->src.is_ssa);
         validate_src(&src->src, state, instr->dest.ssa.bit_size,
                      instr->dest.ssa.num_components);
         state->instr = NULL;
         return;
      }
   }
   validate_assert(state, !"Phi does not have a source corresponding to one "
                           "of its predecessor blocks");
}

static void
validate_phi_srcs(nir_block *block, nir_block *succ, validate_state *state)
{
   nir_foreach_instr(instr, succ) {
      if (instr->type != nir_instr_type_phi)
         break;

      validate_phi_src(nir_instr_as_phi(instr), block, state);
   }
}

static void
collect_blocks(struct exec_list *cf_list, validate_state *state)
{
   /* We walk the blocks manually here rather than using nir_foreach_block for
    * a few reasons:
    *
    *  1. nir_foreach_block() doesn't work properly for unstructured NIR and
    *     we need to be able to handle all forms of NIR here.
    *
    *  2. We want to call exec_list_validate() on every linked list in the IR
    *     which means we need to touch every linked list, and just walking
    *     blocks with nir_foreach_block() would make that difficult.  In
    *     particular, we want to validate each list before the first time we
    *     walk it so that we catch broken lists in exec_list_validate()
    *     instead of getting stuck in a hard-to-debug infinite loop in the
    *     validator.
    *
    *  3. nir_foreach_block() depends on several invariants of the CF node
    *     hierarchy which nir_validate_shader() is responsible for verifying.
    *     If we used nir_foreach_block() in nir_validate_shader(), we could
    *     end up blowing up on a bad list walk instead of throwing the much
    *     easier to debug validation error.
    */
   exec_list_validate(cf_list);
   foreach_list_typed(nir_cf_node, node, node, cf_list) {
      switch (node->type) {
      case nir_cf_node_block:
         _mesa_set_add(state->blocks, nir_cf_node_as_block(node));
         break;

      case nir_cf_node_if:
         collect_blocks(&nir_cf_node_as_if(node)->then_list, state);
         collect_blocks(&nir_cf_node_as_if(node)->else_list, state);
         break;

      case nir_cf_node_loop:
         collect_blocks(&nir_cf_node_as_loop(node)->body, state);
         break;

      default:
         unreachable("Invalid CF node type");
      }
   }
}

static void validate_cf_node(nir_cf_node *node, validate_state *state);

static void
validate_block_predecessors(nir_block *block, validate_state *state)
{
   for (unsigned i = 0; i < 2; i++) {
      if (block->successors[i] == NULL)
         continue;

      /* The block has to exist in the nir_function_impl */
      validate_assert(state, _mesa_set_search(state->blocks,
                                              block->successors[i]));

      /* And we have to be in our successor's predecessors set */
      validate_assert(state,
         _mesa_set_search(block->successors[i]->predecessors, block));

      validate_phi_srcs(block, block->successors[i], state);
   }

   /* The start block cannot have any predecessors */
   if (block == nir_start_block(state->impl))
      validate_assert(state, block->predecessors->entries == 0);

   set_foreach(block->predecessors, entry) {
      const nir_block *pred = entry->key;
      validate_assert(state, _mesa_set_search(state->blocks, pred));
      validate_assert(state, pred->successors[0] == block ||
                             pred->successors[1] == block);
   }
}

static void
validate_block(nir_block *block, validate_state *state)
{
   validate_assert(state, block->cf_node.parent == state->parent_node);

   state->block = block;

   exec_list_validate(&block->instr_list);
   nir_foreach_instr(instr, block) {
      if (instr->type == nir_instr_type_phi) {
         validate_assert(state, instr == nir_block_first_instr(block) ||
                nir_instr_prev(instr)->type == nir_instr_type_phi);
      }

      validate_instr(instr, state);
   }

   validate_assert(state, block->successors[0] != NULL);
   validate_assert(state, block->successors[0] != block->successors[1]);
   validate_block_predecessors(block, state);

   if (!state->impl->structured) {
      validate_assert(state, nir_block_ends_in_jump(block));
   } else if (!nir_block_ends_in_jump(block)) {
      nir_cf_node *next = nir_cf_node_next(&block->cf_node);
      if (next == NULL) {
         switch (state->parent_node->type) {
         case nir_cf_node_loop: {
            nir_block *first = nir_loop_first_block(state->loop);
            validate_assert(state, block->successors[0] == first);
            /* due to the hack for infinite loops, block->successors[1] may
             * point to the block after the loop.
             */
            break;
         }

         case nir_cf_node_if: {
            nir_block *after =
               nir_cf_node_as_block(nir_cf_node_next(state->parent_node));
            validate_assert(state, block->successors[0] == after);
            validate_assert(state, block->successors[1] == NULL);
            break;
         }

         case nir_cf_node_function:
            validate_assert(state, block->successors[0] == state->impl->end_block);
            validate_assert(state, block->successors[1] == NULL);
            break;

         default:
            unreachable("unknown control flow node type");
         }
      } else {
         if (next->type == nir_cf_node_if) {
            nir_if *if_stmt = nir_cf_node_as_if(next);
            validate_assert(state, block->successors[0] ==
                   nir_if_first_then_block(if_stmt));
            validate_assert(state, block->successors[1] ==
                   nir_if_first_else_block(if_stmt));
         } else if (next->type == nir_cf_node_loop) {
            nir_loop *loop = nir_cf_node_as_loop(next);
            validate_assert(state, block->successors[0] ==
                   nir_loop_first_block(loop));
            validate_assert(state, block->successors[1] == NULL);
         } else {
            validate_assert(state,
               !"Structured NIR cannot have consecutive blocks");
         }
      }
   }
}

static void
validate_end_block(nir_block *block, validate_state *state)
{
   validate_assert(state, block->cf_node.parent == &state->impl->cf_node);

   exec_list_validate(&block->instr_list);
   validate_assert(state, exec_list_is_empty(&block->instr_list));

   validate_assert(state, block->successors[0] == NULL);
   validate_assert(state, block->successors[1] == NULL);
   validate_block_predecessors(block, state);
}

static void
validate_if(nir_if *if_stmt, validate_state *state)
{
   validate_assert(state, state->impl->structured);

   state->if_stmt = if_stmt;

   validate_assert(state, !exec_node_is_head_sentinel(if_stmt->cf_node.node.prev));
   nir_cf_node *prev_node = nir_cf_node_prev(&if_stmt->cf_node);
   validate_assert(state, prev_node->type == nir_cf_node_block);

   validate_assert(state, !exec_node_is_tail_sentinel(if_stmt->cf_node.node.next));
   nir_cf_node *next_node = nir_cf_node_next(&if_stmt->cf_node);
   validate_assert(state, next_node->type == nir_cf_node_block);

   validate_src(&if_stmt->condition, state, 0, 1);

   validate_assert(state, !exec_list_is_empty(&if_stmt->then_list));
   validate_assert(state, !exec_list_is_empty(&if_stmt->else_list));

   nir_cf_node *old_parent = state->parent_node;
   state->parent_node = &if_stmt->cf_node;

   foreach_list_typed(nir_cf_node, cf_node, node, &if_stmt->then_list) {
      validate_cf_node(cf_node, state);
   }

   foreach_list_typed(nir_cf_node, cf_node, node, &if_stmt->else_list) {
      validate_cf_node(cf_node, state);
   }

   state->parent_node = old_parent;
   state->if_stmt = NULL;
}

static void
validate_loop(nir_loop *loop, validate_state *state)
{
   validate_assert(state, state->impl->structured);

   validate_assert(state, !exec_node_is_head_sentinel(loop->cf_node.node.prev));
   nir_cf_node *prev_node = nir_cf_node_prev(&loop->cf_node);
   validate_assert(state, prev_node->type == nir_cf_node_block);

   validate_assert(state, !exec_node_is_tail_sentinel(loop->cf_node.node.next));
   nir_cf_node *next_node = nir_cf_node_next(&loop->cf_node);
   validate_assert(state, next_node->type == nir_cf_node_block);

   validate_assert(state, !exec_list_is_empty(&loop->body));

   nir_cf_node *old_parent = state->parent_node;
   state->parent_node = &loop->cf_node;
   nir_loop *old_loop = state->loop;
   state->loop = loop;

   foreach_list_typed(nir_cf_node, cf_node, node, &loop->body) {
      validate_cf_node(cf_node, state);
   }

   state->parent_node = old_parent;
   state->loop = old_loop;
}
1394 
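/* Checks that the node's parent link matches the CF node currently being
 * walked, then dispatches to the per-type validator.
 */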
static void
validate_cf_node(nir_cf_node *node, validate_state *state)
{
   validate_assert(state, node->parent == state->parent_node);

   switch (node->type) {
   case nir_cf_node_block:
      validate_block(nir_cf_node_as_block(node), state);
      break;

   case nir_cf_node_if:
      validate_if(nir_cf_node_as_if(node), state);
      break;

   case nir_cf_node_loop:
      validate_loop(nir_cf_node_as_loop(node), state);
      break;

   default:
      unreachable("Invalid CF node type");
   }
}

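/* Runs before the function body is walked: checks that the register's index
 * is in range and unique, then sets up the per-register tracking state that
 * postvalidate_reg_decl() inspects after the walk.
 */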
static void
prevalidate_reg_decl(nir_register *reg, validate_state *state)
{
   validate_assert(state, reg->index < state->impl->reg_alloc);
   validate_assert(state, !BITSET_TEST(state->regs_found, reg->index));
   validate_num_components(state, reg->num_components);
   BITSET_SET(state->regs_found, reg->index);

   list_validate(&reg->uses);
   list_validate(&reg->defs);
   list_validate(&reg->if_uses);

   reg_validate_state *reg_state = ralloc(state->regs, reg_validate_state);
   reg_state->uses = _mesa_pointer_set_create(reg_state);
   reg_state->if_uses = _mesa_pointer_set_create(reg_state);
   reg_state->defs = _mesa_pointer_set_create(reg_state);

   reg_state->where_defined = state->impl;

   _mesa_hash_table_insert(state->regs, reg, reg_state);
}

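/* Runs after the function body has been walked: every use, if-use, and def
 * on the register's own lists must have been seen by the validator, and
 * removing each one from the corresponding tracking set must leave that set
 * empty.
 */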
static void
postvalidate_reg_decl(nir_register *reg, validate_state *state)
{
   struct hash_entry *entry = _mesa_hash_table_search(state->regs, reg);

   assume(entry);
   reg_validate_state *reg_state = (reg_validate_state *) entry->data;

   nir_foreach_use(src, reg) {
      struct set_entry *entry = _mesa_set_search(reg_state->uses, src);
      validate_assert(state, entry);
      _mesa_set_remove(reg_state->uses, entry);
   }
   validate_assert(state, reg_state->uses->entries == 0);

   nir_foreach_if_use(src, reg) {
      struct set_entry *entry = _mesa_set_search(reg_state->if_uses, src);
      validate_assert(state, entry);
      _mesa_set_remove(reg_state->if_uses, entry);
   }
   validate_assert(state, reg_state->if_uses->entries == 0);

   nir_foreach_def(src, reg) {
      struct set_entry *entry = _mesa_set_search(reg_state->defs, src);
      validate_assert(state, entry);
      _mesa_set_remove(reg_state->defs, entry);
   }
   validate_assert(state, reg_state->defs->entries == 0);
}

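/* Recursively checks a constant initializer against its GLSL type: vector
 * and scalar leaves are validated per component (with unused trailing
 * components required to be zero), while structs, interfaces, arrays, and
 * matrices recurse into their elements.
 */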
static void
validate_constant(nir_constant *c, const struct glsl_type *type,
                  validate_state *state)
{
   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(type);
      unsigned bit_size = glsl_get_bit_size(type);
      for (unsigned i = 0; i < num_components; i++)
         validate_const_value(&c->values[i], bit_size, state);
      for (unsigned i = num_components; i < NIR_MAX_VEC_COMPONENTS; i++)
         validate_assert(state, c->values[i].u64 == 0);
   } else {
      validate_assert(state, c->num_elements == glsl_get_length(type));
      if (glsl_type_is_struct_or_ifc(type)) {
         for (unsigned i = 0; i < c->num_elements; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            validate_constant(c->elements[i], elem_type, state);
         }
      } else if (glsl_type_is_array_or_matrix(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < c->num_elements; i++)
            validate_constant(c->elements[i], elem_type, state);
      } else {
         validate_assert(state, !"Invalid type for nir_constant");
      }
   }
}

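/* Validates a variable declaration: exactly one mode bit may be set and it
 * must be one of valid_modes.  Mode-specific restrictions (compact arrays,
 * per-view arrays, image types) are checked, and the variable is recorded
 * in var_defs, mapping function_temp variables to their impl and all others
 * to NULL.
 */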
static void
validate_var_decl(nir_variable *var, nir_variable_mode valid_modes,
                  validate_state *state)
{
   state->var = var;

   /* Must have exactly one mode set */
   validate_assert(state, util_is_power_of_two_nonzero(var->data.mode));
   validate_assert(state, var->data.mode & valid_modes);

   if (var->data.compact) {
      /* The "compact" flag is only valid on arrays of scalars. */
      assert(glsl_type_is_array(var->type));

      const struct glsl_type *type = glsl_get_array_element(var->type);
      if (nir_is_arrayed_io(var, state->shader->info.stage)) {
         if (var->data.per_view) {
            assert(glsl_type_is_array(type));
            type = glsl_get_array_element(type);
         }
         assert(glsl_type_is_array(type));
         assert(glsl_type_is_scalar(glsl_get_array_element(type)));
      } else {
         assert(glsl_type_is_scalar(type));
      }
   }

   if (var->num_members > 0) {
      const struct glsl_type *without_array = glsl_without_array(var->type);
      validate_assert(state, glsl_type_is_struct_or_ifc(without_array));
      validate_assert(state, var->num_members == glsl_get_length(without_array));
      validate_assert(state, var->members != NULL);
   }

   if (var->data.per_view)
      validate_assert(state, glsl_type_is_array(var->type));

   if (var->constant_initializer)
      validate_constant(var->constant_initializer, var->type, state);

   if (var->data.mode == nir_var_image) {
      validate_assert(state, !var->data.bindless);
      validate_assert(state, glsl_type_is_image(glsl_without_array(var->type)));
   }

   /*
    * TODO validate some things ir_validate.cpp does (requires more GLSL type
    * support)
    */

   _mesa_hash_table_insert(state->var_defs, var,
                           valid_modes == nir_var_function_temp ?
                           state->impl : NULL);

   state->var = NULL;
}

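/* Checks that an SSA def's index is in range and unique, and marks it as
 * seen so that later uses in the same block can be verified.
 */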
static bool
validate_ssa_def_dominance(nir_ssa_def *def, void *_state)
{
   validate_state *state = _state;

   validate_assert(state, def->index < state->impl->ssa_alloc);
   validate_assert(state, !BITSET_TEST(state->ssa_defs_found, def->index));
   BITSET_SET(state->ssa_defs_found, def->index);

   return true;
}

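/* A use in the same block as its def must come after the def (i.e. the def
 * must already be marked as seen); a use in any other block must be
 * dominated by the def's block.
 */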
static bool
validate_src_dominance(nir_src *src, void *_state)
{
   validate_state *state = _state;
   if (!src->is_ssa)
      return true;

   if (src->ssa->parent_instr->block == src->parent_instr->block) {
      validate_assert(state, src->ssa->index < state->impl->ssa_alloc);
      validate_assert(state, BITSET_TEST(state->ssa_defs_found,
                                         src->ssa->index));
   } else {
      validate_assert(state, nir_block_dominates(src->ssa->parent_instr->block,
                                                 src->parent_instr->block));
   }
   return true;
}

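/* Walks the implementation checking SSA dominance for every source.  Phi
 * sources are special: the def's block only needs to dominate the
 * corresponding predecessor block, not the phi itself.
 */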
static void
validate_ssa_dominance(nir_function_impl *impl, validate_state *state)
{
   nir_metadata_require(impl, nir_metadata_dominance);

   nir_foreach_block(block, impl) {
      state->block = block;
      nir_foreach_instr(instr, block) {
         state->instr = instr;
         if (instr->type == nir_instr_type_phi) {
            nir_phi_instr *phi = nir_instr_as_phi(instr);
            nir_foreach_phi_src(src, phi) {
               validate_assert(state,
                  nir_block_dominates(src->src.ssa->parent_instr->block,
                                      src->pred));
            }
         } else {
            nir_foreach_src(instr, validate_src_dominance, state);
         }
         nir_foreach_ssa_def(instr, validate_ssa_def_dominance, state);
      }
   }
}

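/* Validates a single function implementation: locals, registers, the CF
 * tree, and the end block, plus SSA dominance when
 * NIR_DEBUG(VALIDATE_SSA_DOMINANCE) is set.
 */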
static void
validate_function_impl(nir_function_impl *impl, validate_state *state)
{
   /* Resize the ssa_srcs set.  It's likely that the size of this set will
    * never actually hit the number of SSA defs because we remove sources from
    * the set as we visit them.  (It could actually be much larger because
    * each SSA def can be used more than once.)  However, growing it now costs
    * us very little (the extra memory is already dwarfed by the SSA defs
    * themselves) and makes collisions much less likely.
    */
   _mesa_set_resize(state->ssa_srcs, impl->ssa_alloc);

   validate_assert(state, impl->function->impl == impl);
   validate_assert(state, impl->cf_node.parent == NULL);

   if (impl->preamble) {
      validate_assert(state, impl->function->is_entrypoint);
      validate_assert(state, impl->preamble->is_preamble);
   }

   validate_assert(state, exec_list_is_empty(&impl->end_block->instr_list));
   validate_assert(state, impl->end_block->successors[0] == NULL);
   validate_assert(state, impl->end_block->successors[1] == NULL);

   state->impl = impl;
   state->parent_node = &impl->cf_node;

   exec_list_validate(&impl->locals);
   nir_foreach_function_temp_variable(var, impl) {
      validate_var_decl(var, nir_var_function_temp, state);
   }

   state->regs_found = reralloc(state->mem_ctx, state->regs_found,
                                BITSET_WORD, BITSET_WORDS(impl->reg_alloc));
   memset(state->regs_found, 0, BITSET_WORDS(impl->reg_alloc) *
                                sizeof(BITSET_WORD));
   exec_list_validate(&impl->registers);
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      prevalidate_reg_decl(reg, state);
   }

   state->ssa_defs_found = reralloc(state->mem_ctx, state->ssa_defs_found,
                                    BITSET_WORD, BITSET_WORDS(impl->ssa_alloc));
   memset(state->ssa_defs_found, 0, BITSET_WORDS(impl->ssa_alloc) *
                                    sizeof(BITSET_WORD));

   _mesa_set_clear(state->blocks, NULL);
   _mesa_set_resize(state->blocks, impl->num_blocks);
   collect_blocks(&impl->body, state);
   _mesa_set_add(state->blocks, impl->end_block);
   validate_assert(state, !exec_list_is_empty(&impl->body));
   foreach_list_typed(nir_cf_node, node, node, &impl->body) {
      validate_cf_node(node, state);
   }
   validate_end_block(impl->end_block, state);

   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      postvalidate_reg_decl(reg, state);
   }

   validate_assert(state, state->ssa_srcs->entries == 0);
   _mesa_set_clear(state->ssa_srcs, NULL);

   static int validate_dominance = -1;
   if (validate_dominance < 0) {
      validate_dominance =
         NIR_DEBUG(VALIDATE_SSA_DOMINANCE);
   }
   if (validate_dominance)
      validate_ssa_dominance(impl, state);
}

static void
validate_function(nir_function *func, validate_state *state)
{
   if (func->impl != NULL) {
      validate_assert(state, func->impl->function == func);
      validate_function_impl(func->impl, state);
   }
}

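/* All of the validator's scratch tables hang off a single ralloc context so
 * that destroy_validate_state() can free everything in one call.
 */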
static void
init_validate_state(validate_state *state)
{
   state->mem_ctx = ralloc_context(NULL);
   state->regs = _mesa_pointer_hash_table_create(state->mem_ctx);
   state->ssa_srcs = _mesa_pointer_set_create(state->mem_ctx);
   state->ssa_defs_found = NULL;
   state->regs_found = NULL;
   state->blocks = _mesa_pointer_set_create(state->mem_ctx);
   state->var_defs = _mesa_pointer_hash_table_create(state->mem_ctx);
   state->errors = _mesa_pointer_hash_table_create(state->mem_ctx);
   state->shader_gc_list = NIR_DEBUG(VALIDATE_GC_LIST) ?
                           _mesa_pointer_set_create(state->mem_ctx) : NULL;

   state->loop = NULL;
   state->instr = NULL;
   state->var = NULL;
}

static void
destroy_validate_state(validate_state *state)
{
   ralloc_free(state->mem_ctx);
}

mtx_t fail_dump_mutex = _MTX_INITIALIZER_NP;

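/* Prints the shader annotated with the recorded errors, then prints any
 * error entries still left in the table afterwards, and finally aborts.
 * The mutex keeps dumps from concurrent threads from interleaving.
 */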
static void
dump_errors(validate_state *state, const char *when)
{
   struct hash_table *errors = state->errors;

   /* Lock around dumping so that we get clean dumps in a multi-threaded
    * scenario
    */
   mtx_lock(&fail_dump_mutex);

   if (when) {
      fprintf(stderr, "NIR validation failed %s\n", when);
      fprintf(stderr, "%d errors:\n", _mesa_hash_table_num_entries(errors));
   } else {
      fprintf(stderr, "NIR validation failed with %d errors:\n",
              _mesa_hash_table_num_entries(errors));
   }

   nir_print_shader_annotated(state->shader, stderr, errors);

   if (_mesa_hash_table_num_entries(errors) > 0) {
      fprintf(stderr, "%d additional errors:\n",
              _mesa_hash_table_num_entries(errors));
      hash_table_foreach(errors, entry) {
         fprintf(stderr, "%s\n", (char *)entry->data);
      }
   }

   mtx_unlock(&fail_dump_mutex);

   abort();
}

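/* Public entry point: validates all shader-level variables and every
 * function, sanity-checks xfb_info, and dumps the collected errors (and
 * aborts) if anything failed.  Skipped entirely under NIR_DEBUG(NOVALIDATE).
 */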
void
nir_validate_shader(nir_shader *shader, const char *when)
{
   if (NIR_DEBUG(NOVALIDATE))
      return;

   validate_state state;
   init_validate_state(&state);

   if (state.shader_gc_list) {
      list_for_each_entry(nir_instr, instr, &shader->gc_list, gc_node) {
         if (instr->node.prev || instr->node.next)
            _mesa_set_add(state.shader_gc_list, instr);
      }
   }

   state.shader = shader;

   nir_variable_mode valid_modes =
      nir_var_shader_in |
      nir_var_shader_out |
      nir_var_shader_temp |
      nir_var_uniform |
      nir_var_mem_ubo |
      nir_var_system_value |
      nir_var_mem_ssbo |
      nir_var_mem_shared |
      nir_var_mem_global |
      nir_var_mem_push_const |
      nir_var_mem_constant |
      nir_var_image;

   if (gl_shader_stage_is_callable(shader->info.stage))
      valid_modes |= nir_var_shader_call_data;

   if (shader->info.stage == MESA_SHADER_ANY_HIT ||
       shader->info.stage == MESA_SHADER_CLOSEST_HIT ||
       shader->info.stage == MESA_SHADER_INTERSECTION)
      valid_modes |= nir_var_ray_hit_attrib;

   if (shader->info.stage == MESA_SHADER_TASK ||
       shader->info.stage == MESA_SHADER_MESH)
      valid_modes |= nir_var_mem_task_payload;

   exec_list_validate(&shader->variables);
   nir_foreach_variable_in_shader(var, shader)
      validate_var_decl(var, valid_modes, &state);

   exec_list_validate(&shader->functions);
   foreach_list_typed(nir_function, func, node, &shader->functions) {
      validate_function(func, &state);
   }

   if (shader->xfb_info != NULL) {
      /* At least validate that, if nir_shader::xfb_info exists, the shader
       * has real transform feedback going on.
       */
      validate_assert(&state, shader->info.stage == MESA_SHADER_VERTEX ||
                              shader->info.stage == MESA_SHADER_TESS_EVAL ||
                              shader->info.stage == MESA_SHADER_GEOMETRY);
      validate_assert(&state, shader->xfb_info->buffers_written != 0);
      validate_assert(&state, shader->xfb_info->streams_written != 0);
      validate_assert(&state, shader->xfb_info->output_count > 0);
   }

   if (_mesa_hash_table_num_entries(state.errors) > 0)
      dump_errors(&state, when);

   destroy_validate_state(&state);
}

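/* Public entry point that runs only the SSA dominance checks; presumably
 * useful as a cheaper alternative when full validation is too expensive.
 */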
void
nir_validate_ssa_dominance(nir_shader *shader, const char *when)
{
   if (NIR_DEBUG(NOVALIDATE))
      return;

   validate_state state;
   init_validate_state(&state);

   state.shader = shader;

   nir_foreach_function(func, shader) {
      if (func->impl == NULL)
         continue;

      state.ssa_defs_found = reralloc(state.mem_ctx, state.ssa_defs_found,
                                      BITSET_WORD,
                                      BITSET_WORDS(func->impl->ssa_alloc));
      memset(state.ssa_defs_found, 0, BITSET_WORDS(func->impl->ssa_alloc) *
                                      sizeof(BITSET_WORD));

      state.impl = func->impl;
      validate_ssa_dominance(func->impl, &state);
   }

   if (_mesa_hash_table_num_entries(state.errors) > 0)
      dump_errors(&state, when);

   destroy_validate_state(&state);
}

#endif /* NDEBUG */