/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

#include "nir.h"
#include "c11/threads.h"
#include <assert.h>

/*
 * This file checks for invalid IR indicating a bug somewhere in the compiler.
 */

/* Since this file is just a pile of asserts, don't bother compiling it if
 * we're not building a debug build.
 */
#ifndef NDEBUG

/*
 * Per-register validation state.
 */

typedef struct {
   /*
    * equivalent to the uses and defs in nir_register, but built up by the
    * validator. At the end, we verify that the sets have the same entries.
    */
   struct set *uses, *if_uses, *defs;
   nir_function_impl *where_defined; /* NULL for global registers */
} reg_validate_state;

typedef struct {
   void *mem_ctx;

   /* map of register -> validation state (struct above) */
   struct hash_table *regs;

   /* the current shader being validated */
   nir_shader *shader;

   /* the current instruction being validated */
   nir_instr *instr;

   /* the current variable being validated */
   nir_variable *var;

   /* the current basic block being validated */
   nir_block *block;

   /* the current if statement being validated */
   nir_if *if_stmt;

   /* the current loop being visited */
   nir_loop *loop;

   /* the parent of the current cf node being visited */
   nir_cf_node *parent_node;

   /* the current function implementation being validated */
   nir_function_impl *impl;

   /* Set of seen SSA sources */
   struct set *ssa_srcs;

   /* bitset of ssa definitions we have found; used to check uniqueness */
   BITSET_WORD *ssa_defs_found;

   /* bitset of registers we have currently found; used to check uniqueness */
   BITSET_WORD *regs_found;

   /* map of variable -> function implementation where it is defined or NULL
    * if it is a global variable
    */
   struct hash_table *var_defs;

   /* map of instruction/var/etc to failed assert string */
   struct hash_table *errors;
} validate_state;

static void
log_error(validate_state *state, const char *cond, const char *file, int line)
{
   const void *obj;

   if (state->instr)
      obj = state->instr;
   else if (state->var)
      obj = state->var;
   else
      obj = cond;

   char *msg = ralloc_asprintf(state->errors, "error: %s (%s:%d)",
                               cond, file, line);

   _mesa_hash_table_insert(state->errors, obj, msg);
}

#define validate_assert(state, cond) do {             \
      if (!(cond))                                    \
         log_error(state, #cond, __FILE__, __LINE__); \
   } while (0)
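
/* Unlike a plain assert(), validate_assert() records the failure and keeps
 * going, so a single validation run can report every problem in the shader;
 * dump_errors() at the bottom of this file prints them all and then aborts.
 * A hypothetical failing check such as
 *
 *    validate_assert(state, src->ssa != NULL);
 *
 * logs "error: src->ssa != NULL (nir_validate.c:<line>)" keyed to the
 * instruction or variable currently being validated, which is how
 * nir_print_shader_annotated() attaches errors to the offending IR.
 */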

static void validate_src(nir_src *src, validate_state *state,
                         unsigned bit_sizes, unsigned num_components);

static void
validate_num_components(validate_state *state, unsigned num_components)
{
   validate_assert(state, nir_num_components_valid(num_components));
}

static void
validate_reg_src(nir_src *src, validate_state *state,
                 unsigned bit_sizes, unsigned num_components)
{
   validate_assert(state, src->reg.reg != NULL);

   struct hash_entry *entry;
   entry = _mesa_hash_table_search(state->regs, src->reg.reg);
   validate_assert(state, entry);

   reg_validate_state *reg_state = (reg_validate_state *) entry->data;

   if (state->instr) {
      _mesa_set_add(reg_state->uses, src);
   } else {
      validate_assert(state, state->if_stmt);
      _mesa_set_add(reg_state->if_uses, src);
   }

   validate_assert(state, reg_state->where_defined == state->impl &&
                   "using a register declared in a different function");

   if (bit_sizes)
      validate_assert(state, src->reg.reg->bit_size & bit_sizes);
   if (num_components)
      validate_assert(state, src->reg.reg->num_components == num_components);

   validate_assert(state, (src->reg.reg->num_array_elems == 0 ||
                   src->reg.base_offset < src->reg.reg->num_array_elems) &&
                   "definitely out-of-bounds array access");

   if (src->reg.indirect) {
      validate_assert(state, src->reg.reg->num_array_elems != 0);
      validate_assert(state, (src->reg.indirect->is_ssa ||
                              src->reg.indirect->reg.indirect == NULL) &&
                      "only one level of indirection allowed");
      validate_src(src->reg.indirect, state, 32, 1);
   }
}

#define SET_PTR_BIT(ptr, bit) \
   (void *)(((uintptr_t)(ptr)) | (((uintptr_t)1) << bit))
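
/* The ssa_srcs set stores both instruction uses and if-condition uses of an
 * SSA def. To tell them apart without a second set, if-uses are tagged by
 * setting bit 0 of the nir_src pointer before insertion (see
 * validate_ssa_def() and validate_ssa_src() below). This relies on nir_src
 * pointers being at least 2-byte aligned, so bit 0 is never part of a real
 * address. Illustrative usage:
 *
 *    _mesa_set_add(set, SET_PTR_BIT(src, 0));    // insert a tagged if-use
 *    _mesa_set_search(set, SET_PTR_BIT(src, 0)); // look it up the same way
 */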

static void
validate_ssa_src(nir_src *src, validate_state *state,
                 unsigned bit_sizes, unsigned num_components)
{
   validate_assert(state, src->ssa != NULL);

   /* As we walk SSA defs, we add every use to this set. We need to make sure
    * our use is seen in a use list.
    */
   struct set_entry *entry;
   if (state->instr) {
      entry = _mesa_set_search(state->ssa_srcs, src);
   } else {
      entry = _mesa_set_search(state->ssa_srcs, SET_PTR_BIT(src, 0));
   }
   validate_assert(state, entry);

   /* This will let us prove that we've seen all the sources */
   if (entry)
      _mesa_set_remove(state->ssa_srcs, entry);

   if (bit_sizes)
      validate_assert(state, src->ssa->bit_size & bit_sizes);
   if (num_components)
      validate_assert(state, src->ssa->num_components == num_components);

   /* TODO validate that the use is dominated by the definition */
}

static void
validate_src(nir_src *src, validate_state *state,
             unsigned bit_sizes, unsigned num_components)
{
   if (state->instr)
      validate_assert(state, src->parent_instr == state->instr);
   else
      validate_assert(state, src->parent_if == state->if_stmt);

   if (src->is_ssa)
      validate_ssa_src(src, state, bit_sizes, num_components);
   else
      validate_reg_src(src, state, bit_sizes, num_components);
}

static void
validate_alu_src(nir_alu_instr *instr, unsigned index, validate_state *state)
{
   nir_alu_src *src = &instr->src[index];

   if (instr->op == nir_op_mov)
      assert(!src->abs && !src->negate);

   unsigned num_components = nir_src_num_components(src->src);
   for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
      validate_assert(state, src->swizzle[i] < NIR_MAX_VEC_COMPONENTS);

      if (nir_alu_instr_channel_used(instr, index, i))
         validate_assert(state, src->swizzle[i] < num_components);
   }

   validate_src(&src->src, state, 0, 0);
}

static void
validate_reg_dest(nir_reg_dest *dest, validate_state *state,
                  unsigned bit_sizes, unsigned num_components)
{
   validate_assert(state, dest->reg != NULL);

   validate_assert(state, dest->parent_instr == state->instr);

   struct hash_entry *entry2;
   entry2 = _mesa_hash_table_search(state->regs, dest->reg);

   validate_assert(state, entry2);

   reg_validate_state *reg_state = (reg_validate_state *) entry2->data;
   _mesa_set_add(reg_state->defs, dest);

   validate_assert(state, reg_state->where_defined == state->impl &&
                   "writing to a register declared in a different function");

   if (bit_sizes)
      validate_assert(state, dest->reg->bit_size & bit_sizes);
   if (num_components)
      validate_assert(state, dest->reg->num_components == num_components);

   validate_assert(state, (dest->reg->num_array_elems == 0 ||
                   dest->base_offset < dest->reg->num_array_elems) &&
                   "definitely out-of-bounds array access");

   if (dest->indirect) {
      validate_assert(state, dest->reg->num_array_elems != 0);
      validate_assert(state, (dest->indirect->is_ssa || dest->indirect->reg.indirect == NULL) &&
                      "only one level of indirection allowed");
      validate_src(dest->indirect, state, 32, 1);
   }
}

static void
validate_ssa_def(nir_ssa_def *def, validate_state *state)
{
   validate_assert(state, def->index < state->impl->ssa_alloc);
   validate_assert(state, !BITSET_TEST(state->ssa_defs_found, def->index));
   BITSET_SET(state->ssa_defs_found, def->index);

   validate_assert(state, def->parent_instr == state->instr);
   validate_num_components(state, def->num_components);

   list_validate(&def->uses);
   nir_foreach_use(src, def) {
      validate_assert(state, src->is_ssa);
      validate_assert(state, src->ssa == def);
      bool already_seen = false;
      _mesa_set_search_and_add(state->ssa_srcs, src, &already_seen);
      /* A nir_src should only appear once and only in one SSA def use list */
      validate_assert(state, !already_seen);
   }

   list_validate(&def->if_uses);
   nir_foreach_if_use(src, def) {
      validate_assert(state, src->is_ssa);
      validate_assert(state, src->ssa == def);
      bool already_seen = false;
      _mesa_set_search_and_add(state->ssa_srcs, SET_PTR_BIT(src, 0),
                               &already_seen);
      /* A nir_src should only appear once and only in one SSA def use list */
      validate_assert(state, !already_seen);
   }
}
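
/* The use/def bookkeeping above works in two phases: validate_ssa_def() adds
 * every entry of a def's use lists to state->ssa_srcs, and validate_ssa_src()
 * removes an entry when the corresponding source is actually visited.
 * Anything still in the set at the end of the impl (checked in
 * validate_function_impl()) is a use-list entry whose source never appeared
 * in the IR, i.e. a stale or corrupted use list.
 */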

static void
validate_dest(nir_dest *dest, validate_state *state,
              unsigned bit_sizes, unsigned num_components)
{
   if (dest->is_ssa) {
      if (bit_sizes)
         validate_assert(state, dest->ssa.bit_size & bit_sizes);
      if (num_components)
         validate_assert(state, dest->ssa.num_components == num_components);
      validate_ssa_def(&dest->ssa, state);
   } else {
      validate_reg_dest(&dest->reg, state, bit_sizes, num_components);
   }
}

static void
validate_alu_dest(nir_alu_instr *instr, validate_state *state)
{
   nir_alu_dest *dest = &instr->dest;

   if (instr->op == nir_op_mov)
      assert(!dest->saturate);

   unsigned dest_size = nir_dest_num_components(dest->dest);
   /*
    * validate that the instruction doesn't write to components not in the
    * register/SSA value
    */
   validate_assert(state, !(dest->write_mask & ~((1 << dest_size) - 1)));

   /* validate that saturate is only ever used on instructions with
    * destinations of type float
    */
   nir_alu_instr *alu = nir_instr_as_alu(state->instr);
   validate_assert(state,
                   (nir_alu_type_get_base_type(nir_op_infos[alu->op].output_type) ==
                    nir_type_float) ||
                   !dest->saturate);

   validate_dest(&dest->dest, state, 0, 0);
}

static void
validate_alu_instr(nir_alu_instr *instr, validate_state *state)
{
   validate_assert(state, instr->op < nir_num_opcodes);

   unsigned instr_bit_size = 0;
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      nir_alu_type src_type = nir_op_infos[instr->op].input_types[i];
      unsigned src_bit_size = nir_src_bit_size(instr->src[i].src);
      if (nir_alu_type_get_type_size(src_type)) {
         validate_assert(state, src_bit_size == nir_alu_type_get_type_size(src_type));
      } else if (instr_bit_size) {
         validate_assert(state, src_bit_size == instr_bit_size);
      } else {
         instr_bit_size = src_bit_size;
      }

      if (nir_alu_type_get_base_type(src_type) == nir_type_float) {
         /* 8-bit float isn't a thing */
         validate_assert(state, src_bit_size == 16 || src_bit_size == 32 ||
                         src_bit_size == 64);
      }

      validate_alu_src(instr, i, state);
   }

   nir_alu_type dest_type = nir_op_infos[instr->op].output_type;
   unsigned dest_bit_size = nir_dest_bit_size(instr->dest.dest);
   if (nir_alu_type_get_type_size(dest_type)) {
      validate_assert(state, dest_bit_size == nir_alu_type_get_type_size(dest_type));
   } else if (instr_bit_size) {
      validate_assert(state, dest_bit_size == instr_bit_size);
   } else {
      /* The only unsized thing is the destination so it's vacuously valid */
   }

   if (nir_alu_type_get_base_type(dest_type) == nir_type_float) {
      /* 8-bit float isn't a thing */
      validate_assert(state, dest_bit_size == 16 || dest_bit_size == 32 ||
                      dest_bit_size == 64);
   }

   validate_alu_dest(instr, state);
}

static void
validate_var_use(nir_variable *var, validate_state *state)
{
   struct hash_entry *entry = _mesa_hash_table_search(state->var_defs, var);
   validate_assert(state, entry);
   if (entry && var->data.mode == nir_var_function_temp)
      validate_assert(state, (nir_function_impl *) entry->data == state->impl);
}

static void
validate_deref_instr(nir_deref_instr *instr, validate_state *state)
{
   if (instr->deref_type == nir_deref_type_var) {
      /* Variable dereferences are stupid simple. */
      validate_assert(state, instr->mode == instr->var->data.mode);
      validate_assert(state, instr->type == instr->var->type);
      validate_var_use(instr->var, state);
   } else if (instr->deref_type == nir_deref_type_cast) {
      /* For cast, we simply have to trust the instruction. It's up to
       * lowering passes and front/back-ends to make them sane.
       */
      validate_src(&instr->parent, state, 0, 0);

      /* We just validate that the type and mode are there */
      validate_assert(state, instr->mode);
      validate_assert(state, instr->type);
      if (instr->cast.align_mul > 0) {
         validate_assert(state, util_is_power_of_two_nonzero(instr->cast.align_mul));
         validate_assert(state, instr->cast.align_offset < instr->cast.align_mul);
      } else {
         validate_assert(state, instr->cast.align_offset == 0);
      }
   } else {
      /* We require the parent to be SSA. This may be lifted in the future */
      validate_assert(state, instr->parent.is_ssa);

      /* The parent pointer value must have the same number of components
       * as the destination.
       */
      validate_src(&instr->parent, state, nir_dest_bit_size(instr->dest),
                   nir_dest_num_components(instr->dest));

      nir_instr *parent_instr = instr->parent.ssa->parent_instr;

      /* The parent must come from another deref instruction */
      validate_assert(state, parent_instr->type == nir_instr_type_deref);

      nir_deref_instr *parent = nir_instr_as_deref(parent_instr);

      validate_assert(state, instr->mode == parent->mode);

      switch (instr->deref_type) {
      case nir_deref_type_struct:
         validate_assert(state, glsl_type_is_struct_or_ifc(parent->type));
         validate_assert(state,
                         instr->strct.index < glsl_get_length(parent->type));
         validate_assert(state, instr->type ==
                         glsl_get_struct_field(parent->type, instr->strct.index));
         break;

      case nir_deref_type_array:
      case nir_deref_type_array_wildcard:
         if (instr->mode == nir_var_mem_ubo ||
             instr->mode == nir_var_mem_ssbo ||
             instr->mode == nir_var_mem_shared ||
             instr->mode == nir_var_mem_global) {
            /* Shared variables and UBO/SSBOs have a bit more relaxed rules
             * because we need to be able to handle array derefs on vectors.
             * Fortunately, nir_lower_io handles these just fine.
             */
            validate_assert(state, glsl_type_is_array(parent->type) ||
                            glsl_type_is_matrix(parent->type) ||
                            glsl_type_is_vector(parent->type));
         } else {
            /* Most of NIR cannot handle array derefs on vectors */
            validate_assert(state, glsl_type_is_array(parent->type) ||
                            glsl_type_is_matrix(parent->type));
         }
         validate_assert(state,
                         instr->type == glsl_get_array_element(parent->type));

         if (instr->deref_type == nir_deref_type_array) {
            validate_src(&instr->arr.index, state,
                         nir_dest_bit_size(instr->dest), 1);
         }
         break;

      case nir_deref_type_ptr_as_array:
         /* ptr_as_array derefs must have a parent that is either an array,
          * ptr_as_array, or cast. If the parent is a cast, we get the stride
          * information (if any) from the cast deref.
          */
         validate_assert(state,
                         parent->deref_type == nir_deref_type_array ||
                         parent->deref_type == nir_deref_type_ptr_as_array ||
                         parent->deref_type == nir_deref_type_cast);
         validate_src(&instr->arr.index, state,
                      nir_dest_bit_size(instr->dest), 1);
         break;

      default:
         unreachable("Invalid deref instruction type");
      }
   }

   /* We intentionally don't validate the size of the destination because we
    * want to let other compiler components such as SPIR-V decide how big
    * pointers should be.
    */
   validate_dest(&instr->dest, state, 0, 0);

   /* Deref instructions as if conditions don't make sense because if
    * conditions expect well-formed Booleans. If you want to compare with
    * NULL, an explicit comparison operation should be used.
    */
   validate_assert(state, list_is_empty(&instr->dest.ssa.if_uses));

   /* Only certain modes can be used as sources for phi instructions. */
   nir_foreach_use(use, &instr->dest.ssa) {
      if (use->parent_instr->type == nir_instr_type_phi) {
         validate_assert(state, instr->mode == nir_var_mem_ubo ||
                         instr->mode == nir_var_mem_ssbo ||
                         instr->mode == nir_var_mem_shared ||
                         instr->mode == nir_var_mem_global ||
                         instr->mode == nir_var_mem_constant);
      }
   }
}
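
/* For orientation, a source-level access like a[i].f becomes a chain of
 * deref instructions, each consuming its parent as a source, roughly:
 *
 *    deref_var    a        (nir_deref_type_var)
 *    deref_array  a[i]     (nir_deref_type_array, parent = the deref_var)
 *    deref_struct a[i].f   (nir_deref_type_struct, parent = the deref_array)
 *
 * This is an illustrative sketch of the shape, not literal nir_print output.
 */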

static bool
vectorized_intrinsic(nir_intrinsic_instr *intr)
{
   const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];

   if (info->dest_components == 0)
      return true;

   for (unsigned i = 0; i < info->num_srcs; i++)
      if (info->src_components[i] == 0)
         return true;

   return false;
}
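
/* An intrinsic counts as "vectorized" when its info table leaves the
 * destination or any source component count at 0, meaning the width comes
 * from instr->num_components at each call site (load_ubo, for example, can
 * be a one- to four-component load). Fixed-width intrinsics must leave
 * num_components at 0 instead, which validate_intrinsic_instr() checks at
 * its end.
 */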

static void
validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state)
{
   unsigned dest_bit_size = 0;
   unsigned src_bit_sizes[NIR_INTRINSIC_MAX_INPUTS] = { 0, };
   switch (instr->intrinsic) {
   case nir_intrinsic_load_param: {
      unsigned param_idx = nir_intrinsic_param_idx(instr);
      validate_assert(state, param_idx < state->impl->function->num_params);
      nir_parameter *param = &state->impl->function->params[param_idx];
      validate_assert(state, instr->num_components == param->num_components);
      dest_bit_size = param->bit_size;
      break;
   }

   case nir_intrinsic_load_deref: {
      nir_deref_instr *src = nir_src_as_deref(instr->src[0]);
      assert(src);
      validate_assert(state, glsl_type_is_vector_or_scalar(src->type) ||
                      (src->mode == nir_var_uniform &&
                       glsl_get_base_type(src->type) == GLSL_TYPE_SUBROUTINE));
      validate_assert(state, instr->num_components ==
                      glsl_get_vector_elements(src->type));
      dest_bit_size = glsl_get_bit_size(src->type);
      /* Also allow 32-bit boolean load operations */
      if (glsl_type_is_boolean(src->type))
         dest_bit_size |= 32;
      break;
   }

   case nir_intrinsic_store_deref: {
      nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
      assert(dst);
      validate_assert(state, glsl_type_is_vector_or_scalar(dst->type));
      validate_assert(state, instr->num_components ==
                      glsl_get_vector_elements(dst->type));
      src_bit_sizes[1] = glsl_get_bit_size(dst->type);
      /* Also allow 32-bit boolean store operations */
      if (glsl_type_is_boolean(dst->type))
         src_bit_sizes[1] |= 32;
      validate_assert(state, (dst->mode & (nir_var_shader_in |
                                           nir_var_uniform)) == 0);
      validate_assert(state, (nir_intrinsic_write_mask(instr) & ~((1 << instr->num_components) - 1)) == 0);
      break;
   }

   case nir_intrinsic_copy_deref: {
      nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
      nir_deref_instr *src = nir_src_as_deref(instr->src[1]);
      validate_assert(state, glsl_get_bare_type(dst->type) ==
                      glsl_get_bare_type(src->type));
      validate_assert(state, (dst->mode & (nir_var_shader_in |
                                           nir_var_uniform)) == 0);
      break;
   }

   case nir_intrinsic_load_ubo_vec4: {
      int bit_size = nir_dest_bit_size(instr->dest);
      validate_assert(state, bit_size >= 8);
      validate_assert(state, (nir_intrinsic_component(instr) +
                              instr->num_components) * (bit_size / 8) <= 16);
      break;
   }

   case nir_intrinsic_load_ubo:
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_load_shared:
   case nir_intrinsic_load_global:
   case nir_intrinsic_load_global_constant:
   case nir_intrinsic_load_scratch:
   case nir_intrinsic_load_constant:
      /* These memory load operations must have alignments */
      validate_assert(state,
                      util_is_power_of_two_nonzero(nir_intrinsic_align_mul(instr)));
      validate_assert(state, nir_intrinsic_align_offset(instr) <
                      nir_intrinsic_align_mul(instr));
      /* Fall through */

   case nir_intrinsic_load_uniform:
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_output:
   case nir_intrinsic_load_per_vertex_output:
   case nir_intrinsic_load_push_constant:
      /* All memory load operations must load at least a byte */
      validate_assert(state, nir_dest_bit_size(instr->dest) >= 8);
      break;

   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_store_shared:
   case nir_intrinsic_store_global:
   case nir_intrinsic_store_scratch:
      /* These memory store operations must also have alignments */
      validate_assert(state,
                      util_is_power_of_two_nonzero(nir_intrinsic_align_mul(instr)));
      validate_assert(state, nir_intrinsic_align_offset(instr) <
                      nir_intrinsic_align_mul(instr));
      /* Fall through */

   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output:
      /* All memory store operations must store at least a byte */
      validate_assert(state, nir_src_bit_size(instr->src[0]) >= 8);
      break;

   default:
      break;
   }

   if (instr->num_components > 0)
      validate_num_components(state, instr->num_components);

   const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
   unsigned num_srcs = info->num_srcs;
   for (unsigned i = 0; i < num_srcs; i++) {
      unsigned components_read = nir_intrinsic_src_components(instr, i);

      validate_num_components(state, components_read);

      validate_src(&instr->src[i], state, src_bit_sizes[i], components_read);
   }

   if (nir_intrinsic_infos[instr->intrinsic].has_dest) {
      unsigned components_written = nir_intrinsic_dest_components(instr);
      unsigned bit_sizes = nir_intrinsic_infos[instr->intrinsic].dest_bit_sizes;

      validate_num_components(state, components_written);
      if (dest_bit_size && bit_sizes)
         validate_assert(state, dest_bit_size & bit_sizes);
      else
         dest_bit_size = dest_bit_size ? dest_bit_size : bit_sizes;

      validate_dest(&instr->dest, state, dest_bit_size, components_written);
   }

   if (!vectorized_intrinsic(instr))
      validate_assert(state, instr->num_components == 0);
}

static void
validate_tex_instr(nir_tex_instr *instr, validate_state *state)
{
   bool src_type_seen[nir_num_tex_src_types];
   for (unsigned i = 0; i < nir_num_tex_src_types; i++)
      src_type_seen[i] = false;

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      validate_assert(state, !src_type_seen[instr->src[i].src_type]);
      src_type_seen[instr->src[i].src_type] = true;
      validate_src(&instr->src[i].src, state,
                   0, nir_tex_instr_src_size(instr, i));

      switch (instr->src[i].src_type) {
      case nir_tex_src_texture_deref:
      case nir_tex_src_sampler_deref:
         validate_assert(state, instr->src[i].src.is_ssa);
         validate_assert(state,
                         instr->src[i].src.ssa->parent_instr->type == nir_instr_type_deref);
         break;
      default:
         break;
      }
   }

   if (nir_tex_instr_has_explicit_tg4_offsets(instr)) {
      validate_assert(state, instr->op == nir_texop_tg4);
      validate_assert(state, !src_type_seen[nir_tex_src_offset]);
   }

   validate_dest(&instr->dest, state, 0, nir_tex_instr_dest_size(instr));
}

static void
validate_call_instr(nir_call_instr *instr, validate_state *state)
{
   validate_assert(state, instr->num_params == instr->callee->num_params);

   for (unsigned i = 0; i < instr->num_params; i++) {
      validate_src(&instr->params[i], state,
                   instr->callee->params[i].bit_size,
                   instr->callee->params[i].num_components);
   }
}

static void
validate_const_value(nir_const_value *val, unsigned bit_size,
                     validate_state *state)
{
   /* In order for block copies to work properly for things like instruction
    * comparisons and [de]serialization, we require the unused bits of the
    * nir_const_value to be zero.
    */
   nir_const_value cmp_val;
   memset(&cmp_val, 0, sizeof(cmp_val));
   switch (bit_size) {
   case 1:
      cmp_val.b = val->b;
      break;
   case 8:
      cmp_val.u8 = val->u8;
      break;
   case 16:
      cmp_val.u16 = val->u16;
      break;
   case 32:
      cmp_val.u32 = val->u32;
      break;
   case 64:
      cmp_val.u64 = val->u64;
      break;
   default:
      validate_assert(state, !"Invalid load_const bit size");
   }
   validate_assert(state, memcmp(val, &cmp_val, sizeof(cmp_val)) == 0);
}
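
/* Concretely, nir_const_value is a 64-bit union, so a 16-bit constant
 * 0xabcd must be stored with the whole union reading 0x000000000000abcd.
 * A value built by, say, writing .u64 first and then .u16 could leave
 * garbage in the upper 48 bits and would fail the memcmp() above even
 * though the .u16 fields compare equal.
 */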

static void
validate_load_const_instr(nir_load_const_instr *instr, validate_state *state)
{
   validate_ssa_def(&instr->def, state);

   for (unsigned i = 0; i < instr->def.num_components; i++)
      validate_const_value(&instr->value[i], instr->def.bit_size, state);
}

static void
validate_ssa_undef_instr(nir_ssa_undef_instr *instr, validate_state *state)
{
   validate_ssa_def(&instr->def, state);
}

static void
validate_phi_instr(nir_phi_instr *instr, validate_state *state)
{
   /*
    * don't validate the sources until we get to them from their predecessor
    * basic blocks, to avoid validating an SSA use before its definition.
    */

   validate_dest(&instr->dest, state, 0, 0);

   exec_list_validate(&instr->srcs);
   validate_assert(state, exec_list_length(&instr->srcs) ==
                   state->block->predecessors->entries);
}

static void
validate_jump_instr(nir_jump_instr *instr, validate_state *state)
{
   nir_block *block = state->block;
   validate_assert(state, &instr->instr == nir_block_last_instr(block));

   switch (instr->type) {
   case nir_jump_return:
      validate_assert(state, block->successors[0] == state->impl->end_block);
      validate_assert(state, block->successors[1] == NULL);
      validate_assert(state, instr->target == NULL);
      validate_assert(state, instr->else_target == NULL);
      break;

   case nir_jump_break:
      validate_assert(state, state->impl->structured);
      validate_assert(state, state->loop != NULL);
      if (state->loop) {
         nir_block *after =
            nir_cf_node_as_block(nir_cf_node_next(&state->loop->cf_node));
         validate_assert(state, block->successors[0] == after);
      }
      validate_assert(state, block->successors[1] == NULL);
      validate_assert(state, instr->target == NULL);
      validate_assert(state, instr->else_target == NULL);
      break;

   case nir_jump_continue:
      validate_assert(state, state->impl->structured);
      validate_assert(state, state->loop != NULL);
      if (state->loop) {
         nir_block *first = nir_loop_first_block(state->loop);
         validate_assert(state, block->successors[0] == first);
      }
      validate_assert(state, block->successors[1] == NULL);
      validate_assert(state, instr->target == NULL);
      validate_assert(state, instr->else_target == NULL);
      break;

   case nir_jump_goto:
      validate_assert(state, !state->impl->structured);
      validate_assert(state, instr->target == block->successors[0]);
      validate_assert(state, instr->target != NULL);
      validate_assert(state, instr->else_target == NULL);
      break;

   case nir_jump_goto_if:
      validate_assert(state, !state->impl->structured);
      validate_assert(state, instr->target == block->successors[1]);
      validate_assert(state, instr->else_target == block->successors[0]);
      validate_src(&instr->condition, state, 0, 1);
      validate_assert(state, instr->target != NULL);
      validate_assert(state, instr->else_target != NULL);
      break;

   default:
      validate_assert(state, !"Invalid jump instruction type");
      break;
   }
}
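
/* Jump rules differ by CFG flavor: structured NIR only allows return, break,
 * and continue, with targets implied by the CF tree, while unstructured NIR
 * (impl->structured == false) only allows goto and goto_if, whose explicit
 * targets must match the block's successor pointers. validate_block()
 * additionally requires every block in unstructured NIR to end in a jump.
 */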

static void
validate_instr(nir_instr *instr, validate_state *state)
{
   validate_assert(state, instr->block == state->block);

   state->instr = instr;

   switch (instr->type) {
   case nir_instr_type_alu:
      validate_alu_instr(nir_instr_as_alu(instr), state);
      break;

   case nir_instr_type_deref:
      validate_deref_instr(nir_instr_as_deref(instr), state);
      break;

   case nir_instr_type_call:
      validate_call_instr(nir_instr_as_call(instr), state);
      break;

   case nir_instr_type_intrinsic:
      validate_intrinsic_instr(nir_instr_as_intrinsic(instr), state);
      break;

   case nir_instr_type_tex:
      validate_tex_instr(nir_instr_as_tex(instr), state);
      break;

   case nir_instr_type_load_const:
      validate_load_const_instr(nir_instr_as_load_const(instr), state);
      break;

   case nir_instr_type_phi:
      validate_phi_instr(nir_instr_as_phi(instr), state);
      break;

   case nir_instr_type_ssa_undef:
      validate_ssa_undef_instr(nir_instr_as_ssa_undef(instr), state);
      break;

   case nir_instr_type_jump:
      validate_jump_instr(nir_instr_as_jump(instr), state);
      break;

   default:
      validate_assert(state, !"Invalid instruction type");
      break;
   }

   state->instr = NULL;
}

static void
validate_phi_src(nir_phi_instr *instr, nir_block *pred, validate_state *state)
{
   state->instr = &instr->instr;

   validate_assert(state, instr->dest.is_ssa);

   exec_list_validate(&instr->srcs);
   nir_foreach_phi_src(src, instr) {
      if (src->pred == pred) {
         validate_assert(state, src->src.is_ssa);
         validate_src(&src->src, state, instr->dest.ssa.bit_size,
                      instr->dest.ssa.num_components);
         state->instr = NULL;
         return;
      }
   }
   validate_assert(state, !"Phi does not have a source corresponding to one "
                           "of its predecessor blocks");
}

static void
validate_phi_srcs(nir_block *block, nir_block *succ, validate_state *state)
{
   nir_foreach_instr(instr, succ) {
      if (instr->type != nir_instr_type_phi)
         break;

      validate_phi_src(nir_instr_as_phi(instr), block, state);
   }
}

static void validate_cf_node(nir_cf_node *node, validate_state *state);

static void
validate_block(nir_block *block, validate_state *state)
{
   validate_assert(state, block->cf_node.parent == state->parent_node);

   state->block = block;

   exec_list_validate(&block->instr_list);
   nir_foreach_instr(instr, block) {
      if (instr->type == nir_instr_type_phi) {
         validate_assert(state, instr == nir_block_first_instr(block) ||
                         nir_instr_prev(instr)->type == nir_instr_type_phi);
      }

      validate_instr(instr, state);
   }

   validate_assert(state, block->successors[0] != NULL);
   validate_assert(state, block->successors[0] != block->successors[1]);

   for (unsigned i = 0; i < 2; i++) {
      if (block->successors[i] != NULL) {
         struct set_entry *entry =
            _mesa_set_search(block->successors[i]->predecessors, block);
         validate_assert(state, entry);

         validate_phi_srcs(block, block->successors[i], state);
      }
   }

   set_foreach(block->predecessors, entry) {
      const nir_block *pred = entry->key;
      validate_assert(state, pred->successors[0] == block ||
                      pred->successors[1] == block);
   }

   if (!state->impl->structured) {
      validate_assert(state, nir_block_ends_in_jump(block));
   } else if (!nir_block_ends_in_jump(block)) {
      nir_cf_node *next = nir_cf_node_next(&block->cf_node);
      if (next == NULL) {
         switch (state->parent_node->type) {
         case nir_cf_node_loop: {
            nir_block *first = nir_loop_first_block(state->loop);
            validate_assert(state, block->successors[0] == first);
            /* due to the hack for infinite loops, block->successors[1] may
             * point to the block after the loop.
             */
            break;
         }

         case nir_cf_node_if: {
            nir_block *after =
               nir_cf_node_as_block(nir_cf_node_next(state->parent_node));
            validate_assert(state, block->successors[0] == after);
            validate_assert(state, block->successors[1] == NULL);
            break;
         }

         case nir_cf_node_function:
            validate_assert(state, block->successors[0] == state->impl->end_block);
            validate_assert(state, block->successors[1] == NULL);
            break;

         default:
            unreachable("unknown control flow node type");
         }
      } else {
         if (next->type == nir_cf_node_if) {
            nir_if *if_stmt = nir_cf_node_as_if(next);
            validate_assert(state, block->successors[0] ==
                            nir_if_first_then_block(if_stmt));
            validate_assert(state, block->successors[1] ==
                            nir_if_first_else_block(if_stmt));
         } else if (next->type == nir_cf_node_loop) {
            nir_loop *loop = nir_cf_node_as_loop(next);
            validate_assert(state, block->successors[0] ==
                            nir_loop_first_block(loop));
            validate_assert(state, block->successors[1] == NULL);
         } else {
            validate_assert(state,
                            !"Structured NIR cannot have consecutive blocks");
         }
      }
   }
}

static void
validate_if(nir_if *if_stmt, validate_state *state)
{
   validate_assert(state, state->impl->structured);

   state->if_stmt = if_stmt;

   validate_assert(state, !exec_node_is_head_sentinel(if_stmt->cf_node.node.prev));
   nir_cf_node *prev_node = nir_cf_node_prev(&if_stmt->cf_node);
   validate_assert(state, prev_node->type == nir_cf_node_block);

   validate_assert(state, !exec_node_is_tail_sentinel(if_stmt->cf_node.node.next));
   nir_cf_node *next_node = nir_cf_node_next(&if_stmt->cf_node);
   validate_assert(state, next_node->type == nir_cf_node_block);

   validate_src(&if_stmt->condition, state, 0, 1);

   validate_assert(state, !exec_list_is_empty(&if_stmt->then_list));
   validate_assert(state, !exec_list_is_empty(&if_stmt->else_list));

   nir_cf_node *old_parent = state->parent_node;
   state->parent_node = &if_stmt->cf_node;

   exec_list_validate(&if_stmt->then_list);
   foreach_list_typed(nir_cf_node, cf_node, node, &if_stmt->then_list) {
      validate_cf_node(cf_node, state);
   }

   exec_list_validate(&if_stmt->else_list);
   foreach_list_typed(nir_cf_node, cf_node, node, &if_stmt->else_list) {
      validate_cf_node(cf_node, state);
   }

   state->parent_node = old_parent;
   state->if_stmt = NULL;
}

static void
validate_loop(nir_loop *loop, validate_state *state)
{
   validate_assert(state, state->impl->structured);

   validate_assert(state, !exec_node_is_head_sentinel(loop->cf_node.node.prev));
   nir_cf_node *prev_node = nir_cf_node_prev(&loop->cf_node);
   validate_assert(state, prev_node->type == nir_cf_node_block);

   validate_assert(state, !exec_node_is_tail_sentinel(loop->cf_node.node.next));
   nir_cf_node *next_node = nir_cf_node_next(&loop->cf_node);
   validate_assert(state, next_node->type == nir_cf_node_block);

   validate_assert(state, !exec_list_is_empty(&loop->body));

   nir_cf_node *old_parent = state->parent_node;
   state->parent_node = &loop->cf_node;
   nir_loop *old_loop = state->loop;
   state->loop = loop;

   exec_list_validate(&loop->body);
   foreach_list_typed(nir_cf_node, cf_node, node, &loop->body) {
      validate_cf_node(cf_node, state);
   }

   state->parent_node = old_parent;
   state->loop = old_loop;
}

static void
validate_cf_node(nir_cf_node *node, validate_state *state)
{
   validate_assert(state, node->parent == state->parent_node);

   switch (node->type) {
   case nir_cf_node_block:
      validate_block(nir_cf_node_as_block(node), state);
      break;

   case nir_cf_node_if:
      validate_if(nir_cf_node_as_if(node), state);
      break;

   case nir_cf_node_loop:
      validate_loop(nir_cf_node_as_loop(node), state);
      break;

   default:
      unreachable("Invalid CF node type");
   }
}

static void
prevalidate_reg_decl(nir_register *reg, validate_state *state)
{
   validate_assert(state, reg->index < state->impl->reg_alloc);
   validate_assert(state, !BITSET_TEST(state->regs_found, reg->index));
   validate_num_components(state, reg->num_components);
   BITSET_SET(state->regs_found, reg->index);

   list_validate(&reg->uses);
   list_validate(&reg->defs);
   list_validate(&reg->if_uses);

   reg_validate_state *reg_state = ralloc(state->regs, reg_validate_state);
   reg_state->uses = _mesa_pointer_set_create(reg_state);
   reg_state->if_uses = _mesa_pointer_set_create(reg_state);
   reg_state->defs = _mesa_pointer_set_create(reg_state);

   reg_state->where_defined = state->impl;

   _mesa_hash_table_insert(state->regs, reg, reg_state);
}

static void
postvalidate_reg_decl(nir_register *reg, validate_state *state)
{
   struct hash_entry *entry = _mesa_hash_table_search(state->regs, reg);

   assume(entry);
   reg_validate_state *reg_state = (reg_validate_state *) entry->data;

   nir_foreach_use(src, reg) {
      struct set_entry *entry = _mesa_set_search(reg_state->uses, src);
      validate_assert(state, entry);
      _mesa_set_remove(reg_state->uses, entry);
   }
   validate_assert(state, reg_state->uses->entries == 0);

   nir_foreach_if_use(src, reg) {
      struct set_entry *entry = _mesa_set_search(reg_state->if_uses, src);
      validate_assert(state, entry);
      _mesa_set_remove(reg_state->if_uses, entry);
   }
   validate_assert(state, reg_state->if_uses->entries == 0);

   nir_foreach_def(src, reg) {
      struct set_entry *entry = _mesa_set_search(reg_state->defs, src);
      validate_assert(state, entry);
      _mesa_set_remove(reg_state->defs, entry);
   }
   validate_assert(state, reg_state->defs->entries == 0);
}
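
/* Registers get the mirror image of the SSA scheme: prevalidate_reg_decl()
 * starts each register with empty sets, validate_reg_src() and
 * validate_reg_dest() fill them in as the IR is walked, and
 * postvalidate_reg_decl() then walks the register's own use/def lists.
 * Every list entry must have been visited (found in the set), and the
 * entries == 0 checks reject anything that was visited but is missing from
 * the lists.
 */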

static void
validate_var_decl(nir_variable *var, nir_variable_mode valid_modes,
                  validate_state *state)
{
   state->var = var;

   /* Must have exactly one mode set */
   validate_assert(state, util_is_power_of_two_nonzero(var->data.mode));
   validate_assert(state, var->data.mode & valid_modes);

   if (var->data.compact) {
      /* The "compact" flag is only valid on arrays of scalars. */
      assert(glsl_type_is_array(var->type));

      const struct glsl_type *type = glsl_get_array_element(var->type);
      if (nir_is_per_vertex_io(var, state->shader->info.stage)) {
         assert(glsl_type_is_array(type));
         assert(glsl_type_is_scalar(glsl_get_array_element(type)));
      } else {
         assert(glsl_type_is_scalar(type));
      }
   }

   if (var->num_members > 0) {
      const struct glsl_type *without_array = glsl_without_array(var->type);
      validate_assert(state, glsl_type_is_struct_or_ifc(without_array));
      validate_assert(state, var->num_members == glsl_get_length(without_array));
      validate_assert(state, var->members != NULL);
   }

   if (var->data.per_view)
      validate_assert(state, glsl_type_is_array(var->type));

   /*
    * TODO validate some things ir_validate.cpp does (requires more GLSL type
    * support)
    */

   _mesa_hash_table_insert(state->var_defs, var,
                           valid_modes == nir_var_function_temp ?
                           state->impl : NULL);

   state->var = NULL;
}

static void
validate_function_impl(nir_function_impl *impl, validate_state *state)
{
   /* Resize the ssa_srcs set. It's likely that the size of this set will
    * never actually hit the number of SSA defs because we remove sources from
    * the set as we visit them. (It could actually be much larger because
    * each SSA def can be used more than once.) However, growing it now costs
    * us very little (the extra memory is already dwarfed by the SSA defs
    * themselves) and makes collisions much less likely.
    */
   _mesa_set_resize(state->ssa_srcs, impl->ssa_alloc);

   validate_assert(state, impl->function->impl == impl);
   validate_assert(state, impl->cf_node.parent == NULL);

   validate_assert(state, exec_list_is_empty(&impl->end_block->instr_list));
   validate_assert(state, impl->end_block->successors[0] == NULL);
   validate_assert(state, impl->end_block->successors[1] == NULL);

   state->impl = impl;
   state->parent_node = &impl->cf_node;

   exec_list_validate(&impl->locals);
   nir_foreach_function_temp_variable(var, impl) {
      validate_var_decl(var, nir_var_function_temp, state);
   }

   state->regs_found = reralloc(state->mem_ctx, state->regs_found,
                                BITSET_WORD, BITSET_WORDS(impl->reg_alloc));
   memset(state->regs_found, 0, BITSET_WORDS(impl->reg_alloc) *
          sizeof(BITSET_WORD));
   exec_list_validate(&impl->registers);
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      prevalidate_reg_decl(reg, state);
   }

   state->ssa_defs_found = reralloc(state->mem_ctx, state->ssa_defs_found,
                                    BITSET_WORD, BITSET_WORDS(impl->ssa_alloc));
   memset(state->ssa_defs_found, 0, BITSET_WORDS(impl->ssa_alloc) *
          sizeof(BITSET_WORD));
   exec_list_validate(&impl->body);
   foreach_list_typed(nir_cf_node, node, node, &impl->body) {
      validate_cf_node(node, state);
   }

   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      postvalidate_reg_decl(reg, state);
   }

   validate_assert(state, state->ssa_srcs->entries == 0);
   _mesa_set_clear(state->ssa_srcs, NULL);
}

static void
validate_function(nir_function *func, validate_state *state)
{
   if (func->impl != NULL) {
      validate_assert(state, func->impl->function == func);
      validate_function_impl(func->impl, state);
   }
}

static void
init_validate_state(validate_state *state)
{
   state->mem_ctx = ralloc_context(NULL);
   state->regs = _mesa_pointer_hash_table_create(state->mem_ctx);
   state->ssa_srcs = _mesa_pointer_set_create(state->mem_ctx);
   state->ssa_defs_found = NULL;
   state->regs_found = NULL;
   state->var_defs = _mesa_pointer_hash_table_create(state->mem_ctx);
   state->errors = _mesa_pointer_hash_table_create(state->mem_ctx);

   state->loop = NULL;
   state->instr = NULL;
   state->var = NULL;
}

static void
destroy_validate_state(validate_state *state)
{
   ralloc_free(state->mem_ctx);
}

mtx_t fail_dump_mutex = _MTX_INITIALIZER_NP;

static void
dump_errors(validate_state *state, const char *when)
{
   struct hash_table *errors = state->errors;

   /* Lock around dumping so that we get clean dumps in a multi-threaded
    * scenario
    */
   mtx_lock(&fail_dump_mutex);

   if (when) {
      fprintf(stderr, "NIR validation failed %s\n", when);
      fprintf(stderr, "%d errors:\n", _mesa_hash_table_num_entries(errors));
   } else {
      fprintf(stderr, "NIR validation failed with %d errors:\n",
              _mesa_hash_table_num_entries(errors));
   }

   nir_print_shader_annotated(state->shader, stderr, errors);

   if (_mesa_hash_table_num_entries(errors) > 0) {
      fprintf(stderr, "%d additional errors:\n",
              _mesa_hash_table_num_entries(errors));
      hash_table_foreach(errors, entry) {
         fprintf(stderr, "%s\n", (char *)entry->data);
      }
   }

   mtx_unlock(&fail_dump_mutex);

   abort();
}

void
nir_validate_shader(nir_shader *shader, const char *when)
{
   static int should_validate = -1;
   if (should_validate < 0)
      should_validate = env_var_as_boolean("NIR_VALIDATE", true);
   if (!should_validate)
      return;

   validate_state state;
   init_validate_state(&state);

   state.shader = shader;

   nir_variable_mode valid_modes =
      nir_var_shader_in |
      nir_var_shader_out |
      nir_var_shader_temp |
      nir_var_uniform |
      nir_var_mem_ubo |
      nir_var_system_value |
      nir_var_mem_ssbo |
      nir_var_mem_shared |
      nir_var_mem_constant;

   exec_list_validate(&shader->variables);
   nir_foreach_variable_in_shader(var, shader)
      validate_var_decl(var, valid_modes, &state);

   exec_list_validate(&shader->functions);
   foreach_list_typed(nir_function, func, node, &shader->functions) {
      validate_function(func, &state);
   }

   if (_mesa_hash_table_num_entries(state.errors) > 0)
      dump_errors(&state, when);

   destroy_validate_state(&state);
}
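
/* Typical usage, as a sketch (the call site below is hypothetical): passes
 * validate between transformations so a failure points at the pass that
 * broke the IR, e.g.
 *
 *    nir_validate_shader(shader, "after nir_opt_algebraic");
 *
 * Validation runs by default in debug builds and can be disabled at runtime
 * with the environment variable NIR_VALIDATE=0.
 */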

#endif /* NDEBUG */