nir: Add a load_global_constant intrinsic
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

#include "nir.h"
#include "c11/threads.h"
#include <assert.h>

/*
 * This file checks for invalid IR indicating a bug somewhere in the compiler.
 */

/* Since this file is just a pile of asserts, don't bother compiling it if
 * we're not building a debug build.
 */
#ifndef NDEBUG

/*
 * Per-register validation state.
 */

typedef struct {
   /*
    * equivalent to the uses and defs in nir_register, but built up by the
    * validator. At the end, we verify that the sets have the same entries.
    */
   struct set *uses, *if_uses, *defs;
   nir_function_impl *where_defined; /* NULL for global registers */
} reg_validate_state;

typedef struct {
   void *mem_ctx;

   /* map of register -> validation state (struct above) */
   struct hash_table *regs;

   /* the current shader being validated */
   nir_shader *shader;

   /* the current instruction being validated */
   nir_instr *instr;

   /* the current variable being validated */
   nir_variable *var;

   /* the current basic block being validated */
   nir_block *block;

   /* the current if statement being validated */
   nir_if *if_stmt;

   /* the current loop being visited */
   nir_loop *loop;

   /* the parent of the current cf node being visited */
   nir_cf_node *parent_node;

   /* the current function implementation being validated */
   nir_function_impl *impl;

   /* Set of seen SSA sources */
   struct set *ssa_srcs;

   /* bitset of ssa definitions we have found; used to check uniqueness */
   BITSET_WORD *ssa_defs_found;

   /* bitset of registers we have currently found; used to check uniqueness */
   BITSET_WORD *regs_found;

   /* map of variable -> function implementation where it is defined or NULL
    * if it is a global variable
    */
   struct hash_table *var_defs;

   /* map of instruction/var/etc to failed assert string */
   struct hash_table *errors;
} validate_state;

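/* Record a validation failure.  Errors are keyed by the IR object currently
 * being validated (instruction or variable) so that dump_errors() can hand
 * the table to nir_print_shader_annotated() and print each message next to
 * the offending piece of IR.
 */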
static void
log_error(validate_state *state, const char *cond, const char *file, int line)
{
   const void *obj;

   if (state->instr)
      obj = state->instr;
   else if (state->var)
      obj = state->var;
   else
      obj = cond;

   char *msg = ralloc_asprintf(state->errors, "error: %s (%s:%d)",
                               cond, file, line);

   _mesa_hash_table_insert(state->errors, obj, msg);
}

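/* Unlike a plain assert(), a failed validate_assert() does not abort
 * immediately; it logs the failure and lets validation continue so that a
 * single run can report every problem in the shader at once.
 */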
#define validate_assert(state, cond) do {             \
      if (!(cond))                                    \
         log_error(state, #cond, __FILE__, __LINE__); \
   } while (0)

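/* Throughout this file, bit_sizes is a bitmask of acceptable bit sizes
 * (powers of two OR'd together) and num_components is the required component
 * count; either may be 0 to mean "don't check".
 */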
static void validate_src(nir_src *src, validate_state *state,
                         unsigned bit_sizes, unsigned num_components);

static void
validate_num_components(validate_state *state, unsigned num_components)
{
   validate_assert(state, nir_num_components_valid(num_components));
}

static void
validate_reg_src(nir_src *src, validate_state *state,
                 unsigned bit_sizes, unsigned num_components)
{
   validate_assert(state, src->reg.reg != NULL);

   struct hash_entry *entry;
   entry = _mesa_hash_table_search(state->regs, src->reg.reg);
   validate_assert(state, entry);

   reg_validate_state *reg_state = (reg_validate_state *) entry->data;

   if (state->instr) {
      _mesa_set_add(reg_state->uses, src);
   } else {
      validate_assert(state, state->if_stmt);
      _mesa_set_add(reg_state->if_uses, src);
   }

   validate_assert(state, reg_state->where_defined == state->impl &&
                   "using a register declared in a different function");

   if (bit_sizes)
      validate_assert(state, src->reg.reg->bit_size & bit_sizes);
   if (num_components)
      validate_assert(state, src->reg.reg->num_components == num_components);

   validate_assert(state, (src->reg.reg->num_array_elems == 0 ||
                           src->reg.base_offset < src->reg.reg->num_array_elems) &&
                   "definitely out-of-bounds array access");

   if (src->reg.indirect) {
      validate_assert(state, src->reg.reg->num_array_elems != 0);
      validate_assert(state, (src->reg.indirect->is_ssa ||
                              src->reg.indirect->reg.indirect == NULL) &&
                      "only one level of indirection allowed");
      validate_src(src->reg.indirect, state, 32, 1);
   }
}

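/* Both instruction uses and if-condition uses of an SSA value land in the
 * single state->ssa_srcs set.  To keep the two kinds of entry distinct,
 * if-uses are tagged by setting bit 0 of the nir_src pointer, which pointer
 * alignment otherwise leaves clear.
 */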
#define SET_PTR_BIT(ptr, bit) \
   (void *)(((uintptr_t)(ptr)) | (((uintptr_t)1) << bit))

static void
validate_ssa_src(nir_src *src, validate_state *state,
                 unsigned bit_sizes, unsigned num_components)
{
   validate_assert(state, src->ssa != NULL);

   /* As we walk SSA defs, we add every use to this set.  We need to make
    * sure our use is seen in a use list.
    */
   struct set_entry *entry;
   if (state->instr) {
      entry = _mesa_set_search(state->ssa_srcs, src);
   } else {
      entry = _mesa_set_search(state->ssa_srcs, SET_PTR_BIT(src, 0));
   }
   validate_assert(state, entry);

   /* This will let us prove that we've seen all the sources */
   if (entry)
      _mesa_set_remove(state->ssa_srcs, entry);

   if (bit_sizes)
      validate_assert(state, src->ssa->bit_size & bit_sizes);
   if (num_components)
      validate_assert(state, src->ssa->num_components == num_components);

   /* TODO validate that the use is dominated by the definition */
}

static void
validate_src(nir_src *src, validate_state *state,
             unsigned bit_sizes, unsigned num_components)
{
   if (state->instr)
      validate_assert(state, src->parent_instr == state->instr);
   else
      validate_assert(state, src->parent_if == state->if_stmt);

   if (src->is_ssa)
      validate_ssa_src(src, state, bit_sizes, num_components);
   else
      validate_reg_src(src, state, bit_sizes, num_components);
}

static void
validate_alu_src(nir_alu_instr *instr, unsigned index, validate_state *state)
{
   nir_alu_src *src = &instr->src[index];

   if (instr->op == nir_op_mov)
      assert(!src->abs && !src->negate);

   unsigned num_components = nir_src_num_components(src->src);
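   /* A swizzle entry may point past the end of the source as long as the
    * corresponding channel is never actually read by the instruction.
    */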
   for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
      validate_assert(state, src->swizzle[i] < NIR_MAX_VEC_COMPONENTS);

      if (nir_alu_instr_channel_used(instr, index, i))
         validate_assert(state, src->swizzle[i] < num_components);
   }

   validate_src(&src->src, state, 0, 0);
}

static void
validate_reg_dest(nir_reg_dest *dest, validate_state *state,
                  unsigned bit_sizes, unsigned num_components)
{
   validate_assert(state, dest->reg != NULL);

   validate_assert(state, dest->parent_instr == state->instr);

   struct hash_entry *entry2;
   entry2 = _mesa_hash_table_search(state->regs, dest->reg);

   validate_assert(state, entry2);

   reg_validate_state *reg_state = (reg_validate_state *) entry2->data;
   _mesa_set_add(reg_state->defs, dest);

   validate_assert(state, reg_state->where_defined == state->impl &&
                   "writing to a register declared in a different function");

   if (bit_sizes)
      validate_assert(state, dest->reg->bit_size & bit_sizes);
   if (num_components)
      validate_assert(state, dest->reg->num_components == num_components);

   validate_assert(state, (dest->reg->num_array_elems == 0 ||
                           dest->base_offset < dest->reg->num_array_elems) &&
                   "definitely out-of-bounds array access");

   if (dest->indirect) {
      validate_assert(state, dest->reg->num_array_elems != 0);
      validate_assert(state, (dest->indirect->is_ssa ||
                              dest->indirect->reg.indirect == NULL) &&
                      "only one level of indirection allowed");
      validate_src(dest->indirect, state, 32, 1);
   }
}

static void
validate_ssa_def(nir_ssa_def *def, validate_state *state)
{
   validate_assert(state, def->index < state->impl->ssa_alloc);
   validate_assert(state, !BITSET_TEST(state->ssa_defs_found, def->index));
   BITSET_SET(state->ssa_defs_found, def->index);

   validate_assert(state, def->parent_instr == state->instr);
   validate_num_components(state, def->num_components);

   list_validate(&def->uses);
   nir_foreach_use(src, def) {
      validate_assert(state, src->is_ssa);
      validate_assert(state, src->ssa == def);
      bool already_seen = false;
      _mesa_set_search_and_add(state->ssa_srcs, src, &already_seen);
      /* A nir_src should only appear once and only in one SSA def use list */
      validate_assert(state, !already_seen);
   }

   list_validate(&def->if_uses);
   nir_foreach_if_use(src, def) {
      validate_assert(state, src->is_ssa);
      validate_assert(state, src->ssa == def);
      bool already_seen = false;
      _mesa_set_search_and_add(state->ssa_srcs, SET_PTR_BIT(src, 0),
                               &already_seen);
      /* A nir_src should only appear once and only in one SSA def use list */
      validate_assert(state, !already_seen);
   }
}

static void
validate_dest(nir_dest *dest, validate_state *state,
              unsigned bit_sizes, unsigned num_components)
{
   if (dest->is_ssa) {
      if (bit_sizes)
         validate_assert(state, dest->ssa.bit_size & bit_sizes);
      if (num_components)
         validate_assert(state, dest->ssa.num_components == num_components);
      validate_ssa_def(&dest->ssa, state);
   } else {
      validate_reg_dest(&dest->reg, state, bit_sizes, num_components);
   }
}

static void
validate_alu_dest(nir_alu_instr *instr, validate_state *state)
{
   nir_alu_dest *dest = &instr->dest;

   if (instr->op == nir_op_mov)
      assert(!dest->saturate);

   unsigned dest_size = nir_dest_num_components(dest->dest);
   /*
    * validate that the instruction doesn't write to components not in the
    * register/SSA value
    */
   validate_assert(state, !(dest->write_mask & ~((1 << dest_size) - 1)));

   /* validate that saturate is only ever used on instructions with
    * destinations of type float
    */
   nir_alu_instr *alu = nir_instr_as_alu(state->instr);
   validate_assert(state,
                   (nir_alu_type_get_base_type(nir_op_infos[alu->op].output_type) ==
                    nir_type_float) ||
                   !dest->saturate);

   validate_dest(&dest->dest, state, 0, 0);
}

static void
validate_alu_instr(nir_alu_instr *instr, validate_state *state)
{
   validate_assert(state, instr->op < nir_num_opcodes);

   unsigned instr_bit_size = 0;
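   /* For opcodes with unsized (bit_size 0) source types, all such sources
    * must agree on a single bit size; the first one we see establishes
    * instr_bit_size and the rest are checked against it.
    */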
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      nir_alu_type src_type = nir_op_infos[instr->op].input_types[i];
      unsigned src_bit_size = nir_src_bit_size(instr->src[i].src);
      if (nir_alu_type_get_type_size(src_type)) {
         validate_assert(state, src_bit_size == nir_alu_type_get_type_size(src_type));
      } else if (instr_bit_size) {
         validate_assert(state, src_bit_size == instr_bit_size);
      } else {
         instr_bit_size = src_bit_size;
      }

      if (nir_alu_type_get_base_type(src_type) == nir_type_float) {
         /* 8-bit float isn't a thing */
         validate_assert(state, src_bit_size == 16 || src_bit_size == 32 ||
                         src_bit_size == 64);
      }

      validate_alu_src(instr, i, state);
   }

   nir_alu_type dest_type = nir_op_infos[instr->op].output_type;
   unsigned dest_bit_size = nir_dest_bit_size(instr->dest.dest);
   if (nir_alu_type_get_type_size(dest_type)) {
      validate_assert(state, dest_bit_size == nir_alu_type_get_type_size(dest_type));
   } else if (instr_bit_size) {
      validate_assert(state, dest_bit_size == instr_bit_size);
   } else {
      /* The only unsized thing is the destination so it's vacuously valid */
   }

   if (nir_alu_type_get_base_type(dest_type) == nir_type_float) {
      /* 8-bit float isn't a thing */
      validate_assert(state, dest_bit_size == 16 || dest_bit_size == 32 ||
                      dest_bit_size == 64);
   }

   validate_alu_dest(instr, state);
}

static void
validate_var_use(nir_variable *var, validate_state *state)
{
   struct hash_entry *entry = _mesa_hash_table_search(state->var_defs, var);
   validate_assert(state, entry);
   if (entry && var->data.mode == nir_var_function_temp)
      validate_assert(state, (nir_function_impl *) entry->data == state->impl);
}

static void
validate_deref_instr(nir_deref_instr *instr, validate_state *state)
{
   if (instr->deref_type == nir_deref_type_var) {
      /* Variable dereferences are stupid simple. */
      validate_assert(state, instr->mode == instr->var->data.mode);
      validate_assert(state, instr->type == instr->var->type);
      validate_var_use(instr->var, state);
   } else if (instr->deref_type == nir_deref_type_cast) {
      /* For cast, we simply have to trust the instruction.  It's up to
       * lowering passes and front/back-ends to make them sane.
       */
      validate_src(&instr->parent, state, 0, 0);

      /* We just validate that the type and mode are there */
      validate_assert(state, instr->mode);
      validate_assert(state, instr->type);
   } else {
      /* We require the parent to be SSA.  This may be lifted in the future */
      validate_assert(state, instr->parent.is_ssa);

      /* The parent pointer value must have the same number of components
       * as the destination.
       */
      validate_src(&instr->parent, state, nir_dest_bit_size(instr->dest),
                   nir_dest_num_components(instr->dest));

      nir_instr *parent_instr = instr->parent.ssa->parent_instr;

      /* The parent must come from another deref instruction */
      validate_assert(state, parent_instr->type == nir_instr_type_deref);

      nir_deref_instr *parent = nir_instr_as_deref(parent_instr);

      validate_assert(state, instr->mode == parent->mode);

      switch (instr->deref_type) {
      case nir_deref_type_struct:
         validate_assert(state, glsl_type_is_struct_or_ifc(parent->type));
         validate_assert(state,
                         instr->strct.index < glsl_get_length(parent->type));
         validate_assert(state, instr->type ==
                         glsl_get_struct_field(parent->type, instr->strct.index));
         break;

      case nir_deref_type_array:
      case nir_deref_type_array_wildcard:
         if (instr->mode == nir_var_mem_ubo ||
             instr->mode == nir_var_mem_ssbo ||
             instr->mode == nir_var_mem_shared ||
             instr->mode == nir_var_mem_global) {
            /* Shared variables and UBO/SSBOs have a bit more relaxed rules
             * because we need to be able to handle array derefs on vectors.
             * Fortunately, nir_lower_io handles these just fine.
             */
            validate_assert(state, glsl_type_is_array(parent->type) ||
                            glsl_type_is_matrix(parent->type) ||
                            glsl_type_is_vector(parent->type));
         } else {
            /* Most of NIR cannot handle array derefs on vectors */
            validate_assert(state, glsl_type_is_array(parent->type) ||
                            glsl_type_is_matrix(parent->type));
         }
         validate_assert(state,
                         instr->type == glsl_get_array_element(parent->type));

         if (instr->deref_type == nir_deref_type_array) {
            validate_src(&instr->arr.index, state,
                         nir_dest_bit_size(instr->dest), 1);
         }
         break;

      case nir_deref_type_ptr_as_array:
         /* ptr_as_array derefs must have a parent that is either an array,
          * ptr_as_array, or cast.  If the parent is a cast, we get the stride
          * information (if any) from the cast deref.
          */
         validate_assert(state,
                         parent->deref_type == nir_deref_type_array ||
                         parent->deref_type == nir_deref_type_ptr_as_array ||
                         parent->deref_type == nir_deref_type_cast);
         validate_src(&instr->arr.index, state,
                      nir_dest_bit_size(instr->dest), 1);
         break;

      default:
         unreachable("Invalid deref instruction type");
      }
   }

   /* We intentionally don't validate the size of the destination because we
    * want to let other compiler components such as SPIR-V decide how big
    * pointers should be.
    */
   validate_dest(&instr->dest, state, 0, 0);

   /* Deref instructions as if conditions don't make sense because if
    * conditions expect well-formed Booleans.  If you want to compare with
    * NULL, an explicit comparison operation should be used.
    */
   validate_assert(state, list_is_empty(&instr->dest.ssa.if_uses));

   /* Only certain modes can be used as sources for phi instructions. */
   nir_foreach_use(use, &instr->dest.ssa) {
      if (use->parent_instr->type == nir_instr_type_phi) {
         validate_assert(state, instr->mode == nir_var_mem_ubo ||
                         instr->mode == nir_var_mem_ssbo ||
                         instr->mode == nir_var_mem_shared ||
                         instr->mode == nir_var_mem_global ||
                         instr->mode == nir_var_mem_constant);
      }
   }
}

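/* An intrinsic is "vectorized" if its destination or any of its sources has
 * a variable (0) component count in nir_intrinsic_infos, in which case
 * instr->num_components supplies the size.  Fixed-size intrinsics must have
 * num_components == 0, which validate_intrinsic_instr() checks below.
 */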
static bool
vectorized_intrinsic(nir_intrinsic_instr *intr)
{
   const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];

   if (info->dest_components == 0)
      return true;

   for (unsigned i = 0; i < info->num_srcs; i++)
      if (info->src_components[i] == 0)
         return true;

   return false;
}

static void
validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state)
{
   unsigned dest_bit_size = 0;
   unsigned src_bit_sizes[NIR_INTRINSIC_MAX_INPUTS] = { 0, };
   switch (instr->intrinsic) {
   case nir_intrinsic_load_param: {
      unsigned param_idx = nir_intrinsic_param_idx(instr);
      validate_assert(state, param_idx < state->impl->function->num_params);
      nir_parameter *param = &state->impl->function->params[param_idx];
      validate_assert(state, instr->num_components == param->num_components);
      dest_bit_size = param->bit_size;
      break;
   }

   case nir_intrinsic_load_deref: {
      nir_deref_instr *src = nir_src_as_deref(instr->src[0]);
      assert(src);
      validate_assert(state, glsl_type_is_vector_or_scalar(src->type) ||
                      (src->mode == nir_var_uniform &&
                       glsl_get_base_type(src->type) == GLSL_TYPE_SUBROUTINE));
      validate_assert(state, instr->num_components ==
                      glsl_get_vector_elements(src->type));
      dest_bit_size = glsl_get_bit_size(src->type);
      /* Also allow 32-bit boolean load operations */
      if (glsl_type_is_boolean(src->type))
         dest_bit_size |= 32;
      break;
   }

   case nir_intrinsic_store_deref: {
      nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
      assert(dst);
      validate_assert(state, glsl_type_is_vector_or_scalar(dst->type));
      validate_assert(state, instr->num_components ==
                      glsl_get_vector_elements(dst->type));
      src_bit_sizes[1] = glsl_get_bit_size(dst->type);
      /* Also allow 32-bit boolean store operations */
      if (glsl_type_is_boolean(dst->type))
         src_bit_sizes[1] |= 32;
      validate_assert(state, (dst->mode & (nir_var_shader_in |
                                           nir_var_uniform)) == 0);
      validate_assert(state, (nir_intrinsic_write_mask(instr) &
                              ~((1 << instr->num_components) - 1)) == 0);
      break;
   }

   case nir_intrinsic_copy_deref: {
      nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
      nir_deref_instr *src = nir_src_as_deref(instr->src[1]);
      validate_assert(state, glsl_get_bare_type(dst->type) ==
                      glsl_get_bare_type(src->type));
      validate_assert(state, (dst->mode & (nir_var_shader_in |
                                           nir_var_uniform)) == 0);
      break;
   }

   case nir_intrinsic_load_ubo_vec4: {
      int bit_size = nir_dest_bit_size(instr->dest);
      validate_assert(state, bit_size >= 8);
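      /* The read must fit within a single 16-byte vec4 slot. */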
      validate_assert(state, (nir_intrinsic_component(instr) +
                              instr->num_components) * (bit_size / 8) <= 16);
      break;
   }

   case nir_intrinsic_load_ubo:
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_load_shared:
   case nir_intrinsic_load_global:
   case nir_intrinsic_load_global_constant:
   case nir_intrinsic_load_scratch:
   case nir_intrinsic_load_constant:
      /* These memory load operations must have alignments */
      validate_assert(state,
                      util_is_power_of_two_nonzero(nir_intrinsic_align_mul(instr)));
      validate_assert(state, nir_intrinsic_align_offset(instr) <
                      nir_intrinsic_align_mul(instr));
      /* Fall through */

   case nir_intrinsic_load_uniform:
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_interpolated_input:
   case nir_intrinsic_load_output:
   case nir_intrinsic_load_per_vertex_output:
   case nir_intrinsic_load_push_constant:
      /* All memory load operations must load at least a byte */
      validate_assert(state, nir_dest_bit_size(instr->dest) >= 8);
      break;

   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_store_shared:
   case nir_intrinsic_store_global:
   case nir_intrinsic_store_scratch:
      /* These memory store operations must also have alignments */
      validate_assert(state,
                      util_is_power_of_two_nonzero(nir_intrinsic_align_mul(instr)));
      validate_assert(state, nir_intrinsic_align_offset(instr) <
                      nir_intrinsic_align_mul(instr));
      /* Fall through */

   case nir_intrinsic_store_output:
   case nir_intrinsic_store_per_vertex_output:
      /* All memory store operations must store at least a byte */
      validate_assert(state, nir_src_bit_size(instr->src[0]) >= 8);
      break;

   default:
      break;
   }

   if (instr->num_components > 0)
      validate_num_components(state, instr->num_components);

   const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
   unsigned num_srcs = info->num_srcs;
   for (unsigned i = 0; i < num_srcs; i++) {
      unsigned components_read = nir_intrinsic_src_components(instr, i);

      validate_num_components(state, components_read);

      validate_src(&instr->src[i], state, src_bit_sizes[i], components_read);
   }

   if (nir_intrinsic_infos[instr->intrinsic].has_dest) {
      unsigned components_written = nir_intrinsic_dest_components(instr);
      unsigned bit_sizes = nir_intrinsic_infos[instr->intrinsic].dest_bit_sizes;

      validate_num_components(state, components_written);
      if (dest_bit_size && bit_sizes)
         validate_assert(state, dest_bit_size & bit_sizes);
      else
         dest_bit_size = dest_bit_size ? dest_bit_size : bit_sizes;

      validate_dest(&instr->dest, state, dest_bit_size, components_written);
   }

   if (!vectorized_intrinsic(instr))
      validate_assert(state, instr->num_components == 0);
}

static void
validate_tex_instr(nir_tex_instr *instr, validate_state *state)
{
   bool src_type_seen[nir_num_tex_src_types];
   for (unsigned i = 0; i < nir_num_tex_src_types; i++)
      src_type_seen[i] = false;

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      validate_assert(state, !src_type_seen[instr->src[i].src_type]);
      src_type_seen[instr->src[i].src_type] = true;
      validate_src(&instr->src[i].src, state,
                   0, nir_tex_instr_src_size(instr, i));

      switch (instr->src[i].src_type) {
      case nir_tex_src_texture_deref:
      case nir_tex_src_sampler_deref:
         validate_assert(state, instr->src[i].src.is_ssa);
         validate_assert(state,
                         instr->src[i].src.ssa->parent_instr->type == nir_instr_type_deref);
         break;
      default:
         break;
      }
   }

   if (nir_tex_instr_has_explicit_tg4_offsets(instr)) {
      validate_assert(state, instr->op == nir_texop_tg4);
      validate_assert(state, !src_type_seen[nir_tex_src_offset]);
   }

   validate_dest(&instr->dest, state, 0, nir_tex_instr_dest_size(instr));
}

static void
validate_call_instr(nir_call_instr *instr, validate_state *state)
{
   validate_assert(state, instr->num_params == instr->callee->num_params);

   for (unsigned i = 0; i < instr->num_params; i++) {
      validate_src(&instr->params[i], state,
                   instr->callee->params[i].bit_size,
                   instr->callee->params[i].num_components);
   }
}

static void
validate_const_value(nir_const_value *val, unsigned bit_size,
                     validate_state *state)
{
   /* In order for block copies to work properly for things like instruction
    * comparisons and [de]serialization, we require the unused bits of the
    * nir_const_value to be zero.
    */
   nir_const_value cmp_val;
   memset(&cmp_val, 0, sizeof(cmp_val));
   switch (bit_size) {
   case 1:
      cmp_val.b = val->b;
      break;
   case 8:
      cmp_val.u8 = val->u8;
      break;
   case 16:
      cmp_val.u16 = val->u16;
      break;
   case 32:
      cmp_val.u32 = val->u32;
      break;
   case 64:
      cmp_val.u64 = val->u64;
      break;
   default:
      validate_assert(state, !"Invalid load_const bit size");
   }
   validate_assert(state, memcmp(val, &cmp_val, sizeof(cmp_val)) == 0);
}

static void
validate_load_const_instr(nir_load_const_instr *instr, validate_state *state)
{
   validate_ssa_def(&instr->def, state);

   for (unsigned i = 0; i < instr->def.num_components; i++)
      validate_const_value(&instr->value[i], instr->def.bit_size, state);
}

static void
validate_ssa_undef_instr(nir_ssa_undef_instr *instr, validate_state *state)
{
   validate_ssa_def(&instr->def, state);
}

static void
validate_phi_instr(nir_phi_instr *instr, validate_state *state)
{
   /*
    * don't validate the sources until we get to them from their predecessor
    * basic blocks, to avoid validating an SSA use before its definition.
    */

   validate_dest(&instr->dest, state, 0, 0);

   exec_list_validate(&instr->srcs);
   validate_assert(state, exec_list_length(&instr->srcs) ==
                   state->block->predecessors->entries);
}

static void
validate_jump_instr(nir_jump_instr *instr, validate_state *state)
{
   nir_block *block = state->block;
   validate_assert(state, &instr->instr == nir_block_last_instr(block));

   switch (instr->type) {
   case nir_jump_return:
      validate_assert(state, block->successors[0] == state->impl->end_block);
      validate_assert(state, block->successors[1] == NULL);
      validate_assert(state, instr->target == NULL);
      validate_assert(state, instr->else_target == NULL);
      break;

   case nir_jump_break:
      validate_assert(state, state->impl->structured);
      validate_assert(state, state->loop != NULL);
      if (state->loop) {
         nir_block *after =
            nir_cf_node_as_block(nir_cf_node_next(&state->loop->cf_node));
         validate_assert(state, block->successors[0] == after);
      }
      validate_assert(state, block->successors[1] == NULL);
      validate_assert(state, instr->target == NULL);
      validate_assert(state, instr->else_target == NULL);
      break;

   case nir_jump_continue:
      validate_assert(state, state->impl->structured);
      validate_assert(state, state->loop != NULL);
      if (state->loop) {
         nir_block *first = nir_loop_first_block(state->loop);
         validate_assert(state, block->successors[0] == first);
      }
      validate_assert(state, block->successors[1] == NULL);
      validate_assert(state, instr->target == NULL);
      validate_assert(state, instr->else_target == NULL);
      break;

   case nir_jump_goto:
      validate_assert(state, !state->impl->structured);
      validate_assert(state, instr->target == block->successors[0]);
      validate_assert(state, instr->target != NULL);
      validate_assert(state, instr->else_target == NULL);
      break;

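   /* For goto_if, the conditional target lives in successors[1] and the
    * fallthrough (else) target in successors[0].
    */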
   case nir_jump_goto_if:
      validate_assert(state, !state->impl->structured);
      validate_assert(state, instr->target == block->successors[1]);
      validate_assert(state, instr->else_target == block->successors[0]);
      validate_src(&instr->condition, state, 0, 1);
      validate_assert(state, instr->target != NULL);
      validate_assert(state, instr->else_target != NULL);
      break;

   default:
      validate_assert(state, !"Invalid jump instruction type");
      break;
   }
}

static void
validate_instr(nir_instr *instr, validate_state *state)
{
   validate_assert(state, instr->block == state->block);

   state->instr = instr;

   switch (instr->type) {
   case nir_instr_type_alu:
      validate_alu_instr(nir_instr_as_alu(instr), state);
      break;

   case nir_instr_type_deref:
      validate_deref_instr(nir_instr_as_deref(instr), state);
      break;

   case nir_instr_type_call:
      validate_call_instr(nir_instr_as_call(instr), state);
      break;

   case nir_instr_type_intrinsic:
      validate_intrinsic_instr(nir_instr_as_intrinsic(instr), state);
      break;

   case nir_instr_type_tex:
      validate_tex_instr(nir_instr_as_tex(instr), state);
      break;

   case nir_instr_type_load_const:
      validate_load_const_instr(nir_instr_as_load_const(instr), state);
      break;

   case nir_instr_type_phi:
      validate_phi_instr(nir_instr_as_phi(instr), state);
      break;

   case nir_instr_type_ssa_undef:
      validate_ssa_undef_instr(nir_instr_as_ssa_undef(instr), state);
      break;

   case nir_instr_type_jump:
      validate_jump_instr(nir_instr_as_jump(instr), state);
      break;

   default:
      validate_assert(state, !"Invalid instruction type");
      break;
   }

   state->instr = NULL;
}

static void
validate_phi_src(nir_phi_instr *instr, nir_block *pred, validate_state *state)
{
   state->instr = &instr->instr;

   validate_assert(state, instr->dest.is_ssa);

   exec_list_validate(&instr->srcs);
   nir_foreach_phi_src(src, instr) {
      if (src->pred == pred) {
         validate_assert(state, src->src.is_ssa);
         validate_src(&src->src, state, instr->dest.ssa.bit_size,
                      instr->dest.ssa.num_components);
         state->instr = NULL;
         return;
      }
   }
   validate_assert(state, !"Phi does not have a source corresponding to one "
                          "of its predecessor blocks");
}

static void
validate_phi_srcs(nir_block *block, nir_block *succ, validate_state *state)
{
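   /* Phis are required to be the first instructions in a block (checked in
    * validate_block()), so we can stop at the first non-phi instruction.
    */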
   nir_foreach_instr(instr, succ) {
      if (instr->type != nir_instr_type_phi)
         break;

      validate_phi_src(nir_instr_as_phi(instr), block, state);
   }
}

static void validate_cf_node(nir_cf_node *node, validate_state *state);

static void
validate_block(nir_block *block, validate_state *state)
{
   validate_assert(state, block->cf_node.parent == state->parent_node);

   state->block = block;

   exec_list_validate(&block->instr_list);
   nir_foreach_instr(instr, block) {
      if (instr->type == nir_instr_type_phi) {
         validate_assert(state, instr == nir_block_first_instr(block) ||
                         nir_instr_prev(instr)->type == nir_instr_type_phi);
      }

      validate_instr(instr, state);
   }

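   /* Every block has at least one successor (possibly the function's end
    * block), and the two successors must be distinct.
    */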
   validate_assert(state, block->successors[0] != NULL);
   validate_assert(state, block->successors[0] != block->successors[1]);

   for (unsigned i = 0; i < 2; i++) {
      if (block->successors[i] != NULL) {
         struct set_entry *entry =
            _mesa_set_search(block->successors[i]->predecessors, block);
         validate_assert(state, entry);

         validate_phi_srcs(block, block->successors[i], state);
      }
   }

   set_foreach(block->predecessors, entry) {
      const nir_block *pred = entry->key;
      validate_assert(state, pred->successors[0] == block ||
                      pred->successors[1] == block);
   }

   if (!state->impl->structured) {
      validate_assert(state, nir_block_ends_in_jump(block));
   } else if (!nir_block_ends_in_jump(block)) {
      nir_cf_node *next = nir_cf_node_next(&block->cf_node);
      if (next == NULL) {
         switch (state->parent_node->type) {
         case nir_cf_node_loop: {
            nir_block *first = nir_loop_first_block(state->loop);
            validate_assert(state, block->successors[0] == first);
            /* due to the hack for infinite loops, block->successors[1] may
             * point to the block after the loop.
             */
            break;
         }

         case nir_cf_node_if: {
            nir_block *after =
               nir_cf_node_as_block(nir_cf_node_next(state->parent_node));
            validate_assert(state, block->successors[0] == after);
            validate_assert(state, block->successors[1] == NULL);
            break;
         }

         case nir_cf_node_function:
            validate_assert(state, block->successors[0] == state->impl->end_block);
            validate_assert(state, block->successors[1] == NULL);
            break;

         default:
            unreachable("unknown control flow node type");
         }
      } else {
         if (next->type == nir_cf_node_if) {
            nir_if *if_stmt = nir_cf_node_as_if(next);
            validate_assert(state, block->successors[0] ==
                            nir_if_first_then_block(if_stmt));
            validate_assert(state, block->successors[1] ==
                            nir_if_first_else_block(if_stmt));
         } else if (next->type == nir_cf_node_loop) {
            nir_loop *loop = nir_cf_node_as_loop(next);
            validate_assert(state, block->successors[0] ==
                            nir_loop_first_block(loop));
            validate_assert(state, block->successors[1] == NULL);
         } else {
            validate_assert(state,
                            !"Structured NIR cannot have consecutive blocks");
         }
      }
   }
}

static void
validate_if(nir_if *if_stmt, validate_state *state)
{
   validate_assert(state, state->impl->structured);

   state->if_stmt = if_stmt;

   validate_assert(state, !exec_node_is_head_sentinel(if_stmt->cf_node.node.prev));
   nir_cf_node *prev_node = nir_cf_node_prev(&if_stmt->cf_node);
   validate_assert(state, prev_node->type == nir_cf_node_block);

   validate_assert(state, !exec_node_is_tail_sentinel(if_stmt->cf_node.node.next));
   nir_cf_node *next_node = nir_cf_node_next(&if_stmt->cf_node);
   validate_assert(state, next_node->type == nir_cf_node_block);

   validate_src(&if_stmt->condition, state, 0, 1);

   validate_assert(state, !exec_list_is_empty(&if_stmt->then_list));
   validate_assert(state, !exec_list_is_empty(&if_stmt->else_list));

   nir_cf_node *old_parent = state->parent_node;
   state->parent_node = &if_stmt->cf_node;

   exec_list_validate(&if_stmt->then_list);
   foreach_list_typed(nir_cf_node, cf_node, node, &if_stmt->then_list) {
      validate_cf_node(cf_node, state);
   }

   exec_list_validate(&if_stmt->else_list);
   foreach_list_typed(nir_cf_node, cf_node, node, &if_stmt->else_list) {
      validate_cf_node(cf_node, state);
   }

   state->parent_node = old_parent;
   state->if_stmt = NULL;
}

static void
validate_loop(nir_loop *loop, validate_state *state)
{
   validate_assert(state, state->impl->structured);

   validate_assert(state, !exec_node_is_head_sentinel(loop->cf_node.node.prev));
   nir_cf_node *prev_node = nir_cf_node_prev(&loop->cf_node);
   validate_assert(state, prev_node->type == nir_cf_node_block);

   validate_assert(state, !exec_node_is_tail_sentinel(loop->cf_node.node.next));
   nir_cf_node *next_node = nir_cf_node_next(&loop->cf_node);
   validate_assert(state, next_node->type == nir_cf_node_block);

   validate_assert(state, !exec_list_is_empty(&loop->body));

   nir_cf_node *old_parent = state->parent_node;
   state->parent_node = &loop->cf_node;
   nir_loop *old_loop = state->loop;
   state->loop = loop;

   exec_list_validate(&loop->body);
   foreach_list_typed(nir_cf_node, cf_node, node, &loop->body) {
      validate_cf_node(cf_node, state);
   }

   state->parent_node = old_parent;
   state->loop = old_loop;
}

static void
validate_cf_node(nir_cf_node *node, validate_state *state)
{
   validate_assert(state, node->parent == state->parent_node);

   switch (node->type) {
   case nir_cf_node_block:
      validate_block(nir_cf_node_as_block(node), state);
      break;

   case nir_cf_node_if:
      validate_if(nir_cf_node_as_if(node), state);
      break;

   case nir_cf_node_loop:
      validate_loop(nir_cf_node_as_loop(node), state);
      break;

   default:
      unreachable("Invalid CF node type");
   }
}

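/* Register validation happens in two passes: prevalidate_reg_decl() creates
 * empty use/def sets for each register before the body is walked,
 * validate_reg_src() and validate_reg_dest() populate them as uses and defs
 * are encountered, and postvalidate_reg_decl() then checks that the
 * collected sets exactly match the register's recorded use/def lists.
 */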
static void
prevalidate_reg_decl(nir_register *reg, validate_state *state)
{
   validate_assert(state, reg->index < state->impl->reg_alloc);
   validate_assert(state, !BITSET_TEST(state->regs_found, reg->index));
   validate_num_components(state, reg->num_components);
   BITSET_SET(state->regs_found, reg->index);

   list_validate(&reg->uses);
   list_validate(&reg->defs);
   list_validate(&reg->if_uses);

   reg_validate_state *reg_state = ralloc(state->regs, reg_validate_state);
   reg_state->uses = _mesa_pointer_set_create(reg_state);
   reg_state->if_uses = _mesa_pointer_set_create(reg_state);
   reg_state->defs = _mesa_pointer_set_create(reg_state);

   reg_state->where_defined = state->impl;

   _mesa_hash_table_insert(state->regs, reg, reg_state);
}

static void
postvalidate_reg_decl(nir_register *reg, validate_state *state)
{
   struct hash_entry *entry = _mesa_hash_table_search(state->regs, reg);

   assume(entry);
   reg_validate_state *reg_state = (reg_validate_state *) entry->data;

   nir_foreach_use(src, reg) {
      struct set_entry *entry = _mesa_set_search(reg_state->uses, src);
      validate_assert(state, entry);
      _mesa_set_remove(reg_state->uses, entry);
   }
   validate_assert(state, reg_state->uses->entries == 0);

   nir_foreach_if_use(src, reg) {
      struct set_entry *entry = _mesa_set_search(reg_state->if_uses, src);
      validate_assert(state, entry);
      _mesa_set_remove(reg_state->if_uses, entry);
   }
   validate_assert(state, reg_state->if_uses->entries == 0);

   nir_foreach_def(src, reg) {
      struct set_entry *entry = _mesa_set_search(reg_state->defs, src);
      validate_assert(state, entry);
      _mesa_set_remove(reg_state->defs, entry);
   }
   validate_assert(state, reg_state->defs->entries == 0);
}

static void
validate_var_decl(nir_variable *var, nir_variable_mode valid_modes,
                  validate_state *state)
{
   state->var = var;

   /* Must have exactly one mode set */
   validate_assert(state, util_is_power_of_two_nonzero(var->data.mode));
   validate_assert(state, var->data.mode & valid_modes);

   if (var->data.compact) {
      /* The "compact" flag is only valid on arrays of scalars. */
      assert(glsl_type_is_array(var->type));

      const struct glsl_type *type = glsl_get_array_element(var->type);
      if (nir_is_per_vertex_io(var, state->shader->info.stage)) {
         assert(glsl_type_is_array(type));
         assert(glsl_type_is_scalar(glsl_get_array_element(type)));
      } else {
         assert(glsl_type_is_scalar(type));
      }
   }

   if (var->num_members > 0) {
      const struct glsl_type *without_array = glsl_without_array(var->type);
      validate_assert(state, glsl_type_is_struct_or_ifc(without_array));
      validate_assert(state, var->num_members == glsl_get_length(without_array));
      validate_assert(state, var->members != NULL);
   }

   if (var->data.per_view)
      validate_assert(state, glsl_type_is_array(var->type));

   /*
    * TODO validate some things ir_validate.cpp does (requires more GLSL type
    * support)
    */

   _mesa_hash_table_insert(state->var_defs, var,
                           valid_modes == nir_var_function_temp ?
                           state->impl : NULL);

   state->var = NULL;
}

static void
validate_function_impl(nir_function_impl *impl, validate_state *state)
{
   /* Resize the ssa_srcs set.  It's likely that the size of this set will
    * never actually hit the number of SSA defs because we remove sources
    * from the set as we visit them.  (It could actually be much larger
    * because each SSA def can be used more than once.)  However, growing it
    * now costs us very little (the extra memory is already dwarfed by the
    * SSA defs themselves) and makes collisions much less likely.
    */
   _mesa_set_resize(state->ssa_srcs, impl->ssa_alloc);

   validate_assert(state, impl->function->impl == impl);
   validate_assert(state, impl->cf_node.parent == NULL);

   validate_assert(state, exec_list_is_empty(&impl->end_block->instr_list));
   validate_assert(state, impl->end_block->successors[0] == NULL);
   validate_assert(state, impl->end_block->successors[1] == NULL);

   state->impl = impl;
   state->parent_node = &impl->cf_node;

   exec_list_validate(&impl->locals);
   nir_foreach_function_temp_variable(var, impl) {
      validate_var_decl(var, nir_var_function_temp, state);
   }

   state->regs_found = reralloc(state->mem_ctx, state->regs_found,
                                BITSET_WORD, BITSET_WORDS(impl->reg_alloc));
   memset(state->regs_found, 0, BITSET_WORDS(impl->reg_alloc) *
          sizeof(BITSET_WORD));
   exec_list_validate(&impl->registers);
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      prevalidate_reg_decl(reg, state);
   }

   state->ssa_defs_found = reralloc(state->mem_ctx, state->ssa_defs_found,
                                    BITSET_WORD, BITSET_WORDS(impl->ssa_alloc));
   memset(state->ssa_defs_found, 0, BITSET_WORDS(impl->ssa_alloc) *
          sizeof(BITSET_WORD));
   exec_list_validate(&impl->body);
   foreach_list_typed(nir_cf_node, node, node, &impl->body) {
      validate_cf_node(node, state);
   }

   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      postvalidate_reg_decl(reg, state);
   }

   validate_assert(state, state->ssa_srcs->entries == 0);
   _mesa_set_clear(state->ssa_srcs, NULL);
}

static void
validate_function(nir_function *func, validate_state *state)
{
   if (func->impl != NULL) {
      validate_assert(state, func->impl->function == func);
      validate_function_impl(func->impl, state);
   }
}

static void
init_validate_state(validate_state *state)
{
   state->mem_ctx = ralloc_context(NULL);
   state->regs = _mesa_pointer_hash_table_create(state->mem_ctx);
   state->ssa_srcs = _mesa_pointer_set_create(state->mem_ctx);
   state->ssa_defs_found = NULL;
   state->regs_found = NULL;
   state->var_defs = _mesa_pointer_hash_table_create(state->mem_ctx);
   state->errors = _mesa_pointer_hash_table_create(state->mem_ctx);

   state->loop = NULL;
   state->instr = NULL;
   state->var = NULL;
}

static void
destroy_validate_state(validate_state *state)
{
   ralloc_free(state->mem_ctx);
}

mtx_t fail_dump_mutex = _MTX_INITIALIZER_NP;

static void
dump_errors(validate_state *state, const char *when)
{
   struct hash_table *errors = state->errors;

   /* Lock around dumping so that we get clean dumps in a multi-threaded
    * scenario
    */
   mtx_lock(&fail_dump_mutex);

   if (when) {
      fprintf(stderr, "NIR validation failed %s\n", when);
      fprintf(stderr, "%d errors:\n", _mesa_hash_table_num_entries(errors));
   } else {
      fprintf(stderr, "NIR validation failed with %d errors:\n",
              _mesa_hash_table_num_entries(errors));
   }

   nir_print_shader_annotated(state->shader, stderr, errors);

   if (_mesa_hash_table_num_entries(errors) > 0) {
      fprintf(stderr, "%d additional errors:\n",
              _mesa_hash_table_num_entries(errors));
      hash_table_foreach(errors, entry) {
         fprintf(stderr, "%s\n", (char *)entry->data);
      }
   }

   mtx_unlock(&fail_dump_mutex);

   abort();
}

void
nir_validate_shader(nir_shader *shader, const char *when)
{
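   /* Even in debug builds, validation can be disabled at runtime by setting
    * NIR_VALIDATE=0 in the environment.
    */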
   static int should_validate = -1;
   if (should_validate < 0)
      should_validate = env_var_as_boolean("NIR_VALIDATE", true);
   if (!should_validate)
      return;

   validate_state state;
   init_validate_state(&state);

   state.shader = shader;

   nir_variable_mode valid_modes =
      nir_var_shader_in |
      nir_var_shader_out |
      nir_var_shader_temp |
      nir_var_uniform |
      nir_var_mem_ubo |
      nir_var_system_value |
      nir_var_mem_ssbo |
      nir_var_mem_shared |
      nir_var_mem_constant;

   exec_list_validate(&shader->variables);
   nir_foreach_variable_in_shader(var, shader)
      validate_var_decl(var, valid_modes, &state);

   exec_list_validate(&shader->functions);
   foreach_list_typed(nir_function, func, node, &shader->functions) {
      validate_function(func, &state);
   }

   if (_mesa_hash_table_num_entries(state.errors) > 0)
      dump_errors(&state, when);

   destroy_validate_state(&state);
}

#endif /* NDEBUG */