nir/validate: validate that tex deref sources are actually derefs
src/compiler/nir/nir_validate.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

#include "nir.h"
#include <assert.h>

/*
 * This file checks for invalid IR indicating a bug somewhere in the compiler.
 */

/* Since this file is just a pile of asserts, don't bother compiling it if
 * we're not building a debug build.
 */
#ifndef NDEBUG

/*
 * Per-register validation state.
 */

typedef struct {
   /*
    * equivalent to the uses and defs in nir_register, but built up by the
    * validator. At the end, we verify that the sets have the same entries.
    */
   struct set *uses, *if_uses, *defs;
   nir_function_impl *where_defined; /* NULL for global registers */
} reg_validate_state;

typedef struct {
   /*
    * equivalent to the uses in nir_ssa_def, but built up by the validator.
    * At the end, we verify that the sets have the same entries.
    */
   struct set *uses, *if_uses;
   nir_function_impl *where_defined;
} ssa_def_validate_state;

typedef struct {
   /* map of register -> validation state (struct above) */
   struct hash_table *regs;

   /* the current shader being validated */
   nir_shader *shader;

   /* the current instruction being validated */
   nir_instr *instr;

   /* the current variable being validated */
   nir_variable *var;

   /* the current basic block being validated */
   nir_block *block;

   /* the current if statement being validated */
   nir_if *if_stmt;

   /* the current loop being visited */
   nir_loop *loop;

   /* the parent of the current cf node being visited */
   nir_cf_node *parent_node;

   /* the current function implementation being validated */
   nir_function_impl *impl;

   /* map of SSA value -> function implementation where it is defined */
   struct hash_table *ssa_defs;

   /* bitset of ssa definitions we have found; used to check uniqueness */
   BITSET_WORD *ssa_defs_found;

   /* bitset of registers we have currently found; used to check uniqueness */
   BITSET_WORD *regs_found;

   /* map of variable -> function implementation where it is defined or NULL
    * if it is a global variable
    */
   struct hash_table *var_defs;

   /* map of instruction/var/etc to failed assert string */
   struct hash_table *errors;
} validate_state;

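/* Failed checks are recorded in state->errors, keyed by the instruction or
 * variable being validated at the time, rather than aborting immediately.
 * This lets dump_errors() print the whole shader with the errors annotated
 * next to the offending IR.
 */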
static void
log_error(validate_state *state, const char *cond, const char *file, int line)
{
   const void *obj;

   if (state->instr)
      obj = state->instr;
   else if (state->var)
      obj = state->var;
   else
      obj = cond;

   char *msg = ralloc_asprintf(state->errors, "error: %s (%s:%d)",
                               cond, file, line);

   _mesa_hash_table_insert(state->errors, obj, msg);
}

#define validate_assert(state, cond) do {             \
      if (!(cond)) \
         log_error(state, #cond, __FILE__, __LINE__); \
   } while (0)

static void validate_src(nir_src *src, validate_state *state,
                         unsigned bit_sizes, unsigned num_components);

static void
validate_reg_src(nir_src *src, validate_state *state,
                 unsigned bit_sizes, unsigned num_components)
{
   validate_assert(state, src->reg.reg != NULL);

   struct hash_entry *entry;
   entry = _mesa_hash_table_search(state->regs, src->reg.reg);
   validate_assert(state, entry);

   reg_validate_state *reg_state = (reg_validate_state *) entry->data;

   if (state->instr) {
      _mesa_set_add(reg_state->uses, src);
   } else {
      validate_assert(state, state->if_stmt);
      _mesa_set_add(reg_state->if_uses, src);
   }

   if (!src->reg.reg->is_global) {
      validate_assert(state, reg_state->where_defined == state->impl &&
                      "using a register declared in a different function");
   }

   if (!src->reg.reg->is_packed) {
      if (bit_sizes)
         validate_assert(state, src->reg.reg->bit_size & bit_sizes);
      if (num_components)
         validate_assert(state, src->reg.reg->num_components == num_components);
   }

   validate_assert(state, (src->reg.reg->num_array_elems == 0 ||
                           src->reg.base_offset < src->reg.reg->num_array_elems) &&
                          "definitely out-of-bounds array access");

   if (src->reg.indirect) {
      validate_assert(state, src->reg.reg->num_array_elems != 0);
      validate_assert(state, (src->reg.indirect->is_ssa ||
                              src->reg.indirect->reg.indirect == NULL) &&
                             "only one level of indirection allowed");
      validate_src(src->reg.indirect, state, 32, 1);
   }
}

static void
validate_ssa_src(nir_src *src, validate_state *state,
                 unsigned bit_sizes, unsigned num_components)
{
   validate_assert(state, src->ssa != NULL);

   struct hash_entry *entry = _mesa_hash_table_search(state->ssa_defs, src->ssa);

   validate_assert(state, entry);

   if (!entry)
      return;

   ssa_def_validate_state *def_state = (ssa_def_validate_state *)entry->data;

   validate_assert(state, def_state->where_defined == state->impl &&
                   "using an SSA value defined in a different function");

   if (state->instr) {
      _mesa_set_add(def_state->uses, src);
   } else {
      validate_assert(state, state->if_stmt);
      _mesa_set_add(def_state->if_uses, src);
   }

   if (bit_sizes)
      validate_assert(state, src->ssa->bit_size & bit_sizes);
   if (num_components)
      validate_assert(state, src->ssa->num_components == num_components);

   /* TODO validate that the use is dominated by the definition */
}

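/* Common entry point for validating a source.  bit_sizes is a mask of
 * allowed bit sizes and num_components is the required component count;
 * either may be zero to skip that check.  Dispatches to the SSA or register
 * path, each of which also records the use for the post-validation
 * cross-check of the use/def sets.
 */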
static void
validate_src(nir_src *src, validate_state *state,
             unsigned bit_sizes, unsigned num_components)
{
   if (state->instr)
      validate_assert(state, src->parent_instr == state->instr);
   else
      validate_assert(state, src->parent_if == state->if_stmt);

   if (src->is_ssa)
      validate_ssa_src(src, state, bit_sizes, num_components);
   else
      validate_reg_src(src, state, bit_sizes, num_components);
}

static void
validate_alu_src(nir_alu_instr *instr, unsigned index, validate_state *state)
{
   nir_alu_src *src = &instr->src[index];

   unsigned num_components = nir_src_num_components(src->src);
   if (!src->src.is_ssa && src->src.reg.reg->is_packed)
      num_components = NIR_MAX_VEC_COMPONENTS; /* can't check anything */
   for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
      validate_assert(state, src->swizzle[i] < NIR_MAX_VEC_COMPONENTS);

      if (nir_alu_instr_channel_used(instr, index, i))
         validate_assert(state, src->swizzle[i] < num_components);
   }

   validate_src(&src->src, state, 0, 0);
}

static void
validate_reg_dest(nir_reg_dest *dest, validate_state *state,
                  unsigned bit_sizes, unsigned num_components)
{
   validate_assert(state, dest->reg != NULL);

   validate_assert(state, dest->parent_instr == state->instr);

   struct hash_entry *entry2;
   entry2 = _mesa_hash_table_search(state->regs, dest->reg);

   validate_assert(state, entry2);

   reg_validate_state *reg_state = (reg_validate_state *) entry2->data;
   _mesa_set_add(reg_state->defs, dest);

   if (!dest->reg->is_global) {
      validate_assert(state, reg_state->where_defined == state->impl &&
                      "writing to a register declared in a different function");
   }

   if (!dest->reg->is_packed) {
      if (bit_sizes)
         validate_assert(state, dest->reg->bit_size & bit_sizes);
      if (num_components)
         validate_assert(state, dest->reg->num_components == num_components);
   }

   validate_assert(state, (dest->reg->num_array_elems == 0 ||
                           dest->base_offset < dest->reg->num_array_elems) &&
                          "definitely out-of-bounds array access");

   if (dest->indirect) {
      validate_assert(state, dest->reg->num_array_elems != 0);
      validate_assert(state, (dest->indirect->is_ssa || dest->indirect->reg.indirect == NULL) &&
                             "only one level of indirection allowed");
      validate_src(dest->indirect, state, 32, 1);
   }
}

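/* Checks the basic invariants of an SSA def (unique index, sane component
 * count, correct parent instruction) and registers it in state->ssa_defs
 * with empty use sets.  validate_ssa_src() fills those sets in, and
 * postvalidate_ssa_def() later compares them against the def's actual
 * use/if_use lists.
 */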
static void
validate_ssa_def(nir_ssa_def *def, validate_state *state)
{
   validate_assert(state, def->index < state->impl->ssa_alloc);
   validate_assert(state, !BITSET_TEST(state->ssa_defs_found, def->index));
   BITSET_SET(state->ssa_defs_found, def->index);

   validate_assert(state, def->parent_instr == state->instr);

   validate_assert(state, (def->num_components <= 4) ||
                          (def->num_components == 8) ||
                          (def->num_components == 16));

   list_validate(&def->uses);
   list_validate(&def->if_uses);

   ssa_def_validate_state *def_state = ralloc(state->ssa_defs,
                                              ssa_def_validate_state);
   def_state->where_defined = state->impl;
   def_state->uses = _mesa_pointer_set_create(def_state);
   def_state->if_uses = _mesa_pointer_set_create(def_state);
   _mesa_hash_table_insert(state->ssa_defs, def, def_state);
}

static void
validate_dest(nir_dest *dest, validate_state *state,
              unsigned bit_sizes, unsigned num_components)
{
   if (dest->is_ssa) {
      if (bit_sizes)
         validate_assert(state, dest->ssa.bit_size & bit_sizes);
      if (num_components)
         validate_assert(state, dest->ssa.num_components == num_components);
      validate_ssa_def(&dest->ssa, state);
   } else {
      validate_reg_dest(&dest->reg, state, bit_sizes, num_components);
   }
}

static void
validate_alu_dest(nir_alu_instr *instr, validate_state *state)
{
   nir_alu_dest *dest = &instr->dest;

   unsigned dest_size = nir_dest_num_components(dest->dest);
   bool is_packed = !dest->dest.is_ssa && dest->dest.reg.reg->is_packed;
   /*
    * validate that the instruction doesn't write to components not in the
    * register/SSA value
    */
   validate_assert(state, is_packed || !(dest->write_mask & ~((1 << dest_size) - 1)));

   /* validate that saturate is only ever used on instructions with
    * destinations of type float
    */
   nir_alu_instr *alu = nir_instr_as_alu(state->instr);
   validate_assert(state,
                   (nir_alu_type_get_base_type(nir_op_infos[alu->op].output_type) ==
                    nir_type_float) ||
                   !dest->saturate);

   validate_dest(&dest->dest, state, 0, 0);
}

static void
validate_alu_instr(nir_alu_instr *instr, validate_state *state)
{
   validate_assert(state, instr->op < nir_num_opcodes);

   unsigned instr_bit_size = 0;
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      nir_alu_type src_type = nir_op_infos[instr->op].input_types[i];
      unsigned src_bit_size = nir_src_bit_size(instr->src[i].src);
      if (nir_alu_type_get_type_size(src_type)) {
         validate_assert(state, src_bit_size == nir_alu_type_get_type_size(src_type));
      } else if (instr_bit_size) {
         validate_assert(state, src_bit_size == instr_bit_size);
      } else {
         instr_bit_size = src_bit_size;
      }

      if (nir_alu_type_get_base_type(src_type) == nir_type_float) {
         /* 8-bit float isn't a thing */
         validate_assert(state, src_bit_size == 16 || src_bit_size == 32 ||
                                src_bit_size == 64);
      }

      validate_alu_src(instr, i, state);
   }

   nir_alu_type dest_type = nir_op_infos[instr->op].output_type;
   unsigned dest_bit_size = nir_dest_bit_size(instr->dest.dest);
   if (nir_alu_type_get_type_size(dest_type)) {
      validate_assert(state, dest_bit_size == nir_alu_type_get_type_size(dest_type));
   } else if (instr_bit_size) {
      validate_assert(state, dest_bit_size == instr_bit_size);
   } else {
      /* The only unsized thing is the destination so it's vacuously valid */
   }

   if (nir_alu_type_get_base_type(dest_type) == nir_type_float) {
      /* 8-bit float isn't a thing */
      validate_assert(state, dest_bit_size == 16 || dest_bit_size == 32 ||
                             dest_bit_size == 64);
   }

   validate_alu_dest(instr, state);
}

static void
validate_var_use(nir_variable *var, validate_state *state)
{
   struct hash_entry *entry = _mesa_hash_table_search(state->var_defs, var);
   validate_assert(state, entry);
   if (var->data.mode == nir_var_function_temp)
      validate_assert(state, (nir_function_impl *) entry->data == state->impl);
}

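/* Deref chains are validated link by link: except for variable and cast
 * derefs, the parent must itself be a deref instruction with the same mode,
 * and this deref's type must follow from the parent's type (struct field,
 * array element, etc.).
 */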
static void
validate_deref_instr(nir_deref_instr *instr, validate_state *state)
{
   if (instr->deref_type == nir_deref_type_var) {
      /* Variable dereferences are stupid simple. */
      validate_assert(state, instr->mode == instr->var->data.mode);
      validate_assert(state, instr->type == instr->var->type);
      validate_var_use(instr->var, state);
   } else if (instr->deref_type == nir_deref_type_cast) {
      /* For cast, we simply have to trust the instruction.  It's up to
       * lowering passes and front/back-ends to make them sane.
       */
      validate_src(&instr->parent, state, 0, 0);

      /* We just validate that the type and mode are there */
      validate_assert(state, instr->mode);
      validate_assert(state, instr->type);
   } else {
      /* We require the parent to be SSA.  This may be lifted in the future */
      validate_assert(state, instr->parent.is_ssa);

      /* The parent pointer value must have the same number of components
       * as the destination.
       */
      validate_src(&instr->parent, state, nir_dest_bit_size(instr->dest),
                   nir_dest_num_components(instr->dest));

      nir_instr *parent_instr = instr->parent.ssa->parent_instr;

      /* The parent must come from another deref instruction */
      validate_assert(state, parent_instr->type == nir_instr_type_deref);

      nir_deref_instr *parent = nir_instr_as_deref(parent_instr);

      validate_assert(state, instr->mode == parent->mode);

      switch (instr->deref_type) {
      case nir_deref_type_struct:
         validate_assert(state, glsl_type_is_struct_or_ifc(parent->type));
         validate_assert(state,
                         instr->strct.index < glsl_get_length(parent->type));
         validate_assert(state, instr->type ==
                         glsl_get_struct_field(parent->type, instr->strct.index));
         break;

      case nir_deref_type_array:
      case nir_deref_type_array_wildcard:
         if (instr->mode == nir_var_mem_ubo ||
             instr->mode == nir_var_mem_ssbo ||
             instr->mode == nir_var_mem_shared ||
             instr->mode == nir_var_mem_global) {
            /* Shared variables and UBO/SSBOs have a bit more relaxed rules
             * because we need to be able to handle array derefs on vectors.
             * Fortunately, nir_lower_io handles these just fine.
             */
            validate_assert(state, glsl_type_is_array(parent->type) ||
                                   glsl_type_is_matrix(parent->type) ||
                                   glsl_type_is_vector(parent->type));
         } else {
            /* Most of NIR cannot handle array derefs on vectors */
            validate_assert(state, glsl_type_is_array(parent->type) ||
                                   glsl_type_is_matrix(parent->type));
         }
         validate_assert(state,
                         instr->type == glsl_get_array_element(parent->type));

         if (instr->deref_type == nir_deref_type_array) {
            validate_src(&instr->arr.index, state,
                         nir_dest_bit_size(instr->dest), 1);
         }
         break;

      case nir_deref_type_ptr_as_array:
         /* ptr_as_array derefs must have a parent that is either an array,
          * ptr_as_array, or cast.  If the parent is a cast, we get the stride
          * information (if any) from the cast deref.
          */
         validate_assert(state,
                         parent->deref_type == nir_deref_type_array ||
                         parent->deref_type == nir_deref_type_ptr_as_array ||
                         parent->deref_type == nir_deref_type_cast);
         validate_src(&instr->arr.index, state,
                      nir_dest_bit_size(instr->dest), 1);
         break;

      default:
         unreachable("Invalid deref instruction type");
      }
   }

   /* We intentionally don't validate the size of the destination because we
    * want to let other compiler components such as SPIR-V decide how big
    * pointers should be.
    */
   validate_dest(&instr->dest, state, 0, 0);

   /* Deref instructions as if conditions don't make sense because if
    * conditions expect well-formed Booleans.  If you want to compare with
    * NULL, an explicit comparison operation should be used.
    */
   validate_assert(state, list_empty(&instr->dest.ssa.if_uses));
}

static void
validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state)
{
   unsigned dest_bit_size = 0;
   unsigned src_bit_sizes[NIR_INTRINSIC_MAX_INPUTS] = { 0, };
   switch (instr->intrinsic) {
   case nir_intrinsic_load_param: {
      unsigned param_idx = nir_intrinsic_param_idx(instr);
      validate_assert(state, param_idx < state->impl->function->num_params);
      nir_parameter *param = &state->impl->function->params[param_idx];
      validate_assert(state, instr->num_components == param->num_components);
      dest_bit_size = param->bit_size;
      break;
   }

   case nir_intrinsic_load_deref: {
      nir_deref_instr *src = nir_src_as_deref(instr->src[0]);
      validate_assert(state, glsl_type_is_vector_or_scalar(src->type) ||
                      (src->mode == nir_var_uniform &&
                       glsl_get_base_type(src->type) == GLSL_TYPE_SUBROUTINE));
      validate_assert(state, instr->num_components ==
                             glsl_get_vector_elements(src->type));
      dest_bit_size = glsl_get_bit_size(src->type);
      /* Also allow 32-bit boolean load operations */
      if (glsl_type_is_boolean(src->type))
         dest_bit_size |= 32;
      break;
   }

   case nir_intrinsic_store_deref: {
      nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
      validate_assert(state, glsl_type_is_vector_or_scalar(dst->type));
      validate_assert(state, instr->num_components ==
                             glsl_get_vector_elements(dst->type));
      src_bit_sizes[1] = glsl_get_bit_size(dst->type);
      /* Also allow 32-bit boolean store operations */
      if (glsl_type_is_boolean(dst->type))
         src_bit_sizes[1] |= 32;
      validate_assert(state, (dst->mode & (nir_var_shader_in |
                                           nir_var_uniform)) == 0);
      validate_assert(state, (nir_intrinsic_write_mask(instr) & ~((1 << instr->num_components) - 1)) == 0);
      break;
   }

   case nir_intrinsic_copy_deref: {
      nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
      nir_deref_instr *src = nir_src_as_deref(instr->src[1]);
      validate_assert(state, glsl_get_bare_type(dst->type) ==
                             glsl_get_bare_type(src->type));
      validate_assert(state, (dst->mode & (nir_var_shader_in |
                                           nir_var_uniform)) == 0);
      break;
   }

   default:
      break;
   }

   unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs;
   for (unsigned i = 0; i < num_srcs; i++) {
      unsigned components_read = nir_intrinsic_src_components(instr, i);

      validate_assert(state, components_read > 0);

      validate_src(&instr->src[i], state, src_bit_sizes[i], components_read);
   }

   if (nir_intrinsic_infos[instr->intrinsic].has_dest) {
      unsigned components_written = nir_intrinsic_dest_components(instr);
      unsigned bit_sizes = nir_intrinsic_infos[instr->intrinsic].dest_bit_sizes;

      validate_assert(state, components_written > 0);

      if (dest_bit_size && bit_sizes)
         validate_assert(state, dest_bit_size & bit_sizes);
      else
         dest_bit_size = dest_bit_size ? dest_bit_size : bit_sizes;

      validate_dest(&instr->dest, state, dest_bit_size, components_written);
   }
}

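/* Each texture source type may appear at most once on a texture instruction.
 * For texture/sampler deref sources we additionally require that the source
 * is an SSA value produced by a deref instruction; that is the check added
 * by the commit named above.
 */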
static void
validate_tex_instr(nir_tex_instr *instr, validate_state *state)
{
   bool src_type_seen[nir_num_tex_src_types];
   for (unsigned i = 0; i < nir_num_tex_src_types; i++)
      src_type_seen[i] = false;

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      validate_assert(state, !src_type_seen[instr->src[i].src_type]);
      src_type_seen[instr->src[i].src_type] = true;
      validate_src(&instr->src[i].src, state,
                   0, nir_tex_instr_src_size(instr, i));

      switch (instr->src[i].src_type) {
      case nir_tex_src_texture_deref:
      case nir_tex_src_sampler_deref:
         validate_assert(state, instr->src[i].src.is_ssa);
         validate_assert(state,
                         instr->src[i].src.ssa->parent_instr->type == nir_instr_type_deref);
         break;
      default:
         break;
      }
   }

   if (nir_tex_instr_has_explicit_tg4_offsets(instr)) {
      validate_assert(state, instr->op == nir_texop_tg4);
      validate_assert(state, !src_type_seen[nir_tex_src_offset]);
   }

   validate_dest(&instr->dest, state, 0, nir_tex_instr_dest_size(instr));
}

static void
validate_call_instr(nir_call_instr *instr, validate_state *state)
{
   validate_assert(state, instr->num_params == instr->callee->num_params);

   for (unsigned i = 0; i < instr->num_params; i++) {
      validate_src(&instr->params[i], state,
                   instr->callee->params[i].bit_size,
                   instr->callee->params[i].num_components);
   }
}

static void
validate_load_const_instr(nir_load_const_instr *instr, validate_state *state)
{
   validate_ssa_def(&instr->def, state);
}

static void
validate_ssa_undef_instr(nir_ssa_undef_instr *instr, validate_state *state)
{
   validate_ssa_def(&instr->def, state);
}

static void
validate_phi_instr(nir_phi_instr *instr, validate_state *state)
{
   /*
    * don't validate the sources until we get to them from their predecessor
    * basic blocks, to avoid validating an SSA use before its definition.
    */

   validate_dest(&instr->dest, state, 0, 0);

   exec_list_validate(&instr->srcs);
   validate_assert(state, exec_list_length(&instr->srcs) ==
                          state->block->predecessors->entries);
}

static void
validate_instr(nir_instr *instr, validate_state *state)
{
   validate_assert(state, instr->block == state->block);

   state->instr = instr;

   switch (instr->type) {
   case nir_instr_type_alu:
      validate_alu_instr(nir_instr_as_alu(instr), state);
      break;

   case nir_instr_type_deref:
      validate_deref_instr(nir_instr_as_deref(instr), state);
      break;

   case nir_instr_type_call:
      validate_call_instr(nir_instr_as_call(instr), state);
      break;

   case nir_instr_type_intrinsic:
      validate_intrinsic_instr(nir_instr_as_intrinsic(instr), state);
      break;

   case nir_instr_type_tex:
      validate_tex_instr(nir_instr_as_tex(instr), state);
      break;

   case nir_instr_type_load_const:
      validate_load_const_instr(nir_instr_as_load_const(instr), state);
      break;

   case nir_instr_type_phi:
      validate_phi_instr(nir_instr_as_phi(instr), state);
      break;

   case nir_instr_type_ssa_undef:
      validate_ssa_undef_instr(nir_instr_as_ssa_undef(instr), state);
      break;

   case nir_instr_type_jump:
      break;

   default:
      validate_assert(state, !"Invalid instruction type");
      break;
   }

   state->instr = NULL;
}

static void
validate_phi_src(nir_phi_instr *instr, nir_block *pred, validate_state *state)
{
   state->instr = &instr->instr;

   validate_assert(state, instr->dest.is_ssa);

   exec_list_validate(&instr->srcs);
   nir_foreach_phi_src(src, instr) {
      if (src->pred == pred) {
         validate_assert(state, src->src.is_ssa);
         validate_src(&src->src, state, instr->dest.ssa.bit_size,
                      instr->dest.ssa.num_components);
         state->instr = NULL;
         return;
      }
   }

   abort();
}

static void
validate_phi_srcs(nir_block *block, nir_block *succ, validate_state *state)
{
   nir_foreach_instr(instr, succ) {
      if (instr->type != nir_instr_type_phi)
         break;

      validate_phi_src(nir_instr_as_phi(instr), block, state);
   }
}

static void validate_cf_node(nir_cf_node *node, validate_state *state);

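/* A block must be consistent with its position in the CF tree: phis come
 * first, a jump (if any) comes last, and the successor/predecessor links
 * must match what the surrounding ifs and loops imply.
 */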
static void
validate_block(nir_block *block, validate_state *state)
{
   validate_assert(state, block->cf_node.parent == state->parent_node);

   state->block = block;

   exec_list_validate(&block->instr_list);
   nir_foreach_instr(instr, block) {
      if (instr->type == nir_instr_type_phi) {
         validate_assert(state, instr == nir_block_first_instr(block) ||
                         nir_instr_prev(instr)->type == nir_instr_type_phi);
      }

      if (instr->type == nir_instr_type_jump) {
         validate_assert(state, instr == nir_block_last_instr(block));
      }

      validate_instr(instr, state);
   }

   validate_assert(state, block->successors[0] != NULL);
   validate_assert(state, block->successors[0] != block->successors[1]);

   for (unsigned i = 0; i < 2; i++) {
      if (block->successors[i] != NULL) {
         struct set_entry *entry =
            _mesa_set_search(block->successors[i]->predecessors, block);
         validate_assert(state, entry);

         validate_phi_srcs(block, block->successors[i], state);
      }
   }

   set_foreach(block->predecessors, entry) {
      const nir_block *pred = entry->key;
      validate_assert(state, pred->successors[0] == block ||
                      pred->successors[1] == block);
   }

   if (!exec_list_is_empty(&block->instr_list) &&
       nir_block_last_instr(block)->type == nir_instr_type_jump) {
      validate_assert(state, block->successors[1] == NULL);
      nir_jump_instr *jump = nir_instr_as_jump(nir_block_last_instr(block));
      switch (jump->type) {
      case nir_jump_break: {
         nir_block *after =
            nir_cf_node_as_block(nir_cf_node_next(&state->loop->cf_node));
         validate_assert(state, block->successors[0] == after);
         break;
      }

      case nir_jump_continue: {
         nir_block *first = nir_loop_first_block(state->loop);
         validate_assert(state, block->successors[0] == first);
         break;
      }

      case nir_jump_return:
         validate_assert(state, block->successors[0] == state->impl->end_block);
         break;

      default:
         unreachable("bad jump type");
      }
   } else {
      nir_cf_node *next = nir_cf_node_next(&block->cf_node);
      if (next == NULL) {
         switch (state->parent_node->type) {
         case nir_cf_node_loop: {
            nir_block *first = nir_loop_first_block(state->loop);
            validate_assert(state, block->successors[0] == first);
            /* due to the hack for infinite loops, block->successors[1] may
             * point to the block after the loop.
             */
            break;
         }

         case nir_cf_node_if: {
            nir_block *after =
               nir_cf_node_as_block(nir_cf_node_next(state->parent_node));
            validate_assert(state, block->successors[0] == after);
            validate_assert(state, block->successors[1] == NULL);
            break;
         }

         case nir_cf_node_function:
            validate_assert(state, block->successors[0] == state->impl->end_block);
            validate_assert(state, block->successors[1] == NULL);
            break;

         default:
            unreachable("unknown control flow node type");
         }
      } else {
         if (next->type == nir_cf_node_if) {
            nir_if *if_stmt = nir_cf_node_as_if(next);
            validate_assert(state, block->successors[0] ==
                            nir_if_first_then_block(if_stmt));
            validate_assert(state, block->successors[1] ==
                            nir_if_first_else_block(if_stmt));
         } else {
            validate_assert(state, next->type == nir_cf_node_loop);
            nir_loop *loop = nir_cf_node_as_loop(next);
            validate_assert(state, block->successors[0] ==
                            nir_loop_first_block(loop));
            validate_assert(state, block->successors[1] == NULL);
         }
      }
   }
}

static void
validate_if(nir_if *if_stmt, validate_state *state)
{
   state->if_stmt = if_stmt;

   validate_assert(state, !exec_node_is_head_sentinel(if_stmt->cf_node.node.prev));
   nir_cf_node *prev_node = nir_cf_node_prev(&if_stmt->cf_node);
   validate_assert(state, prev_node->type == nir_cf_node_block);

   validate_assert(state, !exec_node_is_tail_sentinel(if_stmt->cf_node.node.next));
   nir_cf_node *next_node = nir_cf_node_next(&if_stmt->cf_node);
   validate_assert(state, next_node->type == nir_cf_node_block);

   validate_src(&if_stmt->condition, state, 0, 1);

   validate_assert(state, !exec_list_is_empty(&if_stmt->then_list));
   validate_assert(state, !exec_list_is_empty(&if_stmt->else_list));

   nir_cf_node *old_parent = state->parent_node;
   state->parent_node = &if_stmt->cf_node;

   exec_list_validate(&if_stmt->then_list);
   foreach_list_typed(nir_cf_node, cf_node, node, &if_stmt->then_list) {
      validate_cf_node(cf_node, state);
   }

   exec_list_validate(&if_stmt->else_list);
   foreach_list_typed(nir_cf_node, cf_node, node, &if_stmt->else_list) {
      validate_cf_node(cf_node, state);
   }

   state->parent_node = old_parent;
   state->if_stmt = NULL;
}

static void
validate_loop(nir_loop *loop, validate_state *state)
{
   validate_assert(state, !exec_node_is_head_sentinel(loop->cf_node.node.prev));
   nir_cf_node *prev_node = nir_cf_node_prev(&loop->cf_node);
   validate_assert(state, prev_node->type == nir_cf_node_block);

   validate_assert(state, !exec_node_is_tail_sentinel(loop->cf_node.node.next));
   nir_cf_node *next_node = nir_cf_node_next(&loop->cf_node);
   validate_assert(state, next_node->type == nir_cf_node_block);

   validate_assert(state, !exec_list_is_empty(&loop->body));

   nir_cf_node *old_parent = state->parent_node;
   state->parent_node = &loop->cf_node;
   nir_loop *old_loop = state->loop;
   state->loop = loop;

   exec_list_validate(&loop->body);
   foreach_list_typed(nir_cf_node, cf_node, node, &loop->body) {
      validate_cf_node(cf_node, state);
   }

   state->parent_node = old_parent;
   state->loop = old_loop;
}

static void
validate_cf_node(nir_cf_node *node, validate_state *state)
{
   validate_assert(state, node->parent == state->parent_node);

   switch (node->type) {
   case nir_cf_node_block:
      validate_block(nir_cf_node_as_block(node), state);
      break;

   case nir_cf_node_if:
      validate_if(nir_cf_node_as_if(node), state);
      break;

   case nir_cf_node_loop:
      validate_loop(nir_cf_node_as_loop(node), state);
      break;

   default:
      unreachable("Invalid CF node type");
   }
}

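/* Registers are validated in two passes: prevalidate_reg_decl() records each
 * declaration and creates empty use/if_use/def sets that the src/dest
 * validators fill in, and postvalidate_reg_decl() then checks those sets
 * against the register's actual use and def lists.
 */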
static void
prevalidate_reg_decl(nir_register *reg, bool is_global, validate_state *state)
{
   validate_assert(state, reg->is_global == is_global);

   if (is_global)
      validate_assert(state, reg->index < state->shader->reg_alloc);
   else
      validate_assert(state, reg->index < state->impl->reg_alloc);
   validate_assert(state, !BITSET_TEST(state->regs_found, reg->index));
   BITSET_SET(state->regs_found, reg->index);

   list_validate(&reg->uses);
   list_validate(&reg->defs);
   list_validate(&reg->if_uses);

   reg_validate_state *reg_state = ralloc(state->regs, reg_validate_state);
   reg_state->uses = _mesa_pointer_set_create(reg_state);
   reg_state->if_uses = _mesa_pointer_set_create(reg_state);
   reg_state->defs = _mesa_pointer_set_create(reg_state);

   reg_state->where_defined = is_global ? NULL : state->impl;

   _mesa_hash_table_insert(state->regs, reg, reg_state);
}

static void
postvalidate_reg_decl(nir_register *reg, validate_state *state)
{
   struct hash_entry *entry = _mesa_hash_table_search(state->regs, reg);

   assume(entry);
   reg_validate_state *reg_state = (reg_validate_state *) entry->data;

   nir_foreach_use(src, reg) {
      struct set_entry *entry = _mesa_set_search(reg_state->uses, src);
      validate_assert(state, entry);
      _mesa_set_remove(reg_state->uses, entry);
   }

   if (reg_state->uses->entries != 0) {
      printf("extra entries in register uses:\n");
      set_foreach(reg_state->uses, entry)
         printf("%p\n", entry->key);

      abort();
   }

   nir_foreach_if_use(src, reg) {
      struct set_entry *entry = _mesa_set_search(reg_state->if_uses, src);
      validate_assert(state, entry);
      _mesa_set_remove(reg_state->if_uses, entry);
   }

   if (reg_state->if_uses->entries != 0) {
      printf("extra entries in register if_uses:\n");
      set_foreach(reg_state->if_uses, entry)
         printf("%p\n", entry->key);

      abort();
   }

   nir_foreach_def(src, reg) {
      struct set_entry *entry = _mesa_set_search(reg_state->defs, src);
      validate_assert(state, entry);
      _mesa_set_remove(reg_state->defs, entry);
   }

   if (reg_state->defs->entries != 0) {
      printf("extra entries in register defs:\n");
      set_foreach(reg_state->defs, entry)
         printf("%p\n", entry->key);

      abort();
   }
}

static void
validate_var_decl(nir_variable *var, bool is_global, validate_state *state)
{
   state->var = var;

   validate_assert(state, is_global == nir_variable_is_global(var));

   /* Must have exactly one mode set */
   validate_assert(state, util_is_power_of_two_nonzero(var->data.mode));

   if (var->data.compact) {
      /* The "compact" flag is only valid on arrays of scalars. */
      assert(glsl_type_is_array(var->type));

      const struct glsl_type *type = glsl_get_array_element(var->type);
      if (nir_is_per_vertex_io(var, state->shader->info.stage)) {
         assert(glsl_type_is_array(type));
         assert(glsl_type_is_scalar(glsl_get_array_element(type)));
      } else {
         assert(glsl_type_is_scalar(type));
      }
   }

   if (var->num_members > 0) {
      const struct glsl_type *without_array = glsl_without_array(var->type);
      validate_assert(state, glsl_type_is_struct_or_ifc(without_array));
      validate_assert(state, var->num_members == glsl_get_length(without_array));
      validate_assert(state, var->members != NULL);
   }

   /*
    * TODO validate some things ir_validate.cpp does (requires more GLSL type
    * support)
    */

   _mesa_hash_table_insert(state->var_defs, var,
                           is_global ? NULL : state->impl);

   state->var = NULL;
}

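/* Called for every SSA def after the whole function has been walked: every
 * use on the def's use/if_use lists must have been seen by
 * validate_ssa_src(), and the recorded sets must contain nothing extra.
 */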
static bool
postvalidate_ssa_def(nir_ssa_def *def, void *void_state)
{
   validate_state *state = void_state;

   struct hash_entry *entry = _mesa_hash_table_search(state->ssa_defs, def);

   assume(entry);
   ssa_def_validate_state *def_state = (ssa_def_validate_state *)entry->data;

   nir_foreach_use(src, def) {
      struct set_entry *entry = _mesa_set_search(def_state->uses, src);
      validate_assert(state, entry);
      _mesa_set_remove(def_state->uses, entry);
   }

   if (def_state->uses->entries != 0) {
      printf("extra entries in SSA def uses:\n");
      set_foreach(def_state->uses, entry)
         printf("%p\n", entry->key);

      abort();
   }

   nir_foreach_if_use(src, def) {
      struct set_entry *entry = _mesa_set_search(def_state->if_uses, src);
      validate_assert(state, entry);
      _mesa_set_remove(def_state->if_uses, entry);
   }

   if (def_state->if_uses->entries != 0) {
1090 printf("extra entries in SSA def uses:\n");
      set_foreach(def_state->if_uses, entry)
         printf("%p\n", entry->key);

      abort();
   }

   return true;
}

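/* Validates one nir_function_impl: local variables and registers are
 * declared first, then the CF tree is walked, and finally the recorded
 * register and SSA use/def sets are checked against the IR's own lists.
 */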
static void
validate_function_impl(nir_function_impl *impl, validate_state *state)
{
   validate_assert(state, impl->function->impl == impl);
   validate_assert(state, impl->cf_node.parent == NULL);

   validate_assert(state, exec_list_is_empty(&impl->end_block->instr_list));
   validate_assert(state, impl->end_block->successors[0] == NULL);
   validate_assert(state, impl->end_block->successors[1] == NULL);

   state->impl = impl;
   state->parent_node = &impl->cf_node;

   exec_list_validate(&impl->locals);
   nir_foreach_variable(var, &impl->locals) {
      validate_var_decl(var, false, state);
   }

   state->regs_found = realloc(state->regs_found,
                               BITSET_WORDS(impl->reg_alloc) *
                               sizeof(BITSET_WORD));
   memset(state->regs_found, 0, BITSET_WORDS(impl->reg_alloc) *
                                sizeof(BITSET_WORD));
   exec_list_validate(&impl->registers);
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      prevalidate_reg_decl(reg, false, state);
   }

   state->ssa_defs_found = realloc(state->ssa_defs_found,
                                   BITSET_WORDS(impl->ssa_alloc) *
                                   sizeof(BITSET_WORD));
   memset(state->ssa_defs_found, 0, BITSET_WORDS(impl->ssa_alloc) *
                                    sizeof(BITSET_WORD));
   exec_list_validate(&impl->body);
   foreach_list_typed(nir_cf_node, node, node, &impl->body) {
      validate_cf_node(node, state);
   }

   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      postvalidate_reg_decl(reg, state);
   }

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block)
         nir_foreach_ssa_def(instr, postvalidate_ssa_def, state);
   }
}

static void
validate_function(nir_function *func, validate_state *state)
{
   if (func->impl != NULL) {
      validate_assert(state, func->impl->function == func);
      validate_function_impl(func->impl, state);
   }
}

static void
init_validate_state(validate_state *state)
{
   state->regs = _mesa_pointer_hash_table_create(NULL);
   state->ssa_defs = _mesa_pointer_hash_table_create(NULL);
   state->ssa_defs_found = NULL;
   state->regs_found = NULL;
   state->var_defs = _mesa_pointer_hash_table_create(NULL);
   state->errors = _mesa_pointer_hash_table_create(NULL);

   state->loop = NULL;
   state->instr = NULL;
   state->var = NULL;
}

static void
destroy_validate_state(validate_state *state)
{
   _mesa_hash_table_destroy(state->regs, NULL);
   _mesa_hash_table_destroy(state->ssa_defs, NULL);
   free(state->ssa_defs_found);
   free(state->regs_found);
   _mesa_hash_table_destroy(state->var_defs, NULL);
   _mesa_hash_table_destroy(state->errors, NULL);
}

static void
dump_errors(validate_state *state, const char *when)
{
   struct hash_table *errors = state->errors;

   if (when) {
      fprintf(stderr, "NIR validation failed %s\n", when);
      fprintf(stderr, "%d errors:\n", _mesa_hash_table_num_entries(errors));
   } else {
      fprintf(stderr, "NIR validation failed with %d errors:\n",
              _mesa_hash_table_num_entries(errors));
   }

   nir_print_shader_annotated(state->shader, stderr, errors);

   if (_mesa_hash_table_num_entries(errors) > 0) {
      fprintf(stderr, "%d additional errors:\n",
              _mesa_hash_table_num_entries(errors));
      hash_table_foreach(errors, entry) {
         fprintf(stderr, "%s\n", (char *)entry->data);
      }
   }

   abort();
}

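/* Top-level entry point.  Validation only exists in debug builds (see the
 * NDEBUG guard above) and can additionally be disabled at run time by
 * setting NIR_VALIDATE=0 in the environment; that check is done once and
 * cached in a static.
 */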
void
nir_validate_shader(nir_shader *shader, const char *when)
{
   static int should_validate = -1;
   if (should_validate < 0)
      should_validate = env_var_as_boolean("NIR_VALIDATE", true);
   if (!should_validate)
      return;

   validate_state state;
   init_validate_state(&state);

   state.shader = shader;

   exec_list_validate(&shader->uniforms);
   nir_foreach_variable(var, &shader->uniforms) {
      validate_var_decl(var, true, &state);
   }

   exec_list_validate(&shader->inputs);
   nir_foreach_variable(var, &shader->inputs) {
      validate_var_decl(var, true, &state);
   }

   exec_list_validate(&shader->outputs);
   nir_foreach_variable(var, &shader->outputs) {
      validate_var_decl(var, true, &state);
   }

   exec_list_validate(&shader->shared);
   nir_foreach_variable(var, &shader->shared) {
      validate_var_decl(var, true, &state);
   }

   exec_list_validate(&shader->globals);
   nir_foreach_variable(var, &shader->globals) {
      validate_var_decl(var, true, &state);
   }

   exec_list_validate(&shader->system_values);
   nir_foreach_variable(var, &shader->system_values) {
      validate_var_decl(var, true, &state);
   }

   state.regs_found = realloc(state.regs_found,
                              BITSET_WORDS(shader->reg_alloc) *
                              sizeof(BITSET_WORD));
   memset(state.regs_found, 0, BITSET_WORDS(shader->reg_alloc) *
                               sizeof(BITSET_WORD));
   exec_list_validate(&shader->registers);
   foreach_list_typed(nir_register, reg, node, &shader->registers) {
      prevalidate_reg_decl(reg, true, &state);
   }

   exec_list_validate(&shader->functions);
   foreach_list_typed(nir_function, func, node, &shader->functions) {
      validate_function(func, &state);
   }

   foreach_list_typed(nir_register, reg, node, &shader->registers) {
      postvalidate_reg_decl(reg, &state);
   }

   if (_mesa_hash_table_num_entries(state.errors) > 0)
      dump_errors(&state, when);

   destroy_validate_state(&state);
}

#endif /* NDEBUG */