1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Connor Abbott (cwabbott0@gmail.com)
25 *
26 */
27
28 #include "nir.h"
29 #include <assert.h>
30
31 /*
32 * This file checks for invalid IR indicating a bug somewhere in the compiler.
33 */
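/*
 * Typical usage, as a minimal sketch (the pass name below is only an
 * illustration, not a specific NIR API): callers re-run validation after
 * each pass so that broken IR is caught right where it was introduced.
 *
 *    some_nir_pass(shader);        // hypothetical pass that rewrites the IR
 *    nir_validate_shader(shader);  // defined at the bottom of this file;
 *                                  // compiled out in release (NDEBUG) builds
 *                                  // and skippable via NIR_VALIDATE=false
 */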
34
35 /* Since this file is just a pile of asserts, don't bother compiling it if
36 * we're not building a debug build.
37 */
38 #ifndef NDEBUG
39
40 /*
41 * Per-register validation state.
42 */
43
44 typedef struct {
45 /*
46 * equivalent to the uses and defs in nir_register, but built up by the
47 * validator. At the end, we verify that the sets have the same entries.
48 */
49 struct set *uses, *if_uses, *defs;
50 nir_function_impl *where_defined; /* NULL for global registers */
51 } reg_validate_state;
52
53 typedef struct {
54 /*
55 * equivalent to the uses in nir_ssa_def, but built up by the validator.
56 * At the end, we verify that the sets have the same entries.
57 */
58 struct set *uses, *if_uses;
59 nir_function_impl *where_defined;
60 } ssa_def_validate_state;
61
62 typedef struct {
63 /* map of register -> validation state (struct above) */
64 struct hash_table *regs;
65
66 /* the current shader being validated */
67 nir_shader *shader;
68
69 /* the current instruction being validated */
70 nir_instr *instr;
71
72 /* the current variable being validated */
73 nir_variable *var;
74
75 /* the current basic block being validated */
76 nir_block *block;
77
78 /* the current if statement being validated */
79 nir_if *if_stmt;
80
81 /* the current loop being visited */
82 nir_loop *loop;
83
84 /* the parent of the current cf node being visited */
85 nir_cf_node *parent_node;
86
87 /* the current function implementation being validated */
88 nir_function_impl *impl;
89
90 /* map of SSA value -> function implementation where it is defined */
91 struct hash_table *ssa_defs;
92
93 /* bitset of ssa definitions we have found; used to check uniqueness */
94 BITSET_WORD *ssa_defs_found;
95
96 /* bitset of registers we have currently found; used to check uniqueness */
97 BITSET_WORD *regs_found;
98
99 /* map of variable -> function implementation where it is defined or NULL
100 * if it is a global variable
101 */
102 struct hash_table *var_defs;
103
104 /* map of instruction/var/etc to failed assert string */
105 struct hash_table *errors;
106 } validate_state;
107
108 static void
109 log_error(validate_state *state, const char *cond, const char *file, int line)
110 {
111 const void *obj;
112
113 if (state->instr)
114 obj = state->instr;
115 else if (state->var)
116 obj = state->var;
117 else
118 obj = cond;
119
120 char *msg = ralloc_asprintf(state->errors, "error: %s (%s:%d)",
121 cond, file, line);
122
123 _mesa_hash_table_insert(state->errors, obj, msg);
124 }
125
126 #define validate_assert(state, cond) do { \
127 if (!(cond)) \
128 log_error(state, #cond, __FILE__, __LINE__); \
129 } while (0)
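/*
 * Note on the error model (a summary of the code above, not additional API):
 * validate_assert() does not abort immediately.  A failed condition is
 * recorded in state->errors, keyed by the current instruction or variable,
 * and dump_errors() later prints the whole shader annotated with every
 * recorded failure before aborting.  For example,
 *
 *    validate_assert(state, src->ssa != NULL);
 *
 * logs the string "src->ssa != NULL" together with the file and line of the
 * check if src->ssa happens to be NULL.
 */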
130
131 static void validate_src(nir_src *src, validate_state *state,
132 unsigned bit_size, unsigned num_components);
133
134 static void
135 validate_reg_src(nir_src *src, validate_state *state,
136 unsigned bit_size, unsigned num_components)
137 {
138 validate_assert(state, src->reg.reg != NULL);
139
140 struct hash_entry *entry;
141 entry = _mesa_hash_table_search(state->regs, src->reg.reg);
142 validate_assert(state, entry);
143
144 reg_validate_state *reg_state = (reg_validate_state *) entry->data;
145
146 if (state->instr) {
147 _mesa_set_add(reg_state->uses, src);
148 } else {
149 validate_assert(state, state->if_stmt);
150 _mesa_set_add(reg_state->if_uses, src);
151 }
152
153 if (!src->reg.reg->is_global) {
154 validate_assert(state, reg_state->where_defined == state->impl &&
155 "using a register declared in a different function");
156 }
157
158 if (!src->reg.reg->is_packed) {
159 if (bit_size)
160 validate_assert(state, src->reg.reg->bit_size == bit_size);
161 if (num_components)
162 validate_assert(state, src->reg.reg->num_components == num_components);
163 }
164
165 validate_assert(state, (src->reg.reg->num_array_elems == 0 ||
166 src->reg.base_offset < src->reg.reg->num_array_elems) &&
167 "definitely out-of-bounds array access");
168
169 if (src->reg.indirect) {
170 validate_assert(state, src->reg.reg->num_array_elems != 0);
171 validate_assert(state, (src->reg.indirect->is_ssa ||
172 src->reg.indirect->reg.indirect == NULL) &&
173 "only one level of indirection allowed");
174 validate_src(src->reg.indirect, state, 32, 1);
175 }
176 }
177
178 static void
179 validate_ssa_src(nir_src *src, validate_state *state,
180 unsigned bit_size, unsigned num_components)
181 {
182 validate_assert(state, src->ssa != NULL);
183
184 struct hash_entry *entry = _mesa_hash_table_search(state->ssa_defs, src->ssa);
185
186 validate_assert(state, entry);
187
188 if (!entry)
189 return;
190
191 ssa_def_validate_state *def_state = (ssa_def_validate_state *)entry->data;
192
193 validate_assert(state, def_state->where_defined == state->impl &&
194 "using an SSA value defined in a different function");
195
196 if (state->instr) {
197 _mesa_set_add(def_state->uses, src);
198 } else {
199 validate_assert(state, state->if_stmt);
200 _mesa_set_add(def_state->if_uses, src);
201 }
202
203 if (bit_size)
204 validate_assert(state, src->ssa->bit_size == bit_size);
205 if (num_components)
206 validate_assert(state, src->ssa->num_components == num_components);
207
208 /* TODO validate that the use is dominated by the definition */
209 }
210
211 static void
212 validate_src(nir_src *src, validate_state *state,
213 unsigned bit_size, unsigned num_components)
214 {
215 if (state->instr)
216 validate_assert(state, src->parent_instr == state->instr);
217 else
218 validate_assert(state, src->parent_if == state->if_stmt);
219
220 if (src->is_ssa)
221 validate_ssa_src(src, state, bit_size, num_components);
222 else
223 validate_reg_src(src, state, bit_size, num_components);
224 }
225
226 static void
227 validate_alu_src(nir_alu_instr *instr, unsigned index, validate_state *state)
228 {
229 nir_alu_src *src = &instr->src[index];
230
231 unsigned num_components = nir_src_num_components(src->src);
232 if (!src->src.is_ssa && src->src.reg.reg->is_packed)
233 num_components = 4; /* can't check anything */
234 for (unsigned i = 0; i < 4; i++) {
235 validate_assert(state, src->swizzle[i] < 4);
236
237 if (nir_alu_instr_channel_used(instr, index, i))
238 validate_assert(state, src->swizzle[i] < num_components);
239 }
240
241 validate_src(&src->src, state, 0, 0);
242 }
243
244 static void
245 validate_reg_dest(nir_reg_dest *dest, validate_state *state,
246 unsigned bit_size, unsigned num_components)
247 {
248 validate_assert(state, dest->reg != NULL);
249
250 validate_assert(state, dest->parent_instr == state->instr);
251
252 struct hash_entry *entry2;
253 entry2 = _mesa_hash_table_search(state->regs, dest->reg);
254
255 validate_assert(state, entry2);
256
257 reg_validate_state *reg_state = (reg_validate_state *) entry2->data;
258 _mesa_set_add(reg_state->defs, dest);
259
260 if (!dest->reg->is_global) {
261 validate_assert(state, reg_state->where_defined == state->impl &&
262 "writing to a register declared in a different function");
263 }
264
265 if (!dest->reg->is_packed) {
266 if (bit_size)
267 validate_assert(state, dest->reg->bit_size == bit_size);
268 if (num_components)
269 validate_assert(state, dest->reg->num_components == num_components);
270 }
271
272 validate_assert(state, (dest->reg->num_array_elems == 0 ||
273 dest->base_offset < dest->reg->num_array_elems) &&
274 "definitely out-of-bounds array access");
275
276 if (dest->indirect) {
277 validate_assert(state, dest->reg->num_array_elems != 0);
278 validate_assert(state, (dest->indirect->is_ssa || dest->indirect->reg.indirect == NULL) &&
279 "only one level of indirection allowed");
280 validate_src(dest->indirect, state, 32, 1);
281 }
282 }
283
284 static void
285 validate_ssa_def(nir_ssa_def *def, validate_state *state)
286 {
287 validate_assert(state, def->index < state->impl->ssa_alloc);
288 validate_assert(state, !BITSET_TEST(state->ssa_defs_found, def->index));
289 BITSET_SET(state->ssa_defs_found, def->index);
290
291 validate_assert(state, def->parent_instr == state->instr);
292
293 validate_assert(state, (def->num_components <= 4) ||
294 (def->num_components == 8) ||
295 (def->num_components == 16));
296
297 list_validate(&def->uses);
298 list_validate(&def->if_uses);
299
300 ssa_def_validate_state *def_state = ralloc(state->ssa_defs,
301 ssa_def_validate_state);
302 def_state->where_defined = state->impl;
303 def_state->uses = _mesa_set_create(def_state, _mesa_hash_pointer,
304 _mesa_key_pointer_equal);
305 def_state->if_uses = _mesa_set_create(def_state, _mesa_hash_pointer,
306 _mesa_key_pointer_equal);
307 _mesa_hash_table_insert(state->ssa_defs, def, def_state);
308 }
309
310 static void
311 validate_dest(nir_dest *dest, validate_state *state,
312 unsigned bit_size, unsigned num_components)
313 {
314 if (dest->is_ssa) {
315 if (bit_size)
316 validate_assert(state, dest->ssa.bit_size == bit_size);
317 if (num_components)
318 validate_assert(state, dest->ssa.num_components == num_components);
319 validate_ssa_def(&dest->ssa, state);
320 } else {
321 validate_reg_dest(&dest->reg, state, bit_size, num_components);
322 }
323 }
324
325 static void
326 validate_alu_dest(nir_alu_instr *instr, validate_state *state)
327 {
328 nir_alu_dest *dest = &instr->dest;
329
330 unsigned dest_size = nir_dest_num_components(dest->dest);
331 bool is_packed = !dest->dest.is_ssa && dest->dest.reg.reg->is_packed;
332 /*
333 * validate that the instruction doesn't write to components not in the
334 * register/SSA value
335 */
336 validate_assert(state, is_packed || !(dest->write_mask & ~((1 << dest_size) - 1)));
337
338 /* validate that saturate is only ever used on instructions with
339 * destinations of type float
340 */
341 nir_alu_instr *alu = nir_instr_as_alu(state->instr);
342 validate_assert(state,
343 (nir_alu_type_get_base_type(nir_op_infos[alu->op].output_type) ==
344 nir_type_float) ||
345 !dest->saturate);
346
347 validate_dest(&dest->dest, state, 0, 0);
348 }
349
350 static void
351 validate_alu_instr(nir_alu_instr *instr, validate_state *state)
352 {
353 validate_assert(state, instr->op < nir_num_opcodes);
354
355 unsigned instr_bit_size = 0;
356 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
357 nir_alu_type src_type = nir_op_infos[instr->op].input_types[i];
358 unsigned src_bit_size = nir_src_bit_size(instr->src[i].src);
359 if (nir_alu_type_get_type_size(src_type)) {
360 validate_assert(state, src_bit_size == nir_alu_type_get_type_size(src_type));
361 } else if (instr_bit_size) {
362 validate_assert(state, src_bit_size == instr_bit_size);
363 } else {
364 instr_bit_size = src_bit_size;
365 }
366
367 if (nir_alu_type_get_base_type(src_type) == nir_type_float) {
368 /* 8-bit float isn't a thing */
369 validate_assert(state, src_bit_size == 16 || src_bit_size == 32 ||
370 src_bit_size == 64);
371 }
372
373 validate_alu_src(instr, i, state);
374 }
375
376 nir_alu_type dest_type = nir_op_infos[instr->op].output_type;
377 unsigned dest_bit_size = nir_dest_bit_size(instr->dest.dest);
378 if (nir_alu_type_get_type_size(dest_type)) {
379 validate_assert(state, dest_bit_size == nir_alu_type_get_type_size(dest_type));
380 } else if (instr_bit_size) {
381 validate_assert(state, dest_bit_size == instr_bit_size);
382 } else {
383 /* The only unsized thing is the destination so it's vacuously valid */
384 }
385
386 if (nir_alu_type_get_base_type(dest_type) == nir_type_float) {
387 /* 8-bit float isn't a thing */
388 validate_assert(state, dest_bit_size == 16 || dest_bit_size == 32 ||
389 dest_bit_size == 64);
390 }
391
392 validate_alu_dest(instr, state);
393 }
394
395 static void
396 validate_deref_chain(nir_deref *deref, nir_variable_mode mode,
397 validate_state *state)
398 {
399 validate_assert(state, deref->child == NULL || ralloc_parent(deref->child) == deref);
400
401 nir_deref *parent = NULL;
402 while (deref != NULL) {
403 switch (deref->deref_type) {
404 case nir_deref_type_array:
405 if (mode == nir_var_shared) {
406 /* Shared variables have slightly more relaxed rules because we need
407 * to be able to handle array derefs on vectors. Fortunately,
408 * nir_lower_io handles these just fine.
409 */
410 validate_assert(state, glsl_type_is_array(parent->type) ||
411 glsl_type_is_matrix(parent->type) ||
412 glsl_type_is_vector(parent->type));
413 } else {
414 /* Most of NIR cannot handle array derefs on vectors */
415 validate_assert(state, glsl_type_is_array(parent->type) ||
416 glsl_type_is_matrix(parent->type));
417 }
418 validate_assert(state, deref->type == glsl_get_array_element(parent->type));
419 if (nir_deref_as_array(deref)->deref_array_type ==
420 nir_deref_array_type_indirect)
421 validate_src(&nir_deref_as_array(deref)->indirect, state, 32, 1);
422 break;
423
424 case nir_deref_type_struct:
425 assume(parent); /* cannot happen: deref chain starts w/ nir_deref_var */
426 validate_assert(state, deref->type ==
427 glsl_get_struct_field(parent->type,
428 nir_deref_as_struct(deref)->index));
429 break;
430
431 case nir_deref_type_var:
432 break;
433
434 default:
435 validate_assert(state, !"Invalid deref type");
436 break;
437 }
438
439 parent = deref;
440 deref = deref->child;
441 }
442 }
443
444 static void
445 validate_var_use(nir_variable *var, validate_state *state)
446 {
447 struct hash_entry *entry = _mesa_hash_table_search(state->var_defs, var);
448 validate_assert(state, entry);
449 if (var->data.mode == nir_var_local)
450 validate_assert(state, (nir_function_impl *) entry->data == state->impl);
451 }
452
453 static void
454 validate_deref_var(void *parent_mem_ctx, nir_deref_var *deref, validate_state *state)
455 {
456 validate_assert(state, deref != NULL);
457 validate_assert(state, ralloc_parent(deref) == parent_mem_ctx);
458 validate_assert(state, deref->deref.type == deref->var->type);
459
460 validate_var_use(deref->var, state);
461
462 validate_deref_chain(&deref->deref, deref->var->data.mode, state);
463 }
464
465 static void
466 validate_deref_instr(nir_deref_instr *instr, validate_state *state)
467 {
468 if (instr->deref_type == nir_deref_type_var) {
469 /* Variable dereferences are stupid simple. */
470 validate_assert(state, instr->mode == instr->var->data.mode);
471 validate_assert(state, instr->type == instr->var->type);
472 validate_var_use(instr->var, state);
473 } else if (instr->deref_type == nir_deref_type_cast) {
474 /* For cast, we simply have to trust the instruction. It's up to
475 * lowering passes and front/back-ends to make them sane.
476 */
477 validate_src(&instr->parent, state, 0, 0);
478
479 /* We just validate that the type and mode are there */
480 validate_assert(state, instr->mode);
481 validate_assert(state, instr->type);
482 } else {
483 /* We require the parent to be SSA. This may be lifted in the future */
484 validate_assert(state, instr->parent.is_ssa);
485
486 /* The parent pointer value must have the same number of components
487 * as the destination.
488 */
489 validate_src(&instr->parent, state, nir_dest_bit_size(instr->dest),
490 nir_dest_num_components(instr->dest));
491
492 nir_instr *parent_instr = instr->parent.ssa->parent_instr;
493
494 /* The parent must come from another deref instruction */
495 validate_assert(state, parent_instr->type == nir_instr_type_deref);
496
497 nir_deref_instr *parent = nir_instr_as_deref(parent_instr);
498
499 validate_assert(state, instr->mode == parent->mode);
500
501 switch (instr->deref_type) {
502 case nir_deref_type_struct:
503 validate_assert(state, glsl_type_is_struct(parent->type));
504 validate_assert(state,
505 instr->strct.index < glsl_get_length(parent->type));
506 validate_assert(state, instr->type ==
507 glsl_get_struct_field(parent->type, instr->strct.index));
508 break;
509
510 case nir_deref_type_array:
511 case nir_deref_type_array_wildcard:
512 if (instr->mode == nir_var_shared) {
513 /* Shared variables have slightly more relaxed rules because we need
514 * to be able to handle array derefs on vectors. Fortunately,
515 * nir_lower_io handles these just fine.
516 */
517 validate_assert(state, glsl_type_is_array(parent->type) ||
518 glsl_type_is_matrix(parent->type) ||
519 glsl_type_is_vector(parent->type));
520 } else {
521 /* Most of NIR cannot handle array derefs on vectors */
522 validate_assert(state, glsl_type_is_array(parent->type) ||
523 glsl_type_is_matrix(parent->type));
524 }
525 validate_assert(state,
526 instr->type == glsl_get_array_element(parent->type));
527
528 if (instr->deref_type == nir_deref_type_array)
529 validate_src(&instr->arr.index, state, 32, 1);
530 break;
531
532 default:
533 unreachable("Invalid deref instruction type");
534 }
535 }
536
537 /* We intentionally don't validate the size of the destination because we
538 * want to let other compiler components such as SPIR-V decide how big
539 * pointers should be.
540 */
541 validate_dest(&instr->dest, state, 0, 0);
542 }
543
544 static void
545 validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state)
546 {
547 unsigned dest_bit_size = 0;
548 unsigned src_bit_sizes[NIR_INTRINSIC_MAX_INPUTS] = { 0, };
549 switch (instr->intrinsic) {
550 case nir_intrinsic_load_param: {
551 unsigned param_idx = nir_intrinsic_param_idx(instr);
552 validate_assert(state, param_idx < state->impl->function->num_params);
553 nir_parameter *param = &state->impl->function->params[param_idx];
554 validate_assert(state, instr->num_components == param->num_components);
555 dest_bit_size = param->bit_size;
556 break;
557 }
558
559 case nir_intrinsic_load_deref: {
560 nir_deref_instr *src = nir_src_as_deref(instr->src[0]);
561 validate_assert(state, glsl_type_is_vector_or_scalar(src->type) ||
562 (src->mode == nir_var_uniform &&
563 glsl_get_base_type(src->type) == GLSL_TYPE_SUBROUTINE));
564 validate_assert(state, instr->num_components ==
565 glsl_get_vector_elements(src->type));
566 dest_bit_size = glsl_get_bit_size(src->type);
567 break;
568 }
569
570 case nir_intrinsic_store_deref: {
571 nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
572 validate_assert(state, glsl_type_is_vector_or_scalar(dst->type));
573 validate_assert(state, instr->num_components ==
574 glsl_get_vector_elements(dst->type));
575 src_bit_sizes[1] = glsl_get_bit_size(dst->type);
576 validate_assert(state, (dst->mode & (nir_var_shader_in |
577 nir_var_uniform |
578 nir_var_shader_storage)) == 0);
579 validate_assert(state, (nir_intrinsic_write_mask(instr) & ~((1 << instr->num_components) - 1)) == 0);
580 break;
581 }
582
583 case nir_intrinsic_copy_deref: {
584 nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
585 nir_deref_instr *src = nir_src_as_deref(instr->src[1]);
586 validate_assert(state, dst->type == src->type);
587 validate_assert(state, (dst->mode & (nir_var_shader_in |
588 nir_var_uniform |
589 nir_var_shader_storage)) == 0);
590 break;
591 }
592
593 case nir_intrinsic_load_var: {
594 const struct glsl_type *type =
595 nir_deref_tail(&instr->variables[0]->deref)->type;
596 validate_assert(state, glsl_type_is_vector_or_scalar(type) ||
597 (instr->variables[0]->var->data.mode == nir_var_uniform &&
598 glsl_get_base_type(type) == GLSL_TYPE_SUBROUTINE));
599 validate_assert(state, instr->num_components ==
600 glsl_get_vector_elements(type));
601 dest_bit_size = glsl_get_bit_size(type);
602 break;
603 }
604
605 case nir_intrinsic_store_var: {
606 const struct glsl_type *type =
607 nir_deref_tail(&instr->variables[0]->deref)->type;
608 validate_assert(state, glsl_type_is_vector_or_scalar(type) ||
609 (instr->variables[0]->var->data.mode == nir_var_uniform &&
610 glsl_get_base_type(type) == GLSL_TYPE_SUBROUTINE));
611 validate_assert(state, instr->num_components == glsl_get_vector_elements(type));
612 src_bit_sizes[0] = glsl_get_bit_size(type);
613 validate_assert(state, instr->variables[0]->var->data.mode != nir_var_shader_in &&
614 instr->variables[0]->var->data.mode != nir_var_uniform &&
615 instr->variables[0]->var->data.mode != nir_var_shader_storage);
616 validate_assert(state, (nir_intrinsic_write_mask(instr) & ~((1 << instr->num_components) - 1)) == 0);
617 break;
618 }
619
620 case nir_intrinsic_copy_var:
621 validate_assert(state, nir_deref_tail(&instr->variables[0]->deref)->type ==
622 nir_deref_tail(&instr->variables[1]->deref)->type);
623 validate_assert(state, instr->variables[0]->var->data.mode != nir_var_shader_in &&
624 instr->variables[0]->var->data.mode != nir_var_uniform &&
625 instr->variables[0]->var->data.mode != nir_var_shader_storage);
626 break;
627
628 default:
629 break;
630 }
631
632 unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs;
633 for (unsigned i = 0; i < num_srcs; i++) {
634 unsigned components_read = nir_intrinsic_src_components(instr, i);
635
636 validate_assert(state, components_read > 0);
637
638 validate_src(&instr->src[i], state, src_bit_sizes[i], components_read);
639 }
640
641 unsigned num_vars = nir_intrinsic_infos[instr->intrinsic].num_variables;
642 for (unsigned i = 0; i < num_vars; i++) {
643 validate_deref_var(instr, instr->variables[i], state);
644 }
645
646 if (nir_intrinsic_infos[instr->intrinsic].has_dest) {
647 unsigned components_written = nir_intrinsic_dest_components(instr);
648
649 validate_assert(state, components_written > 0);
650
651 validate_dest(&instr->dest, state, dest_bit_size, components_written);
652 }
653 }
654
655 static void
656 validate_tex_instr(nir_tex_instr *instr, validate_state *state)
657 {
658 bool src_type_seen[nir_num_tex_src_types];
659 for (unsigned i = 0; i < nir_num_tex_src_types; i++)
660 src_type_seen[i] = false;
661
662 for (unsigned i = 0; i < instr->num_srcs; i++) {
663 validate_assert(state, !src_type_seen[instr->src[i].src_type]);
664 src_type_seen[instr->src[i].src_type] = true;
665 validate_src(&instr->src[i].src, state,
666 0, nir_tex_instr_src_size(instr, i));
667 }
668
669 if (instr->texture != NULL)
670 validate_deref_var(instr, instr->texture, state);
671
672 if (instr->sampler != NULL)
673 validate_deref_var(instr, instr->sampler, state);
674
675 validate_dest(&instr->dest, state, 0, nir_tex_instr_dest_size(instr));
676 }
677
678 static void
679 validate_call_instr(nir_call_instr *instr, validate_state *state)
680 {
681 validate_assert(state, instr->num_params == instr->callee->num_params);
682
683 for (unsigned i = 0; i < instr->num_params; i++) {
684 validate_src(&instr->params[i], state,
685 instr->callee->params[i].bit_size,
686 instr->callee->params[i].num_components);
687 }
688 }
689
690 static void
691 validate_load_const_instr(nir_load_const_instr *instr, validate_state *state)
692 {
693 validate_ssa_def(&instr->def, state);
694 }
695
696 static void
697 validate_ssa_undef_instr(nir_ssa_undef_instr *instr, validate_state *state)
698 {
699 validate_ssa_def(&instr->def, state);
700 }
701
702 static void
703 validate_phi_instr(nir_phi_instr *instr, validate_state *state)
704 {
705 /*
706 * don't validate the sources until we get to them from their predecessor
707 * basic blocks, to avoid validating an SSA use before its definition.
708 */
709
710 validate_dest(&instr->dest, state, 0, 0);
711
712 exec_list_validate(&instr->srcs);
713 validate_assert(state, exec_list_length(&instr->srcs) ==
714 state->block->predecessors->entries);
715 }
716
717 static void
718 validate_instr(nir_instr *instr, validate_state *state)
719 {
720 validate_assert(state, instr->block == state->block);
721
722 state->instr = instr;
723
724 switch (instr->type) {
725 case nir_instr_type_alu:
726 validate_alu_instr(nir_instr_as_alu(instr), state);
727 break;
728
729 case nir_instr_type_deref:
730 validate_deref_instr(nir_instr_as_deref(instr), state);
731 break;
732
733 case nir_instr_type_call:
734 validate_call_instr(nir_instr_as_call(instr), state);
735 break;
736
737 case nir_instr_type_intrinsic:
738 validate_intrinsic_instr(nir_instr_as_intrinsic(instr), state);
739 break;
740
741 case nir_instr_type_tex:
742 validate_tex_instr(nir_instr_as_tex(instr), state);
743 break;
744
745 case nir_instr_type_load_const:
746 validate_load_const_instr(nir_instr_as_load_const(instr), state);
747 break;
748
749 case nir_instr_type_phi:
750 validate_phi_instr(nir_instr_as_phi(instr), state);
751 break;
752
753 case nir_instr_type_ssa_undef:
754 validate_ssa_undef_instr(nir_instr_as_ssa_undef(instr), state);
755 break;
756
757 case nir_instr_type_jump:
758 break;
759
760 default:
761 validate_assert(state, !"Invalid instruction type");
762 break;
763 }
764
765 state->instr = NULL;
766 }
767
768 static void
769 validate_phi_src(nir_phi_instr *instr, nir_block *pred, validate_state *state)
770 {
771 state->instr = &instr->instr;
772
773 validate_assert(state, instr->dest.is_ssa);
774
775 exec_list_validate(&instr->srcs);
776 nir_foreach_phi_src(src, instr) {
777 if (src->pred == pred) {
778 validate_assert(state, src->src.is_ssa);
779 validate_src(&src->src, state, instr->dest.ssa.bit_size,
780 instr->dest.ssa.num_components);
781 state->instr = NULL;
782 return;
783 }
784 }
785
786 abort();
787 }
788
789 static void
790 validate_phi_srcs(nir_block *block, nir_block *succ, validate_state *state)
791 {
792 nir_foreach_instr(instr, succ) {
793 if (instr->type != nir_instr_type_phi)
794 break;
795
796 validate_phi_src(nir_instr_as_phi(instr), block, state);
797 }
798 }
799
800 static void validate_cf_node(nir_cf_node *node, validate_state *state);
801
802 static void
803 validate_block(nir_block *block, validate_state *state)
804 {
805 validate_assert(state, block->cf_node.parent == state->parent_node);
806
807 state->block = block;
808
809 exec_list_validate(&block->instr_list);
810 nir_foreach_instr(instr, block) {
811 if (instr->type == nir_instr_type_phi) {
812 validate_assert(state, instr == nir_block_first_instr(block) ||
813 nir_instr_prev(instr)->type == nir_instr_type_phi);
814 }
815
816 if (instr->type == nir_instr_type_jump) {
817 validate_assert(state, instr == nir_block_last_instr(block));
818 }
819
820 validate_instr(instr, state);
821 }
822
823 validate_assert(state, block->successors[0] != NULL);
824 validate_assert(state, block->successors[0] != block->successors[1]);
825
826 for (unsigned i = 0; i < 2; i++) {
827 if (block->successors[i] != NULL) {
828 struct set_entry *entry =
829 _mesa_set_search(block->successors[i]->predecessors, block);
830 validate_assert(state, entry);
831
832 validate_phi_srcs(block, block->successors[i], state);
833 }
834 }
835
836 struct set_entry *entry;
837 set_foreach(block->predecessors, entry) {
838 const nir_block *pred = entry->key;
839 validate_assert(state, pred->successors[0] == block ||
840 pred->successors[1] == block);
841 }
842
843 if (!exec_list_is_empty(&block->instr_list) &&
844 nir_block_last_instr(block)->type == nir_instr_type_jump) {
845 validate_assert(state, block->successors[1] == NULL);
846 nir_jump_instr *jump = nir_instr_as_jump(nir_block_last_instr(block));
847 switch (jump->type) {
848 case nir_jump_break: {
849 nir_block *after =
850 nir_cf_node_as_block(nir_cf_node_next(&state->loop->cf_node));
851 validate_assert(state, block->successors[0] == after);
852 break;
853 }
854
855 case nir_jump_continue: {
856 nir_block *first = nir_loop_first_block(state->loop);
857 validate_assert(state, block->successors[0] == first);
858 break;
859 }
860
861 case nir_jump_return:
862 validate_assert(state, block->successors[0] == state->impl->end_block);
863 break;
864
865 default:
866 unreachable("bad jump type");
867 }
868 } else {
869 nir_cf_node *next = nir_cf_node_next(&block->cf_node);
870 if (next == NULL) {
871 switch (state->parent_node->type) {
872 case nir_cf_node_loop: {
873 nir_block *first = nir_loop_first_block(state->loop);
874 validate_assert(state, block->successors[0] == first);
875 /* due to the hack for infinite loops, block->successors[1] may
876 * point to the block after the loop.
877 */
878 break;
879 }
880
881 case nir_cf_node_if: {
882 nir_block *after =
883 nir_cf_node_as_block(nir_cf_node_next(state->parent_node));
884 validate_assert(state, block->successors[0] == after);
885 validate_assert(state, block->successors[1] == NULL);
886 break;
887 }
888
889 case nir_cf_node_function:
890 validate_assert(state, block->successors[0] == state->impl->end_block);
891 validate_assert(state, block->successors[1] == NULL);
892 break;
893
894 default:
895 unreachable("unknown control flow node type");
896 }
897 } else {
898 if (next->type == nir_cf_node_if) {
899 nir_if *if_stmt = nir_cf_node_as_if(next);
900 validate_assert(state, block->successors[0] ==
901 nir_if_first_then_block(if_stmt));
902 validate_assert(state, block->successors[1] ==
903 nir_if_first_else_block(if_stmt));
904 } else {
905 validate_assert(state, next->type == nir_cf_node_loop);
906 nir_loop *loop = nir_cf_node_as_loop(next);
907 validate_assert(state, block->successors[0] ==
908 nir_loop_first_block(loop));
909 validate_assert(state, block->successors[1] == NULL);
910 }
911 }
912 }
913 }
914
915 static void
916 validate_if(nir_if *if_stmt, validate_state *state)
917 {
918 state->if_stmt = if_stmt;
919
920 validate_assert(state, !exec_node_is_head_sentinel(if_stmt->cf_node.node.prev));
921 nir_cf_node *prev_node = nir_cf_node_prev(&if_stmt->cf_node);
922 validate_assert(state, prev_node->type == nir_cf_node_block);
923
924 validate_assert(state, !exec_node_is_tail_sentinel(if_stmt->cf_node.node.next));
925 nir_cf_node *next_node = nir_cf_node_next(&if_stmt->cf_node);
926 validate_assert(state, next_node->type == nir_cf_node_block);
927
928 validate_src(&if_stmt->condition, state, 32, 1);
929
930 validate_assert(state, !exec_list_is_empty(&if_stmt->then_list));
931 validate_assert(state, !exec_list_is_empty(&if_stmt->else_list));
932
933 nir_cf_node *old_parent = state->parent_node;
934 state->parent_node = &if_stmt->cf_node;
935
936 exec_list_validate(&if_stmt->then_list);
937 foreach_list_typed(nir_cf_node, cf_node, node, &if_stmt->then_list) {
938 validate_cf_node(cf_node, state);
939 }
940
941 exec_list_validate(&if_stmt->else_list);
942 foreach_list_typed(nir_cf_node, cf_node, node, &if_stmt->else_list) {
943 validate_cf_node(cf_node, state);
944 }
945
946 state->parent_node = old_parent;
947 state->if_stmt = NULL;
948 }
949
950 static void
951 validate_loop(nir_loop *loop, validate_state *state)
952 {
953 validate_assert(state, !exec_node_is_head_sentinel(loop->cf_node.node.prev));
954 nir_cf_node *prev_node = nir_cf_node_prev(&loop->cf_node);
955 validate_assert(state, prev_node->type == nir_cf_node_block);
956
957 validate_assert(state, !exec_node_is_tail_sentinel(loop->cf_node.node.next));
958 nir_cf_node *next_node = nir_cf_node_next(&loop->cf_node);
959 validate_assert(state, next_node->type == nir_cf_node_block);
960
961 validate_assert(state, !exec_list_is_empty(&loop->body));
962
963 nir_cf_node *old_parent = state->parent_node;
964 state->parent_node = &loop->cf_node;
965 nir_loop *old_loop = state->loop;
966 state->loop = loop;
967
968 exec_list_validate(&loop->body);
969 foreach_list_typed(nir_cf_node, cf_node, node, &loop->body) {
970 validate_cf_node(cf_node, state);
971 }
972
973 state->parent_node = old_parent;
974 state->loop = old_loop;
975 }
976
977 static void
978 validate_cf_node(nir_cf_node *node, validate_state *state)
979 {
980 validate_assert(state, node->parent == state->parent_node);
981
982 switch (node->type) {
983 case nir_cf_node_block:
984 validate_block(nir_cf_node_as_block(node), state);
985 break;
986
987 case nir_cf_node_if:
988 validate_if(nir_cf_node_as_if(node), state);
989 break;
990
991 case nir_cf_node_loop:
992 validate_loop(nir_cf_node_as_loop(node), state);
993 break;
994
995 default:
996 unreachable("Invalid CF node type");
997 }
998 }
999
1000 static void
1001 prevalidate_reg_decl(nir_register *reg, bool is_global, validate_state *state)
1002 {
1003 validate_assert(state, reg->is_global == is_global);
1004
1005 if (is_global)
1006 validate_assert(state, reg->index < state->shader->reg_alloc);
1007 else
1008 validate_assert(state, reg->index < state->impl->reg_alloc);
1009 validate_assert(state, !BITSET_TEST(state->regs_found, reg->index));
1010 BITSET_SET(state->regs_found, reg->index);
1011
1012 list_validate(&reg->uses);
1013 list_validate(&reg->defs);
1014 list_validate(&reg->if_uses);
1015
1016 reg_validate_state *reg_state = ralloc(state->regs, reg_validate_state);
1017 reg_state->uses = _mesa_set_create(reg_state, _mesa_hash_pointer,
1018 _mesa_key_pointer_equal);
1019 reg_state->if_uses = _mesa_set_create(reg_state, _mesa_hash_pointer,
1020 _mesa_key_pointer_equal);
1021 reg_state->defs = _mesa_set_create(reg_state, _mesa_hash_pointer,
1022 _mesa_key_pointer_equal);
1023
1024 reg_state->where_defined = is_global ? NULL : state->impl;
1025
1026 _mesa_hash_table_insert(state->regs, reg, reg_state);
1027 }
1028
1029 static void
1030 postvalidate_reg_decl(nir_register *reg, validate_state *state)
1031 {
1032 struct hash_entry *entry = _mesa_hash_table_search(state->regs, reg);
1033
1034 assume(entry);
1035 reg_validate_state *reg_state = (reg_validate_state *) entry->data;
1036
1037 nir_foreach_use(src, reg) {
1038 struct set_entry *entry = _mesa_set_search(reg_state->uses, src);
1039 validate_assert(state, entry);
1040 _mesa_set_remove(reg_state->uses, entry);
1041 }
1042
1043 if (reg_state->uses->entries != 0) {
1044 printf("extra entries in register uses:\n");
1045 struct set_entry *entry;
1046 set_foreach(reg_state->uses, entry)
1047 printf("%p\n", entry->key);
1048
1049 abort();
1050 }
1051
1052 nir_foreach_if_use(src, reg) {
1053 struct set_entry *entry = _mesa_set_search(reg_state->if_uses, src);
1054 validate_assert(state, entry);
1055 _mesa_set_remove(reg_state->if_uses, entry);
1056 }
1057
1058 if (reg_state->if_uses->entries != 0) {
1059 printf("extra entries in register if_uses:\n");
1060 struct set_entry *entry;
1061 set_foreach(reg_state->if_uses, entry)
1062 printf("%p\n", entry->key);
1063
1064 abort();
1065 }
1066
1067 nir_foreach_def(src, reg) {
1068 struct set_entry *entry = _mesa_set_search(reg_state->defs, src);
1069 validate_assert(state, entry);
1070 _mesa_set_remove(reg_state->defs, entry);
1071 }
1072
1073 if (reg_state->defs->entries != 0) {
1074 printf("extra entries in register defs:\n");
1075 struct set_entry *entry;
1076 set_foreach(reg_state->defs, entry)
1077 printf("%p\n", entry->key);
1078
1079 abort();
1080 }
1081 }
1082
1083 static void
1084 validate_var_decl(nir_variable *var, bool is_global, validate_state *state)
1085 {
1086 state->var = var;
1087
1088 validate_assert(state, is_global == nir_variable_is_global(var));
1089
1090 /* Must have exactly one mode set */
1091 validate_assert(state, util_is_power_of_two_nonzero(var->data.mode));
1092
1093 if (var->data.compact) {
1094 /* The "compact" flag is only valid on arrays of scalars. */
1095 assert(glsl_type_is_array(var->type));
1096
1097 const struct glsl_type *type = glsl_get_array_element(var->type);
1098 if (nir_is_per_vertex_io(var, state->shader->info.stage)) {
1099 assert(glsl_type_is_array(type));
1100 assert(glsl_type_is_scalar(glsl_get_array_element(type)));
1101 } else {
1102 assert(glsl_type_is_scalar(type));
1103 }
1104 }
1105
1106 if (var->num_members > 0) {
1107 const struct glsl_type *without_array = glsl_without_array(var->type);
1108 validate_assert(state, glsl_type_is_struct(without_array));
1109 validate_assert(state, var->num_members == glsl_get_length(without_array));
1110 validate_assert(state, var->members != NULL);
1111 }
1112
1113 /*
1114 * TODO validate some things ir_validate.cpp does (requires more GLSL type
1115 * support)
1116 */
1117
1118 _mesa_hash_table_insert(state->var_defs, var,
1119 is_global ? NULL : state->impl);
1120
1121 state->var = NULL;
1122 }
1123
1124 static bool
1125 postvalidate_ssa_def(nir_ssa_def *def, void *void_state)
1126 {
1127 validate_state *state = void_state;
1128
1129 struct hash_entry *entry = _mesa_hash_table_search(state->ssa_defs, def);
1130
1131 assume(entry);
1132 ssa_def_validate_state *def_state = (ssa_def_validate_state *)entry->data;
1133
1134 nir_foreach_use(src, def) {
1135 struct set_entry *entry = _mesa_set_search(def_state->uses, src);
1136 validate_assert(state, entry);
1137 _mesa_set_remove(def_state->uses, entry);
1138 }
1139
1140 if (def_state->uses->entries != 0) {
1141 printf("extra entries in SSA def uses:\n");
1142 struct set_entry *entry;
1143 set_foreach(def_state->uses, entry)
1144 printf("%p\n", entry->key);
1145
1146 abort();
1147 }
1148
1149 nir_foreach_if_use(src, def) {
1150 struct set_entry *entry = _mesa_set_search(def_state->if_uses, src);
1151 validate_assert(state, entry);
1152 _mesa_set_remove(def_state->if_uses, entry);
1153 }
1154
1155 if (def_state->if_uses->entries != 0) {
1156 printf("extra entries in SSA def if_uses:\n");
1157 struct set_entry *entry;
1158 set_foreach(def_state->if_uses, entry)
1159 printf("%p\n", entry->key);
1160
1161 abort();
1162 }
1163
1164 return true;
1165 }
1166
1167 static void
1168 validate_function_impl(nir_function_impl *impl, validate_state *state)
1169 {
1170 validate_assert(state, impl->function->impl == impl);
1171 validate_assert(state, impl->cf_node.parent == NULL);
1172
1173 validate_assert(state, exec_list_is_empty(&impl->end_block->instr_list));
1174 validate_assert(state, impl->end_block->successors[0] == NULL);
1175 validate_assert(state, impl->end_block->successors[1] == NULL);
1176
1177 state->impl = impl;
1178 state->parent_node = &impl->cf_node;
1179
1180 exec_list_validate(&impl->locals);
1181 nir_foreach_variable(var, &impl->locals) {
1182 validate_var_decl(var, false, state);
1183 }
1184
1185 state->regs_found = realloc(state->regs_found,
1186 BITSET_WORDS(impl->reg_alloc) *
1187 sizeof(BITSET_WORD));
1188 memset(state->regs_found, 0, BITSET_WORDS(impl->reg_alloc) *
1189 sizeof(BITSET_WORD));
1190 exec_list_validate(&impl->registers);
1191 foreach_list_typed(nir_register, reg, node, &impl->registers) {
1192 prevalidate_reg_decl(reg, false, state);
1193 }
1194
1195 state->ssa_defs_found = realloc(state->ssa_defs_found,
1196 BITSET_WORDS(impl->ssa_alloc) *
1197 sizeof(BITSET_WORD));
1198 memset(state->ssa_defs_found, 0, BITSET_WORDS(impl->ssa_alloc) *
1199 sizeof(BITSET_WORD));
1200 exec_list_validate(&impl->body);
1201 foreach_list_typed(nir_cf_node, node, node, &impl->body) {
1202 validate_cf_node(node, state);
1203 }
1204
1205 foreach_list_typed(nir_register, reg, node, &impl->registers) {
1206 postvalidate_reg_decl(reg, state);
1207 }
1208
1209 nir_foreach_block(block, impl) {
1210 nir_foreach_instr(instr, block)
1211 nir_foreach_ssa_def(instr, postvalidate_ssa_def, state);
1212 }
1213 }
1214
1215 static void
1216 validate_function(nir_function *func, validate_state *state)
1217 {
1218 if (func->impl != NULL) {
1219 validate_assert(state, func->impl->function == func);
1220 validate_function_impl(func->impl, state);
1221 }
1222 }
1223
1224 static void
1225 init_validate_state(validate_state *state)
1226 {
1227 state->regs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
1228 _mesa_key_pointer_equal);
1229 state->ssa_defs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
1230 _mesa_key_pointer_equal);
1231 state->ssa_defs_found = NULL;
1232 state->regs_found = NULL;
1233 state->var_defs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
1234 _mesa_key_pointer_equal);
1235 state->errors = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
1236 _mesa_key_pointer_equal);
1237
1238 state->loop = NULL;
1239 state->instr = NULL;
1240 state->var = NULL;
1241 }
1242
1243 static void
1244 destroy_validate_state(validate_state *state)
1245 {
1246 _mesa_hash_table_destroy(state->regs, NULL);
1247 _mesa_hash_table_destroy(state->ssa_defs, NULL);
1248 free(state->ssa_defs_found);
1249 free(state->regs_found);
1250 _mesa_hash_table_destroy(state->var_defs, NULL);
1251 _mesa_hash_table_destroy(state->errors, NULL);
1252 }
1253
1254 static void
1255 dump_errors(validate_state *state)
1256 {
1257 struct hash_table *errors = state->errors;
1258
1259 fprintf(stderr, "%d errors:\n", _mesa_hash_table_num_entries(errors));
1260
1261 nir_print_shader_annotated(state->shader, stderr, errors);
1262
1263 if (_mesa_hash_table_num_entries(errors) > 0) {
1264 fprintf(stderr, "%d additional errors:\n",
1265 _mesa_hash_table_num_entries(errors));
1266 struct hash_entry *entry;
1267 hash_table_foreach(errors, entry) {
1268 fprintf(stderr, "%s\n", (char *)entry->data);
1269 }
1270 }
1271
1272 abort();
1273 }
1274
1275 void
1276 nir_validate_shader(nir_shader *shader)
1277 {
1278 static int should_validate = -1;
1279 if (should_validate < 0)
1280 should_validate = env_var_as_boolean("NIR_VALIDATE", true);
1281 if (!should_validate)
1282 return;
1283
1284 validate_state state;
1285 init_validate_state(&state);
1286
1287 state.shader = shader;
1288
1289 exec_list_validate(&shader->uniforms);
1290 nir_foreach_variable(var, &shader->uniforms) {
1291 validate_var_decl(var, true, &state);
1292 }
1293
1294 exec_list_validate(&shader->inputs);
1295 nir_foreach_variable(var, &shader->inputs) {
1296 validate_var_decl(var, true, &state);
1297 }
1298
1299 exec_list_validate(&shader->outputs);
1300 nir_foreach_variable(var, &shader->outputs) {
1301 validate_var_decl(var, true, &state);
1302 }
1303
1304 exec_list_validate(&shader->shared);
1305 nir_foreach_variable(var, &shader->shared) {
1306 validate_var_decl(var, true, &state);
1307 }
1308
1309 exec_list_validate(&shader->globals);
1310 nir_foreach_variable(var, &shader->globals) {
1311 validate_var_decl(var, true, &state);
1312 }
1313
1314 exec_list_validate(&shader->system_values);
1315 nir_foreach_variable(var, &shader->system_values) {
1316 validate_var_decl(var, true, &state);
1317 }
1318
1319 state.regs_found = realloc(state.regs_found,
1320 BITSET_WORDS(shader->reg_alloc) *
1321 sizeof(BITSET_WORD));
1322 memset(state.regs_found, 0, BITSET_WORDS(shader->reg_alloc) *
1323 sizeof(BITSET_WORD));
1324 exec_list_validate(&shader->registers);
1325 foreach_list_typed(nir_register, reg, node, &shader->registers) {
1326 prevalidate_reg_decl(reg, true, &state);
1327 }
1328
1329 exec_list_validate(&shader->functions);
1330 foreach_list_typed(nir_function, func, node, &shader->functions) {
1331 validate_function(func, &state);
1332 }
1333
1334 foreach_list_typed(nir_register, reg, node, &shader->registers) {
1335 postvalidate_reg_decl(reg, &state);
1336 }
1337
1338 if (_mesa_hash_table_num_entries(state.errors) > 0)
1339 dump_errors(&state);
1340
1341 destroy_validate_state(&state);
1342 }
1343
1344 #endif /* NDEBUG */