nir: Add _deref versions of all of the _var intrinsics
[mesa.git] / src / compiler / nir / nir_validate.c
1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Connor Abbott (cwabbott0@gmail.com)
25 *
26 */
27
28 #include "nir.h"
29 #include <assert.h>
30
31 /*
32 * This file checks for invalid IR indicating a bug somewhere in the compiler.
33 */
34
35 /* Since this file is just a pile of asserts, don't bother compiling it if
36 * we're not building a debug build.
37 */
38 #ifndef NDEBUG
39
40 /*
41 * Per-register validation state.
42 */
43
/* Per-register bookkeeping built up while walking the shader.  At the end
 * of validation these sets are compared against the register's own
 * uses/if_uses/defs lists to make sure the two agree.
 */
typedef struct {
   /*
    * equivalent to the uses and defs in nir_register, but built up by the
    * validator. At the end, we verify that the sets have the same entries.
    *
    * uses: reads as instruction sources; if_uses: reads as if-statement
    * conditions; defs: writes via instruction destinations.
    */
   struct set *uses, *if_uses, *defs;
   nir_function_impl *where_defined; /* NULL for global registers */
} reg_validate_state;
52
/* Per-SSA-def bookkeeping, analogous to reg_validate_state but for SSA
 * values (which have no defs set since an SSA value is defined exactly
 * once, by its parent instruction).
 */
typedef struct {
   /*
    * equivalent to the uses in nir_ssa_def, but built up by the validator.
    * At the end, we verify that the sets have the same entries.
    */
   struct set *uses, *if_uses;
   /* function implementation in which the value is defined */
   nir_function_impl *where_defined;
} ssa_def_validate_state;
61
/* All of the validator's walking state.  The "current" pointers are set and
 * cleared as the walk descends so that log_error() can attach a failure to
 * the most specific IR object being examined.
 */
typedef struct {
   /* map of register -> validation state (struct above) */
   struct hash_table *regs;

   /* the current shader being validated */
   nir_shader *shader;

   /* the current instruction being validated */
   nir_instr *instr;

   /* the current variable being validated */
   nir_variable *var;

   /* the current basic block being validated */
   nir_block *block;

   /* the current if statement being validated */
   nir_if *if_stmt;

   /* the current loop being visited */
   nir_loop *loop;

   /* the parent of the current cf node being visited */
   nir_cf_node *parent_node;

   /* the current function implementation being validated */
   nir_function_impl *impl;

   /* map of SSA value -> function implementation where it is defined */
   struct hash_table *ssa_defs;

   /* bitset of ssa definitions we have found; used to check uniqueness */
   BITSET_WORD *ssa_defs_found;

   /* bitset of registers we have currently found; used to check uniqueness */
   BITSET_WORD *regs_found;

   /* map of variable -> function implementation where it is defined or NULL
    * if it is a global variable
    */
   struct hash_table *var_defs;

   /* map of instruction/var/etc to failed assert string; also serves as the
    * ralloc context for the messages themselves
    */
   struct hash_table *errors;
} validate_state;
107
108 static void
109 log_error(validate_state *state, const char *cond, const char *file, int line)
110 {
111 const void *obj;
112
113 if (state->instr)
114 obj = state->instr;
115 else if (state->var)
116 obj = state->var;
117 else
118 obj = cond;
119
120 char *msg = ralloc_asprintf(state->errors, "error: %s (%s:%d)",
121 cond, file, line);
122
123 _mesa_hash_table_insert(state->errors, obj, msg);
124 }
125
/* Check COND; on failure, log an error (carrying the stringified condition
 * plus file and line) against the current IR object instead of aborting,
 * so validation can keep going and report everything at once.
 */
#define validate_assert(state, cond) do {             \
      if (!(cond)) \
         log_error(state, #cond, __FILE__, __LINE__); \
   } while (0)
130
131 static void validate_src(nir_src *src, validate_state *state,
132 unsigned bit_size, unsigned num_components);
133
/* Validate a source that reads a (non-SSA) register: record the use for the
 * final use-list cross-check, and verify ownership, size, and any array
 * indexing.  bit_size/num_components of 0 mean "don't check".
 */
static void
validate_reg_src(nir_src *src, validate_state *state,
                 unsigned bit_size, unsigned num_components)
{
   validate_assert(state, src->reg.reg != NULL);

   struct hash_entry *entry;
   entry = _mesa_hash_table_search(state->regs, src->reg.reg);
   validate_assert(state, entry);

   reg_validate_state *reg_state = (reg_validate_state *) entry->data;

   /* Record the use in the appropriate set: instruction source vs.
    * if-condition source.
    */
   if (state->instr) {
      _mesa_set_add(reg_state->uses, src);
   } else {
      validate_assert(state, state->if_stmt);
      _mesa_set_add(reg_state->if_uses, src);
   }

   if (!src->reg.reg->is_global) {
      validate_assert(state, reg_state->where_defined == state->impl &&
                      "using a register declared in a different function");
   }

   /* Packed registers have no fixed size/component count to check. */
   if (!src->reg.reg->is_packed) {
      if (bit_size)
         validate_assert(state, src->reg.reg->bit_size == bit_size);
      if (num_components)
         validate_assert(state, src->reg.reg->num_components == num_components);
   }

   validate_assert(state, (src->reg.reg->num_array_elems == 0 ||
                          src->reg.base_offset < src->reg.reg->num_array_elems) &&
                   "definitely out-of-bounds array access");

   if (src->reg.indirect) {
      validate_assert(state, src->reg.reg->num_array_elems != 0);
      validate_assert(state, (src->reg.indirect->is_ssa ||
                              src->reg.indirect->reg.indirect == NULL) &&
                      "only one level of indirection allowed");
      /* Indirect array offsets are always 32-bit scalars. */
      validate_src(src->reg.indirect, state, 32, 1);
   }
}
177
/* Validate a source that reads an SSA value: the value must already have
 * been defined (walk order guarantees defs precede uses within a block),
 * must belong to the current function, and must match the requested size.
 * bit_size/num_components of 0 mean "don't check".
 */
static void
validate_ssa_src(nir_src *src, validate_state *state,
                 unsigned bit_size, unsigned num_components)
{
   validate_assert(state, src->ssa != NULL);

   struct hash_entry *entry = _mesa_hash_table_search(state->ssa_defs, src->ssa);

   validate_assert(state, entry);

   /* Bail rather than dereference a missing entry below. */
   if (!entry)
      return;

   ssa_def_validate_state *def_state = (ssa_def_validate_state *)entry->data;

   validate_assert(state, def_state->where_defined == state->impl &&
                   "using an SSA value defined in a different function");

   /* Record the use for the final use-list cross-check. */
   if (state->instr) {
      _mesa_set_add(def_state->uses, src);
   } else {
      validate_assert(state, state->if_stmt);
      _mesa_set_add(def_state->if_uses, src);
   }

   if (bit_size)
      validate_assert(state, src->ssa->bit_size == bit_size);
   if (num_components)
      validate_assert(state, src->ssa->num_components == num_components);

   /* TODO validate that the use is dominated by the definition */
}
210
/* Validate any source: check its back-pointer to the consuming
 * instruction (or if-statement when used as an if condition), then
 * dispatch to the SSA or register path.
 */
static void
validate_src(nir_src *src, validate_state *state,
             unsigned bit_size, unsigned num_components)
{
   if (state->instr)
      validate_assert(state, src->parent_instr == state->instr);
   else
      validate_assert(state, src->parent_if == state->if_stmt);

   if (src->is_ssa)
      validate_ssa_src(src, state, bit_size, num_components);
   else
      validate_reg_src(src, state, bit_size, num_components);
}
225
/* Validate one ALU source: every swizzle component that the instruction
 * actually reads must select a channel that exists in the source value.
 */
static void
validate_alu_src(nir_alu_instr *instr, unsigned index, validate_state *state)
{
   nir_alu_src *src = &instr->src[index];

   unsigned num_components = nir_src_num_components(src->src);
   if (!src->src.is_ssa && src->src.reg.reg->is_packed)
      num_components = 4; /* can't check anything */
   for (unsigned i = 0; i < 4; i++) {
      /* Swizzles are stored for all four slots even if unused. */
      validate_assert(state, src->swizzle[i] < 4);

      if (nir_alu_instr_channel_used(instr, index, i))
         validate_assert(state, src->swizzle[i] < num_components);
   }

   /* Size checks are handled per-opcode in validate_alu_instr. */
   validate_src(&src->src, state, 0, 0);
}
243
/* Validate a register destination: record the def for the final def-list
 * cross-check, and verify ownership, size, and any array indexing.
 * bit_size/num_components of 0 mean "don't check".
 */
static void
validate_reg_dest(nir_reg_dest *dest, validate_state *state,
                  unsigned bit_size, unsigned num_components)
{
   validate_assert(state, dest->reg != NULL);

   validate_assert(state, dest->parent_instr == state->instr);

   struct hash_entry *entry2;
   entry2 = _mesa_hash_table_search(state->regs, dest->reg);

   validate_assert(state, entry2);

   reg_validate_state *reg_state = (reg_validate_state *) entry2->data;
   _mesa_set_add(reg_state->defs, dest);

   if (!dest->reg->is_global) {
      validate_assert(state, reg_state->where_defined == state->impl &&
                      "writing to a register declared in a different function");
   }

   /* Packed registers have no fixed size/component count to check. */
   if (!dest->reg->is_packed) {
      if (bit_size)
         validate_assert(state, dest->reg->bit_size == bit_size);
      if (num_components)
         validate_assert(state, dest->reg->num_components == num_components);
   }

   validate_assert(state, (dest->reg->num_array_elems == 0 ||
                          dest->base_offset < dest->reg->num_array_elems) &&
                   "definitely out-of-bounds array access");

   if (dest->indirect) {
      validate_assert(state, dest->reg->num_array_elems != 0);
      validate_assert(state, (dest->indirect->is_ssa || dest->indirect->reg.indirect == NULL) &&
                      "only one level of indirection allowed");
      /* Indirect array offsets are always 32-bit scalars. */
      validate_src(dest->indirect, state, 32, 1);
   }
}
283
/* Validate an SSA definition: index uniqueness and range, back-pointer to
 * the defining instruction, and a legal component count.  Also sets up the
 * ssa_def_validate_state used later to cross-check the use lists.
 */
static void
validate_ssa_def(nir_ssa_def *def, validate_state *state)
{
   validate_assert(state, def->index < state->impl->ssa_alloc);
   /* Each index may be defined exactly once within the impl. */
   validate_assert(state, !BITSET_TEST(state->ssa_defs_found, def->index));
   BITSET_SET(state->ssa_defs_found, def->index);

   validate_assert(state, def->parent_instr == state->instr);

   /* vec1-vec4 plus the wide vec8/vec16 forms are the only legal sizes. */
   validate_assert(state, (def->num_components <= 4) ||
                          (def->num_components == 8) ||
                          (def->num_components == 16));

   list_validate(&def->uses);
   list_validate(&def->if_uses);

   ssa_def_validate_state *def_state = ralloc(state->ssa_defs,
                                              ssa_def_validate_state);
   def_state->where_defined = state->impl;
   def_state->uses = _mesa_set_create(def_state, _mesa_hash_pointer,
                                      _mesa_key_pointer_equal);
   def_state->if_uses = _mesa_set_create(def_state, _mesa_hash_pointer,
                                         _mesa_key_pointer_equal);
   _mesa_hash_table_insert(state->ssa_defs, def, def_state);
}
309
/* Validate any destination, dispatching to the SSA or register path.
 * bit_size/num_components of 0 mean "don't check".
 */
static void
validate_dest(nir_dest *dest, validate_state *state,
              unsigned bit_size, unsigned num_components)
{
   if (dest->is_ssa) {
      if (bit_size)
         validate_assert(state, dest->ssa.bit_size == bit_size);
      if (num_components)
         validate_assert(state, dest->ssa.num_components == num_components);
      validate_ssa_def(&dest->ssa, state);
   } else {
      validate_reg_dest(&dest->reg, state, bit_size, num_components);
   }
}
324
/* Validate an ALU destination: the write mask must stay within the
 * destination's components, and saturate is only legal on float-typed
 * results.
 */
static void
validate_alu_dest(nir_alu_instr *instr, validate_state *state)
{
   nir_alu_dest *dest = &instr->dest;

   unsigned dest_size = nir_dest_num_components(dest->dest);
   bool is_packed = !dest->dest.is_ssa && dest->dest.reg.reg->is_packed;
   /*
    * validate that the instruction doesn't write to components not in the
    * register/SSA value
    */
   validate_assert(state, is_packed || !(dest->write_mask & ~((1 << dest_size) - 1)));

   /* validate that saturate is only ever used on instructions with
    * destinations of type float
    */
   /* NOTE(review): alu re-derives the instruction from state->instr, which
    * looks redundant with the instr parameter — presumably equal; confirm.
    */
   nir_alu_instr *alu = nir_instr_as_alu(state->instr);
   validate_assert(state,
                   (nir_alu_type_get_base_type(nir_op_infos[alu->op].output_type) ==
                    nir_type_float) ||
                   !dest->saturate);

   validate_dest(&dest->dest, state, 0, 0);
}
349
/* Validate an ALU instruction: each source and the destination must match
 * the opcode's declared types; all unsized operands must agree on one
 * common bit size; and float operands must be 16, 32, or 64 bits.
 */
static void
validate_alu_instr(nir_alu_instr *instr, validate_state *state)
{
   validate_assert(state, instr->op < nir_num_opcodes);

   /* Bit size shared by all unsized operands; 0 until first seen. */
   unsigned instr_bit_size = 0;
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      nir_alu_type src_type = nir_op_infos[instr->op].input_types[i];
      unsigned src_bit_size = nir_src_bit_size(instr->src[i].src);
      if (nir_alu_type_get_type_size(src_type)) {
         /* Opcode pins this source to an explicit size. */
         validate_assert(state, src_bit_size == nir_alu_type_get_type_size(src_type));
      } else if (instr_bit_size) {
         validate_assert(state, src_bit_size == instr_bit_size);
      } else {
         instr_bit_size = src_bit_size;
      }

      if (nir_alu_type_get_base_type(src_type) == nir_type_float) {
         /* 8-bit float isn't a thing */
         validate_assert(state, src_bit_size == 16 || src_bit_size == 32 ||
                                src_bit_size == 64);
      }

      validate_alu_src(instr, i, state);
   }

   nir_alu_type dest_type = nir_op_infos[instr->op].output_type;
   unsigned dest_bit_size = nir_dest_bit_size(instr->dest.dest);
   if (nir_alu_type_get_type_size(dest_type)) {
      validate_assert(state, dest_bit_size == nir_alu_type_get_type_size(dest_type));
   } else if (instr_bit_size) {
      validate_assert(state, dest_bit_size == instr_bit_size);
   } else {
      /* The only unsized thing is the destination so it's vacuously valid */
   }

   if (nir_alu_type_get_base_type(dest_type) == nir_type_float) {
      /* 8-bit float isn't a thing */
      validate_assert(state, dest_bit_size == 16 || dest_bit_size == 32 ||
                             dest_bit_size == 64);
   }

   validate_alu_dest(instr, state);
}
394
/* Validate a legacy (pre-deref-instruction) deref chain: walk parent to
 * child checking that each link's type is consistent with its parent's
 * type and that ralloc ownership follows the chain.
 */
static void
validate_deref_chain(nir_deref *deref, nir_variable_mode mode,
                     validate_state *state)
{
   /* Each child deref must be ralloc'd off its parent. */
   validate_assert(state, deref->child == NULL || ralloc_parent(deref->child) == deref);

   nir_deref *parent = NULL;
   while (deref != NULL) {
      switch (deref->deref_type) {
      case nir_deref_type_array:
         if (mode == nir_var_shared) {
            /* Shared variables have a bit more relaxed rules because we need
             * to be able to handle array derefs on vectors.  Fortunately,
             * nir_lower_io handles these just fine.
             */
            validate_assert(state, glsl_type_is_array(parent->type) ||
                                   glsl_type_is_matrix(parent->type) ||
                                   glsl_type_is_vector(parent->type));
         } else {
            /* Most of NIR cannot handle array derefs on vectors */
            validate_assert(state, glsl_type_is_array(parent->type) ||
                                   glsl_type_is_matrix(parent->type));
         }
         validate_assert(state, deref->type == glsl_get_array_element(parent->type));
         if (nir_deref_as_array(deref)->deref_array_type ==
             nir_deref_array_type_indirect)
            validate_src(&nir_deref_as_array(deref)->indirect, state, 32, 1);
         break;

      case nir_deref_type_struct:
         assume(parent); /* cannot happen: deref chain starts w/ nir_deref_var */
         validate_assert(state, deref->type ==
                         glsl_get_struct_field(parent->type,
                                               nir_deref_as_struct(deref)->index));
         break;

      case nir_deref_type_var:
         break;

      default:
         validate_assert(state, !"Invalid deref type");
         break;
      }

      parent = deref;
      deref = deref->child;
   }
}
443
/* Validate a reference to a variable: it must have been declared somewhere
 * we've seen, and a function-local variable must belong to the current
 * function implementation.
 */
static void
validate_var_use(nir_variable *var, validate_state *state)
{
   struct hash_entry *entry = _mesa_hash_table_search(state->var_defs, var);
   validate_assert(state, entry);
   if (var->data.mode == nir_var_local)
      validate_assert(state, (nir_function_impl *) entry->data == state->impl);
}
452
/* Validate a legacy nir_deref_var: it must be ralloc'd off the instruction
 * that holds it, its head type must match the variable's type, and the
 * rest of the chain is checked by validate_deref_chain().
 */
static void
validate_deref_var(void *parent_mem_ctx, nir_deref_var *deref, validate_state *state)
{
   validate_assert(state, deref != NULL);
   validate_assert(state, ralloc_parent(deref) == parent_mem_ctx);
   validate_assert(state, deref->deref.type == deref->var->type);

   validate_var_use(deref->var, state);

   validate_deref_chain(&deref->deref, deref->var->data.mode, state);
}
464
/* Validate a deref instruction.  Var derefs stand alone; all other deref
 * types chain off a parent deref (via an SSA source, except casts) and must
 * agree with the parent's mode and type.
 */
static void
validate_deref_instr(nir_deref_instr *instr, validate_state *state)
{
   if (instr->deref_type == nir_deref_type_var) {
      /* Variable dereferences are stupid simple. */
      validate_assert(state, instr->mode == instr->var->data.mode);
      validate_assert(state, instr->type == instr->var->type);
      validate_var_use(instr->var, state);
   } else if (instr->deref_type == nir_deref_type_cast) {
      /* For cast, we simply have to trust the instruction. It's up to
       * lowering passes and front/back-ends to make them sane.
       */
      validate_src(&instr->parent, state, 0, 0);

      /* We just validate that the type and mode are there */
      validate_assert(state, instr->mode);
      validate_assert(state, instr->type);
   } else {
      /* We require the parent to be SSA.  This may be lifted in the future */
      validate_assert(state, instr->parent.is_ssa);

      /* The parent pointer value must have the same number of components
       * as the destination.
       */
      validate_src(&instr->parent, state, nir_dest_bit_size(instr->dest),
                   nir_dest_num_components(instr->dest));

      nir_instr *parent_instr = instr->parent.ssa->parent_instr;

      /* The parent must come from another deref instruction */
      validate_assert(state, parent_instr->type == nir_instr_type_deref);

      nir_deref_instr *parent = nir_instr_as_deref(parent_instr);

      validate_assert(state, instr->mode == parent->mode);

      switch (instr->deref_type) {
      case nir_deref_type_struct:
         validate_assert(state, glsl_type_is_struct(parent->type));
         validate_assert(state,
                         instr->strct.index < glsl_get_length(parent->type));
         validate_assert(state, instr->type ==
                         glsl_get_struct_field(parent->type, instr->strct.index));
         break;

      case nir_deref_type_array:
      case nir_deref_type_array_wildcard:
         if (instr->mode == nir_var_shared) {
            /* Shared variables have a bit more relaxed rules because we need
             * to be able to handle array derefs on vectors.  Fortunately,
             * nir_lower_io handles these just fine.
             */
            validate_assert(state, glsl_type_is_array(parent->type) ||
                                   glsl_type_is_matrix(parent->type) ||
                                   glsl_type_is_vector(parent->type));
         } else {
            /* Most of NIR cannot handle array derefs on vectors */
            validate_assert(state, glsl_type_is_array(parent->type) ||
                                   glsl_type_is_matrix(parent->type));
         }
         validate_assert(state,
                         instr->type == glsl_get_array_element(parent->type));

         /* Wildcards carry no index; only real array derefs have one. */
         if (instr->deref_type == nir_deref_type_array)
            validate_src(&instr->arr.index, state, 32, 1);
         break;

      default:
         unreachable("Invalid deref instruction type");
      }
   }

   /* We intentionally don't validate the size of the destination because we
    * want to let other compiler components such as SPIR-V decide how big
    * pointers should be.
    */
   validate_dest(&instr->dest, state, 0, 0);
}
543
/* Validate an intrinsic instruction.  The switch derives, for the variable
 * access intrinsics (both the new _deref forms and the legacy _var forms),
 * the expected bit sizes of the data source/destination from the deref'd
 * type; the generic loops below then validate every source, legacy deref
 * variable, and the destination against the intrinsic's declared shape.
 */
static void
validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state)
{
   /* 0 means "size not pinned by the intrinsic"; see validate_src/dest. */
   unsigned dest_bit_size = 0;
   unsigned src_bit_sizes[NIR_INTRINSIC_MAX_INPUTS] = { 0, };
   switch (instr->intrinsic) {
   case nir_intrinsic_load_deref: {
      nir_deref_instr *src = nir_src_as_deref(instr->src[0]);
      validate_assert(state, glsl_type_is_vector_or_scalar(src->type) ||
                      (src->mode == nir_var_uniform &&
                       glsl_get_base_type(src->type) == GLSL_TYPE_SUBROUTINE));
      validate_assert(state, instr->num_components ==
                             glsl_get_vector_elements(src->type));
      dest_bit_size = glsl_get_bit_size(src->type);
      break;
   }

   case nir_intrinsic_store_deref: {
      nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
      validate_assert(state, glsl_type_is_vector_or_scalar(dst->type));
      validate_assert(state, instr->num_components ==
                             glsl_get_vector_elements(dst->type));
      src_bit_sizes[1] = glsl_get_bit_size(dst->type);
      /* Stores to read-only storage classes are illegal. */
      validate_assert(state, (dst->mode & (nir_var_shader_in |
                                           nir_var_uniform |
                                           nir_var_shader_storage)) == 0);
      validate_assert(state, (nir_intrinsic_write_mask(instr) & ~((1 << instr->num_components) - 1)) == 0);
      break;
   }

   case nir_intrinsic_copy_deref: {
      nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
      nir_deref_instr *src = nir_src_as_deref(instr->src[1]);
      validate_assert(state, dst->type == src->type);
      validate_assert(state, (dst->mode & (nir_var_shader_in |
                                           nir_var_uniform |
                                           nir_var_shader_storage)) == 0);
      break;
   }

   case nir_intrinsic_load_var: {
      /* Legacy form: type comes from the tail of the deref chain. */
      const struct glsl_type *type =
         nir_deref_tail(&instr->variables[0]->deref)->type;
      validate_assert(state, glsl_type_is_vector_or_scalar(type) ||
                      (instr->variables[0]->var->data.mode == nir_var_uniform &&
                       glsl_get_base_type(type) == GLSL_TYPE_SUBROUTINE));
      validate_assert(state, instr->num_components ==
                             glsl_get_vector_elements(type));
      dest_bit_size = glsl_get_bit_size(type);
      break;
   }

   case nir_intrinsic_store_var: {
      const struct glsl_type *type =
         nir_deref_tail(&instr->variables[0]->deref)->type;
      validate_assert(state, glsl_type_is_vector_or_scalar(type) ||
                      (instr->variables[0]->var->data.mode == nir_var_uniform &&
                       glsl_get_base_type(type) == GLSL_TYPE_SUBROUTINE));
      validate_assert(state, instr->num_components == glsl_get_vector_elements(type));
      src_bit_sizes[0] = glsl_get_bit_size(type);
      validate_assert(state, instr->variables[0]->var->data.mode != nir_var_shader_in &&
                      instr->variables[0]->var->data.mode != nir_var_uniform &&
                      instr->variables[0]->var->data.mode != nir_var_shader_storage);
      validate_assert(state, (nir_intrinsic_write_mask(instr) & ~((1 << instr->num_components) - 1)) == 0);
      break;
   }

   case nir_intrinsic_copy_var:
      validate_assert(state, nir_deref_tail(&instr->variables[0]->deref)->type ==
                             nir_deref_tail(&instr->variables[1]->deref)->type);
      validate_assert(state, instr->variables[0]->var->data.mode != nir_var_shader_in &&
                      instr->variables[0]->var->data.mode != nir_var_uniform &&
                      instr->variables[0]->var->data.mode != nir_var_shader_storage);
      break;

   default:
      break;
   }

   unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs;
   for (unsigned i = 0; i < num_srcs; i++) {
      unsigned components_read = nir_intrinsic_src_components(instr, i);

      validate_assert(state, components_read > 0);

      validate_src(&instr->src[i], state, src_bit_sizes[i], components_read);
   }

   /* Legacy deref-variable operands (pre nir_deref_instr). */
   unsigned num_vars = nir_intrinsic_infos[instr->intrinsic].num_variables;
   for (unsigned i = 0; i < num_vars; i++) {
      validate_deref_var(instr, instr->variables[i], state);
   }

   if (nir_intrinsic_infos[instr->intrinsic].has_dest) {
      unsigned components_written = nir_intrinsic_dest_components(instr);

      validate_assert(state, components_written > 0);

      validate_dest(&instr->dest, state, dest_bit_size, components_written);
   }
}
645
/* Validate a texture instruction: no source type may appear twice, each
 * source must have the size the instruction expects, and the optional
 * legacy texture/sampler derefs must be well-formed.
 */
static void
validate_tex_instr(nir_tex_instr *instr, validate_state *state)
{
   bool src_type_seen[nir_num_tex_src_types];
   for (unsigned i = 0; i < nir_num_tex_src_types; i++)
      src_type_seen[i] = false;

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      validate_assert(state, !src_type_seen[instr->src[i].src_type]);
      src_type_seen[instr->src[i].src_type] = true;
      validate_src(&instr->src[i].src, state,
                   0, nir_tex_instr_src_size(instr, i));
   }

   if (instr->texture != NULL)
      validate_deref_var(instr, instr->texture, state);

   if (instr->sampler != NULL)
      validate_deref_var(instr, instr->sampler, state);

   validate_dest(&instr->dest, state, 0, nir_tex_instr_dest_size(instr));
}
668
/* Validate a call instruction: the return deref (absent only for void
 * callees) and every parameter deref must match the callee's declared
 * types, and the argument count must match.
 */
static void
validate_call_instr(nir_call_instr *instr, validate_state *state)
{
   if (instr->return_deref == NULL) {
      validate_assert(state, glsl_type_is_void(instr->callee->return_type));
   } else {
      validate_assert(state, instr->return_deref->deref.type == instr->callee->return_type);
      validate_deref_var(instr, instr->return_deref, state);
   }

   validate_assert(state, instr->num_params == instr->callee->num_params);

   for (unsigned i = 0; i < instr->num_params; i++) {
      validate_assert(state, instr->callee->params[i].type == instr->params[i]->deref.type);
      validate_deref_var(instr, instr->params[i], state);
   }
}
686
/* Validate a load_const instruction; it has no sources, only an SSA def. */
static void
validate_load_const_instr(nir_load_const_instr *instr, validate_state *state)
{
   validate_ssa_def(&instr->def, state);
}
692
/* Validate an ssa_undef instruction; it has no sources, only an SSA def. */
static void
validate_ssa_undef_instr(nir_ssa_undef_instr *instr, validate_state *state)
{
   validate_ssa_def(&instr->def, state);
}
698
/* Validate a phi instruction's destination and source count.  Phi sources
 * themselves are validated later, from each predecessor block (see
 * validate_phi_src), so that no SSA use is seen before its definition.
 */
static void
validate_phi_instr(nir_phi_instr *instr, validate_state *state)
{
   /*
    * don't validate the sources until we get to them from their predecessor
    * basic blocks, to avoid validating an SSA use before its definition.
    */

   validate_dest(&instr->dest, state, 0, 0);

   exec_list_validate(&instr->srcs);
   /* Exactly one phi source per predecessor block. */
   validate_assert(state, exec_list_length(&instr->srcs) ==
                          state->block->predecessors->entries);
}
713
714 static void
715 validate_instr(nir_instr *instr, validate_state *state)
716 {
717 validate_assert(state, instr->block == state->block);
718
719 state->instr = instr;
720
721 switch (instr->type) {
722 case nir_instr_type_alu:
723 validate_alu_instr(nir_instr_as_alu(instr), state);
724 break;
725
726 case nir_instr_type_deref:
727 validate_deref_instr(nir_instr_as_deref(instr), state);
728 break;
729
730 case nir_instr_type_call:
731 validate_call_instr(nir_instr_as_call(instr), state);
732 break;
733
734 case nir_instr_type_intrinsic:
735 validate_intrinsic_instr(nir_instr_as_intrinsic(instr), state);
736 break;
737
738 case nir_instr_type_tex:
739 validate_tex_instr(nir_instr_as_tex(instr), state);
740 break;
741
742 case nir_instr_type_load_const:
743 validate_load_const_instr(nir_instr_as_load_const(instr), state);
744 break;
745
746 case nir_instr_type_phi:
747 validate_phi_instr(nir_instr_as_phi(instr), state);
748 break;
749
750 case nir_instr_type_ssa_undef:
751 validate_ssa_undef_instr(nir_instr_as_ssa_undef(instr), state);
752 break;
753
754 case nir_instr_type_jump:
755 break;
756
757 default:
758 validate_assert(state, !"Invalid ALU instruction type");
759 break;
760 }
761
762 state->instr = NULL;
763 }
764
/* Validate the single phi source of INSTR whose predecessor is PRED.
 * Called while visiting PRED (after its instructions), so the source's SSA
 * def is guaranteed to have been seen already.  Aborts if the phi has no
 * source for PRED — validate_phi_instr already checked the source count.
 */
static void
validate_phi_src(nir_phi_instr *instr, nir_block *pred, validate_state *state)
{
   state->instr = &instr->instr;

   validate_assert(state, instr->dest.is_ssa);

   exec_list_validate(&instr->srcs);
   nir_foreach_phi_src(src, instr) {
      if (src->pred == pred) {
         validate_assert(state, src->src.is_ssa);
         /* Phi sources must match the destination's exact size. */
         validate_src(&src->src, state, instr->dest.ssa.bit_size,
                      instr->dest.ssa.num_components);
         state->instr = NULL;
         return;
      }
   }

   abort();
}
785
786 static void
787 validate_phi_srcs(nir_block *block, nir_block *succ, validate_state *state)
788 {
789 nir_foreach_instr(instr, succ) {
790 if (instr->type != nir_instr_type_phi)
791 break;
792
793 validate_phi_src(nir_instr_as_phi(instr), block, state);
794 }
795 }
796
797 static void validate_cf_node(nir_cf_node *node, validate_state *state);
798
/* Validate a basic block: instruction ordering invariants (phis first, a
 * jump only as the last instruction), the successor/predecessor edges, and
 * that the successors are exactly what the block's position in the CFG
 * (jump, loop, if, or fall-through) dictates.
 */
static void
validate_block(nir_block *block, validate_state *state)
{
   validate_assert(state, block->cf_node.parent == state->parent_node);

   state->block = block;

   exec_list_validate(&block->instr_list);
   nir_foreach_instr(instr, block) {
      /* Phis must form a contiguous run at the top of the block. */
      if (instr->type == nir_instr_type_phi) {
         validate_assert(state, instr == nir_block_first_instr(block) ||
                         nir_instr_prev(instr)->type == nir_instr_type_phi);
      }

      if (instr->type == nir_instr_type_jump) {
         validate_assert(state, instr == nir_block_last_instr(block));
      }

      validate_instr(instr, state);
   }

   validate_assert(state, block->successors[0] != NULL);
   validate_assert(state, block->successors[0] != block->successors[1]);

   /* Each successor must list us as a predecessor; validate our phi
    * sources in each successor now that our defs have been seen.
    */
   for (unsigned i = 0; i < 2; i++) {
      if (block->successors[i] != NULL) {
         struct set_entry *entry =
            _mesa_set_search(block->successors[i]->predecessors, block);
         validate_assert(state, entry);

         validate_phi_srcs(block, block->successors[i], state);
      }
   }

   /* Conversely, every predecessor must list us as a successor. */
   struct set_entry *entry;
   set_foreach(block->predecessors, entry) {
      const nir_block *pred = entry->key;
      validate_assert(state, pred->successors[0] == block ||
                      pred->successors[1] == block);
   }

   if (!exec_list_is_empty(&block->instr_list) &&
       nir_block_last_instr(block)->type == nir_instr_type_jump) {
      /* A jump determines the single successor. */
      validate_assert(state, block->successors[1] == NULL);
      nir_jump_instr *jump = nir_instr_as_jump(nir_block_last_instr(block));
      switch (jump->type) {
      case nir_jump_break: {
         nir_block *after =
            nir_cf_node_as_block(nir_cf_node_next(&state->loop->cf_node));
         validate_assert(state, block->successors[0] == after);
         break;
      }

      case nir_jump_continue: {
         nir_block *first = nir_loop_first_block(state->loop);
         validate_assert(state, block->successors[0] == first);
         break;
      }

      case nir_jump_return:
         validate_assert(state, block->successors[0] == state->impl->end_block);
         break;

      default:
         unreachable("bad jump type");
      }
   } else {
      /* No jump: successors follow from CFG structure. */
      nir_cf_node *next = nir_cf_node_next(&block->cf_node);
      if (next == NULL) {
         /* Last node in the parent: fall out of the enclosing construct. */
         switch (state->parent_node->type) {
         case nir_cf_node_loop: {
            nir_block *first = nir_loop_first_block(state->loop);
            validate_assert(state, block->successors[0] == first);
            /* due to the hack for infinite loops, block->successors[1] may
             * point to the block after the loop.
             */
            break;
         }

         case nir_cf_node_if: {
            nir_block *after =
               nir_cf_node_as_block(nir_cf_node_next(state->parent_node));
            validate_assert(state, block->successors[0] == after);
            validate_assert(state, block->successors[1] == NULL);
            break;
         }

         case nir_cf_node_function:
            validate_assert(state, block->successors[0] == state->impl->end_block);
            validate_assert(state, block->successors[1] == NULL);
            break;

         default:
            unreachable("unknown control flow node type");
         }
      } else {
         if (next->type == nir_cf_node_if) {
            /* Block feeding an if: both branch targets are successors. */
            nir_if *if_stmt = nir_cf_node_as_if(next);
            validate_assert(state, block->successors[0] ==
                            nir_if_first_then_block(if_stmt));
            validate_assert(state, block->successors[1] ==
                            nir_if_first_else_block(if_stmt));
         } else {
            validate_assert(state, next->type == nir_cf_node_loop);
            nir_loop *loop = nir_cf_node_as_loop(next);
            validate_assert(state, block->successors[0] ==
                            nir_loop_first_block(loop));
            validate_assert(state, block->successors[1] == NULL);
         }
      }
   }
}
911
/* Validate an if statement: it must sit between two blocks, its condition
 * must be a 32-bit scalar, and both branches must be non-empty (each
 * contains at least an empty block).  state->if_stmt is set while the
 * condition is validated so the use is attributed correctly.
 */
static void
validate_if(nir_if *if_stmt, validate_state *state)
{
   state->if_stmt = if_stmt;

   /* An if can never be the first or last node in its parent list. */
   validate_assert(state, !exec_node_is_head_sentinel(if_stmt->cf_node.node.prev));
   nir_cf_node *prev_node = nir_cf_node_prev(&if_stmt->cf_node);
   validate_assert(state, prev_node->type == nir_cf_node_block);

   validate_assert(state, !exec_node_is_tail_sentinel(if_stmt->cf_node.node.next));
   nir_cf_node *next_node = nir_cf_node_next(&if_stmt->cf_node);
   validate_assert(state, next_node->type == nir_cf_node_block);

   validate_src(&if_stmt->condition, state, 32, 1);

   validate_assert(state, !exec_list_is_empty(&if_stmt->then_list));
   validate_assert(state, !exec_list_is_empty(&if_stmt->else_list));

   nir_cf_node *old_parent = state->parent_node;
   state->parent_node = &if_stmt->cf_node;

   exec_list_validate(&if_stmt->then_list);
   foreach_list_typed(nir_cf_node, cf_node, node, &if_stmt->then_list) {
      validate_cf_node(cf_node, state);
   }

   exec_list_validate(&if_stmt->else_list);
   foreach_list_typed(nir_cf_node, cf_node, node, &if_stmt->else_list) {
      validate_cf_node(cf_node, state);
   }

   state->parent_node = old_parent;
   state->if_stmt = NULL;
}
946
/* Validate a loop: like an if, it must sit between two blocks and have a
 * non-empty body.  state->loop is saved/restored around the body so that
 * break/continue targets in nested blocks resolve to the innermost loop.
 */
static void
validate_loop(nir_loop *loop, validate_state *state)
{
   /* A loop can never be the first or last node in its parent list. */
   validate_assert(state, !exec_node_is_head_sentinel(loop->cf_node.node.prev));
   nir_cf_node *prev_node = nir_cf_node_prev(&loop->cf_node);
   validate_assert(state, prev_node->type == nir_cf_node_block);

   validate_assert(state, !exec_node_is_tail_sentinel(loop->cf_node.node.next));
   nir_cf_node *next_node = nir_cf_node_next(&loop->cf_node);
   validate_assert(state, next_node->type == nir_cf_node_block);

   validate_assert(state, !exec_list_is_empty(&loop->body));

   nir_cf_node *old_parent = state->parent_node;
   state->parent_node = &loop->cf_node;
   nir_loop *old_loop = state->loop;
   state->loop = loop;

   exec_list_validate(&loop->body);
   foreach_list_typed(nir_cf_node, cf_node, node, &loop->body) {
      validate_cf_node(cf_node, state);
   }

   state->parent_node = old_parent;
   state->loop = old_loop;
}
973
974 static void
975 validate_cf_node(nir_cf_node *node, validate_state *state)
976 {
977 validate_assert(state, node->parent == state->parent_node);
978
979 switch (node->type) {
980 case nir_cf_node_block:
981 validate_block(nir_cf_node_as_block(node), state);
982 break;
983
984 case nir_cf_node_if:
985 validate_if(nir_cf_node_as_if(node), state);
986 break;
987
988 case nir_cf_node_loop:
989 validate_loop(nir_cf_node_as_loop(node), state);
990 break;
991
992 default:
993 unreachable("Invalid CF node type");
994 }
995 }
996
997 static void
998 prevalidate_reg_decl(nir_register *reg, bool is_global, validate_state *state)
999 {
1000 validate_assert(state, reg->is_global == is_global);
1001
1002 if (is_global)
1003 validate_assert(state, reg->index < state->shader->reg_alloc);
1004 else
1005 validate_assert(state, reg->index < state->impl->reg_alloc);
1006 validate_assert(state, !BITSET_TEST(state->regs_found, reg->index));
1007 BITSET_SET(state->regs_found, reg->index);
1008
1009 list_validate(&reg->uses);
1010 list_validate(&reg->defs);
1011 list_validate(&reg->if_uses);
1012
1013 reg_validate_state *reg_state = ralloc(state->regs, reg_validate_state);
1014 reg_state->uses = _mesa_set_create(reg_state, _mesa_hash_pointer,
1015 _mesa_key_pointer_equal);
1016 reg_state->if_uses = _mesa_set_create(reg_state, _mesa_hash_pointer,
1017 _mesa_key_pointer_equal);
1018 reg_state->defs = _mesa_set_create(reg_state, _mesa_hash_pointer,
1019 _mesa_key_pointer_equal);
1020
1021 reg_state->where_defined = is_global ? NULL : state->impl;
1022
1023 _mesa_hash_table_insert(state->regs, reg, reg_state);
1024 }
1025
1026 static void
1027 postvalidate_reg_decl(nir_register *reg, validate_state *state)
1028 {
1029 struct hash_entry *entry = _mesa_hash_table_search(state->regs, reg);
1030
1031 assume(entry);
1032 reg_validate_state *reg_state = (reg_validate_state *) entry->data;
1033
1034 nir_foreach_use(src, reg) {
1035 struct set_entry *entry = _mesa_set_search(reg_state->uses, src);
1036 validate_assert(state, entry);
1037 _mesa_set_remove(reg_state->uses, entry);
1038 }
1039
1040 if (reg_state->uses->entries != 0) {
1041 printf("extra entries in register uses:\n");
1042 struct set_entry *entry;
1043 set_foreach(reg_state->uses, entry)
1044 printf("%p\n", entry->key);
1045
1046 abort();
1047 }
1048
1049 nir_foreach_if_use(src, reg) {
1050 struct set_entry *entry = _mesa_set_search(reg_state->if_uses, src);
1051 validate_assert(state, entry);
1052 _mesa_set_remove(reg_state->if_uses, entry);
1053 }
1054
1055 if (reg_state->if_uses->entries != 0) {
1056 printf("extra entries in register if_uses:\n");
1057 struct set_entry *entry;
1058 set_foreach(reg_state->if_uses, entry)
1059 printf("%p\n", entry->key);
1060
1061 abort();
1062 }
1063
1064 nir_foreach_def(src, reg) {
1065 struct set_entry *entry = _mesa_set_search(reg_state->defs, src);
1066 validate_assert(state, entry);
1067 _mesa_set_remove(reg_state->defs, entry);
1068 }
1069
1070 if (reg_state->defs->entries != 0) {
1071 printf("extra entries in register defs:\n");
1072 struct set_entry *entry;
1073 set_foreach(reg_state->defs, entry)
1074 printf("%p\n", entry->key);
1075
1076 abort();
1077 }
1078 }
1079
1080 static void
1081 validate_var_decl(nir_variable *var, bool is_global, validate_state *state)
1082 {
1083 state->var = var;
1084
1085 validate_assert(state, is_global == nir_variable_is_global(var));
1086
1087 /* Must have exactly one mode set */
1088 validate_assert(state, util_is_power_of_two_nonzero(var->data.mode));
1089
1090 if (var->data.compact) {
1091 /* The "compact" flag is only valid on arrays of scalars. */
1092 assert(glsl_type_is_array(var->type));
1093
1094 const struct glsl_type *type = glsl_get_array_element(var->type);
1095 if (nir_is_per_vertex_io(var, state->shader->info.stage)) {
1096 assert(glsl_type_is_array(type));
1097 assert(glsl_type_is_scalar(glsl_get_array_element(type)));
1098 } else {
1099 assert(glsl_type_is_scalar(type));
1100 }
1101 }
1102
1103 /*
1104 * TODO validate some things ir_validate.cpp does (requires more GLSL type
1105 * support)
1106 */
1107
1108 _mesa_hash_table_insert(state->var_defs, var,
1109 is_global ? NULL : state->impl);
1110
1111 state->var = NULL;
1112 }
1113
1114 static bool
1115 postvalidate_ssa_def(nir_ssa_def *def, void *void_state)
1116 {
1117 validate_state *state = void_state;
1118
1119 struct hash_entry *entry = _mesa_hash_table_search(state->ssa_defs, def);
1120
1121 assume(entry);
1122 ssa_def_validate_state *def_state = (ssa_def_validate_state *)entry->data;
1123
1124 nir_foreach_use(src, def) {
1125 struct set_entry *entry = _mesa_set_search(def_state->uses, src);
1126 validate_assert(state, entry);
1127 _mesa_set_remove(def_state->uses, entry);
1128 }
1129
1130 if (def_state->uses->entries != 0) {
1131 printf("extra entries in SSA def uses:\n");
1132 struct set_entry *entry;
1133 set_foreach(def_state->uses, entry)
1134 printf("%p\n", entry->key);
1135
1136 abort();
1137 }
1138
1139 nir_foreach_if_use(src, def) {
1140 struct set_entry *entry = _mesa_set_search(def_state->if_uses, src);
1141 validate_assert(state, entry);
1142 _mesa_set_remove(def_state->if_uses, entry);
1143 }
1144
1145 if (def_state->if_uses->entries != 0) {
1146 printf("extra entries in SSA def uses:\n");
1147 struct set_entry *entry;
1148 set_foreach(def_state->if_uses, entry)
1149 printf("%p\n", entry->key);
1150
1151 abort();
1152 }
1153
1154 return true;
1155 }
1156
/* Validates an entire function implementation: parameter and return-variable
 * wiring, locals, registers, the control-flow tree, and finally cross-checks
 * every register and SSA def against the uses actually encountered during
 * the walk.  The order of the steps below matters: registers must be
 * prevalidated before the CF walk records uses, and postvalidation must run
 * after the walk completes.
 */
static void
validate_function_impl(nir_function_impl *impl, validate_state *state)
{
   validate_assert(state, impl->function->impl == impl);
   validate_assert(state, impl->cf_node.parent == NULL);

   /* Each impl parameter must mirror its function parameter: same type,
    * nir_var_param mode, and location equal to its index.
    */
   validate_assert(state, impl->num_params == impl->function->num_params);
   for (unsigned i = 0; i < impl->num_params; i++) {
      validate_assert(state, impl->params[i]->type == impl->function->params[i].type);
      validate_assert(state, impl->params[i]->data.mode == nir_var_param);
      validate_assert(state, impl->params[i]->data.location == i);
      validate_var_decl(impl->params[i], false, state);
   }

   /* Void functions have no return variable; otherwise it must be a
    * nir_var_param with the sentinel location -1.
    */
   if (glsl_type_is_void(impl->function->return_type)) {
      validate_assert(state, impl->return_var == NULL);
   } else {
      validate_assert(state, impl->return_var->type == impl->function->return_type);
      validate_assert(state, impl->return_var->data.mode == nir_var_param);
      validate_assert(state, impl->return_var->data.location == -1);
      validate_var_decl(impl->return_var, false, state);
   }

   /* The end block is a pure sink: no instructions, no successors. */
   validate_assert(state, exec_list_is_empty(&impl->end_block->instr_list));
   validate_assert(state, impl->end_block->successors[0] == NULL);
   validate_assert(state, impl->end_block->successors[1] == NULL);

   state->impl = impl;
   state->parent_node = &impl->cf_node;

   exec_list_validate(&impl->locals);
   nir_foreach_variable(var, &impl->locals) {
      validate_var_decl(var, false, state);
   }

   /* Resize and clear the per-impl register-index bitset, then register all
    * local registers before the walk starts recording uses against them.
    */
   state->regs_found = realloc(state->regs_found,
                               BITSET_WORDS(impl->reg_alloc) *
                               sizeof(BITSET_WORD));
   memset(state->regs_found, 0, BITSET_WORDS(impl->reg_alloc) *
                                sizeof(BITSET_WORD));
   exec_list_validate(&impl->registers);
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      prevalidate_reg_decl(reg, false, state);
   }

   /* Same resize-and-clear for SSA indices, then walk the CF tree. */
   state->ssa_defs_found = realloc(state->ssa_defs_found,
                                   BITSET_WORDS(impl->ssa_alloc) *
                                   sizeof(BITSET_WORD));
   memset(state->ssa_defs_found, 0, BITSET_WORDS(impl->ssa_alloc) *
                                    sizeof(BITSET_WORD));
   exec_list_validate(&impl->body);
   foreach_list_typed(nir_cf_node, node, node, &impl->body) {
      validate_cf_node(node, state);
   }

   /* After the walk, every recorded use/def must reconcile with the lists. */
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      postvalidate_reg_decl(reg, state);
   }

   nir_foreach_block(block, impl) {
      nir_foreach_instr(instr, block)
         nir_foreach_ssa_def(instr, postvalidate_ssa_def, state);
   }
}
1221
1222 static void
1223 validate_function(nir_function *func, validate_state *state)
1224 {
1225 if (func->impl != NULL) {
1226 validate_assert(state, func->impl->function == func);
1227 validate_function_impl(func->impl, state);
1228 }
1229 }
1230
1231 static void
1232 init_validate_state(validate_state *state)
1233 {
1234 state->regs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
1235 _mesa_key_pointer_equal);
1236 state->ssa_defs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
1237 _mesa_key_pointer_equal);
1238 state->ssa_defs_found = NULL;
1239 state->regs_found = NULL;
1240 state->var_defs = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
1241 _mesa_key_pointer_equal);
1242 state->errors = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
1243 _mesa_key_pointer_equal);
1244
1245 state->loop = NULL;
1246 state->instr = NULL;
1247 state->var = NULL;
1248 }
1249
1250 static void
1251 destroy_validate_state(validate_state *state)
1252 {
1253 _mesa_hash_table_destroy(state->regs, NULL);
1254 _mesa_hash_table_destroy(state->ssa_defs, NULL);
1255 free(state->ssa_defs_found);
1256 free(state->regs_found);
1257 _mesa_hash_table_destroy(state->var_defs, NULL);
1258 _mesa_hash_table_destroy(state->errors, NULL);
1259 }
1260
/* Prints all accumulated validation errors — first annotated inline in a
 * full shader dump, then any remaining messages — and aborts the process.
 */
static void
dump_errors(validate_state *state)
{
   struct hash_table *errors = state->errors;

   fprintf(stderr, "%d errors:\n", _mesa_hash_table_num_entries(errors));

   nir_print_shader_annotated(state->shader, stderr, errors);

   /* NOTE(review): the entry count is re-queried here rather than cached —
    * presumably nir_print_shader_annotated may consume entries it managed to
    * attach to an instruction; confirm before simplifying to one call.
    */
   if (_mesa_hash_table_num_entries(errors) > 0) {
      fprintf(stderr, "%d additional errors:\n",
              _mesa_hash_table_num_entries(errors));
      struct hash_entry *entry;
      hash_table_foreach(errors, entry) {
         fprintf(stderr, "%s\n", (char *)entry->data);
      }
   }

   abort();
}
1281
1282 void
1283 nir_validate_shader(nir_shader *shader)
1284 {
1285 static int should_validate = -1;
1286 if (should_validate < 0)
1287 should_validate = env_var_as_boolean("NIR_VALIDATE", true);
1288 if (!should_validate)
1289 return;
1290
1291 validate_state state;
1292 init_validate_state(&state);
1293
1294 state.shader = shader;
1295
1296 exec_list_validate(&shader->uniforms);
1297 nir_foreach_variable(var, &shader->uniforms) {
1298 validate_var_decl(var, true, &state);
1299 }
1300
1301 exec_list_validate(&shader->inputs);
1302 nir_foreach_variable(var, &shader->inputs) {
1303 validate_var_decl(var, true, &state);
1304 }
1305
1306 exec_list_validate(&shader->outputs);
1307 nir_foreach_variable(var, &shader->outputs) {
1308 validate_var_decl(var, true, &state);
1309 }
1310
1311 exec_list_validate(&shader->shared);
1312 nir_foreach_variable(var, &shader->shared) {
1313 validate_var_decl(var, true, &state);
1314 }
1315
1316 exec_list_validate(&shader->globals);
1317 nir_foreach_variable(var, &shader->globals) {
1318 validate_var_decl(var, true, &state);
1319 }
1320
1321 exec_list_validate(&shader->system_values);
1322 nir_foreach_variable(var, &shader->system_values) {
1323 validate_var_decl(var, true, &state);
1324 }
1325
1326 state.regs_found = realloc(state.regs_found,
1327 BITSET_WORDS(shader->reg_alloc) *
1328 sizeof(BITSET_WORD));
1329 memset(state.regs_found, 0, BITSET_WORDS(shader->reg_alloc) *
1330 sizeof(BITSET_WORD));
1331 exec_list_validate(&shader->registers);
1332 foreach_list_typed(nir_register, reg, node, &shader->registers) {
1333 prevalidate_reg_decl(reg, true, &state);
1334 }
1335
1336 exec_list_validate(&shader->functions);
1337 foreach_list_typed(nir_function, func, node, &shader->functions) {
1338 validate_function(func, &state);
1339 }
1340
1341 foreach_list_typed(nir_register, reg, node, &shader->registers) {
1342 postvalidate_reg_decl(reg, &state);
1343 }
1344
1345 if (_mesa_hash_table_num_entries(state.errors) > 0)
1346 dump_errors(&state);
1347
1348 destroy_validate_state(&state);
1349 }
1350
1351 #endif /* NDEBUG */