nir: Get rid of nir_register::is_packed
[mesa.git] src/compiler/nir/nir_validate.c
1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Connor Abbott (cwabbott0@gmail.com)
25 *
26 */
27
28 #include "nir.h"
29 #include "c11/threads.h"
30 #include <assert.h>
31
32 /*
33 * This file checks for invalid IR indicating a bug somewhere in the compiler.
34 */
35
36 /* Since this file is just a pile of asserts, don't bother compiling it if
37 * we're not building a debug build.
38 */
39 #ifndef NDEBUG
40
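/* A rough usage sketch (not part of this file): callers sanity-check the IR
 * between passes with something like
 *
 *    nir_validate_shader(shader, "after some_pass");
 *
 * where "after some_pass" is only an example label; it is passed through to
 * dump_errors() below. Validation can also be disabled at run time by
 * setting NIR_VALIDATE=0 (see nir_validate_shader() at the bottom of this
 * file).
 */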
41 /*
42 * Per-register validation state.
43 */
44
45 typedef struct {
46 /*
47 * equivalent to the uses and defs in nir_register, but built up by the
48 * validator. At the end, we verify that the sets have the same entries.
49 */
50 struct set *uses, *if_uses, *defs;
51 nir_function_impl *where_defined; /* NULL for global registers */
52 } reg_validate_state;
53
54 typedef struct {
55 /*
56 * equivalent to the uses in nir_ssa_def, but built up by the validator.
57 * At the end, we verify that the sets have the same entries.
58 */
59 struct set *uses, *if_uses;
60 nir_function_impl *where_defined;
61 } ssa_def_validate_state;
62
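/* Both structures implement the same cross-check: the sets start out empty
 * (see prevalidate_reg_decl() and validate_ssa_def()), are filled in as each
 * use/def is visited, and are then compared against the register's or SSA
 * def's own use/def lists in postvalidate_reg_decl() and
 * postvalidate_ssa_def(). A validate_assert failure there means the IR's
 * list contains an entry the validator never saw; leftover set entries mean
 * the IR's list is missing a use or def that actually exists.
 */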
63 typedef struct {
64 /* map of register -> validation state (struct above) */
65 struct hash_table *regs;
66
67 /* the current shader being validated */
68 nir_shader *shader;
69
70 /* the current instruction being validated */
71 nir_instr *instr;
72
73 /* the current variable being validated */
74 nir_variable *var;
75
76 /* the current basic block being validated */
77 nir_block *block;
78
79 /* the current if statement being validated */
80 nir_if *if_stmt;
81
82 /* the current loop being visited */
83 nir_loop *loop;
84
85 /* the parent of the current cf node being visited */
86 nir_cf_node *parent_node;
87
88 /* the current function implementation being validated */
89 nir_function_impl *impl;
90
91 /* map of SSA value -> function implementation where it is defined */
92 struct hash_table *ssa_defs;
93
94 /* bitset of ssa definitions we have found; used to check uniqueness */
95 BITSET_WORD *ssa_defs_found;
96
97 /* bitset of registers we have currently found; used to check uniqueness */
98 BITSET_WORD *regs_found;
99
100 /* map of variable -> function implementation where it is defined or NULL
101 * if it is a global variable
102 */
103 struct hash_table *var_defs;
104
105 /* map of instruction/var/etc to failed assert string */
106 struct hash_table *errors;
107 } validate_state;
108
109 static void
110 log_error(validate_state *state, const char *cond, const char *file, int line)
111 {
112 const void *obj;
113
114 if (state->instr)
115 obj = state->instr;
116 else if (state->var)
117 obj = state->var;
118 else
119 obj = cond;
120
121 char *msg = ralloc_asprintf(state->errors, "error: %s (%s:%d)",
122 cond, file, line);
123
124 _mesa_hash_table_insert(state->errors, obj, msg);
125 }
126
127 #define validate_assert(state, cond) do { \
128 if (!(cond)) \
129 log_error(state, #cond, __FILE__, __LINE__); \
130 } while (0)
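/* validate_assert() records the failure (keyed to the current instruction or
 * variable via log_error()) rather than aborting on the spot; the shader is
 * later printed with the collected errors as annotations and abort() happens
 * once, in dump_errors(). For example,
 *
 *    validate_assert(state, src->reg.reg != NULL);
 *
 * simply adds an entry to state->errors when the condition is false.
 */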
131
132 static void validate_src(nir_src *src, validate_state *state,
133 unsigned bit_sizes, unsigned num_components);
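/* For validate_src() and friends, bit_sizes is a mask of acceptable bit
 * sizes (the check is "bit_size & bit_sizes") and 0 means "don't check";
 * likewise num_components == 0 skips the component-count check. For example,
 * indirect offsets are validated with validate_src(..., state, 32, 1), i.e.
 * a 32-bit scalar, and boolean load/store intrinsics OR an extra 32 into the
 * allowed sizes.
 */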
134
135 static void
136 validate_reg_src(nir_src *src, validate_state *state,
137 unsigned bit_sizes, unsigned num_components)
138 {
139 validate_assert(state, src->reg.reg != NULL);
140
141 struct hash_entry *entry;
142 entry = _mesa_hash_table_search(state->regs, src->reg.reg);
143 validate_assert(state, entry);
144
145 reg_validate_state *reg_state = (reg_validate_state *) entry->data;
146
147 if (state->instr) {
148 _mesa_set_add(reg_state->uses, src);
149 } else {
150 validate_assert(state, state->if_stmt);
151 _mesa_set_add(reg_state->if_uses, src);
152 }
153
154 if (!src->reg.reg->is_global) {
155 validate_assert(state, reg_state->where_defined == state->impl &&
156 "using a register declared in a different function");
157 }
158
159 if (bit_sizes)
160 validate_assert(state, src->reg.reg->bit_size & bit_sizes);
161 if (num_components)
162 validate_assert(state, src->reg.reg->num_components == num_components);
163
164 validate_assert(state, (src->reg.reg->num_array_elems == 0 ||
165 src->reg.base_offset < src->reg.reg->num_array_elems) &&
166 "definitely out-of-bounds array access");
167
168 if (src->reg.indirect) {
169 validate_assert(state, src->reg.reg->num_array_elems != 0);
170 validate_assert(state, (src->reg.indirect->is_ssa ||
171 src->reg.indirect->reg.indirect == NULL) &&
172 "only one level of indirection allowed");
173 validate_src(src->reg.indirect, state, 32, 1);
174 }
175 }
176
177 static void
178 validate_ssa_src(nir_src *src, validate_state *state,
179 unsigned bit_sizes, unsigned num_components)
180 {
181 validate_assert(state, src->ssa != NULL);
182
183 struct hash_entry *entry = _mesa_hash_table_search(state->ssa_defs, src->ssa);
184
185 validate_assert(state, entry);
186
187 if (!entry)
188 return;
189
190 ssa_def_validate_state *def_state = (ssa_def_validate_state *)entry->data;
191
192 validate_assert(state, def_state->where_defined == state->impl &&
193 "using an SSA value defined in a different function");
194
195 if (state->instr) {
196 _mesa_set_add(def_state->uses, src);
197 } else {
198 validate_assert(state, state->if_stmt);
199 _mesa_set_add(def_state->if_uses, src);
200 }
201
202 if (bit_sizes)
203 validate_assert(state, src->ssa->bit_size & bit_sizes);
204 if (num_components)
205 validate_assert(state, src->ssa->num_components == num_components);
206
207 /* TODO validate that the use is dominated by the definition */
208 }
209
210 static void
211 validate_src(nir_src *src, validate_state *state,
212 unsigned bit_sizes, unsigned num_components)
213 {
214 if (state->instr)
215 validate_assert(state, src->parent_instr == state->instr);
216 else
217 validate_assert(state, src->parent_if == state->if_stmt);
218
219 if (src->is_ssa)
220 validate_ssa_src(src, state, bit_sizes, num_components);
221 else
222 validate_reg_src(src, state, bit_sizes, num_components);
223 }
224
225 static void
226 validate_alu_src(nir_alu_instr *instr, unsigned index, validate_state *state)
227 {
228 nir_alu_src *src = &instr->src[index];
229
230 unsigned num_components = nir_src_num_components(src->src);
231 for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
232 validate_assert(state, src->swizzle[i] < NIR_MAX_VEC_COMPONENTS);
233
234 if (nir_alu_instr_channel_used(instr, index, i))
235 validate_assert(state, src->swizzle[i] < num_components);
236 }
237
238 validate_src(&src->src, state, 0, 0);
239 }
240
241 static void
242 validate_reg_dest(nir_reg_dest *dest, validate_state *state,
243 unsigned bit_sizes, unsigned num_components)
244 {
245 validate_assert(state, dest->reg != NULL);
246
247 validate_assert(state, dest->parent_instr == state->instr);
248
249 struct hash_entry *entry2;
250 entry2 = _mesa_hash_table_search(state->regs, dest->reg);
251
252 validate_assert(state, entry2);
253
254 reg_validate_state *reg_state = (reg_validate_state *) entry2->data;
255 _mesa_set_add(reg_state->defs, dest);
256
257 if (!dest->reg->is_global) {
258 validate_assert(state, reg_state->where_defined == state->impl &&
259 "writing to a register declared in a different function");
260 }
261
262 if (bit_sizes)
263 validate_assert(state, dest->reg->bit_size & bit_sizes);
264 if (num_components)
265 validate_assert(state, dest->reg->num_components == num_components);
266
267 validate_assert(state, (dest->reg->num_array_elems == 0 ||
268 dest->base_offset < dest->reg->num_array_elems) &&
269 "definitely out-of-bounds array access");
270
271 if (dest->indirect) {
272 validate_assert(state, dest->reg->num_array_elems != 0);
273 validate_assert(state, (dest->indirect->is_ssa || dest->indirect->reg.indirect == NULL) &&
274 "only one level of indirection allowed");
275 validate_src(dest->indirect, state, 32, 1);
276 }
277 }
278
279 static void
280 validate_ssa_def(nir_ssa_def *def, validate_state *state)
281 {
282 validate_assert(state, def->index < state->impl->ssa_alloc);
283 validate_assert(state, !BITSET_TEST(state->ssa_defs_found, def->index));
284 BITSET_SET(state->ssa_defs_found, def->index);
285
286 validate_assert(state, def->parent_instr == state->instr);
287
288 validate_assert(state, (def->num_components <= 4) ||
289 (def->num_components == 8) ||
290 (def->num_components == 16));
291
292 list_validate(&def->uses);
293 list_validate(&def->if_uses);
294
295 ssa_def_validate_state *def_state = ralloc(state->ssa_defs,
296 ssa_def_validate_state);
297 def_state->where_defined = state->impl;
298 def_state->uses = _mesa_pointer_set_create(def_state);
299 def_state->if_uses = _mesa_pointer_set_create(def_state);
300 _mesa_hash_table_insert(state->ssa_defs, def, def_state);
301 }
302
303 static void
304 validate_dest(nir_dest *dest, validate_state *state,
305 unsigned bit_sizes, unsigned num_components)
306 {
307 if (dest->is_ssa) {
308 if (bit_sizes)
309 validate_assert(state, dest->ssa.bit_size & bit_sizes);
310 if (num_components)
311 validate_assert(state, dest->ssa.num_components == num_components);
312 validate_ssa_def(&dest->ssa, state);
313 } else {
314 validate_reg_dest(&dest->reg, state, bit_sizes, num_components);
315 }
316 }
317
318 static void
319 validate_alu_dest(nir_alu_instr *instr, validate_state *state)
320 {
321 nir_alu_dest *dest = &instr->dest;
322
323 unsigned dest_size = nir_dest_num_components(dest->dest);
324 /*
325 * validate that the instruction doesn't write to components not in the
326 * register/SSA value
327 */
328 validate_assert(state, !(dest->write_mask & ~((1 << dest_size) - 1)));
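   /* For example, with a vec3 destination dest_size is 3 and
    * (1 << 3) - 1 == 0x7, so only write_mask bits 0-2 may be set.
    */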
329
330 /* validate that saturate is only ever used on instructions with
331 * destinations of type float
332 */
333 nir_alu_instr *alu = nir_instr_as_alu(state->instr);
334 validate_assert(state,
335 (nir_alu_type_get_base_type(nir_op_infos[alu->op].output_type) ==
336 nir_type_float) ||
337 !dest->saturate);
338
339 validate_dest(&dest->dest, state, 0, 0);
340 }
341
342 static void
343 validate_alu_instr(nir_alu_instr *instr, validate_state *state)
344 {
345 validate_assert(state, instr->op < nir_num_opcodes);
346
347 unsigned instr_bit_size = 0;
348 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
349 nir_alu_type src_type = nir_op_infos[instr->op].input_types[i];
350 unsigned src_bit_size = nir_src_bit_size(instr->src[i].src);
351 if (nir_alu_type_get_type_size(src_type)) {
352 validate_assert(state, src_bit_size == nir_alu_type_get_type_size(src_type));
353 } else if (instr_bit_size) {
354 validate_assert(state, src_bit_size == instr_bit_size);
355 } else {
356 instr_bit_size = src_bit_size;
357 }
358
359 if (nir_alu_type_get_base_type(src_type) == nir_type_float) {
360 /* 8-bit float isn't a thing */
361 validate_assert(state, src_bit_size == 16 || src_bit_size == 32 ||
362 src_bit_size == 64);
363 }
364
365 validate_alu_src(instr, i, state);
366 }
367
368 nir_alu_type dest_type = nir_op_infos[instr->op].output_type;
369 unsigned dest_bit_size = nir_dest_bit_size(instr->dest.dest);
370 if (nir_alu_type_get_type_size(dest_type)) {
371 validate_assert(state, dest_bit_size == nir_alu_type_get_type_size(dest_type));
372 } else if (instr_bit_size) {
373 validate_assert(state, dest_bit_size == instr_bit_size);
374 } else {
375 /* The only unsized thing is the destination so it's vacuously valid */
376 }
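   /* As a concrete illustration (the opcode properties live in nir_op_infos,
    * not here): an opcode whose inputs and output are all unsized, e.g.
    * nir_op_iadd, must use one common bit size for every source and the
    * destination, inferred from the first unsized source above, while an
    * opcode with an explicitly sized type, e.g. the 32-bit conversion
    * opcodes, pins that operand to exactly that size.
    */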
377
378 if (nir_alu_type_get_base_type(dest_type) == nir_type_float) {
379 /* 8-bit float isn't a thing */
380 validate_assert(state, dest_bit_size == 16 || dest_bit_size == 32 ||
381 dest_bit_size == 64);
382 }
383
384 validate_alu_dest(instr, state);
385 }
386
387 static void
388 validate_var_use(nir_variable *var, validate_state *state)
389 {
390 struct hash_entry *entry = _mesa_hash_table_search(state->var_defs, var);
391 validate_assert(state, entry);
392 if (var->data.mode == nir_var_function_temp)
393 validate_assert(state, (nir_function_impl *) entry->data == state->impl);
394 }
395
396 static void
397 validate_deref_instr(nir_deref_instr *instr, validate_state *state)
398 {
399 if (instr->deref_type == nir_deref_type_var) {
400 /* Variable dereferences are stupid simple. */
401 validate_assert(state, instr->mode == instr->var->data.mode);
402 validate_assert(state, instr->type == instr->var->type);
403 validate_var_use(instr->var, state);
404 } else if (instr->deref_type == nir_deref_type_cast) {
405 /* For cast, we simply have to trust the instruction. It's up to
406 * lowering passes and front/back-ends to make them sane.
407 */
408 validate_src(&instr->parent, state, 0, 0);
409
410 /* We just validate that the type and mode are there */
411 validate_assert(state, instr->mode);
412 validate_assert(state, instr->type);
413 } else {
414 /* We require the parent to be SSA. This may be lifted in the future */
415 validate_assert(state, instr->parent.is_ssa);
416
417 /* The parent pointer value must have the same number of components
418 * as the destination.
419 */
420 validate_src(&instr->parent, state, nir_dest_bit_size(instr->dest),
421 nir_dest_num_components(instr->dest));
422
423 nir_instr *parent_instr = instr->parent.ssa->parent_instr;
424
425 /* The parent must come from another deref instruction */
426 validate_assert(state, parent_instr->type == nir_instr_type_deref);
427
428 nir_deref_instr *parent = nir_instr_as_deref(parent_instr);
429
430 validate_assert(state, instr->mode == parent->mode);
431
432 switch (instr->deref_type) {
433 case nir_deref_type_struct:
434 validate_assert(state, glsl_type_is_struct_or_ifc(parent->type));
435 validate_assert(state,
436 instr->strct.index < glsl_get_length(parent->type));
437 validate_assert(state, instr->type ==
438 glsl_get_struct_field(parent->type, instr->strct.index));
439 break;
440
441 case nir_deref_type_array:
442 case nir_deref_type_array_wildcard:
443 if (instr->mode == nir_var_mem_ubo ||
444 instr->mode == nir_var_mem_ssbo ||
445 instr->mode == nir_var_mem_shared ||
446 instr->mode == nir_var_mem_global) {
447             /* Shared variables and UBO/SSBOs have slightly more relaxed rules
448 * because we need to be able to handle array derefs on vectors.
449 * Fortunately, nir_lower_io handles these just fine.
450 */
451 validate_assert(state, glsl_type_is_array(parent->type) ||
452 glsl_type_is_matrix(parent->type) ||
453 glsl_type_is_vector(parent->type));
454 } else {
455 /* Most of NIR cannot handle array derefs on vectors */
456 validate_assert(state, glsl_type_is_array(parent->type) ||
457 glsl_type_is_matrix(parent->type));
458 }
459 validate_assert(state,
460 instr->type == glsl_get_array_element(parent->type));
461
462 if (instr->deref_type == nir_deref_type_array) {
463 validate_src(&instr->arr.index, state,
464 nir_dest_bit_size(instr->dest), 1);
465 }
466 break;
467
468 case nir_deref_type_ptr_as_array:
469 /* ptr_as_array derefs must have a parent that is either an array,
470 * ptr_as_array, or cast. If the parent is a cast, we get the stride
471 * information (if any) from the cast deref.
472 */
473 validate_assert(state,
474 parent->deref_type == nir_deref_type_array ||
475 parent->deref_type == nir_deref_type_ptr_as_array ||
476 parent->deref_type == nir_deref_type_cast);
477 validate_src(&instr->arr.index, state,
478 nir_dest_bit_size(instr->dest), 1);
479 break;
480
481 default:
482 unreachable("Invalid deref instruction type");
483 }
484 }
485
486 /* We intentionally don't validate the size of the destination because we
487 * want to let other compiler components such as SPIR-V decide how big
488 * pointers should be.
489 */
490 validate_dest(&instr->dest, state, 0, 0);
491
492    /* Using a deref instruction as an 'if' condition doesn't make sense
493     * because if conditions expect well-formed Booleans.  If you want to
494     * compare with NULL, an explicit comparison operation should be used.
495 */
496 validate_assert(state, list_empty(&instr->dest.ssa.if_uses));
497 }
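/* A well-formed deref chain, as a sketch: a nir_deref_type_var instruction
 * points at the variable, a nir_deref_type_struct child selects a field of
 * the parent's struct type, and a nir_deref_type_array child selects an
 * element of the parent's array type; each child's parent source is the
 * previous deref's SSA destination and all of them share the variable's
 * mode, which is exactly what the checks above enforce.
 */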
498
499 static void
500 validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state)
501 {
502 unsigned dest_bit_size = 0;
503 unsigned src_bit_sizes[NIR_INTRINSIC_MAX_INPUTS] = { 0, };
504 switch (instr->intrinsic) {
505 case nir_intrinsic_load_param: {
506 unsigned param_idx = nir_intrinsic_param_idx(instr);
507 validate_assert(state, param_idx < state->impl->function->num_params);
508 nir_parameter *param = &state->impl->function->params[param_idx];
509 validate_assert(state, instr->num_components == param->num_components);
510 dest_bit_size = param->bit_size;
511 break;
512 }
513
514 case nir_intrinsic_load_deref: {
515 nir_deref_instr *src = nir_src_as_deref(instr->src[0]);
516 validate_assert(state, glsl_type_is_vector_or_scalar(src->type) ||
517 (src->mode == nir_var_uniform &&
518 glsl_get_base_type(src->type) == GLSL_TYPE_SUBROUTINE));
519 validate_assert(state, instr->num_components ==
520 glsl_get_vector_elements(src->type));
521 dest_bit_size = glsl_get_bit_size(src->type);
522 /* Also allow 32-bit boolean load operations */
523 if (glsl_type_is_boolean(src->type))
524 dest_bit_size |= 32;
525 break;
526 }
527
528 case nir_intrinsic_store_deref: {
529 nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
530 validate_assert(state, glsl_type_is_vector_or_scalar(dst->type));
531 validate_assert(state, instr->num_components ==
532 glsl_get_vector_elements(dst->type));
533 src_bit_sizes[1] = glsl_get_bit_size(dst->type);
534 /* Also allow 32-bit boolean store operations */
535 if (glsl_type_is_boolean(dst->type))
536 src_bit_sizes[1] |= 32;
537 validate_assert(state, (dst->mode & (nir_var_shader_in |
538 nir_var_uniform)) == 0);
539 validate_assert(state, (nir_intrinsic_write_mask(instr) & ~((1 << instr->num_components) - 1)) == 0);
540 break;
541 }
542
543 case nir_intrinsic_copy_deref: {
544 nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
545 nir_deref_instr *src = nir_src_as_deref(instr->src[1]);
546 validate_assert(state, glsl_get_bare_type(dst->type) ==
547 glsl_get_bare_type(src->type));
548 validate_assert(state, (dst->mode & (nir_var_shader_in |
549 nir_var_uniform)) == 0);
550 break;
551 }
552
553 default:
554 break;
555 }
556
557 unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs;
558 for (unsigned i = 0; i < num_srcs; i++) {
559 unsigned components_read = nir_intrinsic_src_components(instr, i);
560
561 validate_assert(state, components_read > 0);
562
563 validate_src(&instr->src[i], state, src_bit_sizes[i], components_read);
564 }
565
566 if (nir_intrinsic_infos[instr->intrinsic].has_dest) {
567 unsigned components_written = nir_intrinsic_dest_components(instr);
568 unsigned bit_sizes = nir_intrinsic_infos[instr->intrinsic].dest_bit_sizes;
569
570 validate_assert(state, components_written > 0);
571
572 if (dest_bit_size && bit_sizes)
573 validate_assert(state, dest_bit_size & bit_sizes);
574 else
575 dest_bit_size = dest_bit_size ? dest_bit_size : bit_sizes;
576
577 validate_dest(&instr->dest, state, dest_bit_size, components_written);
578 }
579 }
580
581 static void
582 validate_tex_instr(nir_tex_instr *instr, validate_state *state)
583 {
584 bool src_type_seen[nir_num_tex_src_types];
585 for (unsigned i = 0; i < nir_num_tex_src_types; i++)
586 src_type_seen[i] = false;
587
588 for (unsigned i = 0; i < instr->num_srcs; i++) {
589 validate_assert(state, !src_type_seen[instr->src[i].src_type]);
590 src_type_seen[instr->src[i].src_type] = true;
591 validate_src(&instr->src[i].src, state,
592 0, nir_tex_instr_src_size(instr, i));
593
594 switch (instr->src[i].src_type) {
595 case nir_tex_src_texture_deref:
596 case nir_tex_src_sampler_deref:
597 validate_assert(state, instr->src[i].src.is_ssa);
598 validate_assert(state,
599 instr->src[i].src.ssa->parent_instr->type == nir_instr_type_deref);
600 break;
601 default:
602 break;
603 }
604 }
605
606 if (nir_tex_instr_has_explicit_tg4_offsets(instr)) {
607 validate_assert(state, instr->op == nir_texop_tg4);
608 validate_assert(state, !src_type_seen[nir_tex_src_offset]);
609 }
610
611 validate_dest(&instr->dest, state, 0, nir_tex_instr_dest_size(instr));
612 }
613
614 static void
615 validate_call_instr(nir_call_instr *instr, validate_state *state)
616 {
617 validate_assert(state, instr->num_params == instr->callee->num_params);
618
619 for (unsigned i = 0; i < instr->num_params; i++) {
620 validate_src(&instr->params[i], state,
621 instr->callee->params[i].bit_size,
622 instr->callee->params[i].num_components);
623 }
624 }
625
626 static void
627 validate_load_const_instr(nir_load_const_instr *instr, validate_state *state)
628 {
629 validate_ssa_def(&instr->def, state);
630 }
631
632 static void
633 validate_ssa_undef_instr(nir_ssa_undef_instr *instr, validate_state *state)
634 {
635 validate_ssa_def(&instr->def, state);
636 }
637
638 static void
639 validate_phi_instr(nir_phi_instr *instr, validate_state *state)
640 {
641 /*
642 * don't validate the sources until we get to them from their predecessor
643 * basic blocks, to avoid validating an SSA use before its definition.
644 */
645
646 validate_dest(&instr->dest, state, 0, 0);
647
648 exec_list_validate(&instr->srcs);
649 validate_assert(state, exec_list_length(&instr->srcs) ==
650 state->block->predecessors->entries);
651 }
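/* The phi sources themselves are checked in validate_phi_src(), which
 * validate_block() invokes (through validate_phi_srcs()) once per
 * predecessor edge, so an SSA use is never validated before its definition
 * has been seen.
 */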
652
653 static void
654 validate_instr(nir_instr *instr, validate_state *state)
655 {
656 validate_assert(state, instr->block == state->block);
657
658 state->instr = instr;
659
660 switch (instr->type) {
661 case nir_instr_type_alu:
662 validate_alu_instr(nir_instr_as_alu(instr), state);
663 break;
664
665 case nir_instr_type_deref:
666 validate_deref_instr(nir_instr_as_deref(instr), state);
667 break;
668
669 case nir_instr_type_call:
670 validate_call_instr(nir_instr_as_call(instr), state);
671 break;
672
673 case nir_instr_type_intrinsic:
674 validate_intrinsic_instr(nir_instr_as_intrinsic(instr), state);
675 break;
676
677 case nir_instr_type_tex:
678 validate_tex_instr(nir_instr_as_tex(instr), state);
679 break;
680
681 case nir_instr_type_load_const:
682 validate_load_const_instr(nir_instr_as_load_const(instr), state);
683 break;
684
685 case nir_instr_type_phi:
686 validate_phi_instr(nir_instr_as_phi(instr), state);
687 break;
688
689 case nir_instr_type_ssa_undef:
690 validate_ssa_undef_instr(nir_instr_as_ssa_undef(instr), state);
691 break;
692
693 case nir_instr_type_jump:
694 break;
695
696 default:
697       validate_assert(state, !"Invalid instruction type");
698 break;
699 }
700
701 state->instr = NULL;
702 }
703
704 static void
705 validate_phi_src(nir_phi_instr *instr, nir_block *pred, validate_state *state)
706 {
707 state->instr = &instr->instr;
708
709 validate_assert(state, instr->dest.is_ssa);
710
711 exec_list_validate(&instr->srcs);
712 nir_foreach_phi_src(src, instr) {
713 if (src->pred == pred) {
714 validate_assert(state, src->src.is_ssa);
715 validate_src(&src->src, state, instr->dest.ssa.bit_size,
716 instr->dest.ssa.num_components);
717 state->instr = NULL;
718 return;
719 }
720 }
721
722 abort();
723 }
724
725 static void
726 validate_phi_srcs(nir_block *block, nir_block *succ, validate_state *state)
727 {
728 nir_foreach_instr(instr, succ) {
729 if (instr->type != nir_instr_type_phi)
730 break;
731
732 validate_phi_src(nir_instr_as_phi(instr), block, state);
733 }
734 }
735
736 static void validate_cf_node(nir_cf_node *node, validate_state *state);
737
738 static void
739 validate_block(nir_block *block, validate_state *state)
740 {
741 validate_assert(state, block->cf_node.parent == state->parent_node);
742
743 state->block = block;
744
745 exec_list_validate(&block->instr_list);
746 nir_foreach_instr(instr, block) {
747 if (instr->type == nir_instr_type_phi) {
748 validate_assert(state, instr == nir_block_first_instr(block) ||
749 nir_instr_prev(instr)->type == nir_instr_type_phi);
750 }
751
752 if (instr->type == nir_instr_type_jump) {
753 validate_assert(state, instr == nir_block_last_instr(block));
754 }
755
756 validate_instr(instr, state);
757 }
758
759 validate_assert(state, block->successors[0] != NULL);
760 validate_assert(state, block->successors[0] != block->successors[1]);
761
762 for (unsigned i = 0; i < 2; i++) {
763 if (block->successors[i] != NULL) {
764 struct set_entry *entry =
765 _mesa_set_search(block->successors[i]->predecessors, block);
766 validate_assert(state, entry);
767
768 validate_phi_srcs(block, block->successors[i], state);
769 }
770 }
771
772 set_foreach(block->predecessors, entry) {
773 const nir_block *pred = entry->key;
774 validate_assert(state, pred->successors[0] == block ||
775 pred->successors[1] == block);
776 }
777
778 if (!exec_list_is_empty(&block->instr_list) &&
779 nir_block_last_instr(block)->type == nir_instr_type_jump) {
780 validate_assert(state, block->successors[1] == NULL);
781 nir_jump_instr *jump = nir_instr_as_jump(nir_block_last_instr(block));
782 switch (jump->type) {
783 case nir_jump_break: {
784 nir_block *after =
785 nir_cf_node_as_block(nir_cf_node_next(&state->loop->cf_node));
786 validate_assert(state, block->successors[0] == after);
787 break;
788 }
789
790 case nir_jump_continue: {
791 nir_block *first = nir_loop_first_block(state->loop);
792 validate_assert(state, block->successors[0] == first);
793 break;
794 }
795
796 case nir_jump_return:
797 validate_assert(state, block->successors[0] == state->impl->end_block);
798 break;
799
800 default:
801 unreachable("bad jump type");
802 }
803 } else {
804 nir_cf_node *next = nir_cf_node_next(&block->cf_node);
805 if (next == NULL) {
806 switch (state->parent_node->type) {
807 case nir_cf_node_loop: {
808 nir_block *first = nir_loop_first_block(state->loop);
809 validate_assert(state, block->successors[0] == first);
810 /* due to the hack for infinite loops, block->successors[1] may
811 * point to the block after the loop.
812 */
813 break;
814 }
815
816 case nir_cf_node_if: {
817 nir_block *after =
818 nir_cf_node_as_block(nir_cf_node_next(state->parent_node));
819 validate_assert(state, block->successors[0] == after);
820 validate_assert(state, block->successors[1] == NULL);
821 break;
822 }
823
824 case nir_cf_node_function:
825 validate_assert(state, block->successors[0] == state->impl->end_block);
826 validate_assert(state, block->successors[1] == NULL);
827 break;
828
829 default:
830 unreachable("unknown control flow node type");
831 }
832 } else {
833 if (next->type == nir_cf_node_if) {
834 nir_if *if_stmt = nir_cf_node_as_if(next);
835 validate_assert(state, block->successors[0] ==
836 nir_if_first_then_block(if_stmt));
837 validate_assert(state, block->successors[1] ==
838 nir_if_first_else_block(if_stmt));
839 } else {
840 validate_assert(state, next->type == nir_cf_node_loop);
841 nir_loop *loop = nir_cf_node_as_loop(next);
842 validate_assert(state, block->successors[0] ==
843 nir_loop_first_block(loop));
844 validate_assert(state, block->successors[1] == NULL);
845 }
846 }
847 }
848 }
849
850 static void
851 validate_if(nir_if *if_stmt, validate_state *state)
852 {
853 state->if_stmt = if_stmt;
854
855 validate_assert(state, !exec_node_is_head_sentinel(if_stmt->cf_node.node.prev));
856 nir_cf_node *prev_node = nir_cf_node_prev(&if_stmt->cf_node);
857 validate_assert(state, prev_node->type == nir_cf_node_block);
858
859 validate_assert(state, !exec_node_is_tail_sentinel(if_stmt->cf_node.node.next));
860 nir_cf_node *next_node = nir_cf_node_next(&if_stmt->cf_node);
861 validate_assert(state, next_node->type == nir_cf_node_block);
862
863 validate_src(&if_stmt->condition, state, 0, 1);
864
865 validate_assert(state, !exec_list_is_empty(&if_stmt->then_list));
866 validate_assert(state, !exec_list_is_empty(&if_stmt->else_list));
867
868 nir_cf_node *old_parent = state->parent_node;
869 state->parent_node = &if_stmt->cf_node;
870
871 exec_list_validate(&if_stmt->then_list);
872 foreach_list_typed(nir_cf_node, cf_node, node, &if_stmt->then_list) {
873 validate_cf_node(cf_node, state);
874 }
875
876 exec_list_validate(&if_stmt->else_list);
877 foreach_list_typed(nir_cf_node, cf_node, node, &if_stmt->else_list) {
878 validate_cf_node(cf_node, state);
879 }
880
881 state->parent_node = old_parent;
882 state->if_stmt = NULL;
883 }
884
885 static void
886 validate_loop(nir_loop *loop, validate_state *state)
887 {
888 validate_assert(state, !exec_node_is_head_sentinel(loop->cf_node.node.prev));
889 nir_cf_node *prev_node = nir_cf_node_prev(&loop->cf_node);
890 validate_assert(state, prev_node->type == nir_cf_node_block);
891
892 validate_assert(state, !exec_node_is_tail_sentinel(loop->cf_node.node.next));
893 nir_cf_node *next_node = nir_cf_node_next(&loop->cf_node);
894 validate_assert(state, next_node->type == nir_cf_node_block);
895
896 validate_assert(state, !exec_list_is_empty(&loop->body));
897
898 nir_cf_node *old_parent = state->parent_node;
899 state->parent_node = &loop->cf_node;
900 nir_loop *old_loop = state->loop;
901 state->loop = loop;
902
903 exec_list_validate(&loop->body);
904 foreach_list_typed(nir_cf_node, cf_node, node, &loop->body) {
905 validate_cf_node(cf_node, state);
906 }
907
908 state->parent_node = old_parent;
909 state->loop = old_loop;
910 }
911
912 static void
913 validate_cf_node(nir_cf_node *node, validate_state *state)
914 {
915 validate_assert(state, node->parent == state->parent_node);
916
917 switch (node->type) {
918 case nir_cf_node_block:
919 validate_block(nir_cf_node_as_block(node), state);
920 break;
921
922 case nir_cf_node_if:
923 validate_if(nir_cf_node_as_if(node), state);
924 break;
925
926 case nir_cf_node_loop:
927 validate_loop(nir_cf_node_as_loop(node), state);
928 break;
929
930 default:
931 unreachable("Invalid CF node type");
932 }
933 }
934
935 static void
936 prevalidate_reg_decl(nir_register *reg, bool is_global, validate_state *state)
937 {
938 validate_assert(state, reg->is_global == is_global);
939
940 if (is_global)
941 validate_assert(state, reg->index < state->shader->reg_alloc);
942 else
943 validate_assert(state, reg->index < state->impl->reg_alloc);
944 validate_assert(state, !BITSET_TEST(state->regs_found, reg->index));
945 BITSET_SET(state->regs_found, reg->index);
946
947 list_validate(&reg->uses);
948 list_validate(&reg->defs);
949 list_validate(&reg->if_uses);
950
951 reg_validate_state *reg_state = ralloc(state->regs, reg_validate_state);
952 reg_state->uses = _mesa_pointer_set_create(reg_state);
953 reg_state->if_uses = _mesa_pointer_set_create(reg_state);
954 reg_state->defs = _mesa_pointer_set_create(reg_state);
955
956 reg_state->where_defined = is_global ? NULL : state->impl;
957
958 _mesa_hash_table_insert(state->regs, reg, reg_state);
959 }
960
961 static void
962 postvalidate_reg_decl(nir_register *reg, validate_state *state)
963 {
964 struct hash_entry *entry = _mesa_hash_table_search(state->regs, reg);
965
966 assume(entry);
967 reg_validate_state *reg_state = (reg_validate_state *) entry->data;
968
969 nir_foreach_use(src, reg) {
970 struct set_entry *entry = _mesa_set_search(reg_state->uses, src);
971 validate_assert(state, entry);
972 _mesa_set_remove(reg_state->uses, entry);
973 }
974
975 if (reg_state->uses->entries != 0) {
976 printf("extra entries in register uses:\n");
977 set_foreach(reg_state->uses, entry)
978 printf("%p\n", entry->key);
979
980 abort();
981 }
982
983 nir_foreach_if_use(src, reg) {
984 struct set_entry *entry = _mesa_set_search(reg_state->if_uses, src);
985 validate_assert(state, entry);
986 _mesa_set_remove(reg_state->if_uses, entry);
987 }
988
989 if (reg_state->if_uses->entries != 0) {
990 printf("extra entries in register if_uses:\n");
991 set_foreach(reg_state->if_uses, entry)
992 printf("%p\n", entry->key);
993
994 abort();
995 }
996
997 nir_foreach_def(src, reg) {
998 struct set_entry *entry = _mesa_set_search(reg_state->defs, src);
999 validate_assert(state, entry);
1000 _mesa_set_remove(reg_state->defs, entry);
1001 }
1002
1003 if (reg_state->defs->entries != 0) {
1004 printf("extra entries in register defs:\n");
1005 set_foreach(reg_state->defs, entry)
1006 printf("%p\n", entry->key);
1007
1008 abort();
1009 }
1010 }
1011
1012 static void
1013 validate_var_decl(nir_variable *var, bool is_global, validate_state *state)
1014 {
1015 state->var = var;
1016
1017 validate_assert(state, is_global == nir_variable_is_global(var));
1018
1019 /* Must have exactly one mode set */
1020 validate_assert(state, util_is_power_of_two_nonzero(var->data.mode));
1021
1022 if (var->data.compact) {
1023 /* The "compact" flag is only valid on arrays of scalars. */
1024 assert(glsl_type_is_array(var->type));
1025
1026 const struct glsl_type *type = glsl_get_array_element(var->type);
1027 if (nir_is_per_vertex_io(var, state->shader->info.stage)) {
1028 assert(glsl_type_is_array(type));
1029 assert(glsl_type_is_scalar(glsl_get_array_element(type)));
1030 } else {
1031 assert(glsl_type_is_scalar(type));
1032 }
1033 }
1034
1035 if (var->num_members > 0) {
1036 const struct glsl_type *without_array = glsl_without_array(var->type);
1037 validate_assert(state, glsl_type_is_struct_or_ifc(without_array));
1038 validate_assert(state, var->num_members == glsl_get_length(without_array));
1039 validate_assert(state, var->members != NULL);
1040 }
1041
1042 /*
1043 * TODO validate some things ir_validate.cpp does (requires more GLSL type
1044 * support)
1045 */
1046
1047 _mesa_hash_table_insert(state->var_defs, var,
1048 is_global ? NULL : state->impl);
1049
1050 state->var = NULL;
1051 }
1052
1053 static bool
1054 postvalidate_ssa_def(nir_ssa_def *def, void *void_state)
1055 {
1056 validate_state *state = void_state;
1057
1058 struct hash_entry *entry = _mesa_hash_table_search(state->ssa_defs, def);
1059
1060 assume(entry);
1061 ssa_def_validate_state *def_state = (ssa_def_validate_state *)entry->data;
1062
1063 nir_foreach_use(src, def) {
1064 struct set_entry *entry = _mesa_set_search(def_state->uses, src);
1065 validate_assert(state, entry);
1066 _mesa_set_remove(def_state->uses, entry);
1067 }
1068
1069 if (def_state->uses->entries != 0) {
1070 printf("extra entries in SSA def uses:\n");
1071 set_foreach(def_state->uses, entry)
1072 printf("%p\n", entry->key);
1073
1074 abort();
1075 }
1076
1077 nir_foreach_if_use(src, def) {
1078 struct set_entry *entry = _mesa_set_search(def_state->if_uses, src);
1079 validate_assert(state, entry);
1080 _mesa_set_remove(def_state->if_uses, entry);
1081 }
1082
1083 if (def_state->if_uses->entries != 0) {
1084       printf("extra entries in SSA def if_uses:\n");
1085 set_foreach(def_state->if_uses, entry)
1086 printf("%p\n", entry->key);
1087
1088 abort();
1089 }
1090
1091 return true;
1092 }
1093
1094 static void
1095 validate_function_impl(nir_function_impl *impl, validate_state *state)
1096 {
1097 validate_assert(state, impl->function->impl == impl);
1098 validate_assert(state, impl->cf_node.parent == NULL);
1099
1100 validate_assert(state, exec_list_is_empty(&impl->end_block->instr_list));
1101 validate_assert(state, impl->end_block->successors[0] == NULL);
1102 validate_assert(state, impl->end_block->successors[1] == NULL);
1103
1104 state->impl = impl;
1105 state->parent_node = &impl->cf_node;
1106
1107 exec_list_validate(&impl->locals);
1108 nir_foreach_variable(var, &impl->locals) {
1109 validate_var_decl(var, false, state);
1110 }
1111
1112 state->regs_found = realloc(state->regs_found,
1113 BITSET_WORDS(impl->reg_alloc) *
1114 sizeof(BITSET_WORD));
1115 memset(state->regs_found, 0, BITSET_WORDS(impl->reg_alloc) *
1116 sizeof(BITSET_WORD));
1117 exec_list_validate(&impl->registers);
1118 foreach_list_typed(nir_register, reg, node, &impl->registers) {
1119 prevalidate_reg_decl(reg, false, state);
1120 }
1121
1122 state->ssa_defs_found = realloc(state->ssa_defs_found,
1123 BITSET_WORDS(impl->ssa_alloc) *
1124 sizeof(BITSET_WORD));
1125 memset(state->ssa_defs_found, 0, BITSET_WORDS(impl->ssa_alloc) *
1126 sizeof(BITSET_WORD));
1127 exec_list_validate(&impl->body);
1128 foreach_list_typed(nir_cf_node, node, node, &impl->body) {
1129 validate_cf_node(node, state);
1130 }
1131
1132 foreach_list_typed(nir_register, reg, node, &impl->registers) {
1133 postvalidate_reg_decl(reg, state);
1134 }
1135
1136 nir_foreach_block(block, impl) {
1137 nir_foreach_instr(instr, block)
1138 nir_foreach_ssa_def(instr, postvalidate_ssa_def, state);
1139 }
1140 }
1141
1142 static void
1143 validate_function(nir_function *func, validate_state *state)
1144 {
1145 if (func->impl != NULL) {
1146 validate_assert(state, func->impl->function == func);
1147 validate_function_impl(func->impl, state);
1148 }
1149 }
1150
1151 static void
1152 init_validate_state(validate_state *state)
1153 {
1154 state->regs = _mesa_pointer_hash_table_create(NULL);
1155 state->ssa_defs = _mesa_pointer_hash_table_create(NULL);
1156 state->ssa_defs_found = NULL;
1157 state->regs_found = NULL;
1158 state->var_defs = _mesa_pointer_hash_table_create(NULL);
1159 state->errors = _mesa_pointer_hash_table_create(NULL);
1160
1161 state->loop = NULL;
1162 state->instr = NULL;
1163 state->var = NULL;
1164 }
1165
1166 static void
1167 destroy_validate_state(validate_state *state)
1168 {
1169 _mesa_hash_table_destroy(state->regs, NULL);
1170 _mesa_hash_table_destroy(state->ssa_defs, NULL);
1171 free(state->ssa_defs_found);
1172 free(state->regs_found);
1173 _mesa_hash_table_destroy(state->var_defs, NULL);
1174 _mesa_hash_table_destroy(state->errors, NULL);
1175 }
1176
1177 mtx_t fail_dump_mutex = _MTX_INITIALIZER_NP;
1178
1179 static void
1180 dump_errors(validate_state *state, const char *when)
1181 {
1182 struct hash_table *errors = state->errors;
1183
1184 /* Lock around dumping so that we get clean dumps in a multi-threaded
1185 * scenario
1186 */
1187 mtx_lock(&fail_dump_mutex);
1188
1189 if (when) {
1190 fprintf(stderr, "NIR validation failed %s\n", when);
1191 fprintf(stderr, "%d errors:\n", _mesa_hash_table_num_entries(errors));
1192 } else {
1193 fprintf(stderr, "NIR validation failed with %d errors:\n",
1194 _mesa_hash_table_num_entries(errors));
1195 }
1196
1197 nir_print_shader_annotated(state->shader, stderr, errors);
1198
1199 if (_mesa_hash_table_num_entries(errors) > 0) {
1200 fprintf(stderr, "%d additional errors:\n",
1201 _mesa_hash_table_num_entries(errors));
1202 hash_table_foreach(errors, entry) {
1203 fprintf(stderr, "%s\n", (char *)entry->data);
1204 }
1205 }
1206
1207 mtx_unlock(&fail_dump_mutex);
1208
1209 abort();
1210 }
1211
1212 void
1213 nir_validate_shader(nir_shader *shader, const char *when)
1214 {
1215 static int should_validate = -1;
1216 if (should_validate < 0)
1217 should_validate = env_var_as_boolean("NIR_VALIDATE", true);
1218 if (!should_validate)
1219 return;
1220
1221 validate_state state;
1222 init_validate_state(&state);
1223
1224 state.shader = shader;
1225
1226 exec_list_validate(&shader->uniforms);
1227 nir_foreach_variable(var, &shader->uniforms) {
1228 validate_var_decl(var, true, &state);
1229 }
1230
1231 exec_list_validate(&shader->inputs);
1232 nir_foreach_variable(var, &shader->inputs) {
1233 validate_var_decl(var, true, &state);
1234 }
1235
1236 exec_list_validate(&shader->outputs);
1237 nir_foreach_variable(var, &shader->outputs) {
1238 validate_var_decl(var, true, &state);
1239 }
1240
1241 exec_list_validate(&shader->shared);
1242 nir_foreach_variable(var, &shader->shared) {
1243 validate_var_decl(var, true, &state);
1244 }
1245
1246 exec_list_validate(&shader->globals);
1247 nir_foreach_variable(var, &shader->globals) {
1248 validate_var_decl(var, true, &state);
1249 }
1250
1251 exec_list_validate(&shader->system_values);
1252 nir_foreach_variable(var, &shader->system_values) {
1253 validate_var_decl(var, true, &state);
1254 }
1255
1256 state.regs_found = realloc(state.regs_found,
1257 BITSET_WORDS(shader->reg_alloc) *
1258 sizeof(BITSET_WORD));
1259 memset(state.regs_found, 0, BITSET_WORDS(shader->reg_alloc) *
1260 sizeof(BITSET_WORD));
1261 exec_list_validate(&shader->registers);
1262 foreach_list_typed(nir_register, reg, node, &shader->registers) {
1263 prevalidate_reg_decl(reg, true, &state);
1264 }
1265
1266 exec_list_validate(&shader->functions);
1267 foreach_list_typed(nir_function, func, node, &shader->functions) {
1268 validate_function(func, &state);
1269 }
1270
1271 foreach_list_typed(nir_register, reg, node, &shader->registers) {
1272 postvalidate_reg_decl(reg, &state);
1273 }
1274
1275 if (_mesa_hash_table_num_entries(state.errors) > 0)
1276 dump_errors(&state, when);
1277
1278 destroy_validate_state(&state);
1279 }
1280
1281 #endif /* NDEBUG */