nir: support lowering clipdist to arrays
[mesa.git] / src/compiler/nir/nir_validate.c
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *
 */

#include "nir.h"
#include "c11/threads.h"
#include <assert.h>

/*
 * This file checks for invalid IR indicating a bug somewhere in the compiler.
 */
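
/* A minimal usage sketch: passes typically bracket their transformations with
 * validation calls so a broken transform is caught right away. nir_lower_foo()
 * below is a hypothetical pass name, used only for illustration; validation
 * can be disabled at run time with NIR_VALIDATE=0 (see nir_validate_shader()
 * at the bottom of this file):
 *
 *    nir_validate_shader(shader, "before nir_lower_foo");
 *    nir_lower_foo(shader);
 *    nir_validate_shader(shader, "after nir_lower_foo");
 */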

/* Since this file is just a pile of asserts, don't bother compiling it if
 * we're not building a debug build.
 */
#ifndef NDEBUG

/*
 * Per-register validation state.
 */

typedef struct {
   /*
    * equivalent to the uses and defs in nir_register, but built up by the
    * validator. At the end, we verify that the sets have the same entries.
    */
   struct set *uses, *if_uses, *defs;
   nir_function_impl *where_defined; /* NULL for global registers */
} reg_validate_state;

typedef struct {
   void *mem_ctx;

   /* map of register -> validation state (struct above) */
   struct hash_table *regs;

   /* the current shader being validated */
   nir_shader *shader;

   /* the current instruction being validated */
   nir_instr *instr;

   /* the current variable being validated */
   nir_variable *var;

   /* the current basic block being validated */
   nir_block *block;

   /* the current if statement being validated */
   nir_if *if_stmt;

   /* the current loop being visited */
   nir_loop *loop;

   /* the parent of the current cf node being visited */
   nir_cf_node *parent_node;

   /* the current function implementation being validated */
   nir_function_impl *impl;

   /* Set of seen SSA sources */
   struct set *ssa_srcs;

   /* bitset of ssa definitions we have found; used to check uniqueness */
   BITSET_WORD *ssa_defs_found;

   /* bitset of registers we have found; used to check uniqueness */
   BITSET_WORD *regs_found;

   /* map of variable -> function implementation where it is defined or NULL
    * if it is a global variable
    */
   struct hash_table *var_defs;

   /* map of instruction/var/etc to failed assert string */
   struct hash_table *errors;
} validate_state;

static void
log_error(validate_state *state, const char *cond, const char *file, int line)
{
   const void *obj;

   if (state->instr)
      obj = state->instr;
   else if (state->var)
      obj = state->var;
   else
      obj = cond;

   char *msg = ralloc_asprintf(state->errors, "error: %s (%s:%d)",
                               cond, file, line);

   _mesa_hash_table_insert(state->errors, obj, msg);
}

#define validate_assert(state, cond) do {             \
      if (!(cond))                                    \
         log_error(state, #cond, __FILE__, __LINE__); \
   } while (0)
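
/* Note that validate_assert() records the failure in state->errors, keyed by
 * the current instruction or variable, rather than aborting on the spot.
 * Validation keeps going so that dump_errors() can print the whole shader
 * with every failing object annotated via nir_print_shader_annotated().
 */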

static void validate_src(nir_src *src, validate_state *state,
                         unsigned bit_sizes, unsigned num_components);

static void
validate_reg_src(nir_src *src, validate_state *state,
                 unsigned bit_sizes, unsigned num_components)
{
   validate_assert(state, src->reg.reg != NULL);

   struct hash_entry *entry;
   entry = _mesa_hash_table_search(state->regs, src->reg.reg);
   validate_assert(state, entry);

   reg_validate_state *reg_state = (reg_validate_state *) entry->data;

   if (state->instr) {
      _mesa_set_add(reg_state->uses, src);
   } else {
      validate_assert(state, state->if_stmt);
      _mesa_set_add(reg_state->if_uses, src);
   }

   validate_assert(state, reg_state->where_defined == state->impl &&
                   "using a register declared in a different function");

   if (bit_sizes)
      validate_assert(state, src->reg.reg->bit_size & bit_sizes);
   if (num_components)
      validate_assert(state, src->reg.reg->num_components == num_components);

   validate_assert(state, (src->reg.reg->num_array_elems == 0 ||
                           src->reg.base_offset < src->reg.reg->num_array_elems) &&
                   "definitely out-of-bounds array access");

   if (src->reg.indirect) {
      validate_assert(state, src->reg.reg->num_array_elems != 0);
      validate_assert(state, (src->reg.indirect->is_ssa ||
                              src->reg.indirect->reg.indirect == NULL) &&
                      "only one level of indirection allowed");
      validate_src(src->reg.indirect, state, 32, 1);
   }
}

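/* An instruction use and an if-condition use of the same nir_src must not
 * collide in the ssa_srcs set, so if-uses are tagged by setting bit 0 of the
 * pointer before it is added (pointers are at least 2-byte aligned, so bit 0
 * is always free). validate_ssa_def() adds SET_PTR_BIT(src, 0) for each
 * if-use, and validate_ssa_src() looks the tagged pointer back up whenever
 * there is no current instruction.
 */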
#define SET_PTR_BIT(ptr, bit) \
   (void *)(((uintptr_t)(ptr)) | (((uintptr_t)1) << bit))

static void
validate_ssa_src(nir_src *src, validate_state *state,
                 unsigned bit_sizes, unsigned num_components)
{
   validate_assert(state, src->ssa != NULL);

   /* As we walk SSA defs, we add every use to this set. We need to make sure
    * our use is seen in a use list.
    */
   struct set_entry *entry;
   if (state->instr) {
      entry = _mesa_set_search(state->ssa_srcs, src);
   } else {
      entry = _mesa_set_search(state->ssa_srcs, SET_PTR_BIT(src, 0));
   }
   validate_assert(state, entry);

   /* This will let us prove that we've seen all the sources */
   if (entry)
      _mesa_set_remove(state->ssa_srcs, entry);

   if (bit_sizes)
      validate_assert(state, src->ssa->bit_size & bit_sizes);
   if (num_components)
      validate_assert(state, src->ssa->num_components == num_components);

   /* TODO validate that the use is dominated by the definition */
}

static void
validate_src(nir_src *src, validate_state *state,
             unsigned bit_sizes, unsigned num_components)
{
   if (state->instr)
      validate_assert(state, src->parent_instr == state->instr);
   else
      validate_assert(state, src->parent_if == state->if_stmt);

   if (src->is_ssa)
      validate_ssa_src(src, state, bit_sizes, num_components);
   else
      validate_reg_src(src, state, bit_sizes, num_components);
}

static void
validate_alu_src(nir_alu_instr *instr, unsigned index, validate_state *state)
{
   nir_alu_src *src = &instr->src[index];

   if (instr->op == nir_op_mov)
      assert(!src->abs && !src->negate);

   unsigned num_components = nir_src_num_components(src->src);
   for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
      validate_assert(state, src->swizzle[i] < NIR_MAX_VEC_COMPONENTS);

      if (nir_alu_instr_channel_used(instr, index, i))
         validate_assert(state, src->swizzle[i] < num_components);
   }

   validate_src(&src->src, state, 0, 0);
}

static void
validate_reg_dest(nir_reg_dest *dest, validate_state *state,
                  unsigned bit_sizes, unsigned num_components)
{
   validate_assert(state, dest->reg != NULL);

   validate_assert(state, dest->parent_instr == state->instr);

   struct hash_entry *entry2;
   entry2 = _mesa_hash_table_search(state->regs, dest->reg);

   validate_assert(state, entry2);

   reg_validate_state *reg_state = (reg_validate_state *) entry2->data;
   _mesa_set_add(reg_state->defs, dest);

   validate_assert(state, reg_state->where_defined == state->impl &&
                   "writing to a register declared in a different function");

   if (bit_sizes)
      validate_assert(state, dest->reg->bit_size & bit_sizes);
   if (num_components)
      validate_assert(state, dest->reg->num_components == num_components);

   validate_assert(state, (dest->reg->num_array_elems == 0 ||
                           dest->base_offset < dest->reg->num_array_elems) &&
                   "definitely out-of-bounds array access");

   if (dest->indirect) {
      validate_assert(state, dest->reg->num_array_elems != 0);
      validate_assert(state, (dest->indirect->is_ssa || dest->indirect->reg.indirect == NULL) &&
                      "only one level of indirection allowed");
      validate_src(dest->indirect, state, 32, 1);
   }
}

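/* Walking a def registers all of its uses in state->ssa_srcs; each use is
 * then removed again when validate_ssa_src() visits it. Anything still left
 * in the set at the end of the function implementation is a source whose def
 * was never seen, and validate_function_impl() reports those as dangling.
 */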
static void
validate_ssa_def(nir_ssa_def *def, validate_state *state)
{
   validate_assert(state, def->index < state->impl->ssa_alloc);
   validate_assert(state, !BITSET_TEST(state->ssa_defs_found, def->index));
   BITSET_SET(state->ssa_defs_found, def->index);

   validate_assert(state, def->parent_instr == state->instr);

   validate_assert(state, (def->num_components <= 4) ||
                          (def->num_components == 8) ||
                          (def->num_components == 16));

   list_validate(&def->uses);
   nir_foreach_use(src, def) {
      validate_assert(state, src->is_ssa);
      validate_assert(state, src->ssa == def);
      bool already_seen = false;
      _mesa_set_search_and_add(state->ssa_srcs, src, &already_seen);
      /* A nir_src should only appear once and only in one SSA def use list */
      validate_assert(state, !already_seen);
   }

   list_validate(&def->if_uses);
   nir_foreach_if_use(src, def) {
      validate_assert(state, src->is_ssa);
      validate_assert(state, src->ssa == def);
      bool already_seen = false;
      _mesa_set_search_and_add(state->ssa_srcs, SET_PTR_BIT(src, 0),
                               &already_seen);
      /* A nir_src should only appear once and only in one SSA def use list */
      validate_assert(state, !already_seen);
   }
}

static void
validate_dest(nir_dest *dest, validate_state *state,
              unsigned bit_sizes, unsigned num_components)
{
   if (dest->is_ssa) {
      if (bit_sizes)
         validate_assert(state, dest->ssa.bit_size & bit_sizes);
      if (num_components)
         validate_assert(state, dest->ssa.num_components == num_components);
      validate_ssa_def(&dest->ssa, state);
   } else {
      validate_reg_dest(&dest->reg, state, bit_sizes, num_components);
   }
}

static void
validate_alu_dest(nir_alu_instr *instr, validate_state *state)
{
   nir_alu_dest *dest = &instr->dest;

   if (instr->op == nir_op_mov)
      assert(!dest->saturate);

   unsigned dest_size = nir_dest_num_components(dest->dest);
   /*
    * validate that the instruction doesn't write to components not in the
    * register/SSA value
    */
   validate_assert(state, !(dest->write_mask & ~((1 << dest_size) - 1)));

   /* validate that saturate is only ever used on instructions with
    * destinations of type float
    */
   nir_alu_instr *alu = nir_instr_as_alu(state->instr);
   validate_assert(state,
                   (nir_alu_type_get_base_type(nir_op_infos[alu->op].output_type) ==
                    nir_type_float) ||
                   !dest->saturate);

   validate_dest(&dest->dest, state, 0, 0);
}

static void
validate_alu_instr(nir_alu_instr *instr, validate_state *state)
{
   validate_assert(state, instr->op < nir_num_opcodes);

   unsigned instr_bit_size = 0;
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      nir_alu_type src_type = nir_op_infos[instr->op].input_types[i];
      unsigned src_bit_size = nir_src_bit_size(instr->src[i].src);
      if (nir_alu_type_get_type_size(src_type)) {
         validate_assert(state, src_bit_size == nir_alu_type_get_type_size(src_type));
      } else if (instr_bit_size) {
         validate_assert(state, src_bit_size == instr_bit_size);
      } else {
         instr_bit_size = src_bit_size;
      }

      if (nir_alu_type_get_base_type(src_type) == nir_type_float) {
         /* 8-bit float isn't a thing */
         validate_assert(state, src_bit_size == 16 || src_bit_size == 32 ||
                                src_bit_size == 64);
      }

      validate_alu_src(instr, i, state);
   }

   nir_alu_type dest_type = nir_op_infos[instr->op].output_type;
   unsigned dest_bit_size = nir_dest_bit_size(instr->dest.dest);
   if (nir_alu_type_get_type_size(dest_type)) {
      validate_assert(state, dest_bit_size == nir_alu_type_get_type_size(dest_type));
   } else if (instr_bit_size) {
      validate_assert(state, dest_bit_size == instr_bit_size);
   } else {
      /* The only unsized thing is the destination so it's vacuously valid */
   }

   if (nir_alu_type_get_base_type(dest_type) == nir_type_float) {
      /* 8-bit float isn't a thing */
      validate_assert(state, dest_bit_size == 16 || dest_bit_size == 32 ||
                             dest_bit_size == 64);
   }

   validate_alu_dest(instr, state);
}

static void
validate_var_use(nir_variable *var, validate_state *state)
{
   struct hash_entry *entry = _mesa_hash_table_search(state->var_defs, var);
   validate_assert(state, entry);
   if (entry && var->data.mode == nir_var_function_temp)
      validate_assert(state, (nir_function_impl *) entry->data == state->impl);
}

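/* Validates a single link in a deref chain. As a sketch of the shape being
 * checked, a GLSL access like s.field[i] becomes the chain
 *
 *    deref_var s -> deref_struct .field -> deref_array [i]
 *
 * where each instruction's parent must be another deref (or a variable at
 * the root), the modes must match along the chain, and each type must be the
 * corresponding element/field type of its parent's type.
 */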
static void
validate_deref_instr(nir_deref_instr *instr, validate_state *state)
{
   if (instr->deref_type == nir_deref_type_var) {
      /* Variable dereferences are stupid simple. */
      validate_assert(state, instr->mode == instr->var->data.mode);
      validate_assert(state, instr->type == instr->var->type);
      validate_var_use(instr->var, state);
   } else if (instr->deref_type == nir_deref_type_cast) {
      /* For cast, we simply have to trust the instruction. It's up to
       * lowering passes and front/back-ends to make them sane.
       */
      validate_src(&instr->parent, state, 0, 0);

      /* We just validate that the type and mode are there */
      validate_assert(state, instr->mode);
      validate_assert(state, instr->type);
   } else {
      /* We require the parent to be SSA. This may be lifted in the future */
      validate_assert(state, instr->parent.is_ssa);

      /* The parent pointer value must have the same number of components
       * as the destination.
       */
      validate_src(&instr->parent, state, nir_dest_bit_size(instr->dest),
                   nir_dest_num_components(instr->dest));

      nir_instr *parent_instr = instr->parent.ssa->parent_instr;

      /* The parent must come from another deref instruction */
      validate_assert(state, parent_instr->type == nir_instr_type_deref);

      nir_deref_instr *parent = nir_instr_as_deref(parent_instr);

      validate_assert(state, instr->mode == parent->mode);

      switch (instr->deref_type) {
      case nir_deref_type_struct:
         validate_assert(state, glsl_type_is_struct_or_ifc(parent->type));
         validate_assert(state,
                         instr->strct.index < glsl_get_length(parent->type));
         validate_assert(state, instr->type ==
                         glsl_get_struct_field(parent->type, instr->strct.index));
         break;

      case nir_deref_type_array:
      case nir_deref_type_array_wildcard:
         if (instr->mode == nir_var_mem_ubo ||
             instr->mode == nir_var_mem_ssbo ||
             instr->mode == nir_var_mem_shared ||
             instr->mode == nir_var_mem_global) {
            /* Shared variables and UBO/SSBOs have a bit more relaxed rules
             * because we need to be able to handle array derefs on vectors.
             * Fortunately, nir_lower_io handles these just fine.
             */
            validate_assert(state, glsl_type_is_array(parent->type) ||
                                   glsl_type_is_matrix(parent->type) ||
                                   glsl_type_is_vector(parent->type));
         } else {
            /* Most of NIR cannot handle array derefs on vectors */
            validate_assert(state, glsl_type_is_array(parent->type) ||
                                   glsl_type_is_matrix(parent->type));
         }
         validate_assert(state,
                         instr->type == glsl_get_array_element(parent->type));

         if (instr->deref_type == nir_deref_type_array) {
            validate_src(&instr->arr.index, state,
                         nir_dest_bit_size(instr->dest), 1);
         }
         break;

      case nir_deref_type_ptr_as_array:
         /* ptr_as_array derefs must have a parent that is either an array,
          * ptr_as_array, or cast. If the parent is a cast, we get the stride
          * information (if any) from the cast deref.
          */
         validate_assert(state,
                         parent->deref_type == nir_deref_type_array ||
                         parent->deref_type == nir_deref_type_ptr_as_array ||
                         parent->deref_type == nir_deref_type_cast);
         validate_src(&instr->arr.index, state,
                      nir_dest_bit_size(instr->dest), 1);
         break;

      default:
         unreachable("Invalid deref instruction type");
      }
   }

   /* We intentionally don't validate the size of the destination because we
    * want to let other compiler components such as SPIR-V decide how big
    * pointers should be.
    */
   validate_dest(&instr->dest, state, 0, 0);

   /* Deref instructions as if conditions don't make sense because if
    * conditions expect well-formed Booleans. If you want to compare with
    * NULL, an explicit comparison operation should be used.
    */
   validate_assert(state, list_empty(&instr->dest.ssa.if_uses));

   /* Only certain modes can be used as sources for phi instructions. */
   nir_foreach_use(use, &instr->dest.ssa) {
      if (use->parent_instr->type == nir_instr_type_phi) {
         validate_assert(state, instr->mode == nir_var_mem_ubo ||
                                instr->mode == nir_var_mem_ssbo ||
                                instr->mode == nir_var_mem_shared ||
                                instr->mode == nir_var_mem_global);
      }
   }
}

static void
validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state)
{
   unsigned dest_bit_size = 0;
   unsigned src_bit_sizes[NIR_INTRINSIC_MAX_INPUTS] = { 0, };
   switch (instr->intrinsic) {
   case nir_intrinsic_load_param: {
      unsigned param_idx = nir_intrinsic_param_idx(instr);
      validate_assert(state, param_idx < state->impl->function->num_params);
      nir_parameter *param = &state->impl->function->params[param_idx];
      validate_assert(state, instr->num_components == param->num_components);
      dest_bit_size = param->bit_size;
      break;
   }

   case nir_intrinsic_load_deref: {
      nir_deref_instr *src = nir_src_as_deref(instr->src[0]);
      validate_assert(state, glsl_type_is_vector_or_scalar(src->type) ||
                             (src->mode == nir_var_uniform &&
                              glsl_get_base_type(src->type) == GLSL_TYPE_SUBROUTINE));
      validate_assert(state, instr->num_components ==
                             glsl_get_vector_elements(src->type));
      dest_bit_size = glsl_get_bit_size(src->type);
      /* Also allow 32-bit boolean load operations */
      if (glsl_type_is_boolean(src->type))
         dest_bit_size |= 32;
      break;
   }

   case nir_intrinsic_store_deref: {
      nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
      validate_assert(state, glsl_type_is_vector_or_scalar(dst->type));
      validate_assert(state, instr->num_components ==
                             glsl_get_vector_elements(dst->type));
      src_bit_sizes[1] = glsl_get_bit_size(dst->type);
      /* Also allow 32-bit boolean store operations */
      if (glsl_type_is_boolean(dst->type))
         src_bit_sizes[1] |= 32;
      validate_assert(state, (dst->mode & (nir_var_shader_in |
                                           nir_var_uniform)) == 0);
      validate_assert(state, (nir_intrinsic_write_mask(instr) & ~((1 << instr->num_components) - 1)) == 0);
      break;
   }

   case nir_intrinsic_copy_deref: {
      nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
      nir_deref_instr *src = nir_src_as_deref(instr->src[1]);
      validate_assert(state, glsl_get_bare_type(dst->type) ==
                             glsl_get_bare_type(src->type));
      validate_assert(state, (dst->mode & (nir_var_shader_in |
                                           nir_var_uniform)) == 0);
      break;
   }

   default:
      break;
   }

   unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs;
   for (unsigned i = 0; i < num_srcs; i++) {
      unsigned components_read = nir_intrinsic_src_components(instr, i);

      validate_assert(state, components_read > 0);

      validate_src(&instr->src[i], state, src_bit_sizes[i], components_read);
   }

   if (nir_intrinsic_infos[instr->intrinsic].has_dest) {
      unsigned components_written = nir_intrinsic_dest_components(instr);
      unsigned bit_sizes = nir_intrinsic_infos[instr->intrinsic].dest_bit_sizes;

      validate_assert(state, components_written > 0);

      if (dest_bit_size && bit_sizes)
         validate_assert(state, dest_bit_size & bit_sizes);
      else
         dest_bit_size = dest_bit_size ? dest_bit_size : bit_sizes;

      validate_dest(&instr->dest, state, dest_bit_size, components_written);
   }
}

static void
validate_tex_instr(nir_tex_instr *instr, validate_state *state)
{
   bool src_type_seen[nir_num_tex_src_types];
   for (unsigned i = 0; i < nir_num_tex_src_types; i++)
      src_type_seen[i] = false;

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      validate_assert(state, !src_type_seen[instr->src[i].src_type]);
      src_type_seen[instr->src[i].src_type] = true;
      validate_src(&instr->src[i].src, state,
                   0, nir_tex_instr_src_size(instr, i));

      switch (instr->src[i].src_type) {
      case nir_tex_src_texture_deref:
      case nir_tex_src_sampler_deref:
         validate_assert(state, instr->src[i].src.is_ssa);
         validate_assert(state,
                         instr->src[i].src.ssa->parent_instr->type == nir_instr_type_deref);
         break;
      default:
         break;
      }
   }

   if (nir_tex_instr_has_explicit_tg4_offsets(instr)) {
      validate_assert(state, instr->op == nir_texop_tg4);
      validate_assert(state, !src_type_seen[nir_tex_src_offset]);
   }

   validate_dest(&instr->dest, state, 0, nir_tex_instr_dest_size(instr));
}

static void
validate_call_instr(nir_call_instr *instr, validate_state *state)
{
   validate_assert(state, instr->num_params == instr->callee->num_params);

   for (unsigned i = 0; i < instr->num_params; i++) {
      validate_src(&instr->params[i], state,
                   instr->callee->params[i].bit_size,
                   instr->callee->params[i].num_components);
   }
}

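/* For example, a 16-bit constant is valid only if everything outside the
 * .u16 member of the nir_const_value union is zero-filled; a value written
 * as .u32 = 0xffffffff but claimed to be 16 bits would fail the memcmp()
 * check below.
 */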
static void
validate_const_value(nir_const_value *val, unsigned bit_size,
                     validate_state *state)
{
   /* In order for block copies to work properly for things like instruction
    * comparisons and [de]serialization, we require the unused bits of the
    * nir_const_value to be zero.
    */
   nir_const_value cmp_val;
   memset(&cmp_val, 0, sizeof(cmp_val));
   switch (bit_size) {
   case 1:
      cmp_val.b = val->b;
      break;
   case 8:
      cmp_val.u8 = val->u8;
      break;
   case 16:
      cmp_val.u16 = val->u16;
      break;
   case 32:
      cmp_val.u32 = val->u32;
      break;
   case 64:
      cmp_val.u64 = val->u64;
      break;
   default:
      validate_assert(state, !"Invalid load_const bit size");
   }
   validate_assert(state, memcmp(val, &cmp_val, sizeof(cmp_val)) == 0);
}

static void
validate_load_const_instr(nir_load_const_instr *instr, validate_state *state)
{
   validate_ssa_def(&instr->def, state);

   for (unsigned i = 0; i < instr->def.num_components; i++)
      validate_const_value(&instr->value[i], instr->def.bit_size, state);
}

static void
validate_ssa_undef_instr(nir_ssa_undef_instr *instr, validate_state *state)
{
   validate_ssa_def(&instr->def, state);
}

static void
validate_phi_instr(nir_phi_instr *instr, validate_state *state)
{
   /*
    * don't validate the sources until we get to them from their predecessor
    * basic blocks, to avoid validating an SSA use before its definition.
    */

   validate_dest(&instr->dest, state, 0, 0);

   exec_list_validate(&instr->srcs);
   validate_assert(state, exec_list_length(&instr->srcs) ==
                          state->block->predecessors->entries);
}

static void
validate_instr(nir_instr *instr, validate_state *state)
{
   validate_assert(state, instr->block == state->block);

   state->instr = instr;

   switch (instr->type) {
   case nir_instr_type_alu:
      validate_alu_instr(nir_instr_as_alu(instr), state);
      break;

   case nir_instr_type_deref:
      validate_deref_instr(nir_instr_as_deref(instr), state);
      break;

   case nir_instr_type_call:
      validate_call_instr(nir_instr_as_call(instr), state);
      break;

   case nir_instr_type_intrinsic:
      validate_intrinsic_instr(nir_instr_as_intrinsic(instr), state);
      break;

   case nir_instr_type_tex:
      validate_tex_instr(nir_instr_as_tex(instr), state);
      break;

   case nir_instr_type_load_const:
      validate_load_const_instr(nir_instr_as_load_const(instr), state);
      break;

   case nir_instr_type_phi:
      validate_phi_instr(nir_instr_as_phi(instr), state);
      break;

   case nir_instr_type_ssa_undef:
      validate_ssa_undef_instr(nir_instr_as_ssa_undef(instr), state);
      break;

   case nir_instr_type_jump:
      break;

   default:
      validate_assert(state, !"Invalid instruction type");
      break;
   }

   state->instr = NULL;
}

static void
validate_phi_src(nir_phi_instr *instr, nir_block *pred, validate_state *state)
{
   state->instr = &instr->instr;

   validate_assert(state, instr->dest.is_ssa);

   exec_list_validate(&instr->srcs);
   nir_foreach_phi_src(src, instr) {
      if (src->pred == pred) {
         validate_assert(state, src->src.is_ssa);
         validate_src(&src->src, state, instr->dest.ssa.bit_size,
                      instr->dest.ssa.num_components);
         state->instr = NULL;
         return;
      }
   }

   abort();
}

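/* Called once per control flow edge, with 'block' the predecessor and 'succ'
 * the successor; phis are clustered at the top of a block, so we can stop at
 * the first non-phi instruction.
 */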
static void
validate_phi_srcs(nir_block *block, nir_block *succ, validate_state *state)
{
   nir_foreach_instr(instr, succ) {
      if (instr->type != nir_instr_type_phi)
         break;

      validate_phi_src(nir_instr_as_phi(instr), block, state);
   }
}

static void validate_cf_node(nir_cf_node *node, validate_state *state);

static void
validate_block(nir_block *block, validate_state *state)
{
   validate_assert(state, block->cf_node.parent == state->parent_node);

   state->block = block;

   exec_list_validate(&block->instr_list);
   nir_foreach_instr(instr, block) {
      if (instr->type == nir_instr_type_phi) {
         validate_assert(state, instr == nir_block_first_instr(block) ||
                                nir_instr_prev(instr)->type == nir_instr_type_phi);
      }

      if (instr->type == nir_instr_type_jump) {
         validate_assert(state, instr == nir_block_last_instr(block));
      }

      validate_instr(instr, state);
   }

   validate_assert(state, block->successors[0] != NULL);
   validate_assert(state, block->successors[0] != block->successors[1]);

   for (unsigned i = 0; i < 2; i++) {
      if (block->successors[i] != NULL) {
         struct set_entry *entry =
            _mesa_set_search(block->successors[i]->predecessors, block);
         validate_assert(state, entry);

         validate_phi_srcs(block, block->successors[i], state);
      }
   }

   set_foreach(block->predecessors, entry) {
      const nir_block *pred = entry->key;
      validate_assert(state, pred->successors[0] == block ||
                             pred->successors[1] == block);
   }

   if (!exec_list_is_empty(&block->instr_list) &&
       nir_block_last_instr(block)->type == nir_instr_type_jump) {
      validate_assert(state, block->successors[1] == NULL);
      nir_jump_instr *jump = nir_instr_as_jump(nir_block_last_instr(block));
      switch (jump->type) {
      case nir_jump_break: {
         nir_block *after =
            nir_cf_node_as_block(nir_cf_node_next(&state->loop->cf_node));
         validate_assert(state, block->successors[0] == after);
         break;
      }

      case nir_jump_continue: {
         nir_block *first = nir_loop_first_block(state->loop);
         validate_assert(state, block->successors[0] == first);
         break;
      }

      case nir_jump_return:
         validate_assert(state, block->successors[0] == state->impl->end_block);
         break;

      default:
         unreachable("bad jump type");
      }
   } else {
      nir_cf_node *next = nir_cf_node_next(&block->cf_node);
      if (next == NULL) {
         switch (state->parent_node->type) {
         case nir_cf_node_loop: {
            nir_block *first = nir_loop_first_block(state->loop);
            validate_assert(state, block->successors[0] == first);
            /* due to the hack for infinite loops, block->successors[1] may
             * point to the block after the loop.
             */
            break;
         }

         case nir_cf_node_if: {
            nir_block *after =
               nir_cf_node_as_block(nir_cf_node_next(state->parent_node));
            validate_assert(state, block->successors[0] == after);
            validate_assert(state, block->successors[1] == NULL);
            break;
         }

         case nir_cf_node_function:
            validate_assert(state, block->successors[0] == state->impl->end_block);
            validate_assert(state, block->successors[1] == NULL);
            break;

         default:
            unreachable("unknown control flow node type");
         }
      } else {
         if (next->type == nir_cf_node_if) {
            nir_if *if_stmt = nir_cf_node_as_if(next);
            validate_assert(state, block->successors[0] ==
                                   nir_if_first_then_block(if_stmt));
            validate_assert(state, block->successors[1] ==
                                   nir_if_first_else_block(if_stmt));
         } else {
            validate_assert(state, next->type == nir_cf_node_loop);
            nir_loop *loop = nir_cf_node_as_loop(next);
            validate_assert(state, block->successors[0] ==
                                   nir_loop_first_block(loop));
            validate_assert(state, block->successors[1] == NULL);
         }
      }
   }
}

static void
validate_if(nir_if *if_stmt, validate_state *state)
{
   state->if_stmt = if_stmt;

   validate_assert(state, !exec_node_is_head_sentinel(if_stmt->cf_node.node.prev));
   nir_cf_node *prev_node = nir_cf_node_prev(&if_stmt->cf_node);
   validate_assert(state, prev_node->type == nir_cf_node_block);

   validate_assert(state, !exec_node_is_tail_sentinel(if_stmt->cf_node.node.next));
   nir_cf_node *next_node = nir_cf_node_next(&if_stmt->cf_node);
   validate_assert(state, next_node->type == nir_cf_node_block);

   validate_src(&if_stmt->condition, state, 0, 1);

   validate_assert(state, !exec_list_is_empty(&if_stmt->then_list));
   validate_assert(state, !exec_list_is_empty(&if_stmt->else_list));

   nir_cf_node *old_parent = state->parent_node;
   state->parent_node = &if_stmt->cf_node;

   exec_list_validate(&if_stmt->then_list);
   foreach_list_typed(nir_cf_node, cf_node, node, &if_stmt->then_list) {
      validate_cf_node(cf_node, state);
   }

   exec_list_validate(&if_stmt->else_list);
   foreach_list_typed(nir_cf_node, cf_node, node, &if_stmt->else_list) {
      validate_cf_node(cf_node, state);
   }

   state->parent_node = old_parent;
   state->if_stmt = NULL;
}

static void
validate_loop(nir_loop *loop, validate_state *state)
{
   validate_assert(state, !exec_node_is_head_sentinel(loop->cf_node.node.prev));
   nir_cf_node *prev_node = nir_cf_node_prev(&loop->cf_node);
   validate_assert(state, prev_node->type == nir_cf_node_block);

   validate_assert(state, !exec_node_is_tail_sentinel(loop->cf_node.node.next));
   nir_cf_node *next_node = nir_cf_node_next(&loop->cf_node);
   validate_assert(state, next_node->type == nir_cf_node_block);

   validate_assert(state, !exec_list_is_empty(&loop->body));

   nir_cf_node *old_parent = state->parent_node;
   state->parent_node = &loop->cf_node;
   nir_loop *old_loop = state->loop;
   state->loop = loop;

   exec_list_validate(&loop->body);
   foreach_list_typed(nir_cf_node, cf_node, node, &loop->body) {
      validate_cf_node(cf_node, state);
   }

   state->parent_node = old_parent;
   state->loop = old_loop;
}

static void
validate_cf_node(nir_cf_node *node, validate_state *state)
{
   validate_assert(state, node->parent == state->parent_node);

   switch (node->type) {
   case nir_cf_node_block:
      validate_block(nir_cf_node_as_block(node), state);
      break;

   case nir_cf_node_if:
      validate_if(nir_cf_node_as_if(node), state);
      break;

   case nir_cf_node_loop:
      validate_loop(nir_cf_node_as_loop(node), state);
      break;

   default:
      unreachable("Invalid CF node type");
   }
}

static void
prevalidate_reg_decl(nir_register *reg, validate_state *state)
{
   validate_assert(state, reg->index < state->impl->reg_alloc);
   validate_assert(state, !BITSET_TEST(state->regs_found, reg->index));
   BITSET_SET(state->regs_found, reg->index);

   list_validate(&reg->uses);
   list_validate(&reg->defs);
   list_validate(&reg->if_uses);

   reg_validate_state *reg_state = ralloc(state->regs, reg_validate_state);
   reg_state->uses = _mesa_pointer_set_create(reg_state);
   reg_state->if_uses = _mesa_pointer_set_create(reg_state);
   reg_state->defs = _mesa_pointer_set_create(reg_state);

   reg_state->where_defined = state->impl;

   _mesa_hash_table_insert(state->regs, reg, reg_state);
}

static void
postvalidate_reg_decl(nir_register *reg, validate_state *state)
{
   struct hash_entry *entry = _mesa_hash_table_search(state->regs, reg);

   assume(entry);
   reg_validate_state *reg_state = (reg_validate_state *) entry->data;

   nir_foreach_use(src, reg) {
      struct set_entry *entry = _mesa_set_search(reg_state->uses, src);
      validate_assert(state, entry);
      _mesa_set_remove(reg_state->uses, entry);
   }

   if (reg_state->uses->entries != 0) {
      printf("extra entries in register uses:\n");
      set_foreach(reg_state->uses, entry)
         printf("%p\n", entry->key);

      abort();
   }

   nir_foreach_if_use(src, reg) {
      struct set_entry *entry = _mesa_set_search(reg_state->if_uses, src);
      validate_assert(state, entry);
      _mesa_set_remove(reg_state->if_uses, entry);
   }

   if (reg_state->if_uses->entries != 0) {
      printf("extra entries in register if_uses:\n");
      set_foreach(reg_state->if_uses, entry)
         printf("%p\n", entry->key);

      abort();
   }

   nir_foreach_def(src, reg) {
      struct set_entry *entry = _mesa_set_search(reg_state->defs, src);
      validate_assert(state, entry);
      _mesa_set_remove(reg_state->defs, entry);
   }

   if (reg_state->defs->entries != 0) {
      printf("extra entries in register defs:\n");
      set_foreach(reg_state->defs, entry)
         printf("%p\n", entry->key);

      abort();
   }
}

static void
validate_var_decl(nir_variable *var, bool is_global, validate_state *state)
{
   state->var = var;

   validate_assert(state, is_global == nir_variable_is_global(var));

   /* Must have exactly one mode set */
   validate_assert(state, util_is_power_of_two_nonzero(var->data.mode));

   if (var->data.compact) {
      /* The "compact" flag is only valid on arrays of scalars. */
      assert(glsl_type_is_array(var->type));

      const struct glsl_type *type = glsl_get_array_element(var->type);
      if (nir_is_per_vertex_io(var, state->shader->info.stage)) {
         assert(glsl_type_is_array(type));
         assert(glsl_type_is_scalar(glsl_get_array_element(type)));
      } else {
         assert(glsl_type_is_scalar(type));
      }
   }

   if (var->num_members > 0) {
      const struct glsl_type *without_array = glsl_without_array(var->type);
      validate_assert(state, glsl_type_is_struct_or_ifc(without_array));
      validate_assert(state, var->num_members == glsl_get_length(without_array));
      validate_assert(state, var->members != NULL);
   }

   /*
    * TODO validate some things ir_validate.cpp does (requires more GLSL type
    * support)
    */

   _mesa_hash_table_insert(state->var_defs, var,
                           is_global ? NULL : state->impl);

   state->var = NULL;
}

static void
validate_function_impl(nir_function_impl *impl, validate_state *state)
{
   /* Resize the ssa_srcs set. It's likely that the size of this set will
    * never actually hit the number of SSA defs because we remove sources from
    * the set as we visit them. (It could actually be much larger because
    * each SSA def can be used more than once.) However, growing it now costs
    * us very little (the extra memory is already dwarfed by the SSA defs
    * themselves) and makes collisions much less likely.
    */
   _mesa_set_resize(state->ssa_srcs, impl->ssa_alloc);

   validate_assert(state, impl->function->impl == impl);
   validate_assert(state, impl->cf_node.parent == NULL);

   validate_assert(state, exec_list_is_empty(&impl->end_block->instr_list));
   validate_assert(state, impl->end_block->successors[0] == NULL);
   validate_assert(state, impl->end_block->successors[1] == NULL);

   state->impl = impl;
   state->parent_node = &impl->cf_node;

   exec_list_validate(&impl->locals);
   nir_foreach_variable(var, &impl->locals) {
      validate_var_decl(var, false, state);
   }

   state->regs_found = reralloc(state->mem_ctx, state->regs_found,
                                BITSET_WORD, BITSET_WORDS(impl->reg_alloc));
   memset(state->regs_found, 0, BITSET_WORDS(impl->reg_alloc) *
                                sizeof(BITSET_WORD));
   exec_list_validate(&impl->registers);
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      prevalidate_reg_decl(reg, state);
   }

   state->ssa_defs_found = reralloc(state->mem_ctx, state->ssa_defs_found,
                                    BITSET_WORD, BITSET_WORDS(impl->ssa_alloc));
   memset(state->ssa_defs_found, 0, BITSET_WORDS(impl->ssa_alloc) *
                                    sizeof(BITSET_WORD));
   exec_list_validate(&impl->body);
   foreach_list_typed(nir_cf_node, node, node, &impl->body) {
      validate_cf_node(node, state);
   }

   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      postvalidate_reg_decl(reg, state);
   }

   if (state->ssa_srcs->entries != 0) {
      printf("extra dangling SSA sources:\n");
      set_foreach(state->ssa_srcs, entry)
         printf("%p\n", entry->key);

      abort();
   }
}

static void
validate_function(nir_function *func, validate_state *state)
{
   if (func->impl != NULL) {
      validate_assert(state, func->impl->function == func);
      validate_function_impl(func->impl, state);
   }
}

static void
init_validate_state(validate_state *state)
{
   state->mem_ctx = ralloc_context(NULL);
   state->regs = _mesa_pointer_hash_table_create(state->mem_ctx);
   state->ssa_srcs = _mesa_pointer_set_create(state->mem_ctx);
   state->ssa_defs_found = NULL;
   state->regs_found = NULL;
   state->var_defs = _mesa_pointer_hash_table_create(state->mem_ctx);
   state->errors = _mesa_pointer_hash_table_create(state->mem_ctx);

   state->loop = NULL;
   state->instr = NULL;
   state->var = NULL;
}

static void
destroy_validate_state(validate_state *state)
{
   ralloc_free(state->mem_ctx);
}

mtx_t fail_dump_mutex = _MTX_INITIALIZER_NP;

static void
dump_errors(validate_state *state, const char *when)
{
   struct hash_table *errors = state->errors;

   /* Lock around dumping so that we get clean dumps in a multi-threaded
    * scenario
    */
   mtx_lock(&fail_dump_mutex);

   if (when) {
      fprintf(stderr, "NIR validation failed %s\n", when);
      fprintf(stderr, "%d errors:\n", _mesa_hash_table_num_entries(errors));
   } else {
      fprintf(stderr, "NIR validation failed with %d errors:\n",
              _mesa_hash_table_num_entries(errors));
   }

   nir_print_shader_annotated(state->shader, stderr, errors);

   if (_mesa_hash_table_num_entries(errors) > 0) {
      fprintf(stderr, "%d additional errors:\n",
              _mesa_hash_table_num_entries(errors));
      hash_table_foreach(errors, entry) {
         fprintf(stderr, "%s\n", (char *)entry->data);
      }
   }

   mtx_unlock(&fail_dump_mutex);

   abort();
}

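/* The public entry point. 'when' is typically a short description of the
 * calling pass (e.g. "after nir_lower_io") and is echoed in the failure
 * message printed by dump_errors() before aborting.
 */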
void
nir_validate_shader(nir_shader *shader, const char *when)
{
   static int should_validate = -1;
   if (should_validate < 0)
      should_validate = env_var_as_boolean("NIR_VALIDATE", true);
   if (!should_validate)
      return;

   validate_state state;
   init_validate_state(&state);

   state.shader = shader;

   exec_list_validate(&shader->uniforms);
   nir_foreach_variable(var, &shader->uniforms) {
      validate_var_decl(var, true, &state);
   }

   exec_list_validate(&shader->inputs);
   nir_foreach_variable(var, &shader->inputs) {
      validate_var_decl(var, true, &state);
   }

   exec_list_validate(&shader->outputs);
   nir_foreach_variable(var, &shader->outputs) {
      validate_var_decl(var, true, &state);
   }

   exec_list_validate(&shader->shared);
   nir_foreach_variable(var, &shader->shared) {
      validate_var_decl(var, true, &state);
   }

   exec_list_validate(&shader->globals);
   nir_foreach_variable(var, &shader->globals) {
      validate_var_decl(var, true, &state);
   }

   exec_list_validate(&shader->system_values);
   nir_foreach_variable(var, &shader->system_values) {
      validate_var_decl(var, true, &state);
   }

   exec_list_validate(&shader->functions);
   foreach_list_typed(nir_function, func, node, &shader->functions) {
      validate_function(func, &state);
   }

   if (_mesa_hash_table_num_entries(state.errors) > 0)
      dump_errors(&state, when);

   destroy_validate_state(&state);
}

#endif /* NDEBUG */