nir/validate: validate num_components on registers and intrinsics
[mesa.git] / src / compiler / nir / nir_validate.c
1 /*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Connor Abbott (cwabbott0@gmail.com)
25 *
26 */
27
28 #include "nir.h"
29 #include "c11/threads.h"
30 #include <assert.h>
31
32 /*
33 * This file checks for invalid IR indicating a bug somewhere in the compiler.
34 */
35
36 /* Since this file is just a pile of asserts, don't bother compiling it if
37 * we're not building a debug build.
38 */
39 #ifndef NDEBUG
40
41 /*
42 * Per-register validation state.
43 */
44
/* Per-register bookkeeping built while walking the IR. */
typedef struct {
   /*
    * equivalent to the uses and defs in nir_register, but built up by the
    * validator. At the end, we verify that the sets have the same entries.
    */
   struct set *uses, *if_uses, *defs;
   nir_function_impl *where_defined; /* NULL for global registers */
} reg_validate_state;
53
/* Aggregate validator state threaded through every validate_* function.
 * The "current X" members act as a context stack: they are set on entry to
 * the corresponding construct and reset on exit.
 */
typedef struct {
   void *mem_ctx;

   /* map of register -> validation state (struct above) */
   struct hash_table *regs;

   /* the current shader being validated */
   nir_shader *shader;

   /* the current instruction being validated */
   nir_instr *instr;

   /* the current variable being validated */
   nir_variable *var;

   /* the current basic block being validated */
   nir_block *block;

   /* the current if statement being validated */
   nir_if *if_stmt;

   /* the current loop being visited */
   nir_loop *loop;

   /* the parent of the current cf node being visited */
   nir_cf_node *parent_node;

   /* the current function implementation being validated */
   nir_function_impl *impl;

   /* Set of seen SSA sources.  Entries for if-condition uses are tagged
    * with SET_PTR_BIT to distinguish them from instruction uses.
    */
   struct set *ssa_srcs;

   /* bitset of ssa definitions we have found; used to check uniqueness */
   BITSET_WORD *ssa_defs_found;

   /* bitset of registers we have currently found; used to check uniqueness */
   BITSET_WORD *regs_found;

   /* map of variable -> function implementation where it is defined or NULL
    * if it is a global variable
    */
   struct hash_table *var_defs;

   /* map of instruction/var/etc to failed assert string */
   struct hash_table *errors;
} validate_state;
101
102 static void
103 log_error(validate_state *state, const char *cond, const char *file, int line)
104 {
105 const void *obj;
106
107 if (state->instr)
108 obj = state->instr;
109 else if (state->var)
110 obj = state->var;
111 else
112 obj = cond;
113
114 char *msg = ralloc_asprintf(state->errors, "error: %s (%s:%d)",
115 cond, file, line);
116
117 _mesa_hash_table_insert(state->errors, obj, msg);
118 }
119
/* Soft assert: instead of aborting, a failed condition is logged so that
 * validation can continue and report every problem in one pass.  The
 * condition text itself becomes part of the error message.
 */
#define validate_assert(state, cond) do {             \
      if (!(cond))                                    \
         log_error(state, #cond, __FILE__, __LINE__); \
   } while (0)

static void validate_src(nir_src *src, validate_state *state,
                         unsigned bit_sizes, unsigned num_components);
127
/* NIR values at this point support only vec1 through vec4. */
static void
validate_num_components(validate_state *state, unsigned num_components)
{
   validate_assert(state, num_components >= 1 &&
                          num_components <= 4);
}
134
/* Validate a register source: the register must have been declared (and in
 * this function), optionally match a bit-size mask and component count, stay
 * in bounds for array registers, and have at most one level of indirection.
 * bit_sizes/num_components of 0 mean "no constraint".
 */
static void
validate_reg_src(nir_src *src, validate_state *state,
                 unsigned bit_sizes, unsigned num_components)
{
   validate_assert(state, src->reg.reg != NULL);

   struct hash_entry *entry;
   entry = _mesa_hash_table_search(state->regs, src->reg.reg);
   validate_assert(state, entry);

   reg_validate_state *reg_state = (reg_validate_state *) entry->data;

   /* Record this use so postvalidate_reg_decl can cross-check it against
    * the register's own use lists.
    */
   if (state->instr) {
      _mesa_set_add(reg_state->uses, src);
   } else {
      validate_assert(state, state->if_stmt);
      _mesa_set_add(reg_state->if_uses, src);
   }

   validate_assert(state, reg_state->where_defined == state->impl &&
                   "using a register declared in a different function");

   if (bit_sizes)
      validate_assert(state, src->reg.reg->bit_size & bit_sizes);
   if (num_components)
      validate_assert(state, src->reg.reg->num_components == num_components);

   validate_assert(state, (src->reg.reg->num_array_elems == 0 ||
                   src->reg.base_offset < src->reg.reg->num_array_elems) &&
                   "definitely out-of-bounds array access");

   if (src->reg.indirect) {
      /* Indirect offsets only make sense on array registers */
      validate_assert(state, src->reg.reg->num_array_elems != 0);
      validate_assert(state, (src->reg.indirect->is_ssa ||
                              src->reg.indirect->reg.indirect == NULL) &&
                      "only one level of indirection allowed");
      /* The indirect index must be a 32-bit scalar */
      validate_src(src->reg.indirect, state, 32, 1);
   }
}
174
/* Tag a pointer by setting one of its low bits.  Used to store if-condition
 * uses and instruction uses of the same nir_src in a single set without the
 * entries colliding.
 */
#define SET_PTR_BIT(ptr, bit) \
   (void *)(((uintptr_t)(ptr)) | (((uintptr_t)1) << bit))
177
/* Validate an SSA source.  validate_ssa_def has already inserted every use
 * it found in the def's use lists into state->ssa_srcs; finding (and
 * removing) our entry here proves this use is properly linked.  Whatever
 * remains in the set at the end of the impl is a use that appears in a use
 * list but is never reached from its consumer.
 */
static void
validate_ssa_src(nir_src *src, validate_state *state,
                 unsigned bit_sizes, unsigned num_components)
{
   validate_assert(state, src->ssa != NULL);

   /* As we walk SSA defs, we add every use to this set.  We need to make sure
    * our use is seen in a use list.
    */
   struct set_entry *entry;
   if (state->instr) {
      entry = _mesa_set_search(state->ssa_srcs, src);
   } else {
      /* if-condition uses were inserted with a tagged pointer */
      entry = _mesa_set_search(state->ssa_srcs, SET_PTR_BIT(src, 0));
   }
   validate_assert(state, entry);

   /* This will let us prove that we've seen all the sources */
   if (entry)
      _mesa_set_remove(state->ssa_srcs, entry);

   /* bit_sizes is a mask of allowed sizes; 0 means unconstrained */
   if (bit_sizes)
      validate_assert(state, src->ssa->bit_size & bit_sizes);
   if (num_components)
      validate_assert(state, src->ssa->num_components == num_components);

   /* TODO validate that the use is dominated by the definition */
}
206
207 static void
208 validate_src(nir_src *src, validate_state *state,
209 unsigned bit_sizes, unsigned num_components)
210 {
211 if (state->instr)
212 validate_assert(state, src->parent_instr == state->instr);
213 else
214 validate_assert(state, src->parent_if == state->if_stmt);
215
216 if (src->is_ssa)
217 validate_ssa_src(src, state, bit_sizes, num_components);
218 else
219 validate_reg_src(src, state, bit_sizes, num_components);
220 }
221
222 static void
223 validate_alu_src(nir_alu_instr *instr, unsigned index, validate_state *state)
224 {
225 nir_alu_src *src = &instr->src[index];
226
227 if (instr->op == nir_op_mov)
228 assert(!src->abs && !src->negate);
229
230 unsigned num_components = nir_src_num_components(src->src);
231 for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
232 validate_assert(state, src->swizzle[i] < NIR_MAX_VEC_COMPONENTS);
233
234 if (nir_alu_instr_channel_used(instr, index, i))
235 validate_assert(state, src->swizzle[i] < num_components);
236 }
237
238 validate_src(&src->src, state, 0, 0);
239 }
240
/* Validate a register destination; mirrors validate_reg_src but records the
 * write in the register's defs set and checks the parent-instruction link.
 */
static void
validate_reg_dest(nir_reg_dest *dest, validate_state *state,
                  unsigned bit_sizes, unsigned num_components)
{
   validate_assert(state, dest->reg != NULL);

   validate_assert(state, dest->parent_instr == state->instr);

   struct hash_entry *entry2;
   entry2 = _mesa_hash_table_search(state->regs, dest->reg);

   validate_assert(state, entry2);

   reg_validate_state *reg_state = (reg_validate_state *) entry2->data;
   /* Record the def so postvalidate_reg_decl can cross-check reg->defs */
   _mesa_set_add(reg_state->defs, dest);

   validate_assert(state, reg_state->where_defined == state->impl &&
                   "writing to a register declared in a different function");

   /* bit_sizes is a mask of allowed sizes; 0 means unconstrained */
   if (bit_sizes)
      validate_assert(state, dest->reg->bit_size & bit_sizes);
   if (num_components)
      validate_assert(state, dest->reg->num_components == num_components);

   validate_assert(state, (dest->reg->num_array_elems == 0 ||
                   dest->base_offset < dest->reg->num_array_elems) &&
                   "definitely out-of-bounds array access");

   if (dest->indirect) {
      validate_assert(state, dest->reg->num_array_elems != 0);
      validate_assert(state, (dest->indirect->is_ssa || dest->indirect->reg.indirect == NULL) &&
                      "only one level of indirection allowed");
      /* The indirect index must be a 32-bit scalar */
      validate_src(dest->indirect, state, 32, 1);
   }
}
276
/* Validate an SSA definition: unique in-range index, sane component count,
 * and well-formed use lists.  Every use found here is inserted into
 * state->ssa_srcs; validate_ssa_src later removes each entry, proving every
 * listed use is reached exactly once from its consumer.
 */
static void
validate_ssa_def(nir_ssa_def *def, validate_state *state)
{
   validate_assert(state, def->index < state->impl->ssa_alloc);
   validate_assert(state, !BITSET_TEST(state->ssa_defs_found, def->index));
   BITSET_SET(state->ssa_defs_found, def->index);

   validate_assert(state, def->parent_instr == state->instr);
   validate_num_components(state, def->num_components);

   list_validate(&def->uses);
   nir_foreach_use(src, def) {
      validate_assert(state, src->is_ssa);
      validate_assert(state, src->ssa == def);
      bool already_seen = false;
      _mesa_set_search_and_add(state->ssa_srcs, src, &already_seen);
      /* A nir_src should only appear once and only in one SSA def use list */
      validate_assert(state, !already_seen);
   }

   list_validate(&def->if_uses);
   nir_foreach_if_use(src, def) {
      validate_assert(state, src->is_ssa);
      validate_assert(state, src->ssa == def);
      bool already_seen = false;
      /* if-uses are pointer-tagged to keep them distinct from instruction
       * uses in the same set
       */
      _mesa_set_search_and_add(state->ssa_srcs, SET_PTR_BIT(src, 0),
                               &already_seen);
      /* A nir_src should only appear once and only in one SSA def use list */
      validate_assert(state, !already_seen);
   }
}
308
309 static void
310 validate_dest(nir_dest *dest, validate_state *state,
311 unsigned bit_sizes, unsigned num_components)
312 {
313 if (dest->is_ssa) {
314 if (bit_sizes)
315 validate_assert(state, dest->ssa.bit_size & bit_sizes);
316 if (num_components)
317 validate_assert(state, dest->ssa.num_components == num_components);
318 validate_ssa_def(&dest->ssa, state);
319 } else {
320 validate_reg_dest(&dest->reg, state, bit_sizes, num_components);
321 }
322 }
323
324 static void
325 validate_alu_dest(nir_alu_instr *instr, validate_state *state)
326 {
327 nir_alu_dest *dest = &instr->dest;
328
329 if (instr->op == nir_op_mov)
330 assert(!dest->saturate);
331
332 unsigned dest_size = nir_dest_num_components(dest->dest);
333 /*
334 * validate that the instruction doesn't write to components not in the
335 * register/SSA value
336 */
337 validate_assert(state, !(dest->write_mask & ~((1 << dest_size) - 1)));
338
339 /* validate that saturate is only ever used on instructions with
340 * destinations of type float
341 */
342 nir_alu_instr *alu = nir_instr_as_alu(state->instr);
343 validate_assert(state,
344 (nir_alu_type_get_base_type(nir_op_infos[alu->op].output_type) ==
345 nir_type_float) ||
346 !dest->saturate);
347
348 validate_dest(&dest->dest, state, 0, 0);
349 }
350
/* Validate a whole ALU instruction.  Typed (sized) sources/dest must match
 * the opcode's declared size exactly; all unsized sources and the unsized
 * dest must agree on one common bit size.  Float values must be 16/32/64
 * bits since 8-bit floats do not exist.
 */
static void
validate_alu_instr(nir_alu_instr *instr, validate_state *state)
{
   validate_assert(state, instr->op < nir_num_opcodes);

   /* Bit size shared by all unsized operands; 0 until the first one is seen */
   unsigned instr_bit_size = 0;
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      nir_alu_type src_type = nir_op_infos[instr->op].input_types[i];
      unsigned src_bit_size = nir_src_bit_size(instr->src[i].src);
      if (nir_alu_type_get_type_size(src_type)) {
         /* Source has an explicit size in the opcode table */
         validate_assert(state, src_bit_size == nir_alu_type_get_type_size(src_type));
      } else if (instr_bit_size) {
         validate_assert(state, src_bit_size == instr_bit_size);
      } else {
         /* First unsized source establishes the common size */
         instr_bit_size = src_bit_size;
      }

      if (nir_alu_type_get_base_type(src_type) == nir_type_float) {
         /* 8-bit float isn't a thing */
         validate_assert(state, src_bit_size == 16 || src_bit_size == 32 ||
                                src_bit_size == 64);
      }

      validate_alu_src(instr, i, state);
   }

   nir_alu_type dest_type = nir_op_infos[instr->op].output_type;
   unsigned dest_bit_size = nir_dest_bit_size(instr->dest.dest);
   if (nir_alu_type_get_type_size(dest_type)) {
      validate_assert(state, dest_bit_size == nir_alu_type_get_type_size(dest_type));
   } else if (instr_bit_size) {
      validate_assert(state, dest_bit_size == instr_bit_size);
   } else {
      /* The only unsized thing is the destination so it's vacuously valid */
   }

   if (nir_alu_type_get_base_type(dest_type) == nir_type_float) {
      /* 8-bit float isn't a thing */
      validate_assert(state, dest_bit_size == 16 || dest_bit_size == 32 ||
                             dest_bit_size == 64);
   }

   validate_alu_dest(instr, state);
}
395
396 static void
397 validate_var_use(nir_variable *var, validate_state *state)
398 {
399 struct hash_entry *entry = _mesa_hash_table_search(state->var_defs, var);
400 validate_assert(state, entry);
401 if (entry && var->data.mode == nir_var_function_temp)
402 validate_assert(state, (nir_function_impl *) entry->data == state->impl);
403 }
404
/* Validate a deref instruction.  Var derefs are checked against the
 * variable, casts are trusted, and all other deref types must chain from a
 * parent deref with a matching mode and a compatible GLSL type.
 */
static void
validate_deref_instr(nir_deref_instr *instr, validate_state *state)
{
   if (instr->deref_type == nir_deref_type_var) {
      /* Variable dereferences are stupid simple. */
      validate_assert(state, instr->mode == instr->var->data.mode);
      validate_assert(state, instr->type == instr->var->type);
      validate_var_use(instr->var, state);
   } else if (instr->deref_type == nir_deref_type_cast) {
      /* For cast, we simply have to trust the instruction.  It's up to
       * lowering passes and front/back-ends to make them sane.
       */
      validate_src(&instr->parent, state, 0, 0);

      /* We just validate that the type and mode are there */
      validate_assert(state, instr->mode);
      validate_assert(state, instr->type);
   } else {
      /* We require the parent to be SSA.  This may be lifted in the future */
      validate_assert(state, instr->parent.is_ssa);

      /* The parent pointer value must have the same number of components
       * as the destination.
       */
      validate_src(&instr->parent, state, nir_dest_bit_size(instr->dest),
                   nir_dest_num_components(instr->dest));

      nir_instr *parent_instr = instr->parent.ssa->parent_instr;

      /* The parent must come from another deref instruction */
      validate_assert(state, parent_instr->type == nir_instr_type_deref);

      nir_deref_instr *parent = nir_instr_as_deref(parent_instr);

      /* Modes must be consistent along the whole deref chain */
      validate_assert(state, instr->mode == parent->mode);

      switch (instr->deref_type) {
      case nir_deref_type_struct:
         validate_assert(state, glsl_type_is_struct_or_ifc(parent->type));
         validate_assert(state,
                         instr->strct.index < glsl_get_length(parent->type));
         validate_assert(state, instr->type ==
                         glsl_get_struct_field(parent->type, instr->strct.index));
         break;

      case nir_deref_type_array:
      case nir_deref_type_array_wildcard:
         if (instr->mode == nir_var_mem_ubo ||
             instr->mode == nir_var_mem_ssbo ||
             instr->mode == nir_var_mem_shared ||
             instr->mode == nir_var_mem_global) {
            /* Shared variables and UBO/SSBOs have a bit more relaxed rules
             * because we need to be able to handle array derefs on vectors.
             * Fortunately, nir_lower_io handles these just fine.
             */
            validate_assert(state, glsl_type_is_array(parent->type) ||
                                   glsl_type_is_matrix(parent->type) ||
                                   glsl_type_is_vector(parent->type));
         } else {
            /* Most of NIR cannot handle array derefs on vectors */
            validate_assert(state, glsl_type_is_array(parent->type) ||
                                   glsl_type_is_matrix(parent->type));
         }
         validate_assert(state,
                         instr->type == glsl_get_array_element(parent->type));

         if (instr->deref_type == nir_deref_type_array) {
            /* The array index is a scalar of pointer-sized integer */
            validate_src(&instr->arr.index, state,
                         nir_dest_bit_size(instr->dest), 1);
         }
         break;

      case nir_deref_type_ptr_as_array:
         /* ptr_as_array derefs must have a parent that is either an array,
          * ptr_as_array, or cast.  If the parent is a cast, we get the stride
          * information (if any) from the cast deref.
          */
         validate_assert(state,
                         parent->deref_type == nir_deref_type_array ||
                         parent->deref_type == nir_deref_type_ptr_as_array ||
                         parent->deref_type == nir_deref_type_cast);
         validate_src(&instr->arr.index, state,
                      nir_dest_bit_size(instr->dest), 1);
         break;

      default:
         unreachable("Invalid deref instruction type");
      }
   }

   /* We intentionally don't validate the size of the destination because we
    * want to let other compiler components such as SPIR-V decide how big
    * pointers should be.
    */
   validate_dest(&instr->dest, state, 0, 0);

   /* Deref instructions as if conditions don't make sense because if
    * conditions expect well-formed Booleans.  If you want to compare with
    * NULL, an explicit comparison operation should be used.
    */
   validate_assert(state, list_is_empty(&instr->dest.ssa.if_uses));

   /* Only certain modes can be used as sources for phi instructions. */
   nir_foreach_use(use, &instr->dest.ssa) {
      if (use->parent_instr->type == nir_instr_type_phi) {
         validate_assert(state, instr->mode == nir_var_mem_ubo ||
                                instr->mode == nir_var_mem_ssbo ||
                                instr->mode == nir_var_mem_shared ||
                                instr->mode == nir_var_mem_global);
      }
   }
}
517
/* Validate an intrinsic instruction.  A first switch derives per-intrinsic
 * bit-size constraints (0 = unconstrained); the generic tail then checks
 * num_components and every source/destination against the intrinsic-info
 * tables.
 */
static void
validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state)
{
   unsigned dest_bit_size = 0;
   unsigned src_bit_sizes[NIR_INTRINSIC_MAX_INPUTS] = { 0, };
   switch (instr->intrinsic) {
   case nir_intrinsic_load_param: {
      unsigned param_idx = nir_intrinsic_param_idx(instr);
      validate_assert(state, param_idx < state->impl->function->num_params);
      nir_parameter *param = &state->impl->function->params[param_idx];
      validate_assert(state, instr->num_components == param->num_components);
      dest_bit_size = param->bit_size;
      break;
   }

   case nir_intrinsic_load_deref: {
      nir_deref_instr *src = nir_src_as_deref(instr->src[0]);
      validate_assert(state, glsl_type_is_vector_or_scalar(src->type) ||
                      (src->mode == nir_var_uniform &&
                       glsl_get_base_type(src->type) == GLSL_TYPE_SUBROUTINE));
      validate_assert(state, instr->num_components ==
                             glsl_get_vector_elements(src->type));
      dest_bit_size = glsl_get_bit_size(src->type);
      /* Also allow 32-bit boolean load operations */
      if (glsl_type_is_boolean(src->type))
         dest_bit_size |= 32;
      break;
   }

   case nir_intrinsic_store_deref: {
      nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
      validate_assert(state, glsl_type_is_vector_or_scalar(dst->type));
      validate_assert(state, instr->num_components ==
                             glsl_get_vector_elements(dst->type));
      src_bit_sizes[1] = glsl_get_bit_size(dst->type);
      /* Also allow 32-bit boolean store operations */
      if (glsl_type_is_boolean(dst->type))
         src_bit_sizes[1] |= 32;
      /* Inputs and uniforms are read-only */
      validate_assert(state, (dst->mode & (nir_var_shader_in |
                                           nir_var_uniform)) == 0);
      /* The write mask may not cover nonexistent components */
      validate_assert(state, (nir_intrinsic_write_mask(instr) & ~((1 << instr->num_components) - 1)) == 0);
      break;
   }

   case nir_intrinsic_copy_deref: {
      nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
      nir_deref_instr *src = nir_src_as_deref(instr->src[1]);
      /* Copies ignore decorations, so compare bare types */
      validate_assert(state, glsl_get_bare_type(dst->type) ==
                             glsl_get_bare_type(src->type));
      validate_assert(state, (dst->mode & (nir_var_shader_in |
                                           nir_var_uniform)) == 0);
      break;
   }

   default:
      break;
   }

   /* num_components == 0 means "not a vectored intrinsic" */
   if (instr->num_components > 0)
      validate_num_components(state, instr->num_components);

   const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
   unsigned num_srcs = info->num_srcs;
   for (unsigned i = 0; i < num_srcs; i++) {
      unsigned components_read = nir_intrinsic_src_components(instr, i);

      validate_num_components(state, components_read);

      validate_src(&instr->src[i], state, src_bit_sizes[i], components_read);
   }

   if (nir_intrinsic_infos[instr->intrinsic].has_dest) {
      unsigned components_written = nir_intrinsic_dest_components(instr);
      unsigned bit_sizes = nir_intrinsic_infos[instr->intrinsic].dest_bit_sizes;

      validate_num_components(state, components_written);
      /* If both the switch above and the info table constrain the size,
       * they must agree; otherwise whichever one exists wins.
       */
      if (dest_bit_size && bit_sizes)
         validate_assert(state, dest_bit_size & bit_sizes);
      else
         dest_bit_size = dest_bit_size ? dest_bit_size : bit_sizes;

      validate_dest(&instr->dest, state, dest_bit_size, components_written);
   }
}
602
/* Validate a texture instruction: each source type may appear at most once,
 * deref sources must actually be derefs, and explicit tg4 offsets are only
 * legal on textureGather without a regular offset source.
 */
static void
validate_tex_instr(nir_tex_instr *instr, validate_state *state)
{
   bool src_type_seen[nir_num_tex_src_types];
   for (unsigned i = 0; i < nir_num_tex_src_types; i++)
      src_type_seen[i] = false;

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      /* Duplicate source types are not allowed */
      validate_assert(state, !src_type_seen[instr->src[i].src_type]);
      src_type_seen[instr->src[i].src_type] = true;
      validate_src(&instr->src[i].src, state,
                   0, nir_tex_instr_src_size(instr, i));

      switch (instr->src[i].src_type) {
      case nir_tex_src_texture_deref:
      case nir_tex_src_sampler_deref:
         /* These sources must be SSA values produced by deref instructions */
         validate_assert(state, instr->src[i].src.is_ssa);
         validate_assert(state,
                         instr->src[i].src.ssa->parent_instr->type == nir_instr_type_deref);
         break;
      default:
         break;
      }
   }

   if (nir_tex_instr_has_explicit_tg4_offsets(instr)) {
      validate_assert(state, instr->op == nir_texop_tg4);
      validate_assert(state, !src_type_seen[nir_tex_src_offset]);
   }

   validate_dest(&instr->dest, state, 0, nir_tex_instr_dest_size(instr));
}
635
636 static void
637 validate_call_instr(nir_call_instr *instr, validate_state *state)
638 {
639 validate_assert(state, instr->num_params == instr->callee->num_params);
640
641 for (unsigned i = 0; i < instr->num_params; i++) {
642 validate_src(&instr->params[i], state,
643 instr->callee->params[i].bit_size,
644 instr->callee->params[i].num_components);
645 }
646 }
647
/* Validate one constant component of the given bit size. */
static void
validate_const_value(nir_const_value *val, unsigned bit_size,
                     validate_state *state)
{
   /* In order for block copies to work properly for things like instruction
    * comparisons and [de]serialization, we require the unused bits of the
    * nir_const_value to be zero.
    */
   nir_const_value cmp_val;
   memset(&cmp_val, 0, sizeof(cmp_val));
   /* Copy only the meaningful low bits into a zeroed value ... */
   switch (bit_size) {
   case 1:
      cmp_val.b = val->b;
      break;
   case 8:
      cmp_val.u8 = val->u8;
      break;
   case 16:
      cmp_val.u16 = val->u16;
      break;
   case 32:
      cmp_val.u32 = val->u32;
      break;
   case 64:
      cmp_val.u64 = val->u64;
      break;
   default:
      validate_assert(state, !"Invalid load_const bit size");
   }
   /* ... then a byte-wise compare proves the padding bits were zero */
   validate_assert(state, memcmp(val, &cmp_val, sizeof(cmp_val)) == 0);
}
679
680 static void
681 validate_load_const_instr(nir_load_const_instr *instr, validate_state *state)
682 {
683 validate_ssa_def(&instr->def, state);
684
685 for (unsigned i = 0; i < instr->def.num_components; i++)
686 validate_const_value(&instr->value[i], instr->def.bit_size, state);
687 }
688
/* An undef instruction only defines an SSA value; nothing else to check. */
static void
validate_ssa_undef_instr(nir_ssa_undef_instr *instr, validate_state *state)
{
   validate_ssa_def(&instr->def, state);
}
694
/* Validate a phi instruction's destination and source-list shape.  The
 * sources themselves are validated later, per predecessor edge, by
 * validate_phi_src.
 */
static void
validate_phi_instr(nir_phi_instr *instr, validate_state *state)
{
   /*
    * don't validate the sources until we get to them from their predecessor
    * basic blocks, to avoid validating an SSA use before its definition.
    */

   validate_dest(&instr->dest, state, 0, 0);

   exec_list_validate(&instr->srcs);
   /* Exactly one phi source per predecessor block */
   validate_assert(state, exec_list_length(&instr->srcs) ==
                          state->block->predecessors->entries);
}
709
710 static void
711 validate_instr(nir_instr *instr, validate_state *state)
712 {
713 validate_assert(state, instr->block == state->block);
714
715 state->instr = instr;
716
717 switch (instr->type) {
718 case nir_instr_type_alu:
719 validate_alu_instr(nir_instr_as_alu(instr), state);
720 break;
721
722 case nir_instr_type_deref:
723 validate_deref_instr(nir_instr_as_deref(instr), state);
724 break;
725
726 case nir_instr_type_call:
727 validate_call_instr(nir_instr_as_call(instr), state);
728 break;
729
730 case nir_instr_type_intrinsic:
731 validate_intrinsic_instr(nir_instr_as_intrinsic(instr), state);
732 break;
733
734 case nir_instr_type_tex:
735 validate_tex_instr(nir_instr_as_tex(instr), state);
736 break;
737
738 case nir_instr_type_load_const:
739 validate_load_const_instr(nir_instr_as_load_const(instr), state);
740 break;
741
742 case nir_instr_type_phi:
743 validate_phi_instr(nir_instr_as_phi(instr), state);
744 break;
745
746 case nir_instr_type_ssa_undef:
747 validate_ssa_undef_instr(nir_instr_as_ssa_undef(instr), state);
748 break;
749
750 case nir_instr_type_jump:
751 break;
752
753 default:
754 validate_assert(state, !"Invalid ALU instruction type");
755 break;
756 }
757
758 state->instr = NULL;
759 }
760
/* Validate the phi source corresponding to predecessor `pred`.  Called while
 * walking the predecessor block, so the SSA value is guaranteed to already
 * have been validated.  Aborts if no source matches the predecessor.
 */
static void
validate_phi_src(nir_phi_instr *instr, nir_block *pred, validate_state *state)
{
   state->instr = &instr->instr;

   validate_assert(state, instr->dest.is_ssa);

   exec_list_validate(&instr->srcs);
   nir_foreach_phi_src(src, instr) {
      if (src->pred == pred) {
         validate_assert(state, src->src.is_ssa);
         /* The source must exactly match the phi's own value shape */
         validate_src(&src->src, state, instr->dest.ssa.bit_size,
                      instr->dest.ssa.num_components);
         state->instr = NULL;
         return;
      }
   }

   /* No source for this predecessor edge: the phi is structurally broken */
   abort();
}
781
/* Validate, for every phi at the top of successor block `succ`, the source
 * that flows in along the edge from `block`.  Phis must be contiguous at the
 * start of a block, so we stop at the first non-phi.
 */
static void
validate_phi_srcs(nir_block *block, nir_block *succ, validate_state *state)
{
   nir_foreach_instr(instr, succ) {
      if (instr->type != nir_instr_type_phi)
         break;

      validate_phi_src(nir_instr_as_phi(instr), block, state);
   }
}
792
793 static void validate_cf_node(nir_cf_node *node, validate_state *state);
794
/* Validate a basic block: instruction ordering (phis first, jump last),
 * each instruction, and the block's successor/predecessor edges against the
 * structured control flow it sits in.
 */
static void
validate_block(nir_block *block, validate_state *state)
{
   validate_assert(state, block->cf_node.parent == state->parent_node);

   state->block = block;

   exec_list_validate(&block->instr_list);
   nir_foreach_instr(instr, block) {
      /* Phis must form a contiguous run at the top of the block */
      if (instr->type == nir_instr_type_phi) {
         validate_assert(state, instr == nir_block_first_instr(block) ||
                                nir_instr_prev(instr)->type == nir_instr_type_phi);
      }

      /* A jump terminates its block */
      if (instr->type == nir_instr_type_jump) {
         validate_assert(state, instr == nir_block_last_instr(block));
      }

      validate_instr(instr, state);
   }

   /* Every block has at least one successor; the two must differ */
   validate_assert(state, block->successors[0] != NULL);
   validate_assert(state, block->successors[0] != block->successors[1]);

   for (unsigned i = 0; i < 2; i++) {
      if (block->successors[i] != NULL) {
         /* Successor/predecessor links must be symmetric */
         struct set_entry *entry =
            _mesa_set_search(block->successors[i]->predecessors, block);
         validate_assert(state, entry);

         validate_phi_srcs(block, block->successors[i], state);
      }
   }

   set_foreach(block->predecessors, entry) {
      const nir_block *pred = entry->key;
      validate_assert(state, pred->successors[0] == block ||
                             pred->successors[1] == block);
   }

   if (!exec_list_is_empty(&block->instr_list) &&
       nir_block_last_instr(block)->type == nir_instr_type_jump) {
      /* A jump fully determines the single successor */
      validate_assert(state, block->successors[1] == NULL);
      nir_jump_instr *jump = nir_instr_as_jump(nir_block_last_instr(block));
      switch (jump->type) {
      case nir_jump_break: {
         nir_block *after =
            nir_cf_node_as_block(nir_cf_node_next(&state->loop->cf_node));
         validate_assert(state, block->successors[0] == after);
         break;
      }

      case nir_jump_continue: {
         nir_block *first = nir_loop_first_block(state->loop);
         validate_assert(state, block->successors[0] == first);
         break;
      }

      case nir_jump_return:
         validate_assert(state, block->successors[0] == state->impl->end_block);
         break;

      default:
         unreachable("bad jump type");
      }
   } else {
      /* No jump: the successors are implied by the structured CFG */
      nir_cf_node *next = nir_cf_node_next(&block->cf_node);
      if (next == NULL) {
         /* Last block of its parent construct */
         switch (state->parent_node->type) {
         case nir_cf_node_loop: {
            nir_block *first = nir_loop_first_block(state->loop);
            validate_assert(state, block->successors[0] == first);
            /* due to the hack for infinite loops, block->successors[1] may
             * point to the block after the loop.
             */
            break;
         }

         case nir_cf_node_if: {
            nir_block *after =
               nir_cf_node_as_block(nir_cf_node_next(state->parent_node));
            validate_assert(state, block->successors[0] == after);
            validate_assert(state, block->successors[1] == NULL);
            break;
         }

         case nir_cf_node_function:
            validate_assert(state, block->successors[0] == state->impl->end_block);
            validate_assert(state, block->successors[1] == NULL);
            break;

         default:
            unreachable("unknown control flow node type");
         }
      } else {
         if (next->type == nir_cf_node_if) {
            /* Falls into an if: both branches are successors */
            nir_if *if_stmt = nir_cf_node_as_if(next);
            validate_assert(state, block->successors[0] ==
                                   nir_if_first_then_block(if_stmt));
            validate_assert(state, block->successors[1] ==
                                   nir_if_first_else_block(if_stmt));
         } else {
            /* Falls into a loop: single successor is the loop header */
            validate_assert(state, next->type == nir_cf_node_loop);
            nir_loop *loop = nir_cf_node_as_loop(next);
            validate_assert(state, block->successors[0] ==
                                   nir_loop_first_block(loop));
            validate_assert(state, block->successors[1] == NULL);
         }
      }
   }
}
906
/* Validate an if node: it must be sandwiched between blocks, have a scalar
 * condition, non-empty then/else lists, and valid children.
 */
static void
validate_if(nir_if *if_stmt, validate_state *state)
{
   state->if_stmt = if_stmt;

   /* An if can never be the first or last node of its parent list and must
    * be preceded and followed by blocks.
    */
   validate_assert(state, !exec_node_is_head_sentinel(if_stmt->cf_node.node.prev));
   nir_cf_node *prev_node = nir_cf_node_prev(&if_stmt->cf_node);
   validate_assert(state, prev_node->type == nir_cf_node_block);

   validate_assert(state, !exec_node_is_tail_sentinel(if_stmt->cf_node.node.next));
   nir_cf_node *next_node = nir_cf_node_next(&if_stmt->cf_node);
   validate_assert(state, next_node->type == nir_cf_node_block);

   /* The condition is a single-component value of any bit size */
   validate_src(&if_stmt->condition, state, 0, 1);

   validate_assert(state, !exec_list_is_empty(&if_stmt->then_list));
   validate_assert(state, !exec_list_is_empty(&if_stmt->else_list));

   nir_cf_node *old_parent = state->parent_node;
   state->parent_node = &if_stmt->cf_node;

   exec_list_validate(&if_stmt->then_list);
   foreach_list_typed(nir_cf_node, cf_node, node, &if_stmt->then_list) {
      validate_cf_node(cf_node, state);
   }

   exec_list_validate(&if_stmt->else_list);
   foreach_list_typed(nir_cf_node, cf_node, node, &if_stmt->else_list) {
      validate_cf_node(cf_node, state);
   }

   state->parent_node = old_parent;
   state->if_stmt = NULL;
}
941
/* Validate a loop node: like an if, it must sit between blocks and have a
 * non-empty body.  state->loop is swapped so nested break/continue checks
 * resolve against the innermost loop.
 */
static void
validate_loop(nir_loop *loop, validate_state *state)
{
   validate_assert(state, !exec_node_is_head_sentinel(loop->cf_node.node.prev));
   nir_cf_node *prev_node = nir_cf_node_prev(&loop->cf_node);
   validate_assert(state, prev_node->type == nir_cf_node_block);

   validate_assert(state, !exec_node_is_tail_sentinel(loop->cf_node.node.next));
   nir_cf_node *next_node = nir_cf_node_next(&loop->cf_node);
   validate_assert(state, next_node->type == nir_cf_node_block);

   validate_assert(state, !exec_list_is_empty(&loop->body));

   nir_cf_node *old_parent = state->parent_node;
   state->parent_node = &loop->cf_node;
   nir_loop *old_loop = state->loop;
   state->loop = loop;

   exec_list_validate(&loop->body);
   foreach_list_typed(nir_cf_node, cf_node, node, &loop->body) {
      validate_cf_node(cf_node, state);
   }

   state->parent_node = old_parent;
   state->loop = old_loop;
}
968
969 static void
970 validate_cf_node(nir_cf_node *node, validate_state *state)
971 {
972 validate_assert(state, node->parent == state->parent_node);
973
974 switch (node->type) {
975 case nir_cf_node_block:
976 validate_block(nir_cf_node_as_block(node), state);
977 break;
978
979 case nir_cf_node_if:
980 validate_if(nir_cf_node_as_if(node), state);
981 break;
982
983 case nir_cf_node_loop:
984 validate_loop(nir_cf_node_as_loop(node), state);
985 break;
986
987 default:
988 unreachable("Invalid CF node type");
989 }
990 }
991
992 static void
993 prevalidate_reg_decl(nir_register *reg, validate_state *state)
994 {
995 validate_assert(state, reg->index < state->impl->reg_alloc);
996 validate_assert(state, !BITSET_TEST(state->regs_found, reg->index));
997 validate_num_components(state, reg->num_components);
998 BITSET_SET(state->regs_found, reg->index);
999
1000 list_validate(&reg->uses);
1001 list_validate(&reg->defs);
1002 list_validate(&reg->if_uses);
1003
1004 reg_validate_state *reg_state = ralloc(state->regs, reg_validate_state);
1005 reg_state->uses = _mesa_pointer_set_create(reg_state);
1006 reg_state->if_uses = _mesa_pointer_set_create(reg_state);
1007 reg_state->defs = _mesa_pointer_set_create(reg_state);
1008
1009 reg_state->where_defined = state->impl;
1010
1011 _mesa_hash_table_insert(state->regs, reg, reg_state);
1012 }
1013
1014 static void
1015 postvalidate_reg_decl(nir_register *reg, validate_state *state)
1016 {
1017 struct hash_entry *entry = _mesa_hash_table_search(state->regs, reg);
1018
1019 assume(entry);
1020 reg_validate_state *reg_state = (reg_validate_state *) entry->data;
1021
1022 nir_foreach_use(src, reg) {
1023 struct set_entry *entry = _mesa_set_search(reg_state->uses, src);
1024 validate_assert(state, entry);
1025 _mesa_set_remove(reg_state->uses, entry);
1026 }
1027
1028 if (reg_state->uses->entries != 0) {
1029 printf("extra entries in register uses:\n");
1030 set_foreach(reg_state->uses, entry)
1031 printf("%p\n", entry->key);
1032
1033 abort();
1034 }
1035
1036 nir_foreach_if_use(src, reg) {
1037 struct set_entry *entry = _mesa_set_search(reg_state->if_uses, src);
1038 validate_assert(state, entry);
1039 _mesa_set_remove(reg_state->if_uses, entry);
1040 }
1041
1042 if (reg_state->if_uses->entries != 0) {
1043 printf("extra entries in register if_uses:\n");
1044 set_foreach(reg_state->if_uses, entry)
1045 printf("%p\n", entry->key);
1046
1047 abort();
1048 }
1049
1050 nir_foreach_def(src, reg) {
1051 struct set_entry *entry = _mesa_set_search(reg_state->defs, src);
1052 validate_assert(state, entry);
1053 _mesa_set_remove(reg_state->defs, entry);
1054 }
1055
1056 if (reg_state->defs->entries != 0) {
1057 printf("extra entries in register defs:\n");
1058 set_foreach(reg_state->defs, entry)
1059 printf("%p\n", entry->key);
1060
1061 abort();
1062 }
1063 }
1064
1065 static void
1066 validate_var_decl(nir_variable *var, nir_variable_mode valid_modes,
1067 validate_state *state)
1068 {
1069 state->var = var;
1070
1071 /* Must have exactly one mode set */
1072 validate_assert(state, util_is_power_of_two_nonzero(var->data.mode));
1073 validate_assert(state, var->data.mode & valid_modes);
1074
1075 if (var->data.compact) {
1076 /* The "compact" flag is only valid on arrays of scalars. */
1077 assert(glsl_type_is_array(var->type));
1078
1079 const struct glsl_type *type = glsl_get_array_element(var->type);
1080 if (nir_is_per_vertex_io(var, state->shader->info.stage)) {
1081 assert(glsl_type_is_array(type));
1082 assert(glsl_type_is_scalar(glsl_get_array_element(type)));
1083 } else {
1084 assert(glsl_type_is_scalar(type));
1085 }
1086 }
1087
1088 if (var->num_members > 0) {
1089 const struct glsl_type *without_array = glsl_without_array(var->type);
1090 validate_assert(state, glsl_type_is_struct_or_ifc(without_array));
1091 validate_assert(state, var->num_members == glsl_get_length(without_array));
1092 validate_assert(state, var->members != NULL);
1093 }
1094
1095 /*
1096 * TODO validate some things ir_validate.cpp does (requires more GLSL type
1097 * support)
1098 */
1099
1100 _mesa_hash_table_insert(state->var_defs, var,
1101 valid_modes == nir_var_function_temp ?
1102 state->impl : NULL);
1103
1104 state->var = NULL;
1105 }
1106
/* Validate one function implementation: its locals, registers, and CF tree.
 * Ordering matters here: register/SSA bookkeeping must be (re)initialized
 * before the body walk fills it in, and postvalidate_reg_decl must run after
 * the walk so the accumulated use/def sets are complete.
 */
static void
validate_function_impl(nir_function_impl *impl, validate_state *state)
{
   /* Resize the ssa_srcs set. It's likely that the size of this set will
    * never actually hit the number of SSA defs because we remove sources from
    * the set as we visit them. (It could actually be much larger because
    * each SSA def can be used more than once.) However, growing it now costs
    * us very little (the extra memory is already dwarfed by the SSA defs
    * themselves) and makes collisions much less likely.
    */
   _mesa_set_resize(state->ssa_srcs, impl->ssa_alloc);

   /* The impl and its function must point at each other, and an impl is the
    * root of its CF tree.
    */
   validate_assert(state, impl->function->impl == impl);
   validate_assert(state, impl->cf_node.parent == NULL);

   /* The end block is a synthetic sink: no instructions, no successors. */
   validate_assert(state, exec_list_is_empty(&impl->end_block->instr_list));
   validate_assert(state, impl->end_block->successors[0] == NULL);
   validate_assert(state, impl->end_block->successors[1] == NULL);

   state->impl = impl;
   state->parent_node = &impl->cf_node;

   exec_list_validate(&impl->locals);
   nir_foreach_variable(var, &impl->locals) {
      validate_var_decl(var, nir_var_function_temp, state);
   }

   /* (Re)allocate and clear the per-impl register-index bitset, then record
    * each declared register before the body walk references them.
    */
   state->regs_found = reralloc(state->mem_ctx, state->regs_found,
                                BITSET_WORD, BITSET_WORDS(impl->reg_alloc));
   memset(state->regs_found, 0, BITSET_WORDS(impl->reg_alloc) *
                                sizeof(BITSET_WORD));
   exec_list_validate(&impl->registers);
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      prevalidate_reg_decl(reg, state);
   }

   /* Same for SSA def indices, then walk the whole CF tree. */
   state->ssa_defs_found = reralloc(state->mem_ctx, state->ssa_defs_found,
                                    BITSET_WORD, BITSET_WORDS(impl->ssa_alloc));
   memset(state->ssa_defs_found, 0, BITSET_WORDS(impl->ssa_alloc) *
                                    sizeof(BITSET_WORD));
   exec_list_validate(&impl->body);
   foreach_list_typed(nir_cf_node, node, node, &impl->body) {
      validate_cf_node(node, state);
   }

   /* Now that all uses/defs have been accumulated, cross-check them against
    * each register's lists.
    */
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      postvalidate_reg_decl(reg, state);
   }

   /* Every SSA source seen during the walk should have been matched to a
    * def; anything left over is dangling.
    */
   if (state->ssa_srcs->entries != 0) {
      printf("extra dangling SSA sources:\n");
      set_foreach(state->ssa_srcs, entry)
         printf("%p\n", entry->key);

      abort();
   }
}
1164
1165 static void
1166 validate_function(nir_function *func, validate_state *state)
1167 {
1168 if (func->impl != NULL) {
1169 validate_assert(state, func->impl->function == func);
1170 validate_function_impl(func->impl, state);
1171 }
1172 }
1173
1174 static void
1175 init_validate_state(validate_state *state)
1176 {
1177 state->mem_ctx = ralloc_context(NULL);
1178 state->regs = _mesa_pointer_hash_table_create(state->mem_ctx);
1179 state->ssa_srcs = _mesa_pointer_set_create(state->mem_ctx);
1180 state->ssa_defs_found = NULL;
1181 state->regs_found = NULL;
1182 state->var_defs = _mesa_pointer_hash_table_create(state->mem_ctx);
1183 state->errors = _mesa_pointer_hash_table_create(state->mem_ctx);
1184
1185 state->loop = NULL;
1186 state->instr = NULL;
1187 state->var = NULL;
1188 }
1189
/* Free all validator state; everything was allocated out of mem_ctx. */
static void
destroy_validate_state(validate_state *state)
{
   ralloc_free(state->mem_ctx);
}
1195
/* Serializes failure dumps so concurrently-validating threads don't
 * interleave their output (see dump_errors).
 */
mtx_t fail_dump_mutex = _MTX_INITIALIZER_NP;
1197
1198 static void
1199 dump_errors(validate_state *state, const char *when)
1200 {
1201 struct hash_table *errors = state->errors;
1202
1203 /* Lock around dumping so that we get clean dumps in a multi-threaded
1204 * scenario
1205 */
1206 mtx_lock(&fail_dump_mutex);
1207
1208 if (when) {
1209 fprintf(stderr, "NIR validation failed %s\n", when);
1210 fprintf(stderr, "%d errors:\n", _mesa_hash_table_num_entries(errors));
1211 } else {
1212 fprintf(stderr, "NIR validation failed with %d errors:\n",
1213 _mesa_hash_table_num_entries(errors));
1214 }
1215
1216 nir_print_shader_annotated(state->shader, stderr, errors);
1217
1218 if (_mesa_hash_table_num_entries(errors) > 0) {
1219 fprintf(stderr, "%d additional errors:\n",
1220 _mesa_hash_table_num_entries(errors));
1221 hash_table_foreach(errors, entry) {
1222 fprintf(stderr, "%s\n", (char *)entry->data);
1223 }
1224 }
1225
1226 mtx_unlock(&fail_dump_mutex);
1227
1228 abort();
1229 }
1230
1231 void
1232 nir_validate_shader(nir_shader *shader, const char *when)
1233 {
1234 static int should_validate = -1;
1235 if (should_validate < 0)
1236 should_validate = env_var_as_boolean("NIR_VALIDATE", true);
1237 if (!should_validate)
1238 return;
1239
1240 validate_state state;
1241 init_validate_state(&state);
1242
1243 state.shader = shader;
1244
1245 exec_list_validate(&shader->uniforms);
1246 nir_foreach_variable(var, &shader->uniforms) {
1247 validate_var_decl(var, nir_var_uniform |
1248 nir_var_mem_ubo |
1249 nir_var_mem_ssbo,
1250 &state);
1251 }
1252
1253 exec_list_validate(&shader->inputs);
1254 nir_foreach_variable(var, &shader->inputs) {
1255 validate_var_decl(var, nir_var_shader_in, &state);
1256 }
1257
1258 exec_list_validate(&shader->outputs);
1259 nir_foreach_variable(var, &shader->outputs) {
1260 validate_var_decl(var, nir_var_shader_out, &state);
1261 }
1262
1263 exec_list_validate(&shader->shared);
1264 nir_foreach_variable(var, &shader->shared) {
1265 validate_var_decl(var, nir_var_mem_shared, &state);
1266 }
1267
1268 exec_list_validate(&shader->globals);
1269 nir_foreach_variable(var, &shader->globals) {
1270 validate_var_decl(var, nir_var_shader_temp, &state);
1271 }
1272
1273 exec_list_validate(&shader->system_values);
1274 nir_foreach_variable(var, &shader->system_values) {
1275 validate_var_decl(var, nir_var_system_value, &state);
1276 }
1277
1278 exec_list_validate(&shader->functions);
1279 foreach_list_typed(nir_function, func, node, &shader->functions) {
1280 validate_function(func, &state);
1281 }
1282
1283 if (_mesa_hash_table_num_entries(state.errors) > 0)
1284 dump_errors(&state, when);
1285
1286 destroy_validate_state(&state);
1287 }
1288
1289 #endif /* NDEBUG */