static void validate_src(nir_src *src, validate_state *state,
unsigned bit_sizes, unsigned num_components);
+/* Assert that a component count is one NIR accepts; replaces the
+ * open-coded "(n <= 4) || (n == 8) || (n == 16)" checks that used to
+ * be scattered through this file (see the hunk removing them below).
+ */
+static void
+validate_num_components(validate_state *state, unsigned num_components)
+{
+ validate_assert(state, nir_num_components_valid(num_components));
+}
+
static void
validate_reg_src(nir_src *src, validate_state *state,
unsigned bit_sizes, unsigned num_components)
{
nir_alu_src *src = &instr->src[index];
+ if (instr->op == nir_op_mov)
+ assert(!src->abs && !src->negate);
+
unsigned num_components = nir_src_num_components(src->src);
for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
validate_assert(state, src->swizzle[i] < NIR_MAX_VEC_COMPONENTS);
BITSET_SET(state->ssa_defs_found, def->index);
validate_assert(state, def->parent_instr == state->instr);
-
- validate_assert(state, (def->num_components <= 4) ||
- (def->num_components == 8) ||
- (def->num_components == 16));
+ validate_num_components(state, def->num_components);
list_validate(&def->uses);
nir_foreach_use(src, def) {
{
nir_alu_dest *dest = &instr->dest;
+ if (instr->op == nir_op_mov)
+ assert(!dest->saturate);
+
unsigned dest_size = nir_dest_num_components(dest->dest);
/*
* validate that the instruction doesn't write to components not in the
* conditions expect well-formed Booleans. If you want to compare with
* NULL, an explicit comparison operation should be used.
*/
- validate_assert(state, list_empty(&instr->dest.ssa.if_uses));
+ validate_assert(state, list_is_empty(&instr->dest.ssa.if_uses));
+
+ /* Only certain modes can be used as sources for phi instructions. */
+ nir_foreach_use(use, &instr->dest.ssa) {
+ if (use->parent_instr->type == nir_instr_type_phi) {
+ validate_assert(state, instr->mode == nir_var_mem_ubo ||
+ instr->mode == nir_var_mem_ssbo ||
+ instr->mode == nir_var_mem_shared ||
+ instr->mode == nir_var_mem_global);
+ }
+ }
+}
+
+/* Returns true if this intrinsic is "vectorized": its intrinsic-info
+ * table entry declares the destination, or any source, with 0
+ * components, meaning instr->num_components carries the real width.
+ * Callers use this to require num_components == 0 on non-vectorized
+ * intrinsics.
+ */
+static bool
+vectorized_intrinsic(nir_intrinsic_instr *intr)
+{
+ const nir_intrinsic_info *info = &nir_intrinsic_infos[intr->intrinsic];
+
+ /* Destination width taken from the instruction, not the table. */
+ if (info->dest_components == 0)
+ return true;
+
+ /* Any source with table width 0 also makes it vectorized. */
+ for (unsigned i = 0; i < info->num_srcs; i++)
+ if (info->src_components[i] == 0)
+ return true;
+
+ return false;
}
static void
case nir_intrinsic_load_deref: {
nir_deref_instr *src = nir_src_as_deref(instr->src[0]);
+ assert(src);
validate_assert(state, glsl_type_is_vector_or_scalar(src->type) ||
(src->mode == nir_var_uniform &&
glsl_get_base_type(src->type) == GLSL_TYPE_SUBROUTINE));
case nir_intrinsic_store_deref: {
nir_deref_instr *dst = nir_src_as_deref(instr->src[0]);
+ assert(dst);
validate_assert(state, glsl_type_is_vector_or_scalar(dst->type));
validate_assert(state, instr->num_components ==
glsl_get_vector_elements(dst->type));
break;
}
+ case nir_intrinsic_load_ubo:
+ case nir_intrinsic_load_ssbo:
+ case nir_intrinsic_load_shared:
+ case nir_intrinsic_load_global:
+ case nir_intrinsic_load_scratch:
+ case nir_intrinsic_load_constant:
+ /* These memory load operations must have alignments */
+ validate_assert(state,
+ util_is_power_of_two_nonzero(nir_intrinsic_align_mul(instr)));
+ validate_assert(state, nir_intrinsic_align_offset(instr) <
+ nir_intrinsic_align_mul(instr));
+ /* Fall through */
+
+ case nir_intrinsic_load_uniform:
+ case nir_intrinsic_load_input:
+ case nir_intrinsic_load_per_vertex_input:
+ case nir_intrinsic_load_interpolated_input:
+ case nir_intrinsic_load_output:
+ case nir_intrinsic_load_per_vertex_output:
+ case nir_intrinsic_load_push_constant:
+ /* All memory load operations must load at least a byte */
+ validate_assert(state, nir_dest_bit_size(instr->dest) >= 8);
+ break;
+
+ case nir_intrinsic_store_ssbo:
+ case nir_intrinsic_store_shared:
+ case nir_intrinsic_store_global:
+ case nir_intrinsic_store_scratch:
+ /* These memory store operations must also have alignments */
+ validate_assert(state,
+ util_is_power_of_two_nonzero(nir_intrinsic_align_mul(instr)));
+ validate_assert(state, nir_intrinsic_align_offset(instr) <
+ nir_intrinsic_align_mul(instr));
+ /* Fall through */
+
+ case nir_intrinsic_store_output:
+ case nir_intrinsic_store_per_vertex_output:
+ /* All memory store operations must store at least a byte */
+ validate_assert(state, nir_src_bit_size(instr->src[0]) >= 8);
+ break;
+
default:
break;
}
- unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs;
+ if (instr->num_components > 0)
+ validate_num_components(state, instr->num_components);
+
+ const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
+ unsigned num_srcs = info->num_srcs;
for (unsigned i = 0; i < num_srcs; i++) {
unsigned components_read = nir_intrinsic_src_components(instr, i);
- validate_assert(state, components_read > 0);
+ validate_num_components(state, components_read);
validate_src(&instr->src[i], state, src_bit_sizes[i], components_read);
}
unsigned components_written = nir_intrinsic_dest_components(instr);
unsigned bit_sizes = nir_intrinsic_infos[instr->intrinsic].dest_bit_sizes;
- validate_assert(state, components_written > 0);
-
+ validate_num_components(state, components_written);
if (dest_bit_size && bit_sizes)
validate_assert(state, dest_bit_size & bit_sizes);
else
validate_dest(&instr->dest, state, dest_bit_size, components_written);
}
+
+ if (!vectorized_intrinsic(instr))
+ validate_assert(state, instr->num_components == 0);
}
static void
state->block->predecessors->entries);
}
+/* Validates a jump instruction against the CFG invariants for its kind.
+ * Every jump must be the last instruction of its block.  Structured
+ * jumps (return/break/continue) must leave the goto target fields NULL
+ * and have successors implied by the surrounding CF nodes; unstructured
+ * gotos (goto/goto_if) are only legal when the impl is unstructured and
+ * must agree with the block's successor pointers.
+ */
+static void
+validate_jump_instr(nir_jump_instr *instr, validate_state *state)
+{
+ nir_block *block = state->block;
+ validate_assert(state, &instr->instr == nir_block_last_instr(block));
+
+ switch (instr->type) {
+ case nir_jump_return:
+ /* return falls through to the impl's single end block */
+ validate_assert(state, block->successors[0] == state->impl->end_block);
+ validate_assert(state, block->successors[1] == NULL);
+ validate_assert(state, instr->target == NULL);
+ validate_assert(state, instr->else_target == NULL);
+ break;
+
+ case nir_jump_break:
+ /* break targets the block immediately after the innermost loop;
+ * guard on state->loop so a break outside any loop reports a
+ * validation failure instead of dereferencing NULL.
+ */
+ validate_assert(state, state->impl->structured);
+ validate_assert(state, state->loop != NULL);
+ if (state->loop) {
+ nir_block *after =
+ nir_cf_node_as_block(nir_cf_node_next(&state->loop->cf_node));
+ validate_assert(state, block->successors[0] == after);
+ }
+ validate_assert(state, block->successors[1] == NULL);
+ validate_assert(state, instr->target == NULL);
+ validate_assert(state, instr->else_target == NULL);
+ break;
+
+ case nir_jump_continue:
+ /* continue loops back to the first block of the innermost loop */
+ validate_assert(state, state->impl->structured);
+ validate_assert(state, state->loop != NULL);
+ if (state->loop) {
+ nir_block *first = nir_loop_first_block(state->loop);
+ validate_assert(state, block->successors[0] == first);
+ }
+ validate_assert(state, block->successors[1] == NULL);
+ validate_assert(state, instr->target == NULL);
+ validate_assert(state, instr->else_target == NULL);
+ break;
+
+ case nir_jump_goto:
+ /* unconditional goto: single successor must equal the target */
+ validate_assert(state, !state->impl->structured);
+ validate_assert(state, instr->target == block->successors[0]);
+ validate_assert(state, instr->target != NULL);
+ validate_assert(state, instr->else_target == NULL);
+ break;
+
+ case nir_jump_goto_if:
+ /* conditional goto: taken edge is successors[1], fallthrough is
+ * successors[0]; the condition is a 1-component scalar source.
+ */
+ validate_assert(state, !state->impl->structured);
+ validate_assert(state, instr->target == block->successors[1]);
+ validate_assert(state, instr->else_target == block->successors[0]);
+ validate_src(&instr->condition, state, 0, 1);
+ validate_assert(state, instr->target != NULL);
+ validate_assert(state, instr->else_target != NULL);
+ break;
+
+ default:
+ validate_assert(state, !"Invalid jump instruction type");
+ break;
+ }
+}
+
static void
validate_instr(nir_instr *instr, validate_state *state)
{
break;
case nir_instr_type_jump:
+ validate_jump_instr(nir_instr_as_jump(instr), state);
break;
default:
return;
}
}
-
- abort();
+ validate_assert(state, !"Phi does not have a source corresponding to one "
+ "of its predecessor blocks");
}
static void
nir_instr_prev(instr)->type == nir_instr_type_phi);
}
- if (instr->type == nir_instr_type_jump) {
- validate_assert(state, instr == nir_block_last_instr(block));
- }
-
validate_instr(instr, state);
}
pred->successors[1] == block);
}
- if (!exec_list_is_empty(&block->instr_list) &&
- nir_block_last_instr(block)->type == nir_instr_type_jump) {
- validate_assert(state, block->successors[1] == NULL);
- nir_jump_instr *jump = nir_instr_as_jump(nir_block_last_instr(block));
- switch (jump->type) {
- case nir_jump_break: {
- nir_block *after =
- nir_cf_node_as_block(nir_cf_node_next(&state->loop->cf_node));
- validate_assert(state, block->successors[0] == after);
- break;
- }
-
- case nir_jump_continue: {
- nir_block *first = nir_loop_first_block(state->loop);
- validate_assert(state, block->successors[0] == first);
- break;
- }
-
- case nir_jump_return:
- validate_assert(state, block->successors[0] == state->impl->end_block);
- break;
-
- default:
- unreachable("bad jump type");
- }
- } else {
+ if (!state->impl->structured) {
+ validate_assert(state, nir_block_ends_in_jump(block));
+ } else if (!nir_block_ends_in_jump(block)) {
nir_cf_node *next = nir_cf_node_next(&block->cf_node);
if (next == NULL) {
switch (state->parent_node->type) {
nir_if_first_then_block(if_stmt));
validate_assert(state, block->successors[1] ==
nir_if_first_else_block(if_stmt));
- } else {
- validate_assert(state, next->type == nir_cf_node_loop);
+ } else if (next->type == nir_cf_node_loop) {
nir_loop *loop = nir_cf_node_as_loop(next);
validate_assert(state, block->successors[0] ==
nir_loop_first_block(loop));
validate_assert(state, block->successors[1] == NULL);
+ } else {
+ validate_assert(state,
+ !"Structured NIR cannot have consecutive blocks");
}
}
}
static void
validate_if(nir_if *if_stmt, validate_state *state)
{
+ validate_assert(state, state->impl->structured);
+
state->if_stmt = if_stmt;
validate_assert(state, !exec_node_is_head_sentinel(if_stmt->cf_node.node.prev));
static void
validate_loop(nir_loop *loop, validate_state *state)
{
+ validate_assert(state, state->impl->structured);
+
validate_assert(state, !exec_node_is_head_sentinel(loop->cf_node.node.prev));
nir_cf_node *prev_node = nir_cf_node_prev(&loop->cf_node);
validate_assert(state, prev_node->type == nir_cf_node_block);
{
validate_assert(state, reg->index < state->impl->reg_alloc);
validate_assert(state, !BITSET_TEST(state->regs_found, reg->index));
+ validate_num_components(state, reg->num_components);
BITSET_SET(state->regs_found, reg->index);
list_validate(®->uses);
validate_assert(state, entry);
_mesa_set_remove(reg_state->uses, entry);
}
-
- if (reg_state->uses->entries != 0) {
- printf("extra entries in register uses:\n");
- set_foreach(reg_state->uses, entry)
- printf("%p\n", entry->key);
-
- abort();
- }
+ validate_assert(state, reg_state->uses->entries == 0);
nir_foreach_if_use(src, reg) {
struct set_entry *entry = _mesa_set_search(reg_state->if_uses, src);
validate_assert(state, entry);
_mesa_set_remove(reg_state->if_uses, entry);
}
-
- if (reg_state->if_uses->entries != 0) {
- printf("extra entries in register if_uses:\n");
- set_foreach(reg_state->if_uses, entry)
- printf("%p\n", entry->key);
-
- abort();
- }
+ validate_assert(state, reg_state->if_uses->entries == 0);
nir_foreach_def(src, reg) {
struct set_entry *entry = _mesa_set_search(reg_state->defs, src);
validate_assert(state, entry);
_mesa_set_remove(reg_state->defs, entry);
}
-
- if (reg_state->defs->entries != 0) {
- printf("extra entries in register defs:\n");
- set_foreach(reg_state->defs, entry)
- printf("%p\n", entry->key);
-
- abort();
- }
+ validate_assert(state, reg_state->defs->entries == 0);
}
static void
-validate_var_decl(nir_variable *var, bool is_global, validate_state *state)
+validate_var_decl(nir_variable *var, nir_variable_mode valid_modes,
+ validate_state *state)
{
state->var = var;
- validate_assert(state, is_global == nir_variable_is_global(var));
-
/* Must have exactly one mode set */
validate_assert(state, util_is_power_of_two_nonzero(var->data.mode));
+ validate_assert(state, var->data.mode & valid_modes);
if (var->data.compact) {
/* The "compact" flag is only valid on arrays of scalars. */
validate_assert(state, var->members != NULL);
}
+ if (var->data.per_view)
+ validate_assert(state, glsl_type_is_array(var->type));
+
/*
* TODO validate some things ir_validate.cpp does (requires more GLSL type
* support)
*/
_mesa_hash_table_insert(state->var_defs, var,
- is_global ? NULL : state->impl);
+ valid_modes == nir_var_function_temp ?
+ state->impl : NULL);
state->var = NULL;
}
state->parent_node = &impl->cf_node;
exec_list_validate(&impl->locals);
- nir_foreach_variable(var, &impl->locals) {
- validate_var_decl(var, false, state);
+ nir_foreach_function_temp_variable(var, impl) {
+ validate_var_decl(var, nir_var_function_temp, state);
}
state->regs_found = reralloc(state->mem_ctx, state->regs_found,
postvalidate_reg_decl(reg, state);
}
- if (state->ssa_srcs->entries != 0) {
- printf("extra dangling SSA sources:\n");
- set_foreach(state->ssa_srcs, entry)
- printf("%p\n", entry->key);
-
- abort();
- }
+ validate_assert(state, state->ssa_srcs->entries == 0);
+ _mesa_set_clear(state->ssa_srcs, NULL);
}
static void
state.shader = shader;
- exec_list_validate(&shader->uniforms);
- nir_foreach_variable(var, &shader->uniforms) {
- validate_var_decl(var, true, &state);
- }
-
- exec_list_validate(&shader->inputs);
- nir_foreach_variable(var, &shader->inputs) {
- validate_var_decl(var, true, &state);
- }
-
- exec_list_validate(&shader->outputs);
- nir_foreach_variable(var, &shader->outputs) {
- validate_var_decl(var, true, &state);
- }
-
- exec_list_validate(&shader->shared);
- nir_foreach_variable(var, &shader->shared) {
- validate_var_decl(var, true, &state);
- }
-
- exec_list_validate(&shader->globals);
- nir_foreach_variable(var, &shader->globals) {
- validate_var_decl(var, true, &state);
- }
-
- exec_list_validate(&shader->system_values);
- nir_foreach_variable(var, &shader->system_values) {
- validate_var_decl(var, true, &state);
- }
+ nir_variable_mode valid_modes =
+ nir_var_shader_in |
+ nir_var_shader_out |
+ nir_var_shader_temp |
+ nir_var_uniform |
+ nir_var_mem_ubo |
+ nir_var_system_value |
+ nir_var_mem_ssbo |
+ nir_var_mem_shared;
+
+ exec_list_validate(&shader->variables);
+ nir_foreach_variable_in_shader(var, shader)
+ validate_var_decl(var, valid_modes, &state);
exec_list_validate(&shader->functions);
foreach_list_typed(nir_function, func, node, &shader->functions) {