X-Git-Url: https://git.libre-soc.org/?a=blobdiff_plain;f=src%2Fcompiler%2Fnir%2Fnir_validate.c;h=eee737e8069cd8c9f9d90cc8a8377af94fc9ea64;hb=67c728f7a9450b04d4de1a29f1dcfb9265a7ebfd;hp=60af71550ba2ef29401a3a1fadf2d5de5168bb0a;hpb=8ee909ee4261bc2d4396851c44219f07041cd85c;p=mesa.git diff --git a/src/compiler/nir/nir_validate.c b/src/compiler/nir/nir_validate.c index 60af71550ba..eee737e8069 100644 --- a/src/compiler/nir/nir_validate.c +++ b/src/compiler/nir/nir_validate.c @@ -35,7 +35,7 @@ /* Since this file is just a pile of asserts, don't bother compiling it if * we're not building a debug build. */ -#ifdef DEBUG +#ifndef NDEBUG /* * Per-register validation state. @@ -96,7 +96,9 @@ typedef struct { /* bitset of registers we have currently found; used to check uniqueness */ BITSET_WORD *regs_found; - /* map of local variable -> function implementation where it is defined */ + /* map of variable -> function implementation where it is defined or NULL + * if it is a global variable + */ struct hash_table *var_defs; /* map of instruction/var/etc to failed assert string */ @@ -126,10 +128,12 @@ log_error(validate_state *state, const char *cond, const char *file, int line) log_error(state, #cond, __FILE__, __LINE__); \ } while (0) -static void validate_src(nir_src *src, validate_state *state); +static void validate_src(nir_src *src, validate_state *state, + unsigned bit_size, unsigned num_components); static void -validate_reg_src(nir_src *src, validate_state *state) +validate_reg_src(nir_src *src, validate_state *state, + unsigned bit_size, unsigned num_components) { validate_assert(state, src->reg.reg != NULL); @@ -151,6 +155,13 @@ validate_reg_src(nir_src *src, validate_state *state) "using a register declared in a different function"); } + if (!src->reg.reg->is_packed) { + if (bit_size) + validate_assert(state, src->reg.reg->bit_size == bit_size); + if (num_components) + validate_assert(state, src->reg.reg->num_components == num_components); + } + validate_assert(state, (src->reg.reg->num_array_elems == 0 || src->reg.base_offset < src->reg.reg->num_array_elems) && "definitely out-of-bounds array access"); @@ -160,12 +171,13 @@ validate_reg_src(nir_src *src, validate_state *state) validate_assert(state, (src->reg.indirect->is_ssa || src->reg.indirect->reg.indirect == NULL) && "only one level of indirection allowed"); - validate_src(src->reg.indirect, state); + validate_src(src->reg.indirect, state, 32, 1); } } static void -validate_ssa_src(nir_src *src, validate_state *state) +validate_ssa_src(nir_src *src, validate_state *state, + unsigned bit_size, unsigned num_components) { validate_assert(state, src->ssa != NULL); @@ -188,11 +200,17 @@ validate_ssa_src(nir_src *src, validate_state *state) _mesa_set_add(def_state->if_uses, src); } + if (bit_size) + validate_assert(state, src->ssa->bit_size == bit_size); + if (num_components) + validate_assert(state, src->ssa->num_components == num_components); + /* TODO validate that the use is dominated by the definition */ } static void -validate_src(nir_src *src, validate_state *state) +validate_src(nir_src *src, validate_state *state, + unsigned bit_size, unsigned num_components) { if (state->instr) validate_assert(state, src->parent_instr == state->instr); @@ -200,9 +218,9 @@ validate_src(nir_src *src, validate_state *state) validate_assert(state, src->parent_if == state->if_stmt); if (src->is_ssa) - validate_ssa_src(src, state); + validate_ssa_src(src, state, bit_size, num_components); else - validate_reg_src(src, state); + validate_reg_src(src, 
state, bit_size, num_components); } static void @@ -211,12 +229,9 @@ validate_alu_src(nir_alu_instr *instr, unsigned index, validate_state *state) nir_alu_src *src = &instr->src[index]; unsigned num_components; - unsigned src_bit_size; if (src->src.is_ssa) { - src_bit_size = src->src.ssa->bit_size; num_components = src->src.ssa->num_components; } else { - src_bit_size = src->src.reg.reg->bit_size; if (src->src.reg.reg->is_packed) num_components = 4; /* can't check anything */ else @@ -229,29 +244,12 @@ validate_alu_src(nir_alu_instr *instr, unsigned index, validate_state *state) validate_assert(state, src->swizzle[i] < num_components); } - nir_alu_type src_type = nir_op_infos[instr->op].input_types[index]; - - /* 8-bit float isn't a thing */ - if (nir_alu_type_get_base_type(src_type) == nir_type_float) - validate_assert(state, src_bit_size == 16 || src_bit_size == 32 || src_bit_size == 64); - - if (nir_alu_type_get_type_size(src_type)) { - /* This source has an explicit bit size */ - validate_assert(state, nir_alu_type_get_type_size(src_type) == src_bit_size); - } else { - if (!nir_alu_type_get_type_size(nir_op_infos[instr->op].output_type)) { - unsigned dest_bit_size = - instr->dest.dest.is_ssa ? instr->dest.dest.ssa.bit_size - : instr->dest.dest.reg.reg->bit_size; - validate_assert(state, dest_bit_size == src_bit_size); - } - } - - validate_src(&src->src, state); + validate_src(&src->src, state, 0, 0); } static void -validate_reg_dest(nir_reg_dest *dest, validate_state *state) +validate_reg_dest(nir_reg_dest *dest, validate_state *state, + unsigned bit_size, unsigned num_components) { validate_assert(state, dest->reg != NULL); @@ -270,6 +268,13 @@ validate_reg_dest(nir_reg_dest *dest, validate_state *state) "writing to a register declared in a different function"); } + if (!dest->reg->is_packed) { + if (bit_size) + validate_assert(state, dest->reg->bit_size == bit_size); + if (num_components) + validate_assert(state, dest->reg->num_components == num_components); + } + validate_assert(state, (dest->reg->num_array_elems == 0 || dest->base_offset < dest->reg->num_array_elems) && "definitely out-of-bounds array access"); @@ -278,7 +283,7 @@ validate_reg_dest(nir_reg_dest *dest, validate_state *state) validate_assert(state, dest->reg->num_array_elems != 0); validate_assert(state, (dest->indirect->is_ssa || dest->indirect->reg.indirect == NULL) && "only one level of indirection allowed"); - validate_src(dest->indirect, state); + validate_src(dest->indirect, state, 32, 1); } } @@ -291,7 +296,9 @@ validate_ssa_def(nir_ssa_def *def, validate_state *state) validate_assert(state, def->parent_instr == state->instr); - validate_assert(state, def->num_components <= 4); + validate_assert(state, (def->num_components <= 4) || + (def->num_components == 8) || + (def->num_components == 16)); list_validate(&def->uses); list_validate(&def->if_uses); @@ -307,12 +314,18 @@ validate_ssa_def(nir_ssa_def *def, validate_state *state) } static void -validate_dest(nir_dest *dest, validate_state *state) +validate_dest(nir_dest *dest, validate_state *state, + unsigned bit_size, unsigned num_components) { - if (dest->is_ssa) + if (dest->is_ssa) { + if (bit_size) + validate_assert(state, dest->ssa.bit_size == bit_size); + if (num_components) + validate_assert(state, dest->ssa.num_components == num_components); validate_ssa_def(&dest->ssa, state); - else - validate_reg_dest(&dest->reg, state); + } else { + validate_reg_dest(&dest->reg, state, bit_size, num_components); + } } static void @@ -339,18 +352,7 @@ 
validate_alu_dest(nir_alu_instr *instr, validate_state *state) nir_type_float) || !dest->saturate); - unsigned bit_size = dest->dest.is_ssa ? dest->dest.ssa.bit_size - : dest->dest.reg.reg->bit_size; - nir_alu_type type = nir_op_infos[instr->op].output_type; - - /* 8-bit float isn't a thing */ - if (nir_alu_type_get_base_type(type) == nir_type_float) - validate_assert(state, bit_size == 16 || bit_size == 32 || bit_size == 64); - - validate_assert(state, nir_alu_type_get_type_size(type) == 0 || - nir_alu_type_get_type_size(type) == bit_size); - - validate_dest(&dest->dest, state); + validate_dest(&dest->dest, state, 0, 0); } static void @@ -358,15 +360,49 @@ validate_alu_instr(nir_alu_instr *instr, validate_state *state) { validate_assert(state, instr->op < nir_num_opcodes); + unsigned instr_bit_size = 0; for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) { + nir_alu_type src_type = nir_op_infos[instr->op].input_types[i]; + unsigned src_bit_size = nir_src_bit_size(instr->src[i].src); + if (nir_alu_type_get_type_size(src_type)) { + validate_assert(state, src_bit_size == nir_alu_type_get_type_size(src_type)); + } else if (instr_bit_size) { + validate_assert(state, src_bit_size == instr_bit_size); + } else { + instr_bit_size = src_bit_size; + } + + if (nir_alu_type_get_base_type(src_type) == nir_type_float) { + /* 8-bit float isn't a thing */ + validate_assert(state, src_bit_size == 16 || src_bit_size == 32 || + src_bit_size == 64); + } + validate_alu_src(instr, i, state); } + nir_alu_type dest_type = nir_op_infos[instr->op].output_type; + unsigned dest_bit_size = nir_dest_bit_size(instr->dest.dest); + if (nir_alu_type_get_type_size(dest_type)) { + validate_assert(state, dest_bit_size == nir_alu_type_get_type_size(dest_type)); + } else if (instr_bit_size) { + validate_assert(state, dest_bit_size == instr_bit_size); + } else { + /* The only unsized thing is the destination so it's vacuously valid */ + } + + if (nir_alu_type_get_base_type(dest_type) == nir_type_float) { + /* 8-bit float isn't a thing */ + validate_assert(state, dest_bit_size == 16 || dest_bit_size == 32 || + dest_bit_size == 64); + } + validate_alu_dest(instr, state); } static void -validate_deref_chain(nir_deref *deref, validate_state *state) +validate_deref_chain(nir_deref *deref, nir_variable_mode mode, + validate_state *state) { validate_assert(state, deref->child == NULL || ralloc_parent(deref->child) == deref); @@ -374,10 +410,23 @@ validate_deref_chain(nir_deref *deref, validate_state *state) while (deref != NULL) { switch (deref->deref_type) { case nir_deref_type_array: + if (mode == nir_var_shared) { + /* Shared variables have a bit more relaxed rules because we need + * to be able to handle array derefs on vectors. Fortunately, + * nir_lower_io handles these just fine. 
+ */ + validate_assert(state, glsl_type_is_array(parent->type) || + glsl_type_is_matrix(parent->type) || + glsl_type_is_vector(parent->type)); + } else { + /* Most of NIR cannot handle array derefs on vectors */ + validate_assert(state, glsl_type_is_array(parent->type) || + glsl_type_is_matrix(parent->type)); + } validate_assert(state, deref->type == glsl_get_array_element(parent->type)); if (nir_deref_as_array(deref)->deref_array_type == nir_deref_array_type_indirect) - validate_src(&nir_deref_as_array(deref)->indirect, state); + validate_src(&nir_deref_as_array(deref)->indirect, state, 32, 1); break; case nir_deref_type_struct: @@ -403,12 +452,10 @@ validate_deref_chain(nir_deref *deref, validate_state *state) static void validate_var_use(nir_variable *var, validate_state *state) { - if (var->data.mode == nir_var_local) { - struct hash_entry *entry = _mesa_hash_table_search(state->var_defs, var); - - validate_assert(state, entry); + struct hash_entry *entry = _mesa_hash_table_search(state->var_defs, var); + validate_assert(state, entry); + if (var->data.mode == nir_var_local) validate_assert(state, (nir_function_impl *) entry->data == state->impl); - } } static void @@ -420,28 +467,27 @@ validate_deref_var(void *parent_mem_ctx, nir_deref_var *deref, validate_state *s validate_var_use(deref->var, state); - validate_deref_chain(&deref->deref, state); + validate_deref_chain(&deref->deref, deref->var->data.mode, state); } static void validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state) { + unsigned bit_size = 0; + if (instr->intrinsic == nir_intrinsic_load_var || + instr->intrinsic == nir_intrinsic_store_var) { + const struct glsl_type *type = + nir_deref_tail(&instr->variables[0]->deref)->type; + bit_size = glsl_get_bit_size(type); + } + unsigned num_srcs = nir_intrinsic_infos[instr->intrinsic].num_srcs; for (unsigned i = 0; i < num_srcs; i++) { - unsigned components_read = - nir_intrinsic_infos[instr->intrinsic].src_components[i]; - if (components_read == 0) - components_read = instr->num_components; + unsigned components_read = nir_intrinsic_src_components(instr, i); validate_assert(state, components_read > 0); - if (instr->src[i].is_ssa) { - validate_assert(state, components_read <= instr->src[i].ssa->num_components); - } else if (!instr->src[i].reg.reg->is_packed) { - validate_assert(state, components_read <= instr->src[i].reg.reg->num_components); - } - - validate_src(&instr->src[i], state); + validate_src(&instr->src[i], state, bit_size, components_read); } unsigned num_vars = nir_intrinsic_infos[instr->intrinsic].num_variables; @@ -450,20 +496,11 @@ validate_intrinsic_instr(nir_intrinsic_instr *instr, validate_state *state) } if (nir_intrinsic_infos[instr->intrinsic].has_dest) { - unsigned components_written = - nir_intrinsic_infos[instr->intrinsic].dest_components; - if (components_written == 0) - components_written = instr->num_components; + unsigned components_written = nir_intrinsic_dest_components(instr); validate_assert(state, components_written > 0); - if (instr->dest.is_ssa) { - validate_assert(state, components_written <= instr->dest.ssa.num_components); - } else if (!instr->dest.reg.reg->is_packed) { - validate_assert(state, components_written <= instr->dest.reg.reg->num_components); - } - - validate_dest(&instr->dest, state); + validate_dest(&instr->dest, state, bit_size, components_written); } switch (instr->intrinsic) { @@ -511,7 +548,8 @@ validate_tex_instr(nir_tex_instr *instr, validate_state *state) for (unsigned i = 0; i < instr->num_srcs; i++) 
{ validate_assert(state, !src_type_seen[instr->src[i].src_type]); src_type_seen[instr->src[i].src_type] = true; - validate_src(&instr->src[i].src, state); + validate_src(&instr->src[i].src, state, + 0, nir_tex_instr_src_size(instr, i)); } if (instr->texture != NULL) @@ -520,7 +558,7 @@ validate_tex_instr(nir_tex_instr *instr, validate_state *state) if (instr->sampler != NULL) validate_deref_var(instr, instr->sampler, state); - validate_dest(&instr->dest, state); + validate_dest(&instr->dest, state, 0, nir_tex_instr_dest_size(instr)); } static void @@ -561,7 +599,7 @@ validate_phi_instr(nir_phi_instr *instr, validate_state *state) * basic blocks, to avoid validating an SSA use before its definition. */ - validate_dest(&instr->dest, state); + validate_dest(&instr->dest, state, 0, 0); exec_list_validate(&instr->srcs); validate_assert(state, exec_list_length(&instr->srcs) == @@ -626,10 +664,8 @@ validate_phi_src(nir_phi_instr *instr, nir_block *pred, validate_state *state) nir_foreach_phi_src(src, instr) { if (src->pred == pred) { validate_assert(state, src->src.is_ssa); - validate_assert(state, src->src.ssa->num_components == - instr->dest.ssa.num_components); - - validate_src(&src->src, state); + validate_src(&src->src, state, instr->dest.ssa.bit_size, + instr->dest.ssa.num_components); state->instr = NULL; return; } @@ -705,8 +741,7 @@ validate_block(nir_block *block, validate_state *state) } case nir_jump_continue: { - nir_block *first = - nir_cf_node_as_block(nir_loop_first_cf_node(state->loop)); + nir_block *first = nir_loop_first_block(state->loop); validate_assert(state, block->successors[0] == first); break; } @@ -723,8 +758,7 @@ validate_block(nir_block *block, validate_state *state) if (next == NULL) { switch (state->parent_node->type) { case nir_cf_node_loop: { - nir_block *first = - nir_cf_node_as_block(nir_loop_first_cf_node(state->loop)); + nir_block *first = nir_loop_first_block(state->loop); validate_assert(state, block->successors[0] == first); /* due to the hack for infinite loops, block->successors[1] may * point to the block after the loop. 
@@ -751,15 +785,15 @@ validate_block(nir_block *block, validate_state *state) } else { if (next->type == nir_cf_node_if) { nir_if *if_stmt = nir_cf_node_as_if(next); - validate_assert(state, &block->successors[0]->cf_node == - nir_if_first_then_node(if_stmt)); - validate_assert(state, &block->successors[1]->cf_node == - nir_if_first_else_node(if_stmt)); + validate_assert(state, block->successors[0] == + nir_if_first_then_block(if_stmt)); + validate_assert(state, block->successors[1] == + nir_if_first_else_block(if_stmt)); } else { validate_assert(state, next->type == nir_cf_node_loop); nir_loop *loop = nir_cf_node_as_loop(next); - validate_assert(state, &block->successors[0]->cf_node == - nir_loop_first_cf_node(loop)); + validate_assert(state, block->successors[0] == + nir_loop_first_block(loop)); validate_assert(state, block->successors[1] == NULL); } } @@ -779,7 +813,7 @@ validate_if(nir_if *if_stmt, validate_state *state) nir_cf_node *next_node = nir_cf_node_next(&if_stmt->cf_node); validate_assert(state, next_node->type == nir_cf_node_block); - validate_src(&if_stmt->condition, state); + validate_src(&if_stmt->condition, state, 32, 1); validate_assert(state, !exec_list_is_empty(&if_stmt->then_list)); validate_assert(state, !exec_list_is_empty(&if_stmt->else_list)); @@ -942,16 +976,28 @@ validate_var_decl(nir_variable *var, bool is_global, validate_state *state) validate_assert(state, is_global == nir_variable_is_global(var)); /* Must have exactly one mode set */ - validate_assert(state, util_bitcount(var->data.mode) == 1); + validate_assert(state, util_is_power_of_two_nonzero(var->data.mode)); + + if (var->data.compact) { + /* The "compact" flag is only valid on arrays of scalars. */ + assert(glsl_type_is_array(var->type)); + + const struct glsl_type *type = glsl_get_array_element(var->type); + if (nir_is_per_vertex_io(var, state->shader->info.stage)) { + assert(glsl_type_is_array(type)); + assert(glsl_type_is_scalar(glsl_get_array_element(type))); + } else { + assert(glsl_type_is_scalar(type)); + } + } /* * TODO validate some things ir_validate.cpp does (requires more GLSL type * support) */ - if (!is_global) { - _mesa_hash_table_insert(state->var_defs, var, state->impl); - } + _mesa_hash_table_insert(state->var_defs, var, + is_global ? NULL : state->impl); state->var = NULL; }
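
Reviewer note (not part of the patch): the core of this change is that validate_src()/validate_dest() and their SSA/register variants now take an expected bit_size and num_components, where a value of 0 means "no constraint, skip the check". Callers that know the expected shape pass it directly (indirect offsets and if-conditions are 32-bit scalars, hence the (32, 1) arguments), while ALU sources and destinations pass (0, 0) because their sizes are cross-checked against each other in validate_alu_instr() instead. Below is a minimal stand-alone sketch of that convention; check_sized_value() and the sizes used in main() are illustrative only and do not appear in nir_validate.c.

/* Stand-alone sketch of the "0 means don't check" convention used by the
 * patched validation helpers.  Illustrative names only. */
#include <assert.h>

static void
check_sized_value(unsigned actual_bit_size, unsigned actual_components,
                  unsigned expected_bit_size, unsigned expected_components)
{
   /* An expected value of 0 means the caller imposes no constraint. */
   if (expected_bit_size)
      assert(actual_bit_size == expected_bit_size);
   if (expected_components)
      assert(actual_components == expected_components);
}

int
main(void)
{
   /* Indirect offsets and if-conditions must be 32-bit scalars. */
   check_sized_value(32, 1, 32, 1);

   /* ALU operands pass (0, 0); their bit sizes are checked against each
    * other in validate_alu_instr() rather than here. */
   check_sized_value(16, 4, 0, 0);
   return 0;
}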