static bool
constant_fold_alu_instr(nir_alu_instr *instr, void *mem_ctx)
{
- nir_const_value src[4];
+ nir_const_value src[NIR_MAX_VEC_COMPONENTS];
if (!instr->dest.dest.is_ssa)
return false;
+ /* If any outputs/inputs have unsized types, then we need to guess the
+ * bit-size. In this case, the validator ensures that all bit-sizes match,
+ * so we can just take the bit-size from the first output/input with an
+ * unsized type. If all the outputs/inputs are sized, then we don't need
+ * to guess the bit-size at all because the code we generate for constant
+ * opcodes in this case already knows the sizes of the types involved and
+ * does not need the provided bit-size for anything (although it must
+ * still receive a valid bit-size).
+ */
+ unsigned bit_size = 0;
+ if (!nir_alu_type_get_type_size(nir_op_infos[instr->op].output_type))
+ bit_size = instr->dest.dest.ssa.bit_size;
+
for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
if (!instr->src[i].src.is_ssa)
return false;
+ if (bit_size == 0 &&
+ !nir_alu_type_get_type_size(nir_op_infos[instr->op].input_types[i]))
+ bit_size = instr->src[i].src.ssa->bit_size;
+
nir_instr *src_instr = instr->src[i].src.ssa->parent_instr;
if (src_instr->type != nir_instr_type_load_const)
return false;
nir_load_const_instr *load_const = nir_instr_as_load_const(src_instr);
for (unsigned j = 0; j < nir_ssa_alu_instr_src_components(instr, i);
j++) {
- src[i].u[j] = load_const->value.u[instr->src[i].swizzle[j]];
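+ /* Copy the swizzled component through the union member that matches
+ * the constant's bit width.
+ */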
+ switch (load_const->def.bit_size) {
+ case 64:
+ src[i].u64[j] = load_const->value.u64[instr->src[i].swizzle[j]];
+ break;
+ case 32:
+ src[i].u32[j] = load_const->value.u32[instr->src[i].swizzle[j]];
+ break;
+ case 16:
+ src[i].u16[j] = load_const->value.u16[instr->src[i].swizzle[j]];
+ break;
+ case 8:
+ src[i].u8[j] = load_const->value.u8[instr->src[i].swizzle[j]];
+ break;
+ case 1:
+ src[i].b[j] = load_const->value.b[instr->src[i].swizzle[j]];
+ break;
+ default:
+ unreachable("Invalid bit size");
+ }
}
/* We shouldn't have any source modifiers in the optimization loop. */
assert(!instr->src[i].abs && !instr->src[i].negate);
}
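+ /* Everything was sized, so any valid bit-size will do (see the comment
+ * above); default to 32.
+ */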
+ if (bit_size == 0)
+ bit_size = 32;
+
/* We shouldn't have any saturate modifiers in the optimization loop. */
assert(!instr->dest.saturate);
nir_const_value dest =
nir_eval_const_opcode(instr->op, instr->dest.dest.ssa.num_components,
- src);
+ bit_size, src);
nir_load_const_instr *new_instr =
nir_load_const_instr_create(mem_ctx,
- instr->dest.dest.ssa.num_components);
+ instr->dest.dest.ssa.num_components,
+ instr->dest.dest.ssa.bit_size);
new_instr->value = dest;
nir_instr_insert_before(&instr->instr, &new_instr->instr);
nir_ssa_def_rewrite_uses(&instr->dest.dest.ssa,
nir_src_for_ssa(&new_instr->def));
nir_instr_remove(&instr->instr);
return true;
}
static bool
-constant_fold_deref(nir_instr *instr, nir_deref_var *deref)
+constant_fold_intrinsic_instr(nir_intrinsic_instr *instr)
{
bool progress = false;
- for (nir_deref *tail = deref->deref.child; tail; tail = tail->child) {
- if (tail->deref_type != nir_deref_type_array)
- continue;
-
- nir_deref_array *arr = nir_deref_as_array(tail);
-
- if (arr->deref_array_type == nir_deref_array_type_indirect &&
- arr->indirect.is_ssa &&
- arr->indirect.ssa->parent_instr->type == nir_instr_type_load_const) {
- nir_load_const_instr *indirect =
- nir_instr_as_load_const(arr->indirect.ssa->parent_instr);
-
- arr->base_offset += indirect->value.u[0];
-
- /* Clear out the source */
- nir_instr_rewrite_src(instr, &arr->indirect, nir_src_for_ssa(NULL));
-
- arr->deref_array_type = nir_deref_array_type_direct;
-
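+ /* A discard_if whose condition is known at compile time either always
+ * discards or never does, so it can be replaced with an unconditional
+ * discard or removed entirely.
+ */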
+ if (instr->intrinsic == nir_intrinsic_discard_if &&
+ nir_src_is_const(instr->src[0])) {
+ if (nir_src_as_bool(instr->src[0])) {
+ /* This method of getting a nir_shader * from a nir_instr is
+ * admittedly gross, but given the rarity of hitting this case I think
+ * it's preferable to plumbing an otherwise unused nir_shader *
+ * parameter through four functions to get here.
+ */
+ nir_cf_node *cf_node = &instr->instr.block->cf_node;
+ nir_function_impl *impl = nir_cf_node_get_function(cf_node);
+ nir_shader *shader = impl->function->shader;
+
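+ /* The condition is constantly true: replace the conditional discard
+ * with an unconditional one.
+ */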
+ nir_intrinsic_instr *discard =
+ nir_intrinsic_instr_create(shader, nir_intrinsic_discard);
+ nir_instr_insert_before(&instr->instr, &discard->instr);
+ nir_instr_remove(&instr->instr);
+ progress = true;
+ } else {
+ /* We're not discarding, just delete the instruction */
+ nir_instr_remove(&instr->instr);
progress = true;
}
}
return progress;
}
static bool
-constant_fold_intrinsic_instr(nir_intrinsic_instr *instr)
+constant_fold_block(nir_block *block, void *mem_ctx)
{
bool progress = false;
- unsigned num_vars = nir_intrinsic_infos[instr->intrinsic].num_variables;
- for (unsigned i = 0; i < num_vars; i++) {
- progress |= constant_fold_deref(&instr->instr, instr->variables[i]);
- }
-
- return progress;
-}
-
-static bool
-constant_fold_tex_instr(nir_tex_instr *instr)
-{
- bool progress = false;
-
- if (instr->texture)
- progress |= constant_fold_deref(&instr->instr, instr->texture);
-
- if (instr->sampler)
- progress |= constant_fold_deref(&instr->instr, instr->sampler);
-
- return progress;
-}
-
-static bool
-constant_fold_block(nir_block *block, void *void_state)
-{
- struct constant_fold_state *state = void_state;
-
- nir_foreach_instr_safe(block, instr) {
+ nir_foreach_instr_safe(instr, block) {
switch (instr->type) {
case nir_instr_type_alu:
- state->progress |= constant_fold_alu_instr(nir_instr_as_alu(instr),
- state->mem_ctx);
+ progress |= constant_fold_alu_instr(nir_instr_as_alu(instr), mem_ctx);
break;
case nir_instr_type_intrinsic:
- state->progress |=
+ progress |=
constant_fold_intrinsic_instr(nir_instr_as_intrinsic(instr));
break;
- case nir_instr_type_tex:
- state->progress |= constant_fold_tex_instr(nir_instr_as_tex(instr));
- break;
default:
/* Don't know how to constant fold */
break;
}
}
- return true;
+ return progress;
}
static bool
nir_opt_constant_folding_impl(nir_function_impl *impl)
{
- struct constant_fold_state state;
-
- state.mem_ctx = ralloc_parent(impl);
- state.impl = impl;
- state.progress = false;
+ void *mem_ctx = ralloc_parent(impl);
+ bool progress = false;
- nir_foreach_block(impl, constant_fold_block, &state);
+ nir_foreach_block(block, impl) {
+ progress |= constant_fold_block(block, mem_ctx);
+ }
- if (state.progress)
+ if (progress) {
nir_metadata_preserve(impl, nir_metadata_block_index |
nir_metadata_dominance);
+ } else {
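+ /* No progress was made, so no metadata was invalidated. Clearing the
+ * debug-only not-properly-reset flag tells the validator that this pass
+ * did consider metadata.
+ */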
+#ifndef NDEBUG
+ impl->valid_metadata &= ~nir_metadata_not_properly_reset;
+#endif
+ }
- return state.progress;
+ return progress;
}
bool
nir_opt_constant_folding(nir_shader *shader)
{
bool progress = false;
- nir_foreach_function(shader, function) {
+ nir_foreach_function(function, shader) {
if (function->impl)
progress |= nir_opt_constant_folding_impl(function->impl);
}
return progress;
}