*/
#include "nir.h"
+#include "nir_builder.h"
#include "util/set.h"
#include "util/hash_table.h"
if (intrin->intrinsic != nir_intrinsic_load_deref)
continue;
- nir_variable *var =
- nir_deref_instr_get_variable(nir_src_as_deref(intrin->src[0]));
-
- if (var->data.mode != nir_var_shader_out)
+ nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
+ if (deref->mode != nir_var_shader_out)
continue;
+ nir_variable *var = nir_deref_instr_get_variable(deref);
if (var->data.patch) {
patches_read[var->data.location_frac] |=
get_variable_io_mask(var, shader->info.stage);
}
}
-static bool
-remove_unused_io_vars(nir_shader *shader, struct exec_list *var_list,
- uint64_t *used_by_other_stage,
- uint64_t *used_by_other_stage_patches)
+/**
+ * Helper for removing unused shader I/O variables, by demoting them to global
+ * variables (which may then be dead code eliminated).
+ *
+ * Example usage is:
+ *
+ * progress = nir_remove_unused_io_vars(producer,
+ * &producer->outputs,
+ * read, patches_read) ||
+ * progress;
+ *
+ * The "used" should be an array of 4 uint64_ts (probably of VARYING_BIT_*)
+ * representing each .location_frac used. Note that for vector variables,
+ * only the first channel (.location_frac) is examined for deciding if the
+ * variable is used!
+ */
+bool
+nir_remove_unused_io_vars(nir_shader *shader, struct exec_list *var_list,
+ uint64_t *used_by_other_stage,
+ uint64_t *used_by_other_stage_patches)
{
bool progress = false;
uint64_t *used;
if (var->data.always_active_io)
continue;
+ if (var->data.explicit_xfb_buffer)
+ continue;
+
uint64_t other_stage = used[var->data.location_frac];
if (!(other_stage & get_variable_io_mask(var, shader->info.stage))) {
/* This one is invalid, make it a global variable instead */
var->data.location = 0;
- var->data.mode = nir_var_global;
+ var->data.mode = nir_var_shader_temp;
exec_node_remove(&var->node);
exec_list_push_tail(&shader->globals, &var->node);
}
}
+ if (progress)
+ nir_fixup_deref_modes(shader);
+
return progress;
}
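A minimal sketch of how a caller could build the per-component "used" masks that the doc comment above describes, reusing this file's static get_variable_io_mask() helper (the loop shown is illustrative only and not part of this patch):

    uint64_t read[4] = { 0 }, patches_read[4] = { 0 };

    nir_foreach_variable(var, &consumer->inputs) {
       /* Accumulate one bitmask per .location_frac component. */
       uint64_t mask = get_variable_io_mask(var, consumer->info.stage);
       if (var->data.patch)
          patches_read[var->data.location_frac] |= mask;
       else
          read[var->data.location_frac] |= mask;
    }

    progress = nir_remove_unused_io_vars(producer, &producer->outputs,
                                         read, patches_read) || progress;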
tcs_add_output_reads(producer, read, patches_read);
bool progress = false;
- progress = remove_unused_io_vars(producer, &producer->outputs, read,
- patches_read);
+ progress = nir_remove_unused_io_vars(producer, &producer->outputs, read,
+ patches_read);
- progress = remove_unused_io_vars(consumer, &consumer->inputs, written,
- patches_written) || progress;
+ progress = nir_remove_unused_io_vars(consumer, &consumer->inputs, written,
+ patches_written) || progress;
return progress;
}
static uint8_t
-get_interp_type(nir_variable *var, bool default_to_smooth_interp)
+get_interp_type(nir_variable *var, const struct glsl_type *type,
+ bool default_to_smooth_interp)
{
- if (var->data.interpolation != INTERP_MODE_NONE)
+ if (glsl_type_is_integer(type))
+ return INTERP_MODE_FLAT;
+ else if (var->data.interpolation != INTERP_MODE_NONE)
return var->data.interpolation;
else if (default_to_smooth_interp)
return INTERP_MODE_SMOOTH;
unsigned comps_slot2 = 0;
for (unsigned i = 0; i < slots; i++) {
interp_type[location + i] =
- get_interp_type(var, default_to_smooth_interp);
+ get_interp_type(var, type, default_to_smooth_interp);
interp_loc[location + i] = get_interp_loc(var);
if (dual_slot) {
/* We ignore complex types above and all other vector types should
* have been split into scalar variables by the lower_io_to_scalar
- * pass. The only exeption should by OpenGL xfb varyings.
+ * pass. The only exception should be OpenGL xfb varyings.
*/
if (glsl_get_vector_elements(type) != 1)
continue;
continue;
bool found_new_offset = false;
- uint8_t interp = get_interp_type(var, default_to_smooth_interp);
+ uint8_t interp = get_interp_type(var, type, default_to_smooth_interp);
for (; cursor[interp] < 32; cursor[interp]++) {
uint8_t cursor_used_comps = comps[cursor[interp]];
compact_components(producer, consumer, comps, interp_type, interp_loc,
default_to_smooth_interp);
}
+
+/*
+ * Mark XFB varyings as always_active_io in the consumer so the linking opts
+ * don't touch them.
+ */
+void
+nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer)
+{
+ nir_variable *input_vars[MAX_VARYING] = { 0 };
+
+ nir_foreach_variable(var, &consumer->inputs) {
+ if (var->data.location >= VARYING_SLOT_VAR0 &&
+ var->data.location - VARYING_SLOT_VAR0 < MAX_VARYING) {
+
+ unsigned location = var->data.location - VARYING_SLOT_VAR0;
+ input_vars[location] = var;
+ }
+ }
+
+ nir_foreach_variable(var, &producer->outputs) {
+ if (var->data.location >= VARYING_SLOT_VAR0 &&
+ var->data.location - VARYING_SLOT_VAR0 < MAX_VARYING) {
+
+ if (!var->data.always_active_io)
+ continue;
+
+ unsigned location = var->data.location - VARYING_SLOT_VAR0;
+ if (input_vars[location]) {
+ input_vars[location]->data.always_active_io = true;
+ }
+ }
+ }
+}
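A hedged sketch of the call order a driver's link step might use, so the marking done above happens before the other linking passes touch the varyings (this ordering is illustrative, not mandated by the patch):

    nir_link_xfb_varyings(producer, consumer);
    progress |= nir_remove_unused_varyings(producer, consumer);
    progress |= nir_link_opt_varyings(producer, consumer);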
+
+static bool
+does_varying_match(nir_variable *out_var, nir_variable *in_var)
+{
+ return in_var->data.location == out_var->data.location &&
+ in_var->data.location_frac == out_var->data.location_frac;
+}
+
+static nir_variable *
+get_matching_input_var(nir_shader *consumer, nir_variable *out_var)
+{
+ nir_foreach_variable(var, &consumer->inputs) {
+ if (does_varying_match(out_var, var))
+ return var;
+ }
+
+ return NULL;
+}
+
+static bool
+can_replace_varying(nir_variable *out_var)
+{
+ /* Skip types that require more complex handling.
+ * TODO: add support for these types.
+ */
+ if (glsl_type_is_array(out_var->type) ||
+ glsl_type_is_dual_slot(out_var->type) ||
+ glsl_type_is_matrix(out_var->type) ||
+ glsl_type_is_struct(out_var->type))
+ return false;
+
+ /* Limit this pass to scalars for now to keep things simple. Most varyings
+ * should have been lowered to scalars at this point anyway.
+ */
+ if (!glsl_type_is_scalar(out_var->type))
+ return false;
+
+ if (out_var->data.location < VARYING_SLOT_VAR0 ||
+ out_var->data.location - VARYING_SLOT_VAR0 >= MAX_VARYING)
+ return false;
+
+ return true;
+}
+
+static bool
+replace_constant_input(nir_shader *shader, nir_intrinsic_instr *store_intr)
+{
+ nir_function_impl *impl = nir_shader_get_entrypoint(shader);
+
+ nir_builder b;
+ nir_builder_init(&b, impl);
+
+ nir_variable *out_var =
+ nir_deref_instr_get_variable(nir_src_as_deref(store_intr->src[0]));
+
+ bool progress = false;
+ nir_foreach_block(block, impl) {
+ nir_foreach_instr(instr, block) {
+ if (instr->type != nir_instr_type_intrinsic)
+ continue;
+
+ nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
+ if (intr->intrinsic != nir_intrinsic_load_deref)
+ continue;
+
+ nir_deref_instr *in_deref = nir_src_as_deref(intr->src[0]);
+ if (in_deref->mode != nir_var_shader_in)
+ continue;
+
+ nir_variable *in_var = nir_deref_instr_get_variable(in_deref);
+
+ if (!does_varying_match(out_var, in_var))
+ continue;
+
+ b.cursor = nir_before_instr(instr);
+
+ nir_load_const_instr *out_const =
+ nir_instr_as_load_const(store_intr->src[1].ssa->parent_instr);
+
+ /* Add new const to replace the input */
+ nir_ssa_def *nconst = nir_build_imm(&b, store_intr->num_components,
+ intr->dest.ssa.bit_size,
+ out_const->value);
+
+ nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(nconst));
+
+ progress = true;
+ }
+ }
+
+ return progress;
+}
+
+static bool
+replace_duplicate_input(nir_shader *shader, nir_variable *input_var,
+ nir_intrinsic_instr *dup_store_intr)
+{
+ assert(input_var);
+
+ nir_function_impl *impl = nir_shader_get_entrypoint(shader);
+
+ nir_builder b;
+ nir_builder_init(&b, impl);
+
+ nir_variable *dup_out_var =
+ nir_deref_instr_get_variable(nir_src_as_deref(dup_store_intr->src[0]));
+
+ bool progress = false;
+ nir_foreach_block(block, impl) {
+ nir_foreach_instr(instr, block) {
+ if (instr->type != nir_instr_type_intrinsic)
+ continue;
+
+ nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
+ if (intr->intrinsic != nir_intrinsic_load_deref)
+ continue;
+
+ nir_deref_instr *in_deref = nir_src_as_deref(intr->src[0]);
+ if (in_deref->mode != nir_var_shader_in)
+ continue;
+
+ nir_variable *in_var = nir_deref_instr_get_variable(in_deref);
+
+ if (!does_varying_match(dup_out_var, in_var) ||
+ in_var->data.interpolation != input_var->data.interpolation ||
+ get_interp_loc(in_var) != get_interp_loc(input_var))
+ continue;
+
+ b.cursor = nir_before_instr(instr);
+
+ nir_ssa_def *load = nir_load_var(&b, input_var);
+ nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(load));
+
+ progress = true;
+ }
+ }
+
+ return progress;
+}
+
+bool
+nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer)
+{
+ /* TODO: Add support for more shader stage combinations */
+ if (consumer->info.stage != MESA_SHADER_FRAGMENT ||
+ (producer->info.stage != MESA_SHADER_VERTEX &&
+ producer->info.stage != MESA_SHADER_TESS_EVAL))
+ return false;
+
+ bool progress = false;
+
+ nir_function_impl *impl = nir_shader_get_entrypoint(producer);
+
+ struct hash_table *varying_values = _mesa_pointer_hash_table_create(NULL);
+
+ /* If we find a store in the last block of the producer we can be sure this
+ * is the only possible value for this output.
+ */
+ nir_block *last_block = nir_impl_last_block(impl);
+ nir_foreach_instr_reverse(instr, last_block) {
+ if (instr->type != nir_instr_type_intrinsic)
+ continue;
+
+ nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
+
+ if (intr->intrinsic != nir_intrinsic_store_deref)
+ continue;
+
+ nir_deref_instr *out_deref = nir_src_as_deref(intr->src[0]);
+ if (out_deref->mode != nir_var_shader_out)
+ continue;
+
+ nir_variable *out_var = nir_deref_instr_get_variable(out_deref);
+ if (!can_replace_varying(out_var))
+ continue;
+
+ if (intr->src[1].ssa->parent_instr->type == nir_instr_type_load_const) {
+ progress |= replace_constant_input(consumer, intr);
+ } else {
+ struct hash_entry *entry =
+ _mesa_hash_table_search(varying_values, intr->src[1].ssa);
+ if (entry) {
+ progress |= replace_duplicate_input(consumer,
+ (nir_variable *) entry->data,
+ intr);
+ } else {
+ nir_variable *in_var = get_matching_input_var(consumer, out_var);
+ if (in_var) {
+ _mesa_hash_table_insert(varying_values, intr->src[1].ssa,
+ in_var);
+ }
+ }
+ }
+ }
+
+ _mesa_hash_table_destroy(varying_values, NULL);
+
+ return progress;
+}
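Since nir_link_opt_varyings() only rewrites the consumer's loads, a caller would typically follow a positive return with another cleanup round so the replaced loads and any now-unused inputs become dead code. A rough, hedged sketch (the specific cleanup passes chosen here are illustrative):

    if (nir_link_opt_varyings(producer, consumer)) {
       /* Re-run the usual cleanup so the rewritten loads are eliminated. */
       NIR_PASS_V(consumer, nir_copy_prop);
       NIR_PASS_V(consumer, nir_opt_dce);
    }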