+
+/*
+ * Mark XFB varyings as always_active_io in the consumer so the linking opts
+ * don't touch them.
+ */
+void
+nir_link_xfb_varyings(nir_shader *producer, nir_shader *consumer)
+{
+ nir_variable *input_vars[MAX_VARYING] = { 0 };
+
+ nir_foreach_variable(var, &consumer->inputs) {
+ if (var->data.location >= VARYING_SLOT_VAR0 &&
+ var->data.location - VARYING_SLOT_VAR0 < MAX_VARYING) {
+
+ unsigned location = var->data.location - VARYING_SLOT_VAR0;
+ input_vars[location] = var;
+ }
+ }
+
+ nir_foreach_variable(var, &producer->outputs) {
+ if (var->data.location >= VARYING_SLOT_VAR0 &&
+ var->data.location - VARYING_SLOT_VAR0 < MAX_VARYING) {
+
+ if (!var->data.always_active_io)
+ continue;
+
+ unsigned location = var->data.location - VARYING_SLOT_VAR0;
+ if (input_vars[location]) {
+ input_vars[location]->data.always_active_io = true;
+ }
+ }
+ }
+}
+
+static bool
+does_varying_match(nir_variable *out_var, nir_variable *in_var)
+{
+ return in_var->data.location == out_var->data.location &&
+ in_var->data.location_frac == out_var->data.location_frac;
+}
+
+static nir_variable *
+get_matching_input_var(nir_shader *consumer, nir_variable *out_var)
+{
+ nir_foreach_variable(var, &consumer->inputs) {
+ if (does_varying_match(out_var, var))
+ return var;
+ }
+
+ return NULL;
+}
+
+static bool
+can_replace_varying(nir_variable *out_var)
+{
+ /* Skip types that require more complex handling.
+ * TODO: add support for these types.
+ */
+ if (glsl_type_is_array(out_var->type) ||
+ glsl_type_is_dual_slot(out_var->type) ||
+ glsl_type_is_matrix(out_var->type) ||
+ glsl_type_is_struct_or_ifc(out_var->type))
+ return false;
+
+ /* Limit this pass to scalars for now to keep things simple. Most varyings
+ * should have been lowered to scalars at this point anyway.
+ */
+ if (!glsl_type_is_scalar(out_var->type))
+ return false;
+
+ if (out_var->data.location < VARYING_SLOT_VAR0 ||
+ out_var->data.location - VARYING_SLOT_VAR0 >= MAX_VARYING)
+ return false;
+
+ return true;
+}
+
+static bool
+replace_constant_input(nir_shader *shader, nir_intrinsic_instr *store_intr)
+{
+ nir_function_impl *impl = nir_shader_get_entrypoint(shader);
+
+ nir_builder b;
+ nir_builder_init(&b, impl);
+
+ nir_variable *out_var =
+ nir_deref_instr_get_variable(nir_src_as_deref(store_intr->src[0]));
+
+ bool progress = false;
+ nir_foreach_block(block, impl) {
+ nir_foreach_instr(instr, block) {
+ if (instr->type != nir_instr_type_intrinsic)
+ continue;
+
+ nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
+ if (intr->intrinsic != nir_intrinsic_load_deref)
+ continue;
+
+ nir_deref_instr *in_deref = nir_src_as_deref(intr->src[0]);
+ if (in_deref->mode != nir_var_shader_in)
+ continue;
+
+ nir_variable *in_var = nir_deref_instr_get_variable(in_deref);
+
+ if (!does_varying_match(out_var, in_var))
+ continue;
+
+ b.cursor = nir_before_instr(instr);
+
+ nir_load_const_instr *out_const =
+ nir_instr_as_load_const(store_intr->src[1].ssa->parent_instr);
+
+ /* Add new const to replace the input */
+ nir_ssa_def *nconst = nir_build_imm(&b, store_intr->num_components,
+ intr->dest.ssa.bit_size,
+ out_const->value);
+
+ nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(nconst));
+
+ progress = true;
+ }
+ }
+
+ return progress;
+}
+
+static bool
+replace_duplicate_input(nir_shader *shader, nir_variable *input_var,
+ nir_intrinsic_instr *dup_store_intr)
+{
+ assert(input_var);
+
+ nir_function_impl *impl = nir_shader_get_entrypoint(shader);
+
+ nir_builder b;
+ nir_builder_init(&b, impl);
+
+ nir_variable *dup_out_var =
+ nir_deref_instr_get_variable(nir_src_as_deref(dup_store_intr->src[0]));
+
+ bool progress = false;
+ nir_foreach_block(block, impl) {
+ nir_foreach_instr(instr, block) {
+ if (instr->type != nir_instr_type_intrinsic)
+ continue;
+
+ nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
+ if (intr->intrinsic != nir_intrinsic_load_deref)
+ continue;
+
+ nir_deref_instr *in_deref = nir_src_as_deref(intr->src[0]);
+ if (in_deref->mode != nir_var_shader_in)
+ continue;
+
+ nir_variable *in_var = nir_deref_instr_get_variable(in_deref);
+
+ if (!does_varying_match(dup_out_var, in_var) ||
+ in_var->data.interpolation != input_var->data.interpolation ||
+ get_interp_loc(in_var) != get_interp_loc(input_var))
+ continue;
+
+ b.cursor = nir_before_instr(instr);
+
+ nir_ssa_def *load = nir_load_var(&b, input_var);
+ nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(load));
+
+ progress = true;
+ }
+ }
+
+ return progress;
+}
+
+bool
+nir_link_opt_varyings(nir_shader *producer, nir_shader *consumer)
+{
+ /* TODO: Add support for more shader stage combinations */
+ if (consumer->info.stage != MESA_SHADER_FRAGMENT ||
+ (producer->info.stage != MESA_SHADER_VERTEX &&
+ producer->info.stage != MESA_SHADER_TESS_EVAL))
+ return false;
+
+ bool progress = false;
+
+ nir_function_impl *impl = nir_shader_get_entrypoint(producer);
+
+ struct hash_table *varying_values = _mesa_pointer_hash_table_create(NULL);
+
+ /* If we find a store in the last block of the producer we can be sure this
+ * is the only possible value for this output.
+ */
+ nir_block *last_block = nir_impl_last_block(impl);
+ nir_foreach_instr_reverse(instr, last_block) {
+ if (instr->type != nir_instr_type_intrinsic)
+ continue;
+
+ nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
+
+ if (intr->intrinsic != nir_intrinsic_store_deref)
+ continue;
+
+ nir_deref_instr *out_deref = nir_src_as_deref(intr->src[0]);
+ if (out_deref->mode != nir_var_shader_out)
+ continue;
+
+ nir_variable *out_var = nir_deref_instr_get_variable(out_deref);
+ if (!can_replace_varying(out_var))
+ continue;
+
+ if (intr->src[1].ssa->parent_instr->type == nir_instr_type_load_const) {
+ progress |= replace_constant_input(consumer, intr);
+ } else {
+ struct hash_entry *entry =
+ _mesa_hash_table_search(varying_values, intr->src[1].ssa);
+ if (entry) {
+ progress |= replace_duplicate_input(consumer,
+ (nir_variable *) entry->data,
+ intr);
+ } else {
+ nir_variable *in_var = get_matching_input_var(consumer, out_var);
+ if (in_var) {
+ _mesa_hash_table_insert(varying_values, intr->src[1].ssa,
+ in_var);
+ }
+ }
+ }
+ }
+
+ _mesa_hash_table_destroy(varying_values, NULL);
+
+ return progress;
+}
+
/* TODO: is there a better helper somewhere for sorting a list? */
+
+static void
+insert_sorted(struct exec_list *var_list, nir_variable *new_var)
+{
+ nir_foreach_variable(var, var_list) {
+ if (var->data.location > new_var->data.location) {
+ exec_node_insert_node_before(&var->node, &new_var->node);
+ return;
+ }
+ }
+ exec_list_push_tail(var_list, &new_var->node);
+}
+
+static void
+sort_varyings(struct exec_list *var_list)
+{
+ struct exec_list new_list;
+ exec_list_make_empty(&new_list);
+ nir_foreach_variable_safe(var, var_list) {
+ exec_node_remove(&var->node);
+ insert_sorted(&new_list, var);
+ }
+ exec_list_move_nodes_to(&new_list, var_list);
+}
+
+void
+nir_assign_io_var_locations(struct exec_list *var_list, unsigned *size,
+ gl_shader_stage stage)
+{
+ unsigned location = 0;
+ unsigned assigned_locations[VARYING_SLOT_TESS_MAX];
+ uint64_t processed_locs[2] = {0};
+
+ sort_varyings(var_list);
+
+ const int base = stage == MESA_SHADER_FRAGMENT ?
+ (int) FRAG_RESULT_DATA0 : (int) VARYING_SLOT_VAR0;
+
+ int UNUSED last_loc = 0;
+ bool last_partial = false;
+ nir_foreach_variable(var, var_list) {
+ const struct glsl_type *type = var->type;
+ if (nir_is_per_vertex_io(var, stage)) {
+ assert(glsl_type_is_array(type));
+ type = glsl_get_array_element(type);
+ }
+
+ unsigned var_size;
+ if (var->data.compact) {
+ /* compact variables must be arrays of scalars */
+ assert(glsl_type_is_array(type));
+ assert(glsl_type_is_scalar(glsl_get_array_element(type)));
+ unsigned start = 4 * location + var->data.location_frac;
+ unsigned end = start + glsl_get_length(type);
+ var_size = end / 4 - location;
+ last_partial = end % 4 != 0;
+ } else {
+ /* Compact variables bypass the normal varying compacting pass,
+ * which means they cannot be in the same vec4 slot as a normal
+ * variable. If part of the current slot is taken up by a compact
+ * variable, we need to go to the next one.
+ */
+ if (last_partial) {
+ location++;
+ last_partial = false;
+ }
+ var_size = glsl_count_attribute_slots(type, false);
+ }
+
+ /* Builtins don't allow component packing so we only need to worry about
+ * user defined varyings sharing the same location.
+ */
+ bool processed = false;
+ if (var->data.location >= base) {
+ unsigned glsl_location = var->data.location - base;
+
+ for (unsigned i = 0; i < var_size; i++) {
+ if (processed_locs[var->data.index] &
+ ((uint64_t)1 << (glsl_location + i)))
+ processed = true;
+ else
+ processed_locs[var->data.index] |=
+ ((uint64_t)1 << (glsl_location + i));
+ }
+ }
+
+ /* Because component packing allows varyings to share the same location
+ * we may have already have processed this location.
+ */
+ if (processed) {
+ unsigned driver_location = assigned_locations[var->data.location];
+ var->data.driver_location = driver_location;
+
+ /* An array may be packed such that is crosses multiple other arrays
+ * or variables, we need to make sure we have allocated the elements
+ * consecutively if the previously proccessed var was shorter than
+ * the current array we are processing.
+ *
+ * NOTE: The code below assumes the var list is ordered in ascending
+ * location order.
+ */
+ assert(last_loc <= var->data.location);
+ last_loc = var->data.location;
+ unsigned last_slot_location = driver_location + var_size;
+ if (last_slot_location > location) {
+ unsigned num_unallocated_slots = last_slot_location - location;
+ unsigned first_unallocated_slot = var_size - num_unallocated_slots;
+ for (unsigned i = first_unallocated_slot; i < num_unallocated_slots; i++) {
+ assigned_locations[var->data.location + i] = location;
+ location++;
+ }
+ }
+ continue;
+ }
+
+ for (unsigned i = 0; i < var_size; i++) {
+ assigned_locations[var->data.location + i] = location + i;
+ }
+
+ var->data.driver_location = location;
+ location += var_size;
+ }
+
+ if (last_partial)
+ location++;
+
+ *size = location;
+}
+