radv_lower_fs_io(nir_shader *nir)
{
NIR_PASS_V(nir, lower_view_index);
- nir_assign_io_var_locations(&nir->inputs, &nir->num_inputs,
+ nir_assign_io_var_locations(nir, nir_var_shader_in, &nir->num_inputs,
MESA_SHADER_FRAGMENT);
NIR_PASS_V(nir, nir_lower_io, nir_var_shader_in, type_size_vec4, 0);
bool nir_lower_amul(nir_shader *shader,
int (*type_size)(const struct glsl_type *, bool));
-void nir_assign_io_var_locations(struct exec_list *var_list,
+void nir_assign_io_var_locations(nir_shader *shader,
+ nir_variable_mode mode,
unsigned *size,
gl_shader_stage stage);
}
/*
 * Diff hunk (not plain C): sort_varyings() is reworked to take the shader
 * and a nir_variable_mode instead of a raw exec_list.  It now walks only
 * the variables of the requested mode (nir_foreach_variable_with_modes_safe),
 * removes each from the shader's variable list, and inserts it in sorted
 * order (via insert_sorted) into the caller-provided *sorted_list, which
 * is initialized empty here.
 *
 * NOTE(review): the old tail that moved the sorted nodes back into the
 * source list (exec_list_move_nodes_to, removed line below) is dropped —
 * the caller is now responsible for splicing the sorted list back into
 * the shader; confirm the caller hunk does this (see exec_list_append in
 * nir_assign_io_var_locations).
 */
static void
-sort_varyings(struct exec_list *var_list)
+sort_varyings(nir_shader *shader, nir_variable_mode mode,
+ struct exec_list *sorted_list)
{
- struct exec_list new_list;
- exec_list_make_empty(&new_list);
- nir_foreach_variable_safe(var, var_list) {
+ exec_list_make_empty(sorted_list);
+ nir_foreach_variable_with_modes_safe(var, shader, mode) {
exec_node_remove(&var->node);
- insert_sorted(&new_list, var);
+ insert_sorted(sorted_list, var);
}
- exec_list_move_nodes_to(&new_list, var_list);
}
/*
 * Diff hunk (not plain C): nir_assign_io_var_locations() signature change,
 * matching the prototype update earlier in the patch — it now takes the
 * shader plus a nir_variable_mode instead of a bare exec_list.  The body
 * sorts the mode's variables into a local list (io_vars), iterates over
 * that list assigning driver locations, and finally splices the sorted
 * variables back into the shader's per-mode variable list via
 * nir_variable_list_for_mode()/exec_list_append().
 *
 * NOTE(review): this hunk elides the middle of the assignment loop — the
 * context between "location++;" and the exec_list_append lines (the bulk
 * of the per-variable location bookkeeping and the loop's closing brace)
 * is omitted, presumably because it is unchanged by the patch.  Do not
 * read the visible lines as a contiguous body.
 */
void
-nir_assign_io_var_locations(struct exec_list *var_list, unsigned *size,
- gl_shader_stage stage)
+nir_assign_io_var_locations(nir_shader *shader, nir_variable_mode mode,
+ unsigned *size, gl_shader_stage stage)
{
unsigned location = 0;
unsigned assigned_locations[VARYING_SLOT_TESS_MAX];
uint64_t processed_locs[2] = {0};
- sort_varyings(var_list);
+ struct exec_list io_vars;
+ sort_varyings(shader, mode, &io_vars);
int UNUSED last_loc = 0;
bool last_partial = false;
- nir_foreach_variable(var, var_list) {
+ nir_foreach_variable(var, &io_vars) {
const struct glsl_type *type = var->type;
if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
assert(glsl_type_is_array(type));
if (last_partial)
location++;
+ struct exec_list *var_list = nir_variable_list_for_mode(shader, mode);
+ exec_list_append(var_list, &io_vars);
*size = location;
}
NIR_PASS_V(nir, nir_lower_io_arrays_to_elements_no_indirects, false);
- nir_assign_io_var_locations(&nir->inputs, &nir->num_inputs, stage);
- nir_assign_io_var_locations(&nir->outputs, &nir->num_outputs, stage);
+ nir_assign_io_var_locations(nir, nir_var_shader_in, &nir->num_inputs, stage);
+ nir_assign_io_var_locations(nir, nir_var_shader_out, &nir->num_outputs, stage);
NIR_PASS_V(nir, nir_lower_system_values);
NIR_PASS_V(nir, nir_lower_frexp);
st_nir_assign_varying_locations(struct st_context *st, nir_shader *nir)
{
if (nir->info.stage == MESA_SHADER_VERTEX) {
- nir_assign_io_var_locations(&nir->outputs,
+ nir_assign_io_var_locations(nir, nir_var_shader_out,
&nir->num_outputs,
nir->info.stage);
st_nir_fixup_varying_slots(st, &nir->outputs);
} else if (nir->info.stage == MESA_SHADER_GEOMETRY ||
nir->info.stage == MESA_SHADER_TESS_CTRL ||
nir->info.stage == MESA_SHADER_TESS_EVAL) {
- nir_assign_io_var_locations(&nir->inputs,
+ nir_assign_io_var_locations(nir, nir_var_shader_in,
&nir->num_inputs,
nir->info.stage);
st_nir_fixup_varying_slots(st, &nir->inputs);
- nir_assign_io_var_locations(&nir->outputs,
+ nir_assign_io_var_locations(nir, nir_var_shader_out,
&nir->num_outputs,
nir->info.stage);
st_nir_fixup_varying_slots(st, &nir->outputs);
} else if (nir->info.stage == MESA_SHADER_FRAGMENT) {
- nir_assign_io_var_locations(&nir->inputs,
+ nir_assign_io_var_locations(nir, nir_var_shader_in,
&nir->num_inputs,
nir->info.stage);
st_nir_fixup_varying_slots(st, &nir->inputs);
- nir_assign_io_var_locations(&nir->outputs,
+ nir_assign_io_var_locations(nir, nir_var_shader_out,
&nir->num_outputs,
nir->info.stage);
} else if (nir->info.stage == MESA_SHADER_COMPUTE) {