void nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint);
-void nir_assign_var_locations(struct exec_list *var_list,
-                              unsigned *size,
+void nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
+                              unsigned base_offset,
                              int (*type_size)(const struct glsl_type *));
void nir_lower_io(nir_shader *shader,
void
nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
+                         unsigned base_offset,
                         int (*type_size)(const struct glsl_type *))
{
   unsigned location = 0;
+   /* There are 32 regular and 32 patch varyings allowed */
+   int locations[64][2];
+   for (unsigned i = 0; i < 64; i++) {
+      for (unsigned j = 0; j < 2; j++)
+         locations[i][j] = -1;
+   }
+
   nir_foreach_variable(var, var_list) {
      /*
       * UBO's have their own address spaces, so don't count them towards the
       * number of global uniforms
       */
      if ((var->data.mode == nir_var_uniform ||
           var->data.mode == nir_var_shader_storage) &&
          var->interface_type != NULL)
         continue;

-      var->data.driver_location = location;
-      location += type_size(var->type);
+      /* Make sure we give the same location to varyings packed with
+       * ARB_enhanced_layouts.
+       */
+      int idx = var->data.location - base_offset;
+      if (base_offset && idx >= 0) {
+         assert(idx < ARRAY_SIZE(locations));
+
+         if (locations[idx][var->data.index] == -1) {
+            var->data.driver_location = location;
+            locations[idx][var->data.index] = location;
+            location += type_size(var->type);
+         } else {
+            var->data.driver_location = locations[idx][var->data.index];
+         }
+      } else {
+         var->data.driver_location = location;
+         location += type_size(var->type);
+      }
   }

   *size = location;
}
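
To make the sharing behaviour concrete, here is a small standalone sketch (illustrative only, not part of the patch) of what the locations[][] table does when two ARB_enhanced_layouts varyings are packed into the same slot. The idx/index/size values stand in for var->data.location - base_offset, var->data.index and type_size(var->type):

#include <assert.h>
#include <stdio.h>

/* Two float varyings declared as
 *    layout(location = 1, component = 0) out float a;
 *    layout(location = 1, component = 2) out float b;
 * both land in slot VARYING_SLOT_VAR0 + 1, i.e. idx == 1, so they must end
 * up with the same driver_location. */
int
main(void)
{
   int locations[64][2];
   for (int i = 0; i < 64; i++)
      for (int j = 0; j < 2; j++)
         locations[i][j] = -1;

   struct { int idx, index, size; } vars[2] = { { 1, 0, 1 }, { 1, 0, 1 } };
   unsigned location = 0;

   for (int v = 0; v < 2; v++) {
      int driver_location;
      if (locations[vars[v].idx][vars[v].index] == -1) {
         /* First variable in the slot: take a fresh location and record it. */
         driver_location = (int)location;
         locations[vars[v].idx][vars[v].index] = (int)location;
         location += vars[v].size;
      } else {
         /* Packed companion: reuse the recorded location, don't grow *size. */
         driver_location = locations[vars[v].idx][vars[v].index];
      }
      printf("var %d: driver_location = %d\n", v, driver_location);
   }

   assert(location == 1);   /* one slot consumed, not two */
   return 0;
}
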
void
brw_nir_lower_fs_inputs(nir_shader *nir)
{
-   nir_assign_var_locations(&nir->inputs, &nir->num_inputs, type_size_scalar);
+   nir_assign_var_locations(&nir->inputs, &nir->num_inputs, VARYING_SLOT_VAR0,
+                            type_size_scalar);
   nir_lower_io(nir, nir_var_shader_in, type_size_scalar);
}
{
   if (is_scalar) {
      nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
+                              VARYING_SLOT_VAR0,
                               type_size_scalar);
      nir_lower_io(nir, nir_var_shader_out, type_size_scalar);
   } else {
brw_nir_lower_fs_outputs(nir_shader *nir)
{
   nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
-                           type_size_scalar);
+                           FRAG_RESULT_DATA0, type_size_scalar);
   nir_lower_io(nir, nir_var_shader_out, type_size_scalar);
}
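
One consequence of passing FRAG_RESULT_DATA0 here (and VARYING_SLOT_VAR0 above): built-in outputs such as FRAG_RESULT_DEPTH sit below the base, so their idx goes negative and they keep the old sequential assignment, while only the user-declared outputs at FRAG_RESULT_DATA0 and up can share a driver_location through locations[idx][var->data.index].
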
void
brw_nir_lower_cs_shared(nir_shader *nir)
{
-   nir_assign_var_locations(&nir->shared, &nir->num_shared,
+   nir_assign_var_locations(&nir->shared, &nir->num_shared, 0,
                            type_size_scalar_bytes);
   nir_lower_io(nir, nir_var_shared, type_size_scalar_bytes);
}
brw_nir_lower_uniforms(nir_shader *nir, bool is_scalar)
{
   if (is_scalar) {
-      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
+      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms, 0,
                               type_size_scalar_bytes);
      nir_lower_io(nir, nir_var_uniform, type_size_scalar_bytes);
   } else {
-      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
+      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms, 0,
                               type_size_vec4_bytes);
      nir_lower_io(nir, nir_var_uniform, type_size_vec4_bytes);
   }
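
The compute-shared and uniform lists pass 0 as the base, so the base_offset check in nir_assign_var_locations never fires for them and every variable still takes the plain location += type_size(var->type) path; only the lists that pass a real slot base (shader inputs and outputs) opt in to the sharing table.
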
      sort_varyings(&nir->outputs);
      nir_assign_var_locations(&nir->outputs,
                               &nir->num_outputs,
+                              VARYING_SLOT_VAR0,
                               st_glsl_type_size);
      st_nir_fixup_varying_slots(st, &nir->outputs);
   } else if (nir->stage == MESA_SHADER_FRAGMENT) {
      sort_varyings(&nir->inputs);
      nir_assign_var_locations(&nir->inputs,
                               &nir->num_inputs,
+                              VARYING_SLOT_VAR0,
                               st_glsl_type_size);
      st_nir_fixup_varying_slots(st, &nir->inputs);
      nir_assign_var_locations(&nir->outputs,
                               &nir->num_outputs,
+                              FRAG_RESULT_DATA0,
                               st_glsl_type_size);
   } else {
      unreachable("invalid shader type for tgsi bypass\n");