 void
 nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
+                         unsigned base_offset,
                          int (*type_size)(const struct glsl_type *))
 {
    unsigned location = 0;
 
+   /* There are 32 regular and 32 patch varyings allowed */
+   int locations[64][2];
+   for (unsigned i = 0; i < 64; i++) {
+      for (unsigned j = 0; j < 2; j++)
+         locations[i][j] = -1;
+   }
+
    nir_foreach_variable(var, var_list) {
       /*
        * UBOs have their own address spaces, so don't count them towards the
        * number of global uniforms
        */
       if ((var->data.mode == nir_var_uniform ||
            var->data.mode == nir_var_shader_storage) &&
           var->interface_type != NULL)
          continue;
 
-      var->data.driver_location = location;
-      location += type_size(var->type);
+      /* Make sure we give the same location to varyings packed with
+       * ARB_enhanced_layouts.
+       */
+      int idx = var->data.location - base_offset;
+      if (base_offset && idx >= 0) {
+         assert(idx < ARRAY_SIZE(locations));
+
+         if (locations[idx][var->data.index] == -1) {
+            var->data.driver_location = location;
+            locations[idx][var->data.index] = location;
+            location += type_size(var->type);
+         } else {
+            var->data.driver_location = locations[idx][var->data.index];
+         }
+      } else {
+         var->data.driver_location = location;
+         location += type_size(var->type);
+      }
    }
 
    *size = location;
 }
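
How the new table behaves for packed varyings: a standalone sketch in toy C, not part of the patch; the GLSL layouts in the comments are hypothetical. Two varyings that ARB_enhanced_layouts places in different components of the same location receive one shared driver_location, and the slot is only counted once toward *size.

#include <stdio.h>

int main(void)
{
   /* The dedup table from above: locations[slot][index], -1 == unassigned. */
   int locations[64][2];
   for (int i = 0; i < 64; i++)
      for (int j = 0; j < 2; j++)
         locations[i][j] = -1;

   /* Assumed inputs, e.g.:
    *   layout(location = 0, component = 0) out vec2 a;
    *   layout(location = 0, component = 2) out vec2 b;
    *   layout(location = 1)                out vec4 c;
    */
   const struct { int slot, index, size; } vars[] = {
      { 0, 0, 1 }, { 0, 0, 1 }, { 1, 0, 1 },
   };

   unsigned location = 0;
   for (int v = 0; v < 3; v++) {
      int idx = vars[v].slot;           /* var->data.location - base_offset */
      int driver_location;
      if (locations[idx][vars[v].index] == -1) {
         driver_location = location;
         locations[idx][vars[v].index] = location;
         location += vars[v].size;      /* type_size(var->type) */
      } else {
         driver_location = locations[idx][vars[v].index];
      }
      printf("varying %d -> driver_location %d\n", v, driver_location);
   }
   printf("*size = %u\n", location);    /* prints 2, not 3 */
   return 0;
}

Here a and b both end up at driver_location 0, so the pass reports two occupied slots rather than three.
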
       default:
          continue;
       }
 
-      nir_variable_mode mode = intrin->variables[0]->var->data.mode;
+      nir_variable *var = intrin->variables[0]->var;
+      nir_variable_mode mode = var->data.mode;
 
       if ((state->modes & mode) == 0)
          continue;
 
       b->cursor = nir_before_instr(instr);
 
-      switch (intrin->intrinsic) {
-      case nir_intrinsic_load_var: {
-         bool per_vertex =
-            is_per_vertex_input(state, intrin->variables[0]->var) ||
-            is_per_vertex_output(state, intrin->variables[0]->var);
-
-         nir_ssa_def *offset;
-         nir_ssa_def *vertex_index;
-
-         offset = get_io_offset(b, intrin->variables[0],
-                                per_vertex ? &vertex_index : NULL,
-                                state->type_size);
-
+      const bool per_vertex =
+         is_per_vertex_input(state, var) || is_per_vertex_output(state, var);
+
+      nir_ssa_def *offset;
+      nir_ssa_def *vertex_index;
+
+      offset = get_io_offset(b, intrin->variables[0],
+                             per_vertex ? &vertex_index : NULL,
+                             state->type_size);
+
+      switch (intrin->intrinsic) {
+      case nir_intrinsic_load_var: {
          nir_intrinsic_instr *load =
             nir_intrinsic_instr_create(state->mem_ctx,
                                        load_op(mode, per_vertex));
          load->num_components = intrin->num_components;
 
          nir_intrinsic_set_base(load,
-                                intrin->variables[0]->var->data.driver_location);
+                                var->data.driver_location);
+
+         if (mode == nir_var_shader_in || mode == nir_var_shader_out) {
+            nir_intrinsic_set_component(load, var->data.location_frac);
+         }
 
          if (load->intrinsic == nir_intrinsic_load_uniform) {
-            nir_intrinsic_set_range(load,
-                                    state->type_size(intrin->variables[0]->var->type));
+            nir_intrinsic_set_range(load, state->type_size(var->type));
          }
 
          if (per_vertex)
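
Since the pass now records the component as its own index instead of folding it into base, a backend can recover the exact starting channel of a packed input or output. A minimal consumer sketch (builds inside a Mesa tree; io_start_channel is a made-up name, and it assumes a type_size callback that counts whole vec4 slots):

#include "nir.h"

/* Hypothetical backend helper: absolute starting scalar channel of a
 * lowered shader_in/shader_out access. base comes from driver_location,
 * component from var->data.location_frac via the lowering above
 * (4 scalar channels per vec4 slot). */
static unsigned
io_start_channel(const nir_intrinsic_instr *intrin)
{
   return nir_intrinsic_base(intrin) * 4 + nir_intrinsic_component(intrin);
}

nir_intrinsic_base() and nir_intrinsic_component() are the generated accessors that pair with the nir_intrinsic_set_*() calls used in this patch.
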
       case nir_intrinsic_store_var: {
          assert(mode == nir_var_shader_out || mode == nir_var_shared);
 
-         nir_ssa_def *offset;
-         nir_ssa_def *vertex_index;
-
-         bool per_vertex =
-            is_per_vertex_output(state, intrin->variables[0]->var);
-
-         offset = get_io_offset(b, intrin->variables[0],
-                                per_vertex ? &vertex_index : NULL,
-                                state->type_size);
-
          nir_intrinsic_instr *store =
             nir_intrinsic_instr_create(state->mem_ctx,
                                        store_op(state, mode, per_vertex));
          store->num_components = intrin->num_components;
 
          nir_src_copy(&store->src[0], &intrin->src[0], store);
 
          nir_intrinsic_set_base(store,
-                                intrin->variables[0]->var->data.driver_location);
+                                var->data.driver_location);
+
+         if (mode == nir_var_shader_out) {
+            nir_intrinsic_set_component(store, var->data.location_frac);
+         }
 
          nir_intrinsic_set_write_mask(store, nir_intrinsic_write_mask(intrin));
 
          if (per_vertex)
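
The store side mirrors the loads: two outputs packed into one location produce stores with the same base and different component values, and the write mask selects channels relative to that component. A toy model in plain C (assumed values, no NIR) of how those two fields place data within a single vec4 slot:

#include <stdio.h>

int main(void)
{
   float slot[4] = { 0 };

   /* Assumed stores, as the lowering above would emit for two packed
    * outputs written as a = vec2(1, 2); b = vec2(3, 4);
    *   layout(location = 0, component = 0) out vec2 a;
    *   layout(location = 0, component = 2) out vec2 b;
    */
   const struct { unsigned component, wrmask; float val[2]; } st[] = {
      { 0, 0x3, { 1.0f, 2.0f } },
      { 2, 0x3, { 3.0f, 4.0f } },
   };

   for (int s = 0; s < 2; s++)
      for (unsigned c = 0; c < 2; c++)
         if (st[s].wrmask & (1u << c))
            slot[st[s].component + c] = st[s].val[c];

   printf("%g %g %g %g\n", slot[0], slot[1], slot[2], slot[3]); /* 1 2 3 4 */
   return 0;
}
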
       case nir_intrinsic_var_atomic_comp_swap: {
          assert(mode == nir_var_shared);
 
-         nir_ssa_def *offset;
-
-         offset = get_io_offset(b, intrin->variables[0],
-                                NULL, state->type_size);
-
          nir_intrinsic_instr *atomic =
             nir_intrinsic_instr_create(state->mem_ctx,
                                        atomic_op(intrin->intrinsic));
 
          atomic->src[0] = nir_src_for_ssa(offset);
 
-         atomic->const_index[0] =
-            intrin->variables[0]->var->data.driver_location;
+         atomic->const_index[0] = var->data.driver_location;
 
          for (unsigned i = 0;
               i < nir_intrinsic_infos[intrin->intrinsic].num_srcs;