struct lower_io_state {
nir_builder builder;
- void *mem_ctx;
int (*type_size)(const struct glsl_type *type);
nir_variable_mode modes;
nir_lower_io_options options;
nir_foreach_variable(var, var_list) {
/*
- * UBO's have their own address spaces, so don't count them towards the
+ * UBOs have their own address spaces, so don't count them towards the
* number of global uniforms
*/
if ((var->data.mode == nir_var_uniform || var->data.mode == nir_var_shader_storage) &&
* (such as geometry shader inputs).
*/
bool
-nir_is_per_vertex_io(nir_variable *var, gl_shader_stage stage)
+nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage)
{
if (var->data.patch || !glsl_type_is_array(var->type))
return false;
static nir_ssa_def *
get_io_offset(nir_builder *b, nir_deref_var *deref,
nir_ssa_def **vertex_index,
- int (*type_size)(const struct glsl_type *))
+ int (*type_size)(const struct glsl_type *),
+ unsigned *component)
{
nir_deref *tail = &deref->deref;
*vertex_index = vtx;
}
+ if (deref->var->data.compact) {
+ assert(tail->child->deref_type == nir_deref_type_array);
+ assert(glsl_type_is_scalar(glsl_without_array(deref->var->type)));
+ nir_deref_array *deref_array = nir_deref_as_array(tail->child);
+ /* We always lower indirect dereferences for "compact" array vars. */
+ assert(deref_array->deref_array_type == nir_deref_array_type_direct);
+
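+ /* Compact variables (e.g. gl_ClipDistance) are scalar arrays packed four
+  * components to a vec4 slot, so split the element index into a whole-slot
+  * offset and the component within that slot.
+  */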
+ const unsigned total_offset = *component + deref_array->base_offset;
+ const unsigned slot_offset = total_offset / 4;
+ *component = total_offset % 4;
+ return nir_imm_int(b, type_size(glsl_vec4_type()) * slot_offset);
+ }
+
/* Just emit code and let constant-folding go to town */
nir_ssa_def *offset = nir_imm_int(b, 0);
static nir_intrinsic_instr *
lower_load(nir_intrinsic_instr *intrin, struct lower_io_state *state,
- nir_ssa_def *vertex_index, nir_ssa_def *offset)
+ nir_ssa_def *vertex_index, nir_ssa_def *offset,
+ unsigned component)
{
const nir_shader *nir = state->builder.shader;
nir_variable *var = intrin->variables[0]->var;
unreachable("Unknown variable mode");
}
- nir_intrinsic_instr *load = nir_intrinsic_instr_create(state->mem_ctx, op);
+ nir_intrinsic_instr *load =
+ nir_intrinsic_instr_create(state->builder.shader, op);
load->num_components = intrin->num_components;
nir_intrinsic_set_base(load, var->data.driver_location);
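+ /* Only shader input/output intrinsics carry a sub-vec4 component index;
+  * other modes are addressed entirely through base + offset.
+  */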
if (mode == nir_var_shader_in || mode == nir_var_shader_out)
- nir_intrinsic_set_component(load, var->data.location_frac);
+ nir_intrinsic_set_component(load, component);
if (load->intrinsic == nir_intrinsic_load_uniform)
nir_intrinsic_set_range(load, state->type_size(var->type));
static nir_intrinsic_instr *
lower_store(nir_intrinsic_instr *intrin, struct lower_io_state *state,
- nir_ssa_def *vertex_index, nir_ssa_def *offset)
+ nir_ssa_def *vertex_index, nir_ssa_def *offset,
+ unsigned component)
{
nir_variable *var = intrin->variables[0]->var;
nir_variable_mode mode = var->data.mode;
nir_intrinsic_store_output;
}
- nir_intrinsic_instr *store = nir_intrinsic_instr_create(state->mem_ctx, op);
+ nir_intrinsic_instr *store =
+ nir_intrinsic_instr_create(state->builder.shader, op);
store->num_components = intrin->num_components;
nir_src_copy(&store->src[0], &intrin->src[0], store);
nir_intrinsic_set_base(store, var->data.driver_location);
if (mode == nir_var_shader_out)
- nir_intrinsic_set_component(store, var->data.location_frac);
+ nir_intrinsic_set_component(store, component);
nir_intrinsic_set_write_mask(store, nir_intrinsic_write_mask(intrin));
}
nir_intrinsic_instr *atomic =
- nir_intrinsic_instr_create(state->mem_ctx, op);
+ nir_intrinsic_instr_create(state->builder.shader, op);
nir_intrinsic_set_base(atomic, var->data.driver_location);
static nir_intrinsic_instr *
lower_interpolate_at(nir_intrinsic_instr *intrin, struct lower_io_state *state,
- nir_ssa_def *offset)
+ nir_ssa_def *offset, unsigned component)
{
nir_variable *var = intrin->variables[0]->var;
/* Ignore interpolateAt() for flat variables - flat is flat. */
if (var->data.interpolation == INTERP_MODE_FLAT)
- return lower_load(intrin, state, NULL, offset);
+ return lower_load(intrin, state, NULL, offset, component);
nir_intrinsic_op bary_op;
switch (intrin->intrinsic) {
}
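+ /* interpolateAt*() lowers to a pair of intrinsics: one that produces the
+  * barycentric coordinates for the requested sample/offset location, and a
+  * load_interpolated_input that consumes them along with the I/O offset.
+  */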
nir_intrinsic_instr *bary_setup =
- nir_intrinsic_instr_create(state->mem_ctx, bary_op);
+ nir_intrinsic_instr_create(state->builder.shader, bary_op);
nir_ssa_dest_init(&bary_setup->instr, &bary_setup->dest, 2, 32, NULL);
nir_intrinsic_set_interp_mode(bary_setup, var->data.interpolation);
nir_builder_instr_insert(&state->builder, &bary_setup->instr);
nir_intrinsic_instr *load =
- nir_intrinsic_instr_create(state->mem_ctx,
+ nir_intrinsic_instr_create(state->builder.shader,
nir_intrinsic_load_interpolated_input);
load->num_components = intrin->num_components;
nir_intrinsic_set_base(load, var->data.driver_location);
- nir_intrinsic_set_component(load, var->data.location_frac);
+ nir_intrinsic_set_component(load, component);
load->src[0] = nir_src_for_ssa(&bary_setup->dest.ssa);
load->src[1] = nir_src_for_ssa(offset);
{
nir_builder *b = &state->builder;
const nir_shader_compiler_options *options = b->shader->options;
+ bool progress = false;
nir_foreach_instr_safe(instr, block) {
if (instr->type != nir_instr_type_intrinsic)
nir_ssa_def *offset;
nir_ssa_def *vertex_index = NULL;
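+ /* Start from the variable's base component; for compact variables,
+  * get_io_offset() folds the array index into this and the slot offset.
+  */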
+ unsigned component_offset = var->data.location_frac;
offset = get_io_offset(b, intrin->variables[0],
per_vertex ? &vertex_index : NULL,
- state->type_size);
+ state->type_size, &component_offset);
nir_intrinsic_instr *replacement;
switch (intrin->intrinsic) {
case nir_intrinsic_load_var:
- replacement = lower_load(intrin, state, vertex_index, offset);
+ replacement = lower_load(intrin, state, vertex_index, offset,
+ component_offset);
break;
case nir_intrinsic_store_var:
- replacement = lower_store(intrin, state, vertex_index, offset);
+ replacement = lower_store(intrin, state, vertex_index, offset,
+ component_offset);
break;
case nir_intrinsic_var_atomic_add:
case nir_intrinsic_interp_var_at_sample:
case nir_intrinsic_interp_var_at_offset:
assert(vertex_index == NULL);
- replacement = lower_interpolate_at(intrin, state, offset);
+ replacement = lower_interpolate_at(intrin, state, offset,
+ component_offset);
break;
default:
nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
nir_src_for_ssa(&replacement->dest.ssa));
} else {
- nir_dest_copy(&replacement->dest, &intrin->dest, state->mem_ctx);
+ nir_dest_copy(&replacement->dest, &intrin->dest, &intrin->instr);
}
}
nir_instr_insert_before(&intrin->instr, &replacement->instr);
nir_instr_remove(&intrin->instr);
+ progress = true;
}
- return true;
+ return progress;
}
-static void
+static bool
nir_lower_io_impl(nir_function_impl *impl,
nir_variable_mode modes,
int (*type_size)(const struct glsl_type *),
nir_lower_io_options options)
{
struct lower_io_state state;
+ bool progress = false;
nir_builder_init(&state.builder, impl);
- state.mem_ctx = ralloc_parent(impl);
state.modes = modes;
state.type_size = type_size;
state.options = options;
nir_foreach_block(block, impl) {
- nir_lower_io_block(block, &state);
+ progress |= nir_lower_io_block(block, &state);
}
nir_metadata_preserve(impl, nir_metadata_block_index |
nir_metadata_dominance);
+ return progress;
}
-void
+bool
nir_lower_io(nir_shader *shader, nir_variable_mode modes,
int (*type_size)(const struct glsl_type *),
nir_lower_io_options options)
{
+ bool progress = false;
+
nir_foreach_function(function, shader) {
if (function->impl) {
- nir_lower_io_impl(function->impl, modes, type_size, options);
+ progress |= nir_lower_io_impl(function->impl, modes,
+ type_size, options);
}
}
+
+ return progress;
}
/**