#include "nir_deref.h"
#include "nir_vla.h"
+#include "util/set.h"
#include "util/u_math.h"
+/* Scan every instruction in the shader and build a set (allocated on
+ * mem_ctx, so it is freed with that context) of all nir_variables that are
+ * referenced through a deref chain with a "complex" use, as reported by
+ * nir_deref_instr_has_complex_use().  The splitting/shrinking passes use
+ * this set to skip variables they cannot safely rewrite.
+ */
+static struct set *
+get_complex_used_vars(nir_shader *shader, void *mem_ctx)
+{
+ struct set *complex_vars = _mesa_pointer_set_create(mem_ctx);
+
+ nir_foreach_function(function, shader) {
+ if (!function->impl)
+ continue;
+
+ nir_foreach_block(block, function->impl) {
+ nir_foreach_instr(instr, block) {
+ if (instr->type != nir_instr_type_deref)
+ continue;
+
+ nir_deref_instr *deref = nir_instr_as_deref(instr);
+
+ /* We only need to consider var derefs because
+ * nir_deref_instr_has_complex_use is recursive.
+ */
+ if (deref->deref_type == nir_deref_type_var &&
+ nir_deref_instr_has_complex_use(deref))
+ _mesa_set_add(complex_vars, deref->var);
+ }
+ }
+ }
+
+ return complex_vars;
+}
struct split_var_state {
void *mem_ctx;
};
const struct glsl_type *struct_type = glsl_without_array(type);
- if (glsl_type_is_struct(struct_type)) {
+ if (glsl_type_is_struct_or_ifc(struct_type)) {
field->num_fields = glsl_get_length(struct_type),
field->fields = ralloc_array(state->mem_ctx, struct field,
field->num_fields);
split_var_list_structs(nir_shader *shader,
nir_function_impl *impl,
struct exec_list *vars,
+ nir_variable_mode mode,
struct hash_table *var_field_map,
+ struct set **complex_vars,
void *mem_ctx)
{
struct split_var_state state = {
/* To avoid list confusion (we'll be adding things as we split variables),
* pull all of the variables we plan to split off of the list
*/
- nir_foreach_variable_safe(var, vars) {
- if (!glsl_type_is_struct(glsl_without_array(var->type)))
+ nir_foreach_variable_in_list_safe(var, vars) {
+ if (var->data.mode != mode)
+ continue;
+
+ if (!glsl_type_is_struct_or_ifc(glsl_without_array(var->type)))
+ continue;
+
+ if (*complex_vars == NULL)
+ *complex_vars = get_complex_used_vars(shader, mem_ctx);
+
+ /* We can't split a variable that's referenced with a deref that has any
+ * sort of complex usage.
+ */
+ if (_mesa_set_search(*complex_vars, var))
continue;
exec_node_remove(&var->node);
exec_list_push_tail(&split_vars, &var->node);
}
- nir_foreach_variable(var, &split_vars) {
+ nir_foreach_variable_in_list(var, &split_vars) {
state.base_var = var;
struct field *root_field = ralloc(mem_ctx, struct field);
continue;
assert(i > 0);
- assert(glsl_type_is_struct(path.path[i - 1]->type));
+ assert(glsl_type_is_struct_or_ifc(path.path[i - 1]->type));
assert(path.path[i - 1]->type ==
glsl_without_array(tail_field->type));
void *mem_ctx = ralloc_context(NULL);
struct hash_table *var_field_map =
_mesa_pointer_hash_table_create(mem_ctx);
+ struct set *complex_vars = NULL;
assert((modes & (nir_var_shader_temp | nir_var_function_temp)) == modes);
bool has_global_splits = false;
if (modes & nir_var_shader_temp) {
has_global_splits = split_var_list_structs(shader, NULL,
- &shader->globals,
- var_field_map, mem_ctx);
+ &shader->variables,
+ nir_var_shader_temp,
+ var_field_map,
+ &complex_vars,
+ mem_ctx);
}
bool progress = false;
if (modes & nir_var_function_temp) {
has_local_splits = split_var_list_structs(shader, function->impl,
&function->impl->locals,
- var_field_map, mem_ctx);
+ nir_var_function_temp,
+ var_field_map,
+ &complex_vars,
+ mem_ctx);
}
if (has_global_splits || has_local_splits) {
nir_metadata_preserve(function->impl, nir_metadata_block_index |
nir_metadata_dominance);
progress = true;
+ } else {
+ nir_metadata_preserve(function->impl, nir_metadata_all);
}
}
};
static bool
-init_var_list_array_infos(struct exec_list *vars,
+init_var_list_array_infos(nir_shader *shader,
+ struct exec_list *vars,
+ nir_variable_mode mode,
struct hash_table *var_info_map,
+ struct set **complex_vars,
void *mem_ctx)
{
bool has_array = false;
- nir_foreach_variable(var, vars) {
+ nir_foreach_variable_in_list(var, vars) {
+ if (var->data.mode != mode)
+ continue;
+
int num_levels = num_array_levels_in_array_of_vector_type(var->type);
if (num_levels <= 0)
continue;
+ if (*complex_vars == NULL)
+ *complex_vars = get_complex_used_vars(shader, mem_ctx);
+
+ /* We can't split a variable that's referenced with a deref that has any
+ * sort of complex usage.
+ */
+ if (_mesa_set_search(*complex_vars, var))
+ continue;
+
struct array_var_info *info =
rzalloc_size(mem_ctx, sizeof(*info) +
num_levels * sizeof(info->levels[0]));
const struct glsl_type *type = var->type;
for (int i = 0; i < num_levels; i++) {
- assert(glsl_get_explicit_stride(type) == 0);
info->levels[i].array_len = glsl_get_length(type);
type = glsl_get_array_element(type);
if (!(deref->mode & modes))
return NULL;
- return get_array_var_info(nir_deref_instr_get_variable(deref),
- var_info_map);
+ nir_variable *var = nir_deref_instr_get_variable(deref);
+ if (var == NULL)
+ return NULL;
+
+ return get_array_var_info(var, var_info_map);
}
static void
split_var_list_arrays(nir_shader *shader,
nir_function_impl *impl,
struct exec_list *vars,
+ nir_variable_mode mode,
struct hash_table *var_info_map,
void *mem_ctx)
{
struct exec_list split_vars;
exec_list_make_empty(&split_vars);
- nir_foreach_variable_safe(var, vars) {
+ nir_foreach_variable_in_list_safe(var, vars) {
+ if (var->data.mode != mode)
+ continue;
+
struct array_var_info *info = get_array_var_info(var, var_info_map);
if (!info)
continue;
exec_node_remove(&var->node);
exec_list_push_tail(&split_vars, &var->node);
} else {
- assert(split_type == var->type);
+ assert(split_type == glsl_get_bare_type(var->type));
/* If we're not modifying this variable, delete the info so we skip
* it faster in later passes.
*/
}
}
- nir_foreach_variable(var, &split_vars) {
+ nir_foreach_variable_in_list(var, &split_vars) {
struct array_var_info *info = get_array_var_info(var, var_info_map);
create_split_array_vars(info, 0, &info->root_split, var->name,
shader, impl, mem_ctx);
glsl_get_length(src_path->path[src_level]->type));
unsigned len = glsl_get_length(dst_path->path[dst_level]->type);
for (unsigned i = 0; i < len; i++) {
- nir_ssa_def *idx = nir_imm_int(b, i);
emit_split_copies(b, dst_info, dst_path, dst_level + 1,
- nir_build_deref_array(b, dst, idx),
+ nir_build_deref_array_imm(b, dst, i),
src_info, src_path, src_level + 1,
- nir_build_deref_array(b, src, idx));
+ nir_build_deref_array_imm(b, src, i));
}
} else {
/* Neither side is being split so we just keep going */
{
void *mem_ctx = ralloc_context(NULL);
struct hash_table *var_info_map = _mesa_pointer_hash_table_create(mem_ctx);
+ struct set *complex_vars = NULL;
assert((modes & (nir_var_shader_temp | nir_var_function_temp)) == modes);
bool has_global_array = false;
if (modes & nir_var_shader_temp) {
- has_global_array = init_var_list_array_infos(&shader->globals,
- var_info_map, mem_ctx);
+ has_global_array = init_var_list_array_infos(shader,
+ &shader->variables,
+ nir_var_shader_temp,
+ var_info_map,
+ &complex_vars,
+ mem_ctx);
}
bool has_any_array = false;
bool has_local_array = false;
if (modes & nir_var_function_temp) {
- has_local_array = init_var_list_array_infos(&function->impl->locals,
- var_info_map, mem_ctx);
+ has_local_array = init_var_list_array_infos(shader,
+ &function->impl->locals,
+ nir_var_function_temp,
+ var_info_map,
+ &complex_vars,
+ mem_ctx);
}
if (has_global_array || has_local_array) {
/* If we failed to find any arrays of arrays, bail early. */
if (!has_any_array) {
ralloc_free(mem_ctx);
+ nir_shader_preserve_all_metadata(shader);
return false;
}
bool has_global_splits = false;
if (modes & nir_var_shader_temp) {
has_global_splits = split_var_list_arrays(shader, NULL,
- &shader->globals,
+ &shader->variables,
+ nir_var_shader_temp,
var_info_map, mem_ctx);
}
if (modes & nir_var_function_temp) {
has_local_splits = split_var_list_arrays(shader, function->impl,
&function->impl->locals,
+ nir_var_function_temp,
var_info_map, mem_ctx);
}
nir_metadata_preserve(function->impl, nir_metadata_block_index |
nir_metadata_dominance);
progress = true;
+ } else {
+ nir_metadata_preserve(function->impl, nir_metadata_all);
}
}
/* True if there is a copy that isn't to/from a shrinkable vector */
bool has_external_copy;
+ bool has_complex_use;
struct set *vars_copied;
unsigned num_levels;
const struct glsl_type *type = var->type;
for (unsigned i = 0; i < num_levels; i++) {
usage->levels[i].array_len = glsl_get_length(type);
- assert(glsl_get_explicit_stride(type) == 0);
type = glsl_get_array_element(type);
}
assert(glsl_type_is_vector_or_scalar(type));
var_usage_map, add_usage_entry, mem_ctx);
}
+/* If the given deref is a var deref (in one of the requested modes) with a
+ * complex use, flag the variable's usage entry so the shrinking pass leaves
+ * that variable untouched.  Creates the usage entry on demand (allocated on
+ * mem_ctx) via get_vec_var_usage; does nothing if no entry can be made.
+ */
+static void
+mark_deref_if_complex(nir_deref_instr *deref,
+ struct hash_table *var_usage_map,
+ nir_variable_mode modes,
+ void *mem_ctx)
+{
+ if (!(deref->mode & modes))
+ return;
+
+ /* Only bother with var derefs because nir_deref_instr_has_complex_use is
+ * recursive.
+ */
+ if (deref->deref_type != nir_deref_type_var)
+ return;
+
+ if (!nir_deref_instr_has_complex_use(deref))
+ return;
+
+ struct vec_var_usage *usage =
+ get_vec_var_usage(deref->var, var_usage_map, true, mem_ctx);
+ if (!usage)
+ return;
+
+ /* Seen by shrink_vec_var_list: forces comps_kept = all_comps. */
+ usage->has_complex_use = true;
+}
+
static void
mark_deref_used(nir_deref_instr *deref,
nir_component_mask_t comps_read,
return;
nir_variable *var = nir_deref_instr_get_variable(deref);
+ if (var == NULL)
+ return;
struct vec_var_usage *usage =
get_vec_var_usage(var, var_usage_map, true, mem_ctx);
static bool
src_is_load_deref(nir_src src, nir_src deref_src)
{
- assert(src.is_ssa);
- assert(deref_src.is_ssa);
-
- if (src.ssa->parent_instr->type != nir_instr_type_intrinsic)
- return false;
-
- nir_intrinsic_instr *load = nir_instr_as_intrinsic(src.ssa->parent_instr);
- if (load->intrinsic != nir_intrinsic_load_deref)
+ nir_intrinsic_instr *load = nir_src_as_intrinsic(src);
+ if (load == NULL || load->intrinsic != nir_intrinsic_load_deref)
return false;
assert(load->src[0].is_ssa);
nir_alu_instr *src_alu = nir_instr_as_alu(src_instr);
- if (src_alu->op == nir_op_imov ||
- src_alu->op == nir_op_fmov) {
+ if (src_alu->op == nir_op_mov) {
/* If it's just a swizzle of a load from the same deref, discount any
* channels that don't move in the swizzle.
*/
comps &= ~(1u << i);
}
}
- } else if (src_alu->op == nir_op_vec2 ||
- src_alu->op == nir_op_vec3 ||
- src_alu->op == nir_op_vec4) {
+ } else if (nir_op_is_vec(src_alu->op)) {
/* If it's a vec, discount any channels that are just loads from the
* same deref put in the same spot.
*/
{
nir_foreach_block(block, impl) {
nir_foreach_instr(instr, block) {
+ if (instr->type == nir_instr_type_deref) {
+ mark_deref_if_complex(nir_instr_as_deref(instr),
+ var_usage_map, modes, mem_ctx);
+ }
+
if (instr->type != nir_instr_type_intrinsic)
continue;
static bool
shrink_vec_var_list(struct exec_list *vars,
+ nir_variable_mode mode,
struct hash_table *var_usage_map)
{
/* Initialize the components kept field of each variable. This is the
* Also, if we have a copy that to/from something we can't shrink, we need
* to leave components and array_len of any wildcards alone.
*/
- nir_foreach_variable(var, vars) {
+ nir_foreach_variable_in_list(var, vars) {
+ if (var->data.mode != mode)
+ continue;
+
struct vec_var_usage *usage =
get_vec_var_usage(var, var_usage_map, false, NULL);
if (!usage)
continue;
assert(usage->comps_kept == 0);
- if (usage->has_external_copy)
+ if (usage->has_external_copy || usage->has_complex_use)
usage->comps_kept = usage->all_comps;
else
usage->comps_kept = usage->comps_read & usage->comps_written;
struct array_level_usage *level = &usage->levels[i];
assert(level->array_len > 0);
- if (level->max_written == UINT_MAX || level->has_external_copy)
+ if (level->max_written == UINT_MAX || level->has_external_copy ||
+ usage->has_complex_use)
continue; /* Can't shrink */
unsigned max_used = MIN2(level->max_read, level->max_written);
bool fp_progress;
do {
fp_progress = false;
- nir_foreach_variable(var, vars) {
+ nir_foreach_variable_in_list(var, vars) {
+ if (var->data.mode != mode)
+ continue;
+
struct vec_var_usage *var_usage =
get_vec_var_usage(var, var_usage_map, false, NULL);
if (!var_usage || !var_usage->vars_copied)
} while (fp_progress);
bool vars_shrunk = false;
- nir_foreach_variable_safe(var, vars) {
+ nir_foreach_variable_in_list_safe(var, vars) {
+ if (var->data.mode != mode)
+ continue;
+
struct vec_var_usage *usage =
get_vec_var_usage(var, var_usage_map, false, NULL);
if (!usage)
continue;
}
+ /* If we're not dropping any components, there's no need to
+ * compact vectors.
+ */
+ if (usage->comps_kept == usage->all_comps)
+ continue;
+
if (intrin->intrinsic == nir_intrinsic_load_deref) {
b.cursor = nir_after_instr(&intrin->instr);
b.cursor = nir_before_instr(&intrin->instr);
nir_ssa_def *swizzled =
- nir_swizzle(&b, intrin->src[1].ssa, swizzle, c, false);
+ nir_swizzle(&b, intrin->src[1].ssa, swizzle, c);
/* Rewrite to use the compacted source */
nir_instr_rewrite_src(&intrin->instr, &intrin->src[1],
{
nir_shader *shader = impl->function->shader;
- if ((modes & nir_var_shader_temp) && !exec_list_is_empty(&shader->globals))
- return true;
+ if (modes & ~nir_var_function_temp) {
+ nir_foreach_variable_with_modes(var, shader,
+ modes & ~nir_var_function_temp)
+ return true;
+ }
if ((modes & nir_var_function_temp) && !exec_list_is_empty(&impl->locals))
return true;
}
if (!has_vars_to_shrink) {
ralloc_free(mem_ctx);
+ nir_shader_preserve_all_metadata(shader);
return false;
}
bool globals_shrunk = false;
- if (modes & nir_var_shader_temp)
- globals_shrunk = shrink_vec_var_list(&shader->globals, var_usage_map);
+ if (modes & nir_var_shader_temp) {
+ globals_shrunk = shrink_vec_var_list(&shader->variables,
+ nir_var_shader_temp,
+ var_usage_map);
+ }
bool progress = false;
nir_foreach_function(function, shader) {
bool locals_shrunk = false;
if (modes & nir_var_function_temp) {
locals_shrunk = shrink_vec_var_list(&function->impl->locals,
+ nir_var_function_temp,
var_usage_map);
}
nir_metadata_preserve(function->impl, nir_metadata_block_index |
nir_metadata_dominance);
progress = true;
+ } else {
+ nir_metadata_preserve(function->impl, nir_metadata_all);
}
}