#include "nir.h"
#include "nir_builder.h"
+#include "nir_deref.h"
#include "util/bitscan.h"
+#include "util/u_dynarray.h"
+
+static const bool debug = false;
+
/**
* Variable-based copy propagation
* 1) Copy-propagation on variables that have indirect access. This includes
* propagating from indirect stores into indirect loads.
*
- * 2) Dead code elimination of store_var and copy_var intrinsics based on
- * killed destination values.
- *
- * 3) Removal of redundant load_var intrinsics. We can't trust regular CSE
+ * 2) Removal of redundant load_deref intrinsics. We can't trust regular CSE
* to do this because it isn't aware of variable writes that may alias the
* value and make the former load invalid.
*
- * Unfortunately, properly handling all of those cases makes this path rather
- * complex. In order to avoid additional complexity, this pass is entirely
- * block-local. If we tried to make it global, the data-flow analysis would
- * rapidly get out of hand. Fortunately, for anything that is only ever
- * accessed directly, we get SSA based copy-propagation which is extremely
- * powerful so this isn't that great a loss.
+ * This pass uses an intermediate solution between being local / "per-block"
+ * and a complete data-flow analysis. It follows the control flow graph and
+ * propagates the available copy information forward, invalidating data at
+ * each cf_node.
+ *
+ * Removal of dead writes to variables is handled by another pass.
*/
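+
+/* A rough sketch of case 2, in simplified (not literal) NIR:
+ *
+ *    ssa_1 = deref_var &v
+ *    ssa_2 = load_deref ssa_1
+ *    ...                          (no write that may alias v)
+ *    ssa_3 = load_deref ssa_1
+ *
+ * The second load is redundant, so uses of ssa_3 can be rewritten to ssa_2.
+ * Regular CSE can't do this on its own because it doesn't know which
+ * intervening writes may alias v.
+ */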
+struct vars_written {
+ nir_variable_mode modes;
+
+ /* Key is the deref; value is a uintptr_t holding the write mask. */
+ struct hash_table *derefs;
+};
+
struct value {
bool is_ssa;
union {
- nir_ssa_def *ssa[4];
- nir_deref_var *deref;
+ struct {
+ nir_ssa_def *def[NIR_MAX_VEC_COMPONENTS];
+ uint8_t component[NIR_MAX_VEC_COMPONENTS];
+ } ssa;
+ nir_deref_instr *deref;
};
};
-struct copy_entry {
- struct list_head link;
-
- nir_instr *store_instr[4];
+static void
+value_set_ssa_components(struct value *value, nir_ssa_def *def,
+ unsigned num_components)
+{
+ if (!value->is_ssa)
+ memset(&value->ssa, 0, sizeof(value->ssa));
+ value->is_ssa = true;
+ for (unsigned i = 0; i < num_components; i++) {
+ value->ssa.def[i] = def;
+ value->ssa.component[i] = i;
+ }
+}
- unsigned comps_may_be_read;
+struct copy_entry {
struct value src;
- nir_deref_var *dst;
+ nir_deref_instr *dst;
};
struct copy_prop_var_state {
- nir_shader *shader;
+ nir_function_impl *impl;
void *mem_ctx;
+ void *lin_ctx;
- struct list_head copies;
-
- /* We're going to be allocating and deleting a lot of copy entries so we'll
- * keep a free list to avoid thrashing malloc too badly.
+ /* Maps nodes to vars_written. Used to invalidate copy entries when
+ * visiting each node.
*/
- struct list_head copy_free_list;
+ struct hash_table *vars_written_map;
bool progress;
};
-static struct copy_entry *
-copy_entry_create(struct copy_prop_var_state *state,
- nir_deref_var *dst_deref)
+static bool
+value_equals_store_src(struct value *value, nir_intrinsic_instr *intrin)
{
- struct copy_entry *entry;
- if (!list_empty(&state->copy_free_list)) {
- struct list_head *item = state->copy_free_list.next;
- list_del(item);
- entry = LIST_ENTRY(struct copy_entry, item, link);
- memset(entry, 0, sizeof(*entry));
- } else {
- entry = rzalloc(state->mem_ctx, struct copy_entry);
+ assert(intrin->intrinsic == nir_intrinsic_store_deref);
+ uintptr_t write_mask = nir_intrinsic_write_mask(intrin);
+
+ for (unsigned i = 0; i < intrin->num_components; i++) {
+ if ((write_mask & (1 << i)) &&
+ (value->ssa.def[i] != intrin->src[1].ssa ||
+ value->ssa.component[i] != i))
+ return false;
}
- entry->dst = dst_deref;
- list_add(&entry->link, &state->copies);
-
- return entry;
+ return true;
}
-static void
-copy_entry_remove(struct copy_prop_var_state *state, struct copy_entry *entry)
+static struct vars_written *
+create_vars_written(struct copy_prop_var_state *state)
{
- list_del(&entry->link);
- list_add(&entry->link, &state->copy_free_list);
+ struct vars_written *written =
+ linear_zalloc_child(state->lin_ctx, sizeof(struct vars_written));
+ written->derefs = _mesa_pointer_hash_table_create(state->mem_ctx);
+ return written;
}
-enum deref_compare_result {
- derefs_equal_bit = (1 << 0),
- derefs_may_alias_bit = (1 << 1),
- derefs_a_contains_b_bit = (1 << 2),
- derefs_b_contains_a_bit = (1 << 3),
-};
-
-/** Returns true if the storage referrenced to by deref completely contains
- * the storage referenced by sub.
- *
- * NOTE: This is fairly general and could be moved to core NIR if someone else
- * ever needs it.
- */
-static enum deref_compare_result
-compare_derefs(nir_deref_var *a, nir_deref_var *b)
+static void
+gather_vars_written(struct copy_prop_var_state *state,
+ struct vars_written *written,
+ nir_cf_node *cf_node)
{
- if (a->var != b->var)
- return 0;
+ struct vars_written *new_written = NULL;
+
+ switch (cf_node->type) {
+ case nir_cf_node_function: {
+ nir_function_impl *impl = nir_cf_node_as_function(cf_node);
+ foreach_list_typed_safe(nir_cf_node, cf_node, node, &impl->body)
+ gather_vars_written(state, NULL, cf_node);
+ break;
+ }
- /* Start off assuming they fully compare. We ignore equality for now. In
- * the end, we'll determine that by containment.
- */
- enum deref_compare_result result = derefs_may_alias_bit |
- derefs_a_contains_b_bit |
- derefs_b_contains_a_bit;
-
- nir_deref *a_tail = &a->deref;
- nir_deref *b_tail = &b->deref;
- while (a_tail->child && b_tail->child) {
- a_tail = a_tail->child;
- b_tail = b_tail->child;
-
- assert(a_tail->deref_type == b_tail->deref_type);
- switch (a_tail->deref_type) {
- case nir_deref_type_array: {
- nir_deref_array *a_arr = nir_deref_as_array(a_tail);
- nir_deref_array *b_arr = nir_deref_as_array(b_tail);
-
- if (a_arr->deref_array_type == nir_deref_array_type_direct &&
- b_arr->deref_array_type == nir_deref_array_type_direct) {
- /* If they're both direct and have different offsets, they
- * don't even alias much less anything else.
- */
- if (a_arr->base_offset != b_arr->base_offset)
- return 0;
- } else if (a_arr->deref_array_type == nir_deref_array_type_wildcard) {
- if (b_arr->deref_array_type != nir_deref_array_type_wildcard)
- result &= ~derefs_b_contains_a_bit;
- } else if (b_arr->deref_array_type == nir_deref_array_type_wildcard) {
- if (a_arr->deref_array_type != nir_deref_array_type_wildcard)
- result &= ~derefs_a_contains_b_bit;
- } else if (a_arr->deref_array_type == nir_deref_array_type_indirect &&
- b_arr->deref_array_type == nir_deref_array_type_indirect) {
- assert(a_arr->indirect.is_ssa && b_arr->indirect.is_ssa);
- if (a_arr->indirect.ssa == b_arr->indirect.ssa) {
- /* If they're different constant offsets from the same indirect
- * then they don't alias at all.
- */
- if (a_arr->base_offset != b_arr->base_offset)
- return 0;
- /* Otherwise the indirect and base both match */
- } else {
- /* If they're have different indirect offsets then we can't
- * prove anything about containment.
- */
- result &= ~(derefs_a_contains_b_bit | derefs_b_contains_a_bit);
- }
- } else {
- /* In this case, one is indirect and the other direct so we can't
- * prove anything about containment.
- */
- result &= ~(derefs_a_contains_b_bit | derefs_b_contains_a_bit);
- }
+ case nir_cf_node_block: {
+ if (!written)
break;
- }
- case nir_deref_type_struct: {
- nir_deref_struct *a_struct = nir_deref_as_struct(a_tail);
- nir_deref_struct *b_struct = nir_deref_as_struct(b_tail);
+ nir_block *block = nir_cf_node_as_block(cf_node);
+ nir_foreach_instr(instr, block) {
+ if (instr->type == nir_instr_type_call) {
+ written->modes |= nir_var_shader_out |
+ nir_var_shader_temp |
+ nir_var_function_temp |
+ nir_var_mem_ssbo |
+ nir_var_mem_shared;
+ continue;
+ }
- /* If they're different struct members, they don't even alias */
- if (a_struct->index != b_struct->index)
- return 0;
- break;
- }
+ if (instr->type != nir_instr_type_intrinsic)
+ continue;
- default:
- unreachable("Invalid deref type");
+ nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+ switch (intrin->intrinsic) {
+ case nir_intrinsic_barrier:
+ case nir_intrinsic_memory_barrier:
+ written->modes |= nir_var_shader_out |
+ nir_var_mem_ssbo |
+ nir_var_mem_shared;
+ break;
+
+ case nir_intrinsic_scoped_memory_barrier:
+ if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_ACQUIRE)
+ written->modes |= nir_intrinsic_memory_modes(intrin);
+ break;
+
+ case nir_intrinsic_emit_vertex:
+ case nir_intrinsic_emit_vertex_with_counter:
+ written->modes = nir_var_shader_out;
+ break;
+
+ case nir_intrinsic_deref_atomic_add:
+ case nir_intrinsic_deref_atomic_imin:
+ case nir_intrinsic_deref_atomic_umin:
+ case nir_intrinsic_deref_atomic_imax:
+ case nir_intrinsic_deref_atomic_umax:
+ case nir_intrinsic_deref_atomic_and:
+ case nir_intrinsic_deref_atomic_or:
+ case nir_intrinsic_deref_atomic_xor:
+ case nir_intrinsic_deref_atomic_exchange:
+ case nir_intrinsic_deref_atomic_comp_swap:
+ case nir_intrinsic_store_deref:
+ case nir_intrinsic_copy_deref: {
+ /* The destination in store_deref, copy_deref and the atomics is src[0]. */
+ nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
+
+ uintptr_t mask = intrin->intrinsic == nir_intrinsic_store_deref ?
+ nir_intrinsic_write_mask(intrin) : (1 << glsl_get_vector_elements(dst->type)) - 1;
+
+ struct hash_entry *ht_entry = _mesa_hash_table_search(written->derefs, dst);
+ if (ht_entry)
+ ht_entry->data = (void *)(mask | (uintptr_t)ht_entry->data);
+ else
+ _mesa_hash_table_insert(written->derefs, dst, (void *)mask);
+
+ break;
+ }
+
+ default:
+ break;
+ }
}
+
+ break;
}
- /* If a is longer than b, then it can't contain b */
- if (a_tail->child)
- result &= ~derefs_a_contains_b_bit;
- if (b_tail->child)
- result &= ~derefs_b_contains_a_bit;
+ case nir_cf_node_if: {
+ nir_if *if_stmt = nir_cf_node_as_if(cf_node);
- /* If a contains b and b contains a they must be equal. */
- if ((result & derefs_a_contains_b_bit) && (result & derefs_b_contains_a_bit))
- result |= derefs_equal_bit;
+ new_written = create_vars_written(state);
- return result;
-}
+ foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->then_list)
+ gather_vars_written(state, new_written, cf_node);
-static void
-remove_dead_writes(struct copy_prop_var_state *state,
- struct copy_entry *entry, unsigned write_mask)
-{
- /* We're overwriting another entry. Some of it's components may not
- * have been read yet and, if that's the case, we may be able to delete
- * some instructions but we have to be careful.
- */
- unsigned dead_comps = write_mask & ~entry->comps_may_be_read;
+ foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->else_list)
+ gather_vars_written(state, new_written, cf_node);
- for (unsigned mask = dead_comps; mask;) {
- unsigned i = u_bit_scan(&mask);
+ break;
+ }
- nir_instr *instr = entry->store_instr[i];
+ case nir_cf_node_loop: {
+ nir_loop *loop = nir_cf_node_as_loop(cf_node);
- /* We may have already deleted it on a previous iteration */
- if (!instr)
- continue;
+ new_written = create_vars_written(state);
- /* See if this instr is used anywhere that it's not dead */
- bool keep = false;
- for (unsigned j = 0; j < 4; j++) {
- if (entry->store_instr[j] == instr) {
- if (dead_comps & (1 << j)) {
- entry->store_instr[j] = NULL;
+ foreach_list_typed_safe(nir_cf_node, cf_node, node, &loop->body)
+ gather_vars_written(state, new_written, cf_node);
+
+ break;
+ }
+
+ default:
+ unreachable("Invalid CF node type");
+ }
+
+ if (new_written) {
+ /* Merge new information into the parent control flow node. */
+ if (written) {
+ written->modes |= new_written->modes;
+ hash_table_foreach(new_written->derefs, new_entry) {
+ struct hash_entry *old_entry =
+ _mesa_hash_table_search_pre_hashed(written->derefs, new_entry->hash,
+ new_entry->key);
+ if (old_entry) {
+ nir_component_mask_t merged = (uintptr_t) new_entry->data |
+ (uintptr_t) old_entry->data;
+ old_entry->data = (void *) ((uintptr_t) merged);
} else {
- keep = true;
+ _mesa_hash_table_insert_pre_hashed(written->derefs, new_entry->hash,
+ new_entry->key, new_entry->data);
}
}
}
-
- if (!keep) {
- nir_instr_remove(instr);
- state->progress = true;
- }
+ _mesa_hash_table_insert(state->vars_written_map, cf_node, new_written);
}
}
static struct copy_entry *
-lookup_entry_for_deref(struct copy_prop_var_state *state,
- nir_deref_var *deref,
- enum deref_compare_result allowed_comparisons)
+copy_entry_create(struct util_dynarray *copies,
+ nir_deref_instr *dst_deref)
{
- list_for_each_entry(struct copy_entry, iter, &state->copies, link) {
- if (compare_derefs(iter->dst, deref) & allowed_comparisons)
- return iter;
- }
-
- return NULL;
+ struct copy_entry new_entry = {
+ .dst = dst_deref,
+ };
+ util_dynarray_append(copies, struct copy_entry, new_entry);
+ return util_dynarray_top_ptr(copies, struct copy_entry);
}
+/* Remove a copy entry by swapping it with the last element and reducing the
+ * size. If used inside an iteration on copies, it must be a reverse
+ * (backwards) iteration. It is safe to use in those cases because the swap
+ * will not affect the rest of the iteration.
+ */
static void
-mark_aliased_entries_as_read(struct copy_prop_var_state *state,
- nir_deref_var *deref, unsigned components)
+copy_entry_remove(struct util_dynarray *copies,
+ struct copy_entry *entry)
{
- list_for_each_entry(struct copy_entry, iter, &state->copies, link) {
- if (compare_derefs(iter->dst, deref) & derefs_may_alias_bit)
- iter->comps_may_be_read |= components;
- }
+ /* This also works when removing the last element since pop doesn't shrink
+ * the memory used by the array, so the swap is redundant but not invalid.
+ */
+ *entry = util_dynarray_pop(copies, struct copy_entry);
+}
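+
+/* A minimal usage sketch (hypothetical caller; entry_is_stale() is made up):
+ *
+ *    util_dynarray_foreach_reverse(copies, struct copy_entry, iter) {
+ *       if (entry_is_stale(iter))
+ *          copy_entry_remove(copies, iter);
+ *    }
+ *
+ * Iterating backwards means the element swapped into the removed slot has
+ * already been visited.
+ */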
+
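+/* True for an array deref whose parent is a vector, e.g. v[i] where v is a
+ * vec4. Indexing into an actual array type does not count.
+ */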
+static bool
+is_array_deref_of_vector(nir_deref_instr *deref)
+{
+ if (deref->deref_type != nir_deref_type_array)
+ return false;
+ nir_deref_instr *parent = nir_deref_instr_parent(deref);
+ return glsl_type_is_vector(parent->type);
}
static struct copy_entry *
-get_entry_and_kill_aliases(struct copy_prop_var_state *state,
- nir_deref_var *deref,
- unsigned write_mask)
+lookup_entry_for_deref(struct util_dynarray *copies,
+ nir_deref_instr *deref,
+ nir_deref_compare_result allowed_comparisons)
{
struct copy_entry *entry = NULL;
- list_for_each_entry_safe(struct copy_entry, iter, &state->copies, link) {
+ util_dynarray_foreach(copies, struct copy_entry, iter) {
+ nir_deref_compare_result result = nir_compare_derefs(iter->dst, deref);
+ if (result & allowed_comparisons) {
+ entry = iter;
+ if (result & nir_derefs_equal_bit)
+ break;
+ /* Keep looking in case we have an equal match later in the array. */
+ }
+ }
+ return entry;
+}
+
+static struct copy_entry *
+lookup_entry_and_kill_aliases(struct util_dynarray *copies,
+ nir_deref_instr *deref,
+ unsigned write_mask)
+{
+ /* TODO: Take into account the write_mask. */
+
+ nir_deref_instr *dst_match = NULL;
+ util_dynarray_foreach_reverse(copies, struct copy_entry, iter) {
if (!iter->src.is_ssa) {
/* If this write aliases the source of some entry, get rid of it */
- if (compare_derefs(iter->src.deref, deref) & derefs_may_alias_bit) {
- copy_entry_remove(state, iter);
+ if (nir_compare_derefs(iter->src.deref, deref) & nir_derefs_may_alias_bit) {
+ copy_entry_remove(copies, iter);
continue;
}
}
- enum deref_compare_result comp = compare_derefs(iter->dst, deref);
- /* This is a store operation. If we completely overwrite some value, we
- * want to delete any dead writes that may be present.
- */
- if (comp & derefs_b_contains_a_bit)
- remove_dead_writes(state, iter, write_mask);
+ nir_deref_compare_result comp = nir_compare_derefs(iter->dst, deref);
- if (comp & derefs_equal_bit) {
- assert(entry == NULL);
- entry = iter;
- } else if (comp & derefs_may_alias_bit) {
- copy_entry_remove(state, iter);
+ if (comp & nir_derefs_equal_bit) {
+ /* Removing entries invalidates previous iter pointers, so we'll
+ * collect the matching entry later. Just make sure it is unique.
+ */
+ assert(!dst_match);
+ dst_match = iter->dst;
+ } else if (comp & nir_derefs_may_alias_bit) {
+ copy_entry_remove(copies, iter);
+ }
+ }
+
+ struct copy_entry *entry = NULL;
+ if (dst_match) {
+ util_dynarray_foreach(copies, struct copy_entry, iter) {
+ if (iter->dst == dst_match) {
+ entry = iter;
+ break;
+ }
}
+ assert(entry);
}
+ return entry;
+}
+
+static void
+kill_aliases(struct util_dynarray *copies,
+ nir_deref_instr *deref,
+ unsigned write_mask)
+{
+ /* TODO: Take into account the write_mask. */
+
+ struct copy_entry *entry =
+ lookup_entry_and_kill_aliases(copies, deref, write_mask);
+ if (entry)
+ copy_entry_remove(copies, entry);
+}
+
+static struct copy_entry *
+get_entry_and_kill_aliases(struct util_dynarray *copies,
+ nir_deref_instr *deref,
+ unsigned write_mask)
+{
+ /* TODO: Take into account the write_mask. */
+
+ struct copy_entry *entry =
+ lookup_entry_and_kill_aliases(copies, deref, write_mask);
if (entry == NULL)
- entry = copy_entry_create(state, deref);
+ entry = copy_entry_create(copies, deref);
return entry;
}
static void
-apply_barrier_for_modes(struct copy_prop_var_state *state,
+apply_barrier_for_modes(struct util_dynarray *copies,
nir_variable_mode modes)
{
- list_for_each_entry_safe(struct copy_entry, iter, &state->copies, link) {
- if ((iter->dst->var->data.mode & modes) ||
- (!iter->src.is_ssa && (iter->src.deref->var->data.mode & modes)))
- copy_entry_remove(state, iter);
+ util_dynarray_foreach_reverse(copies, struct copy_entry, iter) {
+ if ((iter->dst->mode & modes) ||
+ (!iter->src.is_ssa && (iter->src.deref->mode & modes)))
+ copy_entry_remove(copies, iter);
}
}
static void
-store_to_entry(struct copy_prop_var_state *state, struct copy_entry *entry,
- const struct value *value, unsigned write_mask,
- nir_instr *store_instr)
+value_set_from_value(struct value *value, const struct value *from,
+ unsigned base_index, unsigned write_mask)
{
- entry->comps_may_be_read &= ~write_mask;
- if (value->is_ssa) {
- entry->src.is_ssa = true;
+ /* We can't have non-zero indexes with non-trivial write masks */
+ assert(base_index == 0 || write_mask == 1);
+
+ if (from->is_ssa) {
+ /* Clear value if it was being used as non-SSA. */
+ if (!value->is_ssa)
+ memset(&value->ssa, 0, sizeof(value->ssa));
+ value->is_ssa = true;
/* Only overwrite the written components */
- for (unsigned i = 0; i < 4; i++) {
+ for (unsigned i = 0; i < NIR_MAX_VEC_COMPONENTS; i++) {
if (write_mask & (1 << i)) {
- entry->store_instr[i] = store_instr;
- entry->src.ssa[i] = value->ssa[i];
+ value->ssa.def[base_index + i] = from->ssa.def[i];
+ value->ssa.component[base_index + i] = from->ssa.component[i];
}
}
} else {
/* Non-ssa stores always write everything */
- entry->src.is_ssa = false;
- entry->src.deref = value->deref;
- for (unsigned i = 0; i < 4; i++)
- entry->store_instr[i] = store_instr;
+ value->is_ssa = false;
+ value->deref = from->deref;
}
}
-/* Remove an instruction and return a cursor pointing to where it was */
-static nir_cursor
-instr_remove_cursor(nir_instr *instr)
+/* Try to load a single element of a vector from the copy_entry. If the data
+ * isn't available, just let the original intrinsic do the work.
+ */
+static bool
+load_element_from_ssa_entry_value(struct copy_prop_var_state *state,
+ struct copy_entry *entry,
+ nir_builder *b, nir_intrinsic_instr *intrin,
+ struct value *value, unsigned index)
{
- nir_cursor cursor;
- nir_instr *prev = nir_instr_prev(instr);
- if (prev) {
- cursor = nir_after_instr(prev);
- } else {
- cursor = nir_before_block(instr->block);
- }
- nir_instr_remove(instr);
- return cursor;
+ assert(index < glsl_get_vector_elements(entry->dst->type));
+
+ /* We don't have the element available, so let the instruction do the work. */
+ if (!entry->src.ssa.def[index])
+ return false;
+
+ b->cursor = nir_instr_remove(&intrin->instr);
+ intrin->instr.block = NULL;
+
+ assert(entry->src.ssa.component[index] <
+ entry->src.ssa.def[index]->num_components);
+ nir_ssa_def *def = nir_channel(b, entry->src.ssa.def[index],
+ entry->src.ssa.component[index]);
+
+ *value = (struct value) {
+ .is_ssa = true,
+ {
+ .ssa = {
+ .def = { def },
+ .component = { 0 },
+ },
+ }
+ };
+
+ return true;
}
/* Do a "load" from an SSA-based entry return it in "value" as a value with a
- * single SSA def. Because an entry could reference up to 4 different SSA
+ * single SSA def. Because an entry could reference multiple different SSA
* defs, a vecN operation may be inserted to combine them into a single SSA
* def before handing it back to the caller. If the load instruction is no
* longer needed, it is removed and nir_instr::block is set to NULL. (It is
load_from_ssa_entry_value(struct copy_prop_var_state *state,
struct copy_entry *entry,
nir_builder *b, nir_intrinsic_instr *intrin,
- struct value *value)
+ nir_deref_instr *src, struct value *value)
{
+ if (is_array_deref_of_vector(src)) {
+ if (nir_src_is_const(src->arr.index)) {
+ return load_element_from_ssa_entry_value(state, entry, b, intrin, value,
+ nir_src_as_uint(src->arr.index));
+ }
+
+ /* An SSA copy_entry for the vector won't help an indirect load. */
+ if (glsl_type_is_vector(entry->dst->type)) {
+ assert(entry->dst->type == nir_deref_instr_parent(src)->type);
+ /* TODO: If all SSA entries are there, try an if-ladder. */
+ return false;
+ }
+ }
+
*value = entry->src;
assert(value->is_ssa);
- const struct glsl_type *type = nir_deref_tail(&entry->dst->deref)->type;
+ const struct glsl_type *type = entry->dst->type;
unsigned num_components = glsl_get_vector_elements(type);
- uint8_t available = 0;
+ nir_component_mask_t available = 0;
bool all_same = true;
for (unsigned i = 0; i < num_components; i++) {
- if (value->ssa[i])
+ if (value->ssa.def[i])
available |= (1 << i);
- if (value->ssa[i] != value->ssa[0])
+ if (value->ssa.def[i] != value->ssa.def[0])
+ all_same = false;
+
+ if (value->ssa.component[i] != i)
all_same = false;
}
if (all_same) {
/* Our work here is done */
- b->cursor = instr_remove_cursor(&intrin->instr);
+ b->cursor = nir_instr_remove(&intrin->instr);
intrin->instr.block = NULL;
return true;
}
if (available != (1 << num_components) - 1 &&
- intrin->intrinsic == nir_intrinsic_load_var &&
+ intrin->intrinsic == nir_intrinsic_load_deref &&
(available & nir_ssa_def_components_read(&intrin->dest.ssa)) == 0) {
/* If none of the components read are available as SSA values, then we
* should just bail. Otherwise, we would end up replacing the uses of
- * the load_var a vecN() that just gathers up its components.
+ * the load_deref a vecN() that just gathers up its components.
*/
return false;
}
b->cursor = nir_after_instr(&intrin->instr);
nir_ssa_def *load_def =
- intrin->intrinsic == nir_intrinsic_load_var ? &intrin->dest.ssa : NULL;
+ intrin->intrinsic == nir_intrinsic_load_deref ? &intrin->dest.ssa : NULL;
bool keep_intrin = false;
- nir_ssa_def *comps[4];
+ nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
for (unsigned i = 0; i < num_components; i++) {
- if (value->ssa[i]) {
- comps[i] = nir_channel(b, value->ssa[i], i);
+ if (value->ssa.def[i]) {
+ comps[i] = nir_channel(b, value->ssa.def[i], value->ssa.component[i]);
} else {
/* We don't have anything for this component in our
* list. Just re-use a channel from the load.
*/
if (load_def == NULL)
- load_def = nir_load_deref_var(b, entry->dst);
+ load_def = nir_load_deref(b, entry->dst);
if (load_def->parent_instr == &intrin->instr)
keep_intrin = true;
}
nir_ssa_def *vec = nir_vec(b, comps, num_components);
- for (unsigned i = 0; i < num_components; i++)
- value->ssa[i] = vec;
+ value_set_ssa_components(value, vec, num_components);
if (!keep_intrin) {
/* Removing this instruction should not touch the cursor because we
* process is guided by \param guide which references the same type as \param
* specific but has the same wildcard array lengths as \param deref.
*/
-static nir_deref_var *
-specialize_wildcards(nir_deref_var *deref,
- nir_deref_var *guide,
- nir_deref_var *specific,
- void *mem_ctx)
+static nir_deref_instr *
+specialize_wildcards(nir_builder *b,
+ nir_deref_path *deref,
+ nir_deref_path *guide,
+ nir_deref_path *specific)
{
- nir_deref_var *ret = nir_deref_var_create(mem_ctx, deref->var);
-
- nir_deref *deref_tail = deref->deref.child;
- nir_deref *guide_tail = guide->deref.child;
- nir_deref *spec_tail = specific->deref.child;
- nir_deref *ret_tail = &ret->deref;
- while (deref_tail) {
- switch (deref_tail->deref_type) {
- case nir_deref_type_array: {
- nir_deref_array *deref_arr = nir_deref_as_array(deref_tail);
-
- nir_deref_array *ret_arr = nir_deref_array_create(ret_tail);
- ret_arr->deref.type = deref_arr->deref.type;
- ret_arr->deref_array_type = deref_arr->deref_array_type;
-
- switch (deref_arr->deref_array_type) {
- case nir_deref_array_type_direct:
- ret_arr->base_offset = deref_arr->base_offset;
- break;
- case nir_deref_array_type_indirect:
- ret_arr->base_offset = deref_arr->base_offset;
- assert(deref_arr->indirect.is_ssa);
- ret_arr->indirect = deref_arr->indirect;
- break;
- case nir_deref_array_type_wildcard:
- /* This is where things get tricky. We have to search through
- * the entry deref to find its corresponding wildcard and fill
- * this slot in with the value from the src.
- */
- while (guide_tail) {
- if (guide_tail->deref_type == nir_deref_type_array &&
- nir_deref_as_array(guide_tail)->deref_array_type ==
- nir_deref_array_type_wildcard)
- break;
-
- guide_tail = guide_tail->child;
- spec_tail = spec_tail->child;
- }
-
- nir_deref_array *spec_arr = nir_deref_as_array(spec_tail);
- ret_arr->deref_array_type = spec_arr->deref_array_type;
- ret_arr->base_offset = spec_arr->base_offset;
- ret_arr->indirect = spec_arr->indirect;
+ nir_deref_instr **deref_p = &deref->path[1];
+ nir_deref_instr **guide_p = &guide->path[1];
+ nir_deref_instr **spec_p = &specific->path[1];
+ nir_deref_instr *ret_tail = deref->path[0];
+ for (; *deref_p; deref_p++) {
+ if ((*deref_p)->deref_type == nir_deref_type_array_wildcard) {
+ /* This is where things get tricky. We have to search through
+ * the entry deref to find its corresponding wildcard and fill
+ * this slot in with the value from the src.
+ */
+ while (*guide_p &&
+ (*guide_p)->deref_type != nir_deref_type_array_wildcard) {
+ guide_p++;
+ spec_p++;
}
+ assert(*guide_p && *spec_p);
- ret_tail->child = &ret_arr->deref;
- break;
- }
- case nir_deref_type_struct: {
- nir_deref_struct *deref_struct = nir_deref_as_struct(deref_tail);
-
- nir_deref_struct *ret_struct =
- nir_deref_struct_create(ret_tail, deref_struct->index);
- ret_struct->deref.type = deref_struct->deref.type;
+ ret_tail = nir_build_deref_follower(b, ret_tail, *spec_p);
- ret_tail->child = &ret_struct->deref;
- break;
- }
- case nir_deref_type_var:
- unreachable("Invalid deref type");
+ guide_p++;
+ spec_p++;
+ } else {
+ ret_tail = nir_build_deref_follower(b, ret_tail, *deref_p);
}
-
- deref_tail = deref_tail->child;
- ret_tail = ret_tail->child;
}
- return ret;
+ return ret_tail;
}
/* Do a "load" from an deref-based entry return it in "value" as a value. The
load_from_deref_entry_value(struct copy_prop_var_state *state,
struct copy_entry *entry,
nir_builder *b, nir_intrinsic_instr *intrin,
- nir_deref_var *src, struct value *value)
+ nir_deref_instr *src, struct value *value)
{
*value = entry->src;
- /* Walk the deref to get the two tails and also figure out if we need to
- * specialize any wildcards.
- */
- bool need_to_specialize_wildcards = false;
- nir_deref *entry_tail = &entry->dst->deref;
- nir_deref *src_tail = &src->deref;
- while (entry_tail->child && src_tail->child) {
- assert(src_tail->child->deref_type == entry_tail->child->deref_type);
- if (src_tail->child->deref_type == nir_deref_type_array) {
- nir_deref_array *entry_arr = nir_deref_as_array(entry_tail->child);
- nir_deref_array *src_arr = nir_deref_as_array(src_tail->child);
-
- if (src_arr->deref_array_type != nir_deref_array_type_wildcard &&
- entry_arr->deref_array_type == nir_deref_array_type_wildcard)
- need_to_specialize_wildcards = true;
- }
+ b->cursor = nir_instr_remove(&intrin->instr);
- entry_tail = entry_tail->child;
- src_tail = src_tail->child;
+ nir_deref_path entry_dst_path, src_path;
+ nir_deref_path_init(&entry_dst_path, entry->dst, state->mem_ctx);
+ nir_deref_path_init(&src_path, src, state->mem_ctx);
+
+ bool need_to_specialize_wildcards = false;
+ nir_deref_instr **entry_p = &entry_dst_path.path[1];
+ nir_deref_instr **src_p = &src_path.path[1];
+ while (*entry_p && *src_p) {
+ nir_deref_instr *entry_tail = *entry_p++;
+ nir_deref_instr *src_tail = *src_p++;
+
+ if (src_tail->deref_type == nir_deref_type_array &&
+ entry_tail->deref_type == nir_deref_type_array_wildcard)
+ need_to_specialize_wildcards = true;
}
/* If the entry deref is longer than the source deref then it refers to a
* smaller type and we can't source from it.
*/
- assert(entry_tail->child == NULL);
+ assert(*entry_p == NULL);
if (need_to_specialize_wildcards) {
/* The entry has some wildcards that are not in src. This means we need
* to construct a new deref based on the entry but using the wildcards
* from the source and guided by the entry dst. Oof.
*/
- value->deref = specialize_wildcards(entry->src.deref, entry->dst, src,
- state->mem_ctx);
- } else {
- /* We're going to need to make a copy in case we modify it below */
- value->deref = nir_deref_var_clone(value->deref, state->mem_ctx);
+ nir_deref_path entry_src_path;
+ nir_deref_path_init(&entry_src_path, entry->src.deref, state->mem_ctx);
+ value->deref = specialize_wildcards(b, &entry_src_path,
+ &entry_dst_path, &src_path);
+ nir_deref_path_finish(&entry_src_path);
}
- if (src_tail->child) {
- /* If our source deref is longer than the entry deref, that's ok because
- * it just means the entry deref needs to be extended a bit.
- */
- nir_deref *value_tail = nir_deref_tail(&value->deref->deref);
- value_tail->child = nir_deref_clone(src_tail->child, value_tail);
+ /* If our source deref is longer than the entry deref, that's ok because
+ * it just means the entry deref needs to be extended a bit.
+ */
+ while (*src_p) {
+ nir_deref_instr *src_tail = *src_p++;
+ value->deref = nir_build_deref_follower(b, value->deref, src_tail);
}
- b->cursor = instr_remove_cursor(&intrin->instr);
+ nir_deref_path_finish(&entry_dst_path);
+ nir_deref_path_finish(&src_path);
return true;
}
static bool
try_load_from_entry(struct copy_prop_var_state *state, struct copy_entry *entry,
nir_builder *b, nir_intrinsic_instr *intrin,
- nir_deref_var *src, struct value *value)
+ nir_deref_instr *src, struct value *value)
{
if (entry == NULL)
return false;
if (entry->src.is_ssa) {
- return load_from_ssa_entry_value(state, entry, b, intrin, value);
+ return load_from_ssa_entry_value(state, entry, b, intrin, src, value);
} else {
return load_from_deref_entry_value(state, entry, b, intrin, src, value);
}
}
+static void
+invalidate_copies_for_cf_node(struct copy_prop_var_state *state,
+ struct util_dynarray *copies,
+ nir_cf_node *cf_node)
+{
+ struct hash_entry *ht_entry = _mesa_hash_table_search(state->vars_written_map, cf_node);
+ assert(ht_entry);
+
+ struct vars_written *written = ht_entry->data;
+ if (written->modes) {
+ util_dynarray_foreach_reverse(copies, struct copy_entry, entry) {
+ if (entry->dst->mode & written->modes)
+ copy_entry_remove(copies, entry);
+ }
+ }
+
+ hash_table_foreach(written->derefs, entry) {
+ nir_deref_instr *deref_written = (nir_deref_instr *)entry->key;
+ kill_aliases(copies, deref_written, (uintptr_t)entry->data);
+ }
+}
+
+static void
+print_value(struct value *value, unsigned num_components)
+{
+ if (!value->is_ssa) {
+ printf(" %s ", glsl_get_type_name(value->deref->type));
+ nir_print_deref(value->deref, stdout);
+ return;
+ }
+
+ bool same_ssa = true;
+ for (unsigned i = 0; i < num_components; i++) {
+ if (value->ssa.component[i] != i ||
+ (i > 0 && value->ssa.def[i - 1] != value->ssa.def[i])) {
+ same_ssa = false;
+ break;
+ }
+ }
+ if (same_ssa) {
+ printf(" ssa_%d", value->ssa.def[0]->index);
+ } else {
+ for (unsigned i = 0; i < num_components; i++) {
+ if (value->ssa.def[i])
+ printf(" ssa_%d[%u]", value->ssa.def[i]->index, value->ssa.component[i]);
+ else
+ printf(" _");
+ }
+ }
+}
+
+static void
+print_copy_entry(struct copy_entry *entry)
+{
+ printf(" %s ", glsl_get_type_name(entry->dst->type));
+ nir_print_deref(entry->dst, stdout);
+ printf(":\t");
+
+ unsigned num_components = glsl_get_vector_elements(entry->dst->type);
+ print_value(&entry->src, num_components);
+ printf("\n");
+}
+
+static void
+dump_instr(nir_instr *instr)
+{
+ printf(" ");
+ nir_print_instr(instr, stdout);
+ printf("\n");
+}
+
+static void
+dump_copy_entries(struct util_dynarray *copies)
+{
+ util_dynarray_foreach(copies, struct copy_entry, iter)
+ print_copy_entry(iter);
+ printf("\n");
+}
+
static void
copy_prop_vars_block(struct copy_prop_var_state *state,
- nir_builder *b, nir_block *block)
+ nir_builder *b, nir_block *block,
+ struct util_dynarray *copies)
{
- /* Start each block with a blank slate */
- list_for_each_entry_safe(struct copy_entry, iter, &state->copies, link)
- copy_entry_remove(state, iter);
+ if (debug) {
+ printf("# block%d\n", block->index);
+ dump_copy_entries(copies);
+ }
nir_foreach_instr_safe(instr, block) {
+ if (debug && instr->type == nir_instr_type_deref)
+ dump_instr(instr);
+
+ if (instr->type == nir_instr_type_call) {
+ if (debug) dump_instr(instr);
+ apply_barrier_for_modes(copies, nir_var_shader_out |
+ nir_var_shader_temp |
+ nir_var_function_temp |
+ nir_var_mem_ssbo |
+ nir_var_mem_shared);
+ if (debug) dump_copy_entries(copies);
+ continue;
+ }
+
if (instr->type != nir_instr_type_intrinsic)
continue;
switch (intrin->intrinsic) {
case nir_intrinsic_barrier:
case nir_intrinsic_memory_barrier:
- /* If we hit a barrier, we need to trash everything that may possibly
- * be accessible to another thread. Locals, globals, and things of
- * the like are safe, however.
- */
- apply_barrier_for_modes(state, ~(nir_var_local | nir_var_global |
- nir_var_shader_in | nir_var_uniform));
+ if (debug) dump_instr(instr);
+
+ apply_barrier_for_modes(copies, nir_var_shader_out |
+ nir_var_mem_ssbo |
+ nir_var_mem_shared);
+ break;
+
+ case nir_intrinsic_scoped_memory_barrier:
+ if (debug) dump_instr(instr);
+
+ if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_ACQUIRE)
+ apply_barrier_for_modes(copies, nir_intrinsic_memory_modes(intrin));
break;
case nir_intrinsic_emit_vertex:
case nir_intrinsic_emit_vertex_with_counter:
- apply_barrier_for_modes(state, nir_var_shader_out);
+ if (debug) dump_instr(instr);
+
+ apply_barrier_for_modes(copies, nir_var_shader_out);
break;
- case nir_intrinsic_load_var: {
- nir_deref_var *src = intrin->variables[0];
+ case nir_intrinsic_load_deref: {
+ if (debug) dump_instr(instr);
+
+ if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE)
+ break;
+
+ nir_deref_instr *src = nir_src_as_deref(intrin->src[0]);
- uint8_t comps_read = nir_ssa_def_components_read(&intrin->dest.ssa);
- mark_aliased_entries_as_read(state, src, comps_read);
+ /* Direct array_derefs of vectors operate on the vectors (the parent
+ * deref). Indirects will be handled like other derefs.
+ */
+ int vec_index = 0;
+ nir_deref_instr *vec_src = src;
+ if (is_array_deref_of_vector(src) && nir_src_is_const(src->arr.index)) {
+ vec_src = nir_deref_instr_parent(src);
+ unsigned vec_comps = glsl_get_vector_elements(vec_src->type);
+ vec_index = nir_src_as_uint(src->arr.index);
+
+ /* Loading from an invalid index yields an undef */
+ if (vec_index >= vec_comps) {
+ b->cursor = nir_instr_remove(instr);
+ nir_ssa_def *u = nir_ssa_undef(b, 1, intrin->dest.ssa.bit_size);
+ nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(u));
+ break;
+ }
+ }
struct copy_entry *src_entry =
- lookup_entry_for_deref(state, src, derefs_a_contains_b_bit);
- struct value value;
+ lookup_entry_for_deref(copies, src, nir_derefs_a_contains_b_bit);
+ struct value value = {0};
if (try_load_from_entry(state, src_entry, b, intrin, src, &value)) {
if (value.is_ssa) {
/* lookup_load has already ensured that we get a single SSA
* value that has all of the channels. We just have to do the
- * rewrite operation.
+ * rewrite operation. Note that for array derefs of vectors,
+ * channel 0 is used.
*/
if (intrin->instr.block) {
/* The lookup left our instruction in-place. This means it
* rewrite the vecN itself.
*/
nir_ssa_def_rewrite_uses_after(&intrin->dest.ssa,
- nir_src_for_ssa(value.ssa[0]),
- value.ssa[0]->parent_instr);
+ nir_src_for_ssa(value.ssa.def[0]),
+ value.ssa.def[0]->parent_instr);
} else {
nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
- nir_src_for_ssa(value.ssa[0]));
+ nir_src_for_ssa(value.ssa.def[0]));
}
} else {
/* We're turning it into a load of a different variable */
- ralloc_steal(intrin, value.deref);
- intrin->variables[0] = value.deref;
+ intrin->src[0] = nir_src_for_ssa(&value.deref->dest.ssa);
/* Put it back in again. */
nir_builder_instr_insert(b, instr);
-
- value.is_ssa = true;
- for (unsigned i = 0; i < intrin->num_components; i++)
- value.ssa[i] = &intrin->dest.ssa;
+ value_set_ssa_components(&value, &intrin->dest.ssa,
+ intrin->num_components);
}
state->progress = true;
} else {
- value.is_ssa = true;
- for (unsigned i = 0; i < intrin->num_components; i++)
- value.ssa[i] = &intrin->dest.ssa;
+ value_set_ssa_components(&value, &intrin->dest.ssa,
+ intrin->num_components);
}
/* Now that we have a value, we're going to store it back so that we
* to do this, we need an exact match, not just something that
* contains what we're looking for.
*/
- struct copy_entry *store_entry =
- lookup_entry_for_deref(state, src, derefs_equal_bit);
- if (!store_entry)
- store_entry = copy_entry_create(state, src);
-
- /* Set up a store to this entry with the value of the load. This way
- * we can potentially remove subsequent loads. However, we use a
- * NULL instruction so we don't try and delete the load on a
- * subsequent store.
+ struct copy_entry *entry =
+ lookup_entry_for_deref(copies, vec_src, nir_derefs_equal_bit);
+ if (!entry)
+ entry = copy_entry_create(copies, vec_src);
+
+ /* Update the entry with the value of the load. This way
+ * we can potentially remove subsequent loads.
*/
- store_to_entry(state, store_entry, &value,
- ((1 << intrin->num_components) - 1), NULL);
+ value_set_from_value(&entry->src, &value, vec_index,
+ (1 << intrin->num_components) - 1);
break;
}
- case nir_intrinsic_store_var: {
- struct value value = {
- .is_ssa = true
- };
+ case nir_intrinsic_store_deref: {
+ if (debug) dump_instr(instr);
+
+ if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE)
+ break;
+
+ nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
+ assert(glsl_type_is_vector_or_scalar(dst->type));
- for (unsigned i = 0; i < intrin->num_components; i++)
- value.ssa[i] = intrin->src[0].ssa;
+ /* Direct array_derefs of vectors operate on the vectors (the parent
+ * deref). Indirects will be handled like other derefs.
+ */
+ int vec_index = 0;
+ nir_deref_instr *vec_dst = dst;
+ if (is_array_deref_of_vector(dst) && nir_src_is_const(dst->arr.index)) {
+ vec_dst = nir_deref_instr_parent(dst);
+ unsigned vec_comps = glsl_get_vector_elements(vec_dst->type);
+
+ vec_index = nir_src_as_uint(dst->arr.index);
+
+ /* Storing to an invalid index is a no-op. */
+ if (vec_index >= vec_comps) {
+ nir_instr_remove(instr);
+ break;
+ }
+ }
- nir_deref_var *dst = intrin->variables[0];
- unsigned wrmask = nir_intrinsic_write_mask(intrin);
struct copy_entry *entry =
- get_entry_and_kill_aliases(state, dst, wrmask);
- store_to_entry(state, entry, &value, wrmask, &intrin->instr);
+ lookup_entry_for_deref(copies, dst, nir_derefs_equal_bit);
+ if (entry && value_equals_store_src(&entry->src, intrin)) {
+ /* If we are storing the value from a load of the same var, the
+ * store is redundant, so remove it.
+ */
+ nir_instr_remove(instr);
+ } else {
+ struct value value = {0};
+ value_set_ssa_components(&value, intrin->src[1].ssa,
+ intrin->num_components);
+ unsigned wrmask = nir_intrinsic_write_mask(intrin);
+ struct copy_entry *entry =
+ get_entry_and_kill_aliases(copies, vec_dst, wrmask);
+ value_set_from_value(&entry->src, &value, vec_index, wrmask);
+ }
+
break;
}
- case nir_intrinsic_copy_var: {
- nir_deref_var *dst = intrin->variables[0];
- nir_deref_var *src = intrin->variables[1];
+ case nir_intrinsic_copy_deref: {
+ if (debug) dump_instr(instr);
+
+ if ((nir_intrinsic_src_access(intrin) & ACCESS_VOLATILE) ||
+ (nir_intrinsic_dst_access(intrin) & ACCESS_VOLATILE))
+ break;
- if (compare_derefs(src, dst) & derefs_equal_bit) {
+ nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
+ nir_deref_instr *src = nir_src_as_deref(intrin->src[1]);
+
+ if (nir_compare_derefs(src, dst) & nir_derefs_equal_bit) {
/* This is a no-op self-copy. Get rid of it */
nir_instr_remove(instr);
continue;
}
- mark_aliased_entries_as_read(state, src, 0xf);
+ /* The copy_deref intrinsic doesn't keep track of num_components, so
+ * get it ourselves.
+ */
+ unsigned num_components = glsl_get_vector_elements(dst->type);
+ unsigned full_mask = (1 << num_components) - 1;
+
+ /* Copies of direct array derefs of vectors are not handled. Just
+ * invalidate what's written and bail.
+ */
+ if ((is_array_deref_of_vector(src) && nir_src_is_const(src->arr.index)) ||
+ (is_array_deref_of_vector(dst) && nir_src_is_const(dst->arr.index))) {
+ kill_aliases(copies, dst, full_mask);
+ break;
+ }
struct copy_entry *src_entry =
- lookup_entry_for_deref(state, src, derefs_a_contains_b_bit);
+ lookup_entry_for_deref(copies, src, nir_derefs_a_contains_b_bit);
struct value value;
if (try_load_from_entry(state, src_entry, b, intrin, src, &value)) {
+ /* If the load works, intrin (the copy_deref) is removed. */
if (value.is_ssa) {
- nir_store_deref_var(b, dst, value.ssa[0], 0xf);
- intrin = nir_instr_as_intrinsic(nir_builder_last_instr(b));
+ nir_store_deref(b, dst, value.ssa.def[0], full_mask);
} else {
/* If this would be a no-op self-copy, don't bother. */
- if (compare_derefs(value.deref, dst) & derefs_equal_bit)
+ if (nir_compare_derefs(value.deref, dst) & nir_derefs_equal_bit)
continue;
/* Just turn it into a copy of a different deref */
- ralloc_steal(intrin, value.deref);
- intrin->variables[1] = value.deref;
+ intrin->src[1] = nir_src_for_ssa(&value.deref->dest.ssa);
/* Put it back in again. */
nir_builder_instr_insert(b, instr);
} else {
value = (struct value) {
.is_ssa = false,
- .deref = src,
+ { .deref = src },
};
}
+ nir_variable *src_var = nir_deref_instr_get_variable(src);
+ if (src_var && src_var->data.cannot_coalesce) {
+ /* The source cannot be coalesced, which means we can't propagate
+ * this copy.
+ */
+ break;
+ }
+
struct copy_entry *dst_entry =
- get_entry_and_kill_aliases(state, dst, 0xf);
- store_to_entry(state, dst_entry, &value, 0xf, &intrin->instr);
+ get_entry_and_kill_aliases(copies, dst, full_mask);
+ value_set_from_value(&dst_entry->src, &value, 0, full_mask);
break;
}
- default:
+ case nir_intrinsic_deref_atomic_add:
+ case nir_intrinsic_deref_atomic_imin:
+ case nir_intrinsic_deref_atomic_umin:
+ case nir_intrinsic_deref_atomic_imax:
+ case nir_intrinsic_deref_atomic_umax:
+ case nir_intrinsic_deref_atomic_and:
+ case nir_intrinsic_deref_atomic_or:
+ case nir_intrinsic_deref_atomic_xor:
+ case nir_intrinsic_deref_atomic_exchange:
+ case nir_intrinsic_deref_atomic_comp_swap:
+ if (debug) dump_instr(instr);
+
+ if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE)
+ break;
+
+ nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
+ unsigned num_components = glsl_get_vector_elements(dst->type);
+ unsigned full_mask = (1 << num_components) - 1;
+ kill_aliases(copies, dst, full_mask);
break;
+
+ default:
+ continue; /* To skip the debug below. */
}
+
+ if (debug) dump_copy_entries(copies);
}
}
-bool
-nir_opt_copy_prop_vars(nir_shader *shader)
+static void
+copy_prop_vars_cf_node(struct copy_prop_var_state *state,
+ struct util_dynarray *copies,
+ nir_cf_node *cf_node)
{
- struct copy_prop_var_state state;
+ switch (cf_node->type) {
+ case nir_cf_node_function: {
+ nir_function_impl *impl = nir_cf_node_as_function(cf_node);
- state.shader = shader;
- state.mem_ctx = ralloc_context(NULL);
- list_inithead(&state.copies);
- list_inithead(&state.copy_free_list);
+ struct util_dynarray impl_copies;
+ util_dynarray_init(&impl_copies, state->mem_ctx);
- bool global_progress = false;
- nir_foreach_function(function, shader) {
- if (!function->impl)
- continue;
+ foreach_list_typed_safe(nir_cf_node, cf_node, node, &impl->body)
+ copy_prop_vars_cf_node(state, &impl_copies, cf_node);
+
+ break;
+ }
+ case nir_cf_node_block: {
+ nir_block *block = nir_cf_node_as_block(cf_node);
nir_builder b;
- nir_builder_init(&b, function->impl);
+ nir_builder_init(&b, state->impl);
+ copy_prop_vars_block(state, &b, block, copies);
+ break;
+ }
- state.progress = false;
- nir_foreach_block(block, function->impl)
- copy_prop_vars_block(&state, &b, block);
+ case nir_cf_node_if: {
+ nir_if *if_stmt = nir_cf_node_as_if(cf_node);
- if (state.progress) {
- nir_metadata_preserve(function->impl, nir_metadata_block_index |
- nir_metadata_dominance);
- global_progress = true;
- }
+ /* Clone the copies for each branch of the if statement. The idea is
+ * that they both see the same state of available copies, but do not
+ * interfere with each other.
+ */
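+ /* For example (illustrative sketch):
+ *
+ *    v = a;              // entry v -> a is available here
+ *    if (cond)
+ *       v = b;           // only then_copies records v -> b
+ *    else
+ *       use(v);          // else_copies still sees v -> a
+ *    use(v);             // v -> a is invalidated below, because
+ *                        // vars_written recorded the then-branch store
+ */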
+
+ struct util_dynarray then_copies;
+ util_dynarray_clone(&then_copies, state->mem_ctx, copies);
+
+ struct util_dynarray else_copies;
+ util_dynarray_clone(&else_copies, state->mem_ctx, copies);
+
+ foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->then_list)
+ copy_prop_vars_cf_node(state, &then_copies, cf_node);
+
+ foreach_list_typed_safe(nir_cf_node, cf_node, node, &if_stmt->else_list)
+ copy_prop_vars_cf_node(state, &else_copies, cf_node);
+
+ /* Both branches' copies can be ignored, since the effect of running both
+ * branches was captured in the first pass that collects vars_written.
+ */
+
+ invalidate_copies_for_cf_node(state, copies, cf_node);
+
+ break;
+ }
+
+ case nir_cf_node_loop: {
+ nir_loop *loop = nir_cf_node_as_loop(cf_node);
+
+ /* Invalidate before cloning the copies for the loop, since the loop
+ * body can be executed more than once.
+ */
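+ /* E.g. (illustrative): with "v = a; loop { use(v); v = b; }" the use sees
+ * a only on the first iteration, so the entry v -> a must already be gone
+ * when the body is visited.
+ */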
+
+ invalidate_copies_for_cf_node(state, copies, cf_node);
+
+ struct util_dynarray loop_copies;
+ util_dynarray_clone(&loop_copies, state->mem_ctx, copies);
+
+ foreach_list_typed_safe(nir_cf_node, cf_node, node, &loop->body)
+ copy_prop_vars_cf_node(state, &loop_copies, cf_node);
+
+ break;
+ }
+
+ default:
+ unreachable("Invalid CF node type");
+ }
+}
+
+static bool
+nir_copy_prop_vars_impl(nir_function_impl *impl)
+{
+ void *mem_ctx = ralloc_context(NULL);
+
+ if (debug) {
+ nir_metadata_require(impl, nir_metadata_block_index);
+ printf("## nir_copy_prop_vars_impl for %s\n", impl->function->name);
}
- ralloc_free(state.mem_ctx);
+ struct copy_prop_var_state state = {
+ .impl = impl,
+ .mem_ctx = mem_ctx,
+ .lin_ctx = linear_zalloc_parent(mem_ctx, 0),
+
+ .vars_written_map = _mesa_pointer_hash_table_create(mem_ctx),
+ };
+
+ gather_vars_written(&state, NULL, &impl->cf_node);
+
+ copy_prop_vars_cf_node(&state, NULL, &impl->cf_node);
+
+ if (state.progress) {
+ nir_metadata_preserve(impl, nir_metadata_block_index |
+ nir_metadata_dominance);
+ } else {
+#ifndef NDEBUG
+ impl->valid_metadata &= ~nir_metadata_not_properly_reset;
+#endif
+ }
+
+ ralloc_free(mem_ctx);
+ return state.progress;
+}
+
+bool
+nir_opt_copy_prop_vars(nir_shader *shader)
+{
+ bool progress = false;
+
+ nir_foreach_function(function, shader) {
+ if (!function->impl)
+ continue;
+ progress |= nir_copy_prop_vars_impl(function->impl);
+ }
- return global_progress;
+ return progress;
}