nir_var_mem_shared;
break;
+ case nir_intrinsic_scoped_memory_barrier:
+ if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_ACQUIRE)
+ written->modes |= nir_intrinsic_memory_modes(intrin);
+ break;
+
case nir_intrinsic_emit_vertex:
case nir_intrinsic_emit_vertex_with_counter:
written->modes = nir_var_shader_out;
nir_builder *b, nir_intrinsic_instr *intrin,
struct value *value, unsigned index)
{
- const struct glsl_type *type = entry->dst->type;
- unsigned num_components = glsl_get_vector_elements(type);
- assert(index < num_components);
+ assert(index < glsl_get_vector_elements(entry->dst->type));
/* We don't have the element available, so let the instruction do the work. */
if (!entry->src.ssa.def[index])
*value = (struct value) {
.is_ssa = true,
- .ssa = {
- .def = { def },
- .component = { 0 },
- },
+ {
+ .ssa = {
+ .def = { def },
+ .component = { 0 },
+ },
+ }
};
return true;
nir_deref_instr *src, struct value *value)
{
if (is_array_deref_of_vector(src)) {
- if (!nir_src_is_const(src->arr.index))
- return false;
+ if (nir_src_is_const(src->arr.index)) {
+ return load_element_from_ssa_entry_value(state, entry, b, intrin, value,
+ nir_src_as_uint(src->arr.index));
+ }
- return load_element_from_ssa_entry_value(state, entry, b, intrin, value,
- nir_src_as_uint(src->arr.index));
+ /* An SSA copy_entry for the vector won't help indirect load. */
+ if (glsl_type_is_vector(entry->dst->type)) {
+ assert(entry->dst->type == nir_deref_instr_parent(src)->type);
+ /* TODO: If all SSA entries are there, try an if-ladder. */
+ return false;
+ }
}
*value = entry->src;
nir_var_mem_shared);
break;
+ case nir_intrinsic_scoped_memory_barrier:
+ if (debug) dump_instr(instr);
+
+ if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_ACQUIRE)
+ apply_barrier_for_modes(copies, nir_intrinsic_memory_modes(intrin));
+ break;
+
case nir_intrinsic_emit_vertex:
case nir_intrinsic_emit_vertex_with_counter:
if (debug) dump_instr(instr);
case nir_intrinsic_load_deref: {
if (debug) dump_instr(instr);
+ if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE)
+ break;
+
nir_deref_instr *src = nir_src_as_deref(intrin->src[0]);
+ /* Direct array_derefs of vectors operate on the vectors (the parent
+ * deref). Indirects will be handled like other derefs.
+ */
int vec_index = 0;
nir_deref_instr *vec_src = src;
- if (is_array_deref_of_vector(src)) {
+ if (is_array_deref_of_vector(src) && nir_src_is_const(src->arr.index)) {
vec_src = nir_deref_instr_parent(src);
unsigned vec_comps = glsl_get_vector_elements(vec_src->type);
-
- /* Indirects are not handled yet. */
- if (!nir_src_is_const(src->arr.index))
- break;
-
vec_index = nir_src_as_uint(src->arr.index);
/* Loading from an invalid index yields an undef */
case nir_intrinsic_store_deref: {
if (debug) dump_instr(instr);
+ if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE)
+ break;
+
nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
assert(glsl_type_is_vector_or_scalar(dst->type));
+ /* Direct array_derefs of vectors operate on the vectors (the parent
+ * deref). Indirects will be handled like other derefs.
+ */
int vec_index = 0;
nir_deref_instr *vec_dst = dst;
- if (is_array_deref_of_vector(dst)) {
+ if (is_array_deref_of_vector(dst) && nir_src_is_const(dst->arr.index)) {
vec_dst = nir_deref_instr_parent(dst);
unsigned vec_comps = glsl_get_vector_elements(vec_dst->type);
- /* Indirects are not handled yet. Kill everything */
- if (!nir_src_is_const(dst->arr.index)) {
- kill_aliases(copies, vec_dst, (1 << vec_comps) - 1);
- break;
- }
-
vec_index = nir_src_as_uint(dst->arr.index);
/* Storing to an invalid index is a no-op. */
case nir_intrinsic_copy_deref: {
if (debug) dump_instr(instr);
+ if ((nir_intrinsic_src_access(intrin) & ACCESS_VOLATILE) ||
+ (nir_intrinsic_dst_access(intrin) & ACCESS_VOLATILE))
+ break;
+
nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
nir_deref_instr *src = nir_src_as_deref(intrin->src[1]);
unsigned num_components = glsl_get_vector_elements(dst->type);
unsigned full_mask = (1 << num_components) - 1;
- if (is_array_deref_of_vector(src) || is_array_deref_of_vector(dst)) {
- /* Cases not handled yet. Writing into an element of 'dst'
- * invalidates any related entries in copies. Reading from 'src'
- * doesn't invalidate anything, so no action needed for it.
- */
+ /* Copies of direct array derefs of vectors are not handled. Just
+ * invalidate what's written and bail.
+ */
+ if ((is_array_deref_of_vector(src) && nir_src_is_const(src->arr.index)) ||
+ (is_array_deref_of_vector(dst) && nir_src_is_const(dst->arr.index))) {
kill_aliases(copies, dst, full_mask);
break;
}
};
}
+ nir_variable *src_var = nir_deref_instr_get_variable(src);
+ if (src_var && src_var->data.cannot_coalesce) {
+ /* The source cannot be coalesced, which means we can't propagate
+ * this copy.
+ */
+ break;
+ }
+
struct copy_entry *dst_entry =
get_entry_and_kill_aliases(copies, dst, full_mask);
value_set_from_value(&dst_entry->src, &value, 0, full_mask);
case nir_intrinsic_deref_atomic_comp_swap:
if (debug) dump_instr(instr);
+ if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE)
+ break;
+
nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
unsigned num_components = glsl_get_vector_elements(dst->type);
unsigned full_mask = (1 << num_components) - 1;