nir: Add scoped_memory_barrier intrinsic
[mesa.git] / src / compiler / nir / nir_opt_copy_prop_vars.c
index 7fc84083ea697d34181bacf9947d0e545c0f8317..c4544eac0f2bee7ec4a60c6e2ee7ccb640e58e43 100644 (file)
@@ -171,6 +171,11 @@ gather_vars_written(struct copy_prop_var_state *state,
                               nir_var_mem_shared;
             break;
 
+         case nir_intrinsic_scoped_memory_barrier:
+            if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_ACQUIRE)
+               written->modes |= nir_intrinsic_memory_modes(intrin);
+            break;
+
          case nir_intrinsic_emit_vertex:
          case nir_intrinsic_emit_vertex_with_counter:
             written->modes = nir_var_shader_out;
@@ -433,9 +438,7 @@ load_element_from_ssa_entry_value(struct copy_prop_var_state *state,
                                   nir_builder *b, nir_intrinsic_instr *intrin,
                                   struct value *value, unsigned index)
 {
-   const struct glsl_type *type = entry->dst->type;
-   unsigned num_components = glsl_get_vector_elements(type);
-   assert(index < num_components);
+   assert(index < glsl_get_vector_elements(entry->dst->type));
 
    /* We don't have the element available, so let the instruction do the work. */
    if (!entry->src.ssa.def[index])
@@ -451,10 +454,12 @@ load_element_from_ssa_entry_value(struct copy_prop_var_state *state,
 
    *value = (struct value) {
       .is_ssa = true,
-      .ssa = {
-         .def = { def },
-         .component = { 0 },
-      },
+      {
+       .ssa = {
+         .def = { def },
+         .component = { 0 },
+       },
+      }
    };
 
    return true;
@@ -475,11 +480,17 @@ load_from_ssa_entry_value(struct copy_prop_var_state *state,
                           nir_deref_instr *src, struct value *value)
 {
    if (is_array_deref_of_vector(src)) {
-      if (!nir_src_is_const(src->arr.index))
-         return false;
+      if (nir_src_is_const(src->arr.index)) {
+         return load_element_from_ssa_entry_value(state, entry, b, intrin, value,
+                                                  nir_src_as_uint(src->arr.index));
+      }
 
-      return load_element_from_ssa_entry_value(state, entry, b, intrin, value,
-                                               nir_src_as_uint(src->arr.index));
+      /* An SSA copy_entry for the vector won't help an indirect load. */
+      if (glsl_type_is_vector(entry->dst->type)) {
+         assert(entry->dst->type == nir_deref_instr_parent(src)->type);
+         /* TODO: If all SSA entries are there, try an if-ladder. */
+         return false;
+      }
    }
 
    *value = entry->src;
@@ -796,6 +807,13 @@ copy_prop_vars_block(struct copy_prop_var_state *state,
                                          nir_var_mem_shared);
          break;
 
+      case nir_intrinsic_scoped_memory_barrier:
+         if (debug) dump_instr(instr);
+
+         if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_ACQUIRE)
+            apply_barrier_for_modes(copies, nir_intrinsic_memory_modes(intrin));
+         break;
+
       case nir_intrinsic_emit_vertex:
       case nir_intrinsic_emit_vertex_with_counter:
          if (debug) dump_instr(instr);
@@ -806,18 +824,19 @@ copy_prop_vars_block(struct copy_prop_var_state *state,
       case nir_intrinsic_load_deref: {
          if (debug) dump_instr(instr);
 
+         if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE)
+            break;
+
          nir_deref_instr *src = nir_src_as_deref(intrin->src[0]);
 
+         /* Direct array_derefs of vectors operate on the vectors (the parent
+          * deref).  Indirects will be handled like other derefs.
+          */
          int vec_index = 0;
          nir_deref_instr *vec_src = src;
-         if (is_array_deref_of_vector(src)) {
+         if (is_array_deref_of_vector(src) && nir_src_is_const(src->arr.index)) {
             vec_src = nir_deref_instr_parent(src);
             unsigned vec_comps = glsl_get_vector_elements(vec_src->type);
-
-            /* Indirects are not handled yet.  */
-            if (!nir_src_is_const(src->arr.index))
-               break;
-
             vec_index = nir_src_as_uint(src->arr.index);
 
             /* Loading from an invalid index yields an undef */
@@ -888,21 +907,21 @@ copy_prop_vars_block(struct copy_prop_var_state *state,
       case nir_intrinsic_store_deref: {
          if (debug) dump_instr(instr);
 
+         if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE)
+            break;
+
          nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
          assert(glsl_type_is_vector_or_scalar(dst->type));
 
+         /* Direct array_derefs of vectors operate on the vectors (the parent
+          * deref).  Indirects will be handled like other derefs.
+          */
          int vec_index = 0;
          nir_deref_instr *vec_dst = dst;
-         if (is_array_deref_of_vector(dst)) {
+         if (is_array_deref_of_vector(dst) && nir_src_is_const(dst->arr.index)) {
             vec_dst = nir_deref_instr_parent(dst);
             unsigned vec_comps = glsl_get_vector_elements(vec_dst->type);
 
-            /* Indirects are not handled yet.  Kill everything */
-            if (!nir_src_is_const(dst->arr.index)) {
-               kill_aliases(copies, vec_dst, (1 << vec_comps) - 1);
-               break;
-            }
-
             vec_index = nir_src_as_uint(dst->arr.index);
 
             /* Storing to an invalid index is a no-op. */
@@ -935,6 +954,10 @@ copy_prop_vars_block(struct copy_prop_var_state *state,
       case nir_intrinsic_copy_deref: {
          if (debug) dump_instr(instr);
 
+         if ((nir_intrinsic_src_access(intrin) & ACCESS_VOLATILE) ||
+             (nir_intrinsic_dst_access(intrin) & ACCESS_VOLATILE))
+            break;
+
          nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
          nir_deref_instr *src = nir_src_as_deref(intrin->src[1]);
 
@@ -950,11 +973,11 @@ copy_prop_vars_block(struct copy_prop_var_state *state,
          unsigned num_components = glsl_get_vector_elements(dst->type);
          unsigned full_mask = (1 << num_components) - 1;
 
-         if (is_array_deref_of_vector(src) || is_array_deref_of_vector(dst)) {
-            /* Cases not handled yet.  Writing into an element of 'dst'
-             * invalidates any related entries in copies.  Reading from 'src'
-             * doesn't invalidate anything, so no action needed for it.
-             */
+         /* Copies of direct array derefs of vectors are not handled.  Just
+          * invalidate what's written and bail.
+          */
+         if ((is_array_deref_of_vector(src) && nir_src_is_const(src->arr.index)) ||
+             (is_array_deref_of_vector(dst) && nir_src_is_const(dst->arr.index))) {
             kill_aliases(copies, dst, full_mask);
             break;
          }
@@ -986,6 +1009,14 @@ copy_prop_vars_block(struct copy_prop_var_state *state,
             };
          }
 
+         nir_variable *src_var = nir_deref_instr_get_variable(src);
+         if (src_var && src_var->data.cannot_coalesce) {
+            /* The source cannot be coalesced, which means we can't propagate
+             * this copy.
+             */
+            break;
+         }
+
          struct copy_entry *dst_entry =
             get_entry_and_kill_aliases(copies, dst, full_mask);
          value_set_from_value(&dst_entry->src, &value, 0, full_mask);
@@ -1004,6 +1035,9 @@ copy_prop_vars_block(struct copy_prop_var_state *state,
       case nir_intrinsic_deref_atomic_comp_swap:
          if (debug) dump_instr(instr);
 
+         if (nir_intrinsic_access(intrin) & ACCESS_VOLATILE)
+            break;
+
          nir_deref_instr *dst = nir_src_as_deref(intrin->src[0]);
          unsigned num_components = glsl_get_vector_elements(dst->type);
          unsigned full_mask = (1 << num_components) - 1;