i965: Use nir_lower_load_const_to_scalar().
diff --git a/src/mesa/drivers/dri/i965/brw_nir.c b/src/mesa/drivers/dri/i965/brw_nir.c
index 91358d8f389ac9be1832be783cdc84fa9e5726ca..46b5116357917cffa2a607789ecc0ad917f7f4ab 100644
--- a/src/mesa/drivers/dri/i965/brw_nir.c
+++ b/src/mesa/drivers/dri/i965/brw_nir.c
 
 #include "brw_nir.h"
 #include "brw_shader.h"
-#include "glsl/glsl_parser_extras.h"
-#include "glsl/nir/glsl_to_nir.h"
+#include "compiler/nir/glsl_to_nir.h"
+#include "compiler/nir/nir_builder.h"
 #include "program/prog_to_nir.h"
 
+static bool
+is_input(nir_intrinsic_instr *intrin)
+{
+   return intrin->intrinsic == nir_intrinsic_load_input ||
+          intrin->intrinsic == nir_intrinsic_load_per_vertex_input;
+}
+
+static bool
+is_output(nir_intrinsic_instr *intrin)
+{
+   return intrin->intrinsic == nir_intrinsic_load_output ||
+          intrin->intrinsic == nir_intrinsic_load_per_vertex_output ||
+          intrin->intrinsic == nir_intrinsic_store_output ||
+          intrin->intrinsic == nir_intrinsic_store_per_vertex_output;
+}
+
+/**
+ * In many cases, we just add the base and offset together, so there's no
+ * reason to keep them separate.  Sometimes, combining them is essential:
+ * if a shader only accesses part of a compound variable (such as a matrix
+ * or array), the variable's base may not actually exist in the VUE map.
+ *
+ * This pass adds constant offsets to instr->const_index[0], and resets
+ * the offset source to 0.  Non-constant offsets remain unchanged; since
+ * we don't know what part of a compound variable is accessed, we allocate
+ * storage for the entire thing.
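+ *
+ * For example, a load_input intrinsic with const_index[0] == 4 and a
+ * constant offset source of 2 becomes a load_input with
+ * const_index[0] == 6 and an offset source of 0.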
+ */
+struct add_const_offset_to_base_params {
+   nir_builder b;
+   nir_variable_mode mode;
+};
+
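+/* Per-block callback for add_const_offset_to_base() below. */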
+static bool
+add_const_offset_to_base_block(nir_block *block, void *closure)
+{
+   struct add_const_offset_to_base_params *params = closure;
+   nir_builder *b = &params->b;
+
+   nir_foreach_instr_safe(block, instr) {
+      if (instr->type != nir_instr_type_intrinsic)
+         continue;
+
+      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+
+      if ((params->mode == nir_var_shader_in && is_input(intrin)) ||
+          (params->mode == nir_var_shader_out && is_output(intrin))) {
+         nir_src *offset = nir_get_io_offset_src(intrin);
+         nir_const_value *const_offset = nir_src_as_const_value(*offset);
+
+         if (const_offset) {
+            intrin->const_index[0] += const_offset->u[0];
+            b->cursor = nir_before_instr(&intrin->instr);
+            nir_instr_rewrite_src(&intrin->instr, offset,
+                                  nir_src_for_ssa(nir_imm_int(b, 0)));
+         }
+      }
+   }
+   return true;
+}
+
+static void
+add_const_offset_to_base(nir_shader *nir, nir_variable_mode mode)
+{
+   struct add_const_offset_to_base_params params = { .mode = mode };
+
+   nir_foreach_function(nir, f) {
+      if (f->impl) {
+         nir_builder_init(&params.b, f->impl);
+         nir_foreach_block(f->impl, add_const_offset_to_base_block, &params);
+      }
+   }
+}
+
 static bool
 remap_vs_attrs(nir_block *block, void *closure)
 {
@@ -38,9 +111,6 @@ remap_vs_attrs(nir_block *block, void *closure)
 
       nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
 
-      /* We set EmitNoIndirect for VS inputs, so there are no indirects. */
-      assert(intrin->intrinsic != nir_intrinsic_load_input_indirect);
-
       if (intrin->intrinsic == nir_intrinsic_load_input) {
          /* Attributes come in a contiguous block, ordered by their
           * gl_vert_attrib value.  That means we can compute the slot
@@ -49,12 +119,89 @@ remap_vs_attrs(nir_block *block, void *closure)
           */
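+         /* For example, if inputs_read has only bits 0 and 16 set, then
+          * attribute 16 has a single enabled attribute below it, so it
+          * lands in slot 1 and const_index[0] becomes 4.
+          */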
          int attr = intrin->const_index[0];
          int slot = _mesa_bitcount_64(inputs_read & BITFIELD64_MASK(attr));
+
          intrin->const_index[0] = 4 * slot;
       }
    }
    return true;
 }
 
+static bool
+remap_inputs_with_vue_map(nir_block *block, void *closure)
+{
+   const struct brw_vue_map *vue_map = closure;
+
+   nir_foreach_instr(block, instr) {
+      if (instr->type != nir_instr_type_intrinsic)
+         continue;
+
+      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+
+      if (intrin->intrinsic == nir_intrinsic_load_input ||
+          intrin->intrinsic == nir_intrinsic_load_per_vertex_input) {
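+         /* Re-map the varying location to its slot in the VUE map. */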
+         int vue_slot = vue_map->varying_to_slot[intrin->const_index[0]];
+         assert(vue_slot != -1);
+         intrin->const_index[0] = vue_slot;
+      }
+   }
+   return true;
+}
+
+struct remap_patch_urb_offsets_state {
+   nir_builder b;
+   struct brw_vue_map vue_map;
+};
+
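+/**
+ * Remap TCS output and TES input locations through the tessellation VUE
+ * map, and fold the vertex index into the offset.  The vertex index is
+ * multiplied by the number of per-vertex slots; if it is a constant, the
+ * result is added to const_index[0] directly, and otherwise it is added
+ * to the indirect offset source.
+ */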
+static bool
+remap_patch_urb_offsets(nir_block *block, void *closure)
+{
+   struct remap_patch_urb_offsets_state *state = closure;
+
+   nir_foreach_instr_safe(block, instr) {
+      if (instr->type != nir_instr_type_intrinsic)
+         continue;
+
+      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+
+      gl_shader_stage stage = state->b.shader->stage;
+
+      if ((stage == MESA_SHADER_TESS_CTRL && is_output(intrin)) ||
+          (stage == MESA_SHADER_TESS_EVAL && is_input(intrin))) {
+         int vue_slot = state->vue_map.varying_to_slot[intrin->const_index[0]];
+         assert(vue_slot != -1);
+         intrin->const_index[0] = vue_slot;
+
+         nir_src *vertex = nir_get_io_vertex_index_src(intrin);
+         if (vertex) {
+            nir_const_value *const_vertex = nir_src_as_const_value(*vertex);
+            if (const_vertex) {
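+               /* Fold constant vertex indices directly into const_index[0]. */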
+               intrin->const_index[0] += const_vertex->u[0] *
+                                         state->vue_map.num_per_vertex_slots;
+            } else {
+               state->b.cursor = nir_before_instr(&intrin->instr);
+
+               /* Multiply by the number of per-vertex slots. */
+               nir_ssa_def *vertex_offset =
+                  nir_imul(&state->b,
+                           nir_ssa_for_src(&state->b, *vertex, 1),
+                           nir_imm_int(&state->b,
+                                       state->vue_map.num_per_vertex_slots));
+
+               /* Add it to the existing offset */
+               nir_src *offset = nir_get_io_offset_src(intrin);
+               nir_ssa_def *total_offset =
+                  nir_iadd(&state->b, vertex_offset,
+                           nir_ssa_for_src(&state->b, *offset, 1));
+
+               nir_instr_rewrite_src(&intrin->instr, offset,
+                                     nir_src_for_ssa(total_offset));
+            }
+         }
+      }
+   }
+   return true;
+}
+
 static void
 brw_nir_lower_inputs(nir_shader *nir,
                      const struct brw_device_info *devinfo,
@@ -62,13 +209,6 @@ brw_nir_lower_inputs(nir_shader *nir,
 {
    switch (nir->stage) {
    case MESA_SHADER_VERTEX:
-      /* For now, leave the vec4 backend doing the old method. */
-      if (!is_scalar) {
-         nir_assign_var_locations(&nir->inputs, &nir->num_inputs,
-                                  type_size_vec4);
-         break;
-      }
-
       /* Start with the location of the variable's base. */
       foreach_list_typed(nir_variable, var, node, &nir->inputs) {
          var->data.driver_location = var->data.location;
@@ -80,20 +220,30 @@ brw_nir_lower_inputs(nir_shader *nir,
        */
       nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
 
-      /* Finally, translate VERT_ATTRIB_* values into the actual registers.
-       *
-       * Note that we can use nir->info.inputs_read instead of key->inputs_read
-       * since the two are identical aside from Gen4-5 edge flag differences.
-       */
-      GLbitfield64 inputs_read = nir->info.inputs_read;
-      nir_foreach_overload(nir, overload) {
-         if (overload->impl) {
-            nir_foreach_block(overload->impl, remap_vs_attrs, &inputs_read);
+      /* This pass needs actual constants */
+      nir_opt_constant_folding(nir);
+
+      add_const_offset_to_base(nir, nir_var_shader_in);
+
+      if (is_scalar) {
+         /* Finally, translate VERT_ATTRIB_* values into the actual registers.
+          *
+          * Note that we can use nir->info.inputs_read instead of
+          * key->inputs_read since the two are identical aside from Gen4-5
+          * edge flag differences.
+          */
+         GLbitfield64 inputs_read = nir->info.inputs_read;
+
+         nir_foreach_function(nir, function) {
+            if (function->impl) {
+               nir_foreach_block(function->impl, remap_vs_attrs, &inputs_read);
+            }
          }
       }
       break;
+   case MESA_SHADER_TESS_CTRL:
    case MESA_SHADER_GEOMETRY: {
-      if (!is_scalar) {
+      if (!is_scalar && nir->stage == MESA_SHADER_GEOMETRY) {
          foreach_list_typed(nir_variable, var, node, &nir->inputs) {
             var->data.driver_location = var->data.location;
          }
@@ -115,17 +265,52 @@ brw_nir_lower_inputs(nir_shader *nir,
          GLbitfield64 inputs_read =
             nir->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID;
          brw_compute_vue_map(devinfo, &input_vue_map, inputs_read,
-                             nir->info.separate_shader);
+                             nir->info.separate_shader ||
+                             nir->stage == MESA_SHADER_TESS_CTRL);
 
-         /* Start with the slot for the variable's base. */
          foreach_list_typed(nir_variable, var, node, &nir->inputs) {
-            assert(input_vue_map.varying_to_slot[var->data.location] != -1);
-            var->data.driver_location =
-               input_vue_map.varying_to_slot[var->data.location];
+            var->data.driver_location = var->data.location;
          }
 
          /* Inputs are stored in vec4 slots, so use type_size_vec4(). */
          nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
+
+         /* This pass needs actual constants */
+         nir_opt_constant_folding(nir);
+
+         add_const_offset_to_base(nir, nir_var_shader_in);
+
+         nir_foreach_function(nir, function) {
+            if (function->impl) {
+               nir_foreach_block(function->impl, remap_inputs_with_vue_map,
+                                 &input_vue_map);
+            }
+         }
+      }
+      break;
+   }
+   case MESA_SHADER_TESS_EVAL: {
+      struct remap_patch_urb_offsets_state state;
+      brw_compute_tess_vue_map(&state.vue_map,
+                               nir->info.inputs_read & ~VARYING_BIT_PRIMITIVE_ID,
+                               nir->info.patch_inputs_read);
+
+      foreach_list_typed(nir_variable, var, node, &nir->inputs) {
+         var->data.driver_location = var->data.location;
+      }
+
+      nir_lower_io(nir, nir_var_shader_in, type_size_vec4);
+
+      /* This pass needs actual constants */
+      nir_opt_constant_folding(nir);
+
+      add_const_offset_to_base(nir, nir_var_shader_in);
+
+      nir_foreach_function(nir, function) {
+         if (function->impl) {
+            nir_builder_init(&state.b, function->impl);
+            nir_foreach_block(function->impl, remap_patch_urb_offsets, &state);
+         }
       }
       break;
    }
@@ -144,10 +329,13 @@ brw_nir_lower_inputs(nir_shader *nir,
 }
 
 static void
-brw_nir_lower_outputs(nir_shader *nir, bool is_scalar)
+brw_nir_lower_outputs(nir_shader *nir,
+                      const struct brw_device_info *devinfo,
+                      bool is_scalar)
 {
    switch (nir->stage) {
    case MESA_SHADER_VERTEX:
+   case MESA_SHADER_TESS_EVAL:
    case MESA_SHADER_GEOMETRY:
       if (is_scalar) {
          nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
@@ -158,6 +346,30 @@ brw_nir_lower_outputs(nir_shader *nir, bool is_scalar)
             var->data.driver_location = var->data.location;
       }
       break;
+   case MESA_SHADER_TESS_CTRL: {
+      struct remap_patch_urb_offsets_state state;
+      brw_compute_tess_vue_map(&state.vue_map, nir->info.outputs_written,
+                               nir->info.patch_outputs_written);
+
+      nir_foreach_variable(var, &nir->outputs) {
+         var->data.driver_location = var->data.location;
+      }
+
+      nir_lower_io(nir, nir_var_shader_out, type_size_vec4);
+
+      /* This pass needs actual constants */
+      nir_opt_constant_folding(nir);
+
+      add_const_offset_to_base(nir, nir_var_shader_out);
+
+      nir_foreach_function(nir, function) {
+         if (function->impl) {
+            nir_builder_init(&state.b, function->impl);
+            nir_foreach_block(function->impl, remap_patch_urb_offsets, &state);
+         }
+      }
+      break;
+   }
    case MESA_SHADER_FRAGMENT:
       nir_assign_var_locations(&nir->outputs, &nir->num_outputs,
                                type_size_scalar);
@@ -171,40 +383,41 @@ brw_nir_lower_outputs(nir_shader *nir, bool is_scalar)
    }
 }
 
-static bool
-should_clone_nir()
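+/* Uniform locations are assigned in bytes: a scalar slot occupies 4
+ * bytes and a vec4 slot 16.
+ */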
+static int
+type_size_scalar_bytes(const struct glsl_type *type)
 {
-   static int should_clone = -1;
-   if (should_clone < 0)
-      should_clone = brw_env_var_as_boolean("NIR_TEST_CLONE", false);
+   return type_size_scalar(type) * 4;
+}
+
+static int
+type_size_vec4_bytes(const struct glsl_type *type)
+{
+   return type_size_vec4(type) * 16;
+}
 
-   return should_clone;
+static void
+brw_nir_lower_uniforms(nir_shader *nir, bool is_scalar)
+{
+   if (is_scalar) {
+      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
+                               type_size_scalar_bytes);
+      nir_lower_io(nir, nir_var_uniform, type_size_scalar_bytes);
+   } else {
+      nir_assign_var_locations(&nir->uniforms, &nir->num_uniforms,
+                               type_size_vec4_bytes);
+      nir_lower_io(nir, nir_var_uniform, type_size_vec4_bytes);
+   }
 }
 
-#define _OPT(do_pass) (({                                            \
-   bool this_progress = true;                                        \
-   do_pass                                                           \
-   nir_validate_shader(nir);                                         \
-   if (should_clone_nir()) {                                         \
-      nir_shader *clone = nir_shader_clone(ralloc_parent(nir), nir); \
-      ralloc_free(nir);                                              \
-      nir = clone;                                                   \
-   }                                                                 \
-   this_progress;                                                    \
-}))
-
-#define OPT(pass, ...) _OPT(                   \
-   nir_metadata_set_validation_flag(nir);      \
-   this_progress = pass(nir ,##__VA_ARGS__);   \
-   if (this_progress) {                        \
-      progress = true;                         \
-      nir_metadata_check_validation_flag(nir); \
-   }                                           \
-)
-
-#define OPT_V(pass, ...) _OPT( \
-   pass(nir, ##__VA_ARGS__);   \
-)
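+/* OPT wraps NIR_PASS, tracking whether the pass made progress; OPT_V
+ * wraps NIR_PASS_V for passes that do not report progress.  NIR_PASS
+ * now provides the validation and NIR_TEST_CLONE handling that these
+ * macros used to open-code.
+ */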
+#define OPT(pass, ...) ({                                  \
+   bool this_progress = false;                             \
+   NIR_PASS(this_progress, nir, pass, ##__VA_ARGS__);      \
+   if (this_progress)                                      \
+      progress = true;                                     \
+   this_progress;                                          \
+})
+
+#define OPT_V(pass, ...) NIR_PASS_V(nir, pass, ##__VA_ARGS__)
 
 static nir_shader *
 nir_optimize(nir_shader *nir, bool is_scalar)
@@ -269,6 +482,10 @@ brw_preprocess_nir(nir_shader *nir, bool is_scalar)
 
    nir = nir_optimize(nir, is_scalar);
 
+   if (is_scalar) {
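+      /* Split vector load_const instructions into scalar ones so that
+       * scalar optimizations, CSE in particular, can see the individual
+       * components.
+       */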
+      OPT_V(nir_lower_load_const_to_scalar);
+   }
+
    /* Lower a bunch of stuff */
    OPT_V(nir_lower_var_copies);
 
@@ -280,39 +497,19 @@ brw_preprocess_nir(nir_shader *nir, bool is_scalar)
    return nir;
 }
 
-/* Lowers inputs, outputs, uniforms, and samplers for i965
- *
- * This function does all of the standard lowering prior to post-processing.
- * The lowering done is highly gen, stage, and backend-specific.  The
- * shader_prog parameter is optional and is used only for lowering sampler
- * derefs and atomics for GLSL shaders.
- */
+/** Lower input and output loads and stores for i965. */
 nir_shader *
-brw_lower_nir(nir_shader *nir,
-              const struct brw_device_info *devinfo,
-              const struct gl_shader_program *shader_prog,
-              bool is_scalar)
+brw_nir_lower_io(nir_shader *nir,
+                 const struct brw_device_info *devinfo,
+                 bool is_scalar)
 {
    bool progress; /* Written by OPT and OPT_V */
    (void)progress;
 
    OPT_V(brw_nir_lower_inputs, devinfo, is_scalar);
-   OPT_V(brw_nir_lower_outputs, is_scalar);
-   nir_assign_var_locations(&nir->uniforms,
-                            &nir->num_uniforms,
-                            is_scalar ? type_size_scalar : type_size_vec4);
+   OPT_V(brw_nir_lower_outputs, devinfo, is_scalar);
    OPT_V(nir_lower_io, nir_var_all, is_scalar ? type_size_scalar : type_size_vec4);
 
-   if (shader_prog) {
-      OPT_V(nir_lower_samplers, shader_prog);
-   }
-
-   OPT(nir_lower_system_values);
-
-   if (shader_prog) {
-      OPT_V(nir_lower_atomics, shader_prog);
-   }
-
    return nir_optimize(nir, is_scalar);
 }
 
@@ -349,9 +546,9 @@ brw_postprocess_nir(nir_shader *nir,
 
    if (unlikely(debug_enabled)) {
       /* Re-index SSA defs so we print more sensible numbers. */
-      nir_foreach_overload(nir, overload) {
-         if (overload->impl)
-            nir_index_ssa_defs(overload->impl);
+      nir_foreach_function(nir, function) {
+         if (function->impl)
+            nir_index_ssa_defs(function->impl);
       }
 
       fprintf(stderr, "NIR (SSA form) for %s shader:\n",
@@ -411,7 +608,19 @@ brw_create_nir(struct brw_context *brw,
    (void)progress;
 
    nir = brw_preprocess_nir(nir, is_scalar);
-   nir = brw_lower_nir(nir, devinfo, shader_prog, is_scalar);
+
+   OPT(nir_lower_system_values);
+   OPT_V(brw_nir_lower_uniforms, is_scalar);
+
+   if (shader_prog) {
+      OPT_V(nir_lower_samplers, shader_prog);
+      OPT_V(nir_lower_atomics, shader_prog);
+   }
+
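+   /* I/O lowering for the tessellation stages happens later, when the
+    * shaders are actually compiled.
+    */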
+   if (nir->stage != MESA_SHADER_TESS_CTRL &&
+       nir->stage != MESA_SHADER_TESS_EVAL) {
+      nir = brw_nir_lower_io(nir, devinfo, is_scalar);
+   }
 
    return nir;
 }