nir/nir_lower_io: Add vec4 support
authorIago Toral Quiroga <itoral@igalia.com>
Thu, 18 Jun 2015 11:52:21 +0000 (13:52 +0200)
committerJason Ekstrand <jason.ekstrand@intel.com>
Mon, 3 Aug 2015 16:40:47 +0000 (09:40 -0700)
The current implementation operates in scalar mode only, so add a vec4
mode where types are padded to vec4 sizes.

This will be useful in the i965 driver for its vec4 nir backend
(and possibly other drivers that have vec4-based shaders).
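
As a rough illustration of the two counting modes (a standalone toy sketch,
not the Mesa glsl_type helpers; every name below is made up): scalar mode
counts individual components, while vec4 mode pads each vector to a full
vec4 slot.

  #include <stdio.h>

  /* Toy stand-in for what type_size_{scalar,vec4} read from a glsl_type:
   * a component count and a (flattened) array length. */
  struct toy_type {
     unsigned components;    /* e.g. 3 for vec3 */
     unsigned array_length;  /* 1 if not an array */
  };

  /* Scalar mode: one location per component (mirrors type_size_scalar). */
  static unsigned
  toy_size_scalar(const struct toy_type *t)
  {
     return t->components * t->array_length;
  }

  /* Vec4 mode: each vector is padded to one vec4 slot (mirrors
   * type_size_vec4). */
  static unsigned
  toy_size_vec4(const struct toy_type *t)
  {
     return 1 * t->array_length;
  }

  int
  main(void)
  {
     struct toy_type vec3 = { 3, 1 };
     struct toy_type vec3_x2 = { 3, 2 };

     printf("vec3:    scalar=%u vec4=%u\n",
            toy_size_scalar(&vec3), toy_size_vec4(&vec3));       /* 3 vs 1 */
     printf("vec3[2]: scalar=%u vec4=%u\n",
            toy_size_scalar(&vec3_x2), toy_size_vec4(&vec3_x2)); /* 6 vs 2 */
     return 0;
  }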

Reviewed-by: Jason Ekstrand <jason.ekstrand@intel.com>
src/glsl/nir/nir.h
src/glsl/nir/nir_lower_io.c
src/mesa/drivers/dri/i965/brw_nir.c

index 6c954c0ef7c16950daa2372f4945fc7419871f8a..d9daaa2e7e2e19cc6d62e549e98eb86d38a3bca4 100644 (file)
@@ -1641,14 +1641,16 @@ void nir_lower_global_vars_to_local(nir_shader *shader);
 
 void nir_lower_locals_to_regs(nir_shader *shader);
 
-void nir_assign_var_locations_scalar(struct exec_list *var_list,
-                                     unsigned *size);
-void nir_assign_var_locations_scalar_direct_first(nir_shader *shader,
-                                                  struct exec_list *var_list,
-                                                  unsigned *direct_size,
-                                                  unsigned *size);
-
-void nir_lower_io(nir_shader *shader);
+void nir_assign_var_locations(struct exec_list *var_list,
+                              unsigned *size,
+                              bool is_scalar);
+void nir_assign_var_locations_direct_first(nir_shader *shader,
+                                           struct exec_list *var_list,
+                                           unsigned *direct_size,
+                                           unsigned *size,
+                                           bool is_scalar);
+
+void nir_lower_io(nir_shader *shader, bool is_scalar);
 
 void nir_lower_vars_to_ssa(nir_shader *shader);
 
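
A caller selects the mode by passing is_scalar through the renamed entry
points. A minimal sketch of what a vec4 backend would do, mirroring the
brw_nir.c hunk further down (the wrapper function name is hypothetical; the
fields and calls are the ones changed by this patch):

  /* Hypothetical helper: drive the renamed passes for a vec4 backend. */
  static void
  lower_io_for_vec4_backend(nir_shader *nir)
  {
     const bool is_scalar = false;  /* vec4 backend: pad types to vec4 slots */

     nir_assign_var_locations_direct_first(nir, &nir->uniforms,
                                           &nir->num_direct_uniforms,
                                           &nir->num_uniforms,
                                           is_scalar);
     nir_assign_var_locations(&nir->inputs, &nir->num_inputs, is_scalar);
     nir_assign_var_locations(&nir->outputs, &nir->num_outputs, is_scalar);

     nir_lower_io(nir, is_scalar);
  }
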
index f6e5e2beda4396990a494566be9d151f36a40574..3c179299bd976ce172db3d7d44c1f0e8330f516f 100644 (file)
 /*
  * This lowering pass converts references to input/output variables with
  * loads/stores to actual input/output intrinsics.
- *
- * NOTE: This pass really only works for scalar backends at the moment due
- * to the way it packes the input/output data.
  */
 
 #include "nir.h"
 
 struct lower_io_state {
    void *mem_ctx;
+   bool is_scalar;
 };
 
+static int
+type_size_vec4(const struct glsl_type *type)
+{
+   unsigned int i;
+   int size;
+
+   switch (glsl_get_base_type(type)) {
+   case GLSL_TYPE_UINT:
+   case GLSL_TYPE_INT:
+   case GLSL_TYPE_FLOAT:
+   case GLSL_TYPE_BOOL:
+      if (glsl_type_is_matrix(type)) {
+         return glsl_get_matrix_columns(type);
+      } else {
+         return 1;
+      }
+   case GLSL_TYPE_ARRAY:
+      return type_size_vec4(glsl_get_array_element(type)) * glsl_get_length(type);
+   case GLSL_TYPE_STRUCT:
+      size = 0;
+      for (i = 0; i < glsl_get_length(type); i++) {
+         size += type_size_vec4(glsl_get_struct_field(type, i));
+      }
+      return size;
+   case GLSL_TYPE_SAMPLER:
+      return 0;
+   case GLSL_TYPE_ATOMIC_UINT:
+      return 0;
+   case GLSL_TYPE_IMAGE:
+   case GLSL_TYPE_VOID:
+   case GLSL_TYPE_DOUBLE:
+   case GLSL_TYPE_ERROR:
+   case GLSL_TYPE_INTERFACE:
+      unreachable("not reached");
+   }
+
+   return 0;
+}
+
 static unsigned
-type_size(const struct glsl_type *type)
+type_size_scalar(const struct glsl_type *type)
 {
    unsigned int size, i;
 
@@ -52,11 +89,11 @@ type_size(const struct glsl_type *type)
    case GLSL_TYPE_BOOL:
       return glsl_get_components(type);
    case GLSL_TYPE_ARRAY:
-      return type_size(glsl_get_array_element(type)) * glsl_get_length(type);
+      return type_size_scalar(glsl_get_array_element(type)) * glsl_get_length(type);
    case GLSL_TYPE_STRUCT:
       size = 0;
       for (i = 0; i < glsl_get_length(type); i++) {
-         size += type_size(glsl_get_struct_field(type, i));
+         size += type_size_scalar(glsl_get_struct_field(type, i));
       }
       return size;
    case GLSL_TYPE_SUBROUTINE:
@@ -78,8 +115,17 @@ type_size(const struct glsl_type *type)
    return 0;
 }
 
+static unsigned
+type_size(const struct glsl_type *type, bool is_scalar)
+{
+   if (is_scalar)
+      return type_size_scalar(type);
+   else
+      return type_size_vec4(type);
+}
+
 void
-nir_assign_var_locations_scalar(struct exec_list *var_list, unsigned *size)
+nir_assign_var_locations(struct exec_list *var_list, unsigned *size, bool is_scalar)
 {
    unsigned location = 0;
 
@@ -93,7 +139,7 @@ nir_assign_var_locations_scalar(struct exec_list *var_list, unsigned *size)
          continue;
 
       var->data.driver_location = location;
-      location += type_size(var->type);
+      location += type_size(var->type, is_scalar);
    }
 
    *size = location;
@@ -139,10 +185,11 @@ mark_indirect_uses_block(nir_block *block, void *void_state)
  * assigns locations to variables that are used indirectly.
  */
 void
-nir_assign_var_locations_scalar_direct_first(nir_shader *shader,
-                                             struct exec_list *var_list,
-                                             unsigned *direct_size,
-                                             unsigned *size)
+nir_assign_var_locations_direct_first(nir_shader *shader,
+                                      struct exec_list *var_list,
+                                      unsigned *direct_size,
+                                      unsigned *size,
+                                      bool is_scalar)
 {
    struct set *indirect_set = _mesa_set_create(NULL, _mesa_hash_pointer,
                                                _mesa_key_pointer_equal);
@@ -164,7 +211,7 @@ nir_assign_var_locations_scalar_direct_first(nir_shader *shader,
          continue;
 
       var->data.driver_location = location;
-      location += type_size(var->type);
+      location += type_size(var->type, is_scalar);
    }
 
    *direct_size = location;
@@ -178,7 +225,7 @@ nir_assign_var_locations_scalar_direct_first(nir_shader *shader,
          continue;
 
       var->data.driver_location = location;
-      location += type_size(var->type);
+      location += type_size(var->type, is_scalar);
    }
 
    *size = location;
@@ -200,7 +247,7 @@ get_io_offset(nir_deref_var *deref, nir_instr *instr, nir_src *indirect,
 
       if (tail->deref_type == nir_deref_type_array) {
          nir_deref_array *deref_array = nir_deref_as_array(tail);
-         unsigned size = type_size(tail->type);
+         unsigned size = type_size(tail->type, state->is_scalar);
 
          base_offset += size * deref_array->base_offset;
 
@@ -242,7 +289,8 @@ get_io_offset(nir_deref_var *deref, nir_instr *instr, nir_src *indirect,
          nir_deref_struct *deref_struct = nir_deref_as_struct(tail);
 
          for (unsigned i = 0; i < deref_struct->index; i++)
-            base_offset += type_size(glsl_get_struct_field(parent_type, i));
+            base_offset += type_size(glsl_get_struct_field(parent_type, i),
+                                     state->is_scalar);
       }
    }
 
@@ -355,11 +403,12 @@ nir_lower_io_block(nir_block *block, void *void_state)
 }
 
 static void
-nir_lower_io_impl(nir_function_impl *impl)
+nir_lower_io_impl(nir_function_impl *impl, bool is_scalar)
 {
    struct lower_io_state state;
 
    state.mem_ctx = ralloc_parent(impl);
+   state.is_scalar = is_scalar;
 
    nir_foreach_block(impl, nir_lower_io_block, &state);
 
@@ -368,10 +417,10 @@ nir_lower_io_impl(nir_function_impl *impl)
 }
 
 void
-nir_lower_io(nir_shader *shader)
+nir_lower_io(nir_shader *shader, bool is_scalar)
 {
    nir_foreach_overload(shader, overload) {
       if (overload->impl)
-         nir_lower_io_impl(overload->impl);
+         nir_lower_io_impl(overload->impl, is_scalar);
    }
 }
index 4aa893aff50fbff82114b10ff43aa68b65cb633a..8700cb71ad409b1e5a812c138ff80ece69ef840a 100644 (file)
@@ -101,13 +101,15 @@ brw_create_nir(struct brw_context *brw,
    /* Get rid of split copies */
    nir_optimize(nir);
 
-   nir_assign_var_locations_scalar_direct_first(nir, &nir->uniforms,
-                                                &nir->num_direct_uniforms,
-                                                &nir->num_uniforms);
-   nir_assign_var_locations_scalar(&nir->inputs, &nir->num_inputs);
-   nir_assign_var_locations_scalar(&nir->outputs, &nir->num_outputs);
+   nir_assign_var_locations_direct_first(nir, &nir->uniforms,
+                                         &nir->num_direct_uniforms,
+                                         &nir->num_uniforms,
+                                         is_scalar);
+   nir_assign_var_locations(&nir->inputs, &nir->num_inputs, is_scalar);
+   nir_assign_var_locations(&nir->outputs, &nir->num_outputs, is_scalar);
+
+   nir_lower_io(nir, is_scalar);
 
-   nir_lower_io(nir);
    nir_validate_shader(nir);
 
    nir_remove_dead_variables(nir);