Merge remote-tracking branch 'mesa-public/master' into vulkan
[mesa.git] / src / glsl / nir / nir_lower_io.c
index 4c59298ecb7ccf332f29679621187b6ac93258bf..6a4494d5fd2da8b50d10352c69d5ce42f699efa0 100644 (file)
 /*
  * This lowering pass converts loads and stores of input/output variables
  * into the corresponding input/output intrinsics.
- *
- * NOTE: This pass really only works for scalar backends at the moment due
- * to the way it packs the input/output data.
  */
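
To illustrate the transformation (a sketch; the printed NIR syntax is
approximate and the driver_location value is hypothetical): a load of an
input variable such as

   vec1 ssa_2 = intrinsic load_var (color[2]) ()

becomes a load_input intrinsic whose constant index folds together the
variable's driver_location (assume 4 here) and the constant part of the
deref offset:

   vec1 ssa_2 = intrinsic load_input () () (6)  /* 4 + 2 * type_size(float) */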
 
 #include "nir.h"
 
 struct lower_io_state {
    void *mem_ctx;
+   bool is_scalar;
 };
 
+static int
+type_size_vec4(const struct glsl_type *type)
+{
+   unsigned int i;
+   int size;
+
+   switch (glsl_get_base_type(type)) {
+   case GLSL_TYPE_UINT:
+   case GLSL_TYPE_INT:
+   case GLSL_TYPE_FLOAT:
+   case GLSL_TYPE_BOOL:
+      if (glsl_type_is_matrix(type)) {
+         return glsl_get_matrix_columns(type);
+      } else {
+         return 1;
+      }
+   case GLSL_TYPE_ARRAY:
+      return type_size_vec4(glsl_get_array_element(type)) * glsl_get_length(type);
+   case GLSL_TYPE_STRUCT:
+      size = 0;
+      for (i = 0; i < glsl_get_length(type); i++) {
+         size += type_size_vec4(glsl_get_struct_field(type, i));
+      }
+      return size;
+   case GLSL_TYPE_SUBROUTINE:
+      return 1;
+   case GLSL_TYPE_SAMPLER:
+      return 0;
+   case GLSL_TYPE_ATOMIC_UINT:
+      return 0;
+   case GLSL_TYPE_IMAGE:
+   case GLSL_TYPE_VOID:
+   case GLSL_TYPE_DOUBLE:
+   case GLSL_TYPE_ERROR:
+   case GLSL_TYPE_INTERFACE:
+      unreachable("not reached");
+   }
+
+   return 0;
+}
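
Some concrete sizes under the vec4 convention (types written in GLSL
notation for brevity): scalars and vectors each occupy a single slot,
matrices one slot per column, and arrays and structs the sum of their parts.

   type_size_vec4(float)     == 1   /* scalar: one vec4 slot     */
   type_size_vec4(vec3)      == 1   /* whole vector in one slot  */
   type_size_vec4(mat4)      == 4   /* one slot per column       */
   type_size_vec4(float[10]) == 10  /* element size times length */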
+
 static unsigned
-type_size(const struct glsl_type *type)
+type_size_scalar(const struct glsl_type *type)
 {
    unsigned int size, i;
 
@@ -52,13 +91,15 @@ type_size(const struct glsl_type *type)
    case GLSL_TYPE_BOOL:
       return glsl_get_components(type);
    case GLSL_TYPE_ARRAY:
-      return type_size(glsl_get_array_element(type)) * glsl_get_length(type);
+      return type_size_scalar(glsl_get_array_element(type)) * glsl_get_length(type);
    case GLSL_TYPE_STRUCT:
       size = 0;
       for (i = 0; i < glsl_get_length(type); i++) {
-         size += type_size(glsl_get_struct_field(type, i));
+         size += type_size_scalar(glsl_get_struct_field(type, i));
       }
       return size;
+   case GLSL_TYPE_SUBROUTINE:
+      return 1;
    case GLSL_TYPE_SAMPLER:
       return 0;
    case GLSL_TYPE_ATOMIC_UINT:
@@ -77,8 +118,17 @@ type_size(const struct glsl_type *type)
    return 0;
 }
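
For comparison, the scalar convention counts individual components, so the
same types size quite differently (GLSL notation again):

   type_size_scalar(float)     == 1
   type_size_scalar(vec3)      == 3   /* glsl_get_components()    */
   type_size_scalar(mat4)      == 16  /* 4 columns * 4 components */
   type_size_scalar(float[10]) == 10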
 
+static unsigned
+type_size(const struct glsl_type *type, bool is_scalar)
+{
+   if (is_scalar)
+      return type_size_scalar(type);
+   else
+      return type_size_vec4(type);
+}
+
 void
-nir_assign_var_locations_scalar(struct exec_list *var_list, unsigned *size)
+nir_assign_var_locations(struct exec_list *var_list, unsigned *size, bool is_scalar)
 {
    unsigned location = 0;
 
@@ -87,11 +137,12 @@ nir_assign_var_locations_scalar(struct exec_list *var_list, unsigned *size)
       * UBOs and SSBOs have their own address spaces, so don't count them
       * towards the number of global uniforms.
        */
-      if (var->data.mode == nir_var_uniform && var->interface_type != NULL)
+      if ((var->data.mode == nir_var_uniform || var->data.mode == nir_var_shader_storage) &&
+          var->interface_type != NULL)
          continue;
 
       var->data.driver_location = location;
-      location += type_size(var->type);
+      location += type_size(var->type, is_scalar);
    }
 
    *size = location;
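
A sketch of a typical call site (the exec_list and count fields are taken
from nir_shader; whether a given driver passes true here is its own choice):

   /* pack all shader inputs using the scalar convention */
   nir_assign_var_locations(&shader->inputs, &shader->num_inputs, true);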
@@ -137,10 +188,11 @@ mark_indirect_uses_block(nir_block *block, void *void_state)
  * assigns locations to variables that are used indirectly.
  */
 void
-nir_assign_var_locations_scalar_direct_first(nir_shader *shader,
-                                             struct exec_list *var_list,
-                                             unsigned *direct_size,
-                                             unsigned *size)
+nir_assign_var_locations_direct_first(nir_shader *shader,
+                                      struct exec_list *var_list,
+                                      unsigned *direct_size,
+                                      unsigned *size,
+                                      bool is_scalar)
 {
    struct set *indirect_set = _mesa_set_create(NULL, _mesa_hash_pointer,
                                                _mesa_key_pointer_equal);
@@ -154,27 +206,29 @@ nir_assign_var_locations_scalar_direct_first(nir_shader *shader,
    unsigned location = 0;
 
    foreach_list_typed(nir_variable, var, node, var_list) {
-      if (var->data.mode == nir_var_uniform && var->interface_type != NULL)
+      if ((var->data.mode == nir_var_uniform || var->data.mode == nir_var_shader_storage) &&
+          var->interface_type != NULL)
          continue;
 
       if (_mesa_set_search(indirect_set, var))
          continue;
 
       var->data.driver_location = location;
-      location += type_size(var->type);
+      location += type_size(var->type, is_scalar);
    }
 
    *direct_size = location;
 
    foreach_list_typed(nir_variable, var, node, var_list) {
-      if (var->data.mode == nir_var_uniform && var->interface_type != NULL)
+      if ((var->data.mode == nir_var_uniform || var->data.mode == nir_var_shader_storage) &&
+          var->interface_type != NULL)
          continue;
 
       if (!_mesa_set_search(indirect_set, var))
          continue;
 
       var->data.driver_location = location;
-      location += type_size(var->type);
+      location += type_size(var->type, is_scalar);
    }
 
    *size = location;
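
The resulting location space, sketched: directly-addressed variables are
packed first, followed by every variable that is the target of an indirect
access.

   /*
    * [0 .............. *direct_size)  variables with only direct access
    * [*direct_size .......... *size)  variables accessed indirectly
    */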
@@ -196,7 +250,7 @@ get_io_offset(nir_deref_var *deref, nir_instr *instr, nir_src *indirect,
 
       if (tail->deref_type == nir_deref_type_array) {
          nir_deref_array *deref_array = nir_deref_as_array(tail);
-         unsigned size = type_size(tail->type);
+         unsigned size = type_size(tail->type, state->is_scalar);
 
          base_offset += size * deref_array->base_offset;
 
@@ -238,7 +292,8 @@ get_io_offset(nir_deref_var *deref, nir_instr *instr, nir_src *indirect,
          nir_deref_struct *deref_struct = nir_deref_as_struct(tail);
 
          for (unsigned i = 0; i < deref_struct->index; i++)
-            base_offset += type_size(glsl_get_struct_field(parent_type, i));
+            base_offset += type_size(glsl_get_struct_field(parent_type, i),
+                                     state->is_scalar);
       }
    }
 
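To make the offset arithmetic concrete, a worked example (hypothetical
variable, scalar mode): given struct { vec2 a; float b[4]; } s[3], the
access s[1].b[2] accumulates into base_offset as

   s[1] : +1 * type_size(struct) = 6   /* vec2 + float[4] = 2 + 4 */
   .b   : +type_size(vec2)       = 2   /* fields before index 1   */
   [2]  : +2 * type_size(float)  = 2
                          total == 10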
@@ -351,11 +406,12 @@ nir_lower_io_block(nir_block *block, void *void_state)
 }
 
 static void
-nir_lower_io_impl(nir_function_impl *impl)
+nir_lower_io_impl(nir_function_impl *impl, bool is_scalar)
 {
    struct lower_io_state state;
 
    state.mem_ctx = ralloc_parent(impl);
+   state.is_scalar = is_scalar;
 
    nir_foreach_block(impl, nir_lower_io_block, &state);
 
@@ -364,10 +420,10 @@ nir_lower_io_impl(nir_function_impl *impl)
 }
 
 void
-nir_lower_io(nir_shader *shader)
+nir_lower_io(nir_shader *shader, bool is_scalar)
 {
    nir_foreach_overload(shader, overload) {
       if (overload->impl)
-         nir_lower_io_impl(overload->impl);
+         nir_lower_io_impl(overload->impl, is_scalar);
    }
 }
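
Finally, a minimal usage sketch of the new entry point (which value a given
driver passes is an assumption, not something this patch dictates):

   nir_lower_io(shader, true);   /* scalar backend */
   nir_lower_io(shader, false);  /* vec4 backend   */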