spirv: Drop lower_workgroup_access_to_offsets
author Caio Marcelo de Oliveira Filho <caio.oliveira@intel.com>
Thu, 8 Aug 2019 17:00:45 +0000 (10:00 -0700)
committer Caio Marcelo de Oliveira Filho <caio.oliveira@intel.com>
Sun, 11 Aug 2019 05:15:35 +0000 (22:15 -0700)
Intel drivers are not using this anymore, and turnip still doesn't have
Compute Shaders, so dropping this option makes no difference there.
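
For reference, the deref-based path this change keeps looks roughly like
the following on the driver side.  This is a minimal sketch, not code from
this commit: the pass order and the size/align callback used here
(glsl_get_natural_size_align_bytes) are illustrative assumptions, and each
driver lowers shared memory as it sees fit.

    /* Sketch: take Workgroup access as derefs from spirv_to_nir and lower
     * it to explicit offsets later in NIR.  The size/align callback is an
     * illustrative choice, not something this commit mandates. */
    const struct spirv_to_nir_options spirv_options = {
       .lower_ubo_ssbo_access_to_offsets = true,
       /* no lower_workgroup_access_to_offsets anymore */
    };

    /* ... run spirv_to_nir() with &spirv_options, then on the resulting
     * nir_shader *nir: */
    NIR_PASS_V(nir, nir_lower_vars_to_explicit_types, nir_var_mem_shared,
               glsl_get_natural_size_align_bytes);
    NIR_PASS_V(nir, nir_lower_explicit_io, nir_var_mem_shared,
               nir_address_format_32bit_offset);

After nir_lower_explicit_io, Workgroup loads and stores reach the backend
as load_shared/store_shared with explicit offsets, which is the same end
result the dropped option used to produce inside spirv_to_nir itself.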

Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Acked-by: Rob Clark <robdclark@chromium.org>
src/compiler/spirv/nir_spirv.h
src/compiler/spirv/spirv_to_nir.c
src/compiler/spirv/vtn_variables.c
src/freedreno/vulkan/tu_shader.c
src/gallium/drivers/freedreno/ir3/ir3_cmdline.c

diff --git a/src/compiler/spirv/nir_spirv.h b/src/compiler/spirv/nir_spirv.h
index d427a9a1973aebd45a8aea23608a0e21ff7a5724..9a54845d7f1cc9f2a4e8b259047f2f7eae861fee 100644
@@ -59,14 +59,6 @@ enum nir_spirv_execution_environment {
 struct spirv_to_nir_options {
    enum nir_spirv_execution_environment environment;
 
-   /* Whether or not to lower all workgroup variable access to offsets
-    * up-front.  This means you will get _shared intrinsics instead of _var
-    * for workgroup data access.
-    *
-    * This is currently required for full variable pointers support.
-    */
-   bool lower_workgroup_access_to_offsets;
-
    /* Whether or not to lower all UBO/SSBO access to offsets up-front. */
    bool lower_ubo_ssbo_access_to_offsets;
 
diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c
index 7a2e30707ce7248fbe1df82ec2b3625d6ace8425..08649be080c73502886cfb431d62b56e96e7d5d6 100644
@@ -1086,65 +1086,6 @@ translate_image_format(struct vtn_builder *b, SpvImageFormat format)
    }
 }
 
-static struct vtn_type *
-vtn_type_layout_std430(struct vtn_builder *b, struct vtn_type *type,
-                       uint32_t *size_out, uint32_t *align_out)
-{
-   switch (type->base_type) {
-   case vtn_base_type_scalar: {
-      uint32_t comp_size = glsl_type_is_boolean(type->type)
-         ? 4 : glsl_get_bit_size(type->type) / 8;
-      *size_out = comp_size;
-      *align_out = comp_size;
-      return type;
-   }
-
-   case vtn_base_type_vector: {
-      uint32_t comp_size = glsl_type_is_boolean(type->type)
-         ? 4 : glsl_get_bit_size(type->type) / 8;
-      unsigned align_comps = type->length == 3 ? 4 : type->length;
-      *size_out = comp_size * type->length,
-      *align_out = comp_size * align_comps;
-      return type;
-   }
-
-   case vtn_base_type_matrix:
-   case vtn_base_type_array: {
-      /* We're going to add an array stride */
-      type = vtn_type_copy(b, type);
-      uint32_t elem_size, elem_align;
-      type->array_element = vtn_type_layout_std430(b, type->array_element,
-                                                   &elem_size, &elem_align);
-      type->stride = vtn_align_u32(elem_size, elem_align);
-      *size_out = type->stride * type->length;
-      *align_out = elem_align;
-      return type;
-   }
-
-   case vtn_base_type_struct: {
-      /* We're going to add member offsets */
-      type = vtn_type_copy(b, type);
-      uint32_t offset = 0;
-      uint32_t align = 0;
-      for (unsigned i = 0; i < type->length; i++) {
-         uint32_t mem_size, mem_align;
-         type->members[i] = vtn_type_layout_std430(b, type->members[i],
-                                                   &mem_size, &mem_align);
-         offset = vtn_align_u32(offset, mem_align);
-         type->offsets[i] = offset;
-         offset += mem_size;
-         align = MAX2(align, mem_align);
-      }
-      *size_out = offset;
-      *align_out = align;
-      return type;
-   }
-
-   default:
-      unreachable("Invalid SPIR-V type for std430");
-   }
-}
-
 static void
 vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                 const uint32_t *w, unsigned count)
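
The helper removed above encoded the std430 layout rules for Workgroup
types.  As a worked illustration of that arithmetic (plain standalone C,
not Mesa code and not part of this change):

   #include <assert.h>
   #include <stdio.h>

   /* Mirror of the rules vtn_type_layout_std430() applied: scalars and
    * vectors align to their component size (a vec3 is padded to vec4
    * alignment), an array stride is align(elem_size, elem_align), and
    * struct members sit at offsets rounded up to the member alignment. */
   static unsigned align_u32(unsigned v, unsigned a)
   {
      return (v + a - 1) & ~(a - 1);
   }

   int main(void)
   {
      unsigned comp_size = 4;                      /* 32-bit float */

      /* vec3: size 12, but aligned like a vec4 (16 bytes). */
      unsigned vec3_size = comp_size * 3;
      unsigned vec3_align = comp_size * 4;         /* length == 3 -> 4 comps */

      /* float[8]: stride = align(4, 4) = 4, total size = 32. */
      unsigned arr_stride = align_u32(comp_size, comp_size);
      unsigned arr_size = arr_stride * 8;

      /* struct { float f; vec3 v; }: v lands at the next 16-byte offset. */
      unsigned off_f = 0;
      unsigned off_v = align_u32(off_f + comp_size, vec3_align);

      printf("vec3: size=%u align=%u\n", vec3_size, vec3_align);
      printf("float[8]: stride=%u size=%u\n", arr_stride, arr_size);
      printf("struct: f at %u, v at %u\n", off_f, off_v);
      assert(off_v == 16);
      return 0;
   }
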
@@ -1416,18 +1357,6 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
             default:
                break;
             }
-         } else if (storage_class == SpvStorageClassWorkgroup &&
-                    b->options->lower_workgroup_access_to_offsets) {
-            /* Lay out Workgroup types so it can be lowered to offsets during
-             * SPIR-V to NIR conversion.  When not lowering to offsets, the
-             * stride will be calculated by the driver.
-             */
-            uint32_t size, align;
-            val->type->deref = vtn_type_layout_std430(b, val->type->deref,
-                                                      &size, &align);
-            val->type->length = size;
-            val->type->align = align;
-            val->type->stride = vtn_align_u32(size, align);
          }
       }
       break;
@@ -2696,33 +2625,6 @@ get_uniform_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
    }
 }
 
-static nir_intrinsic_op
-get_shared_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
-{
-   switch (opcode) {
-   case SpvOpAtomicLoad:         return nir_intrinsic_load_shared;
-   case SpvOpAtomicStore:        return nir_intrinsic_store_shared;
-#define OP(S, N) case SpvOp##S: return nir_intrinsic_shared_##N;
-   OP(AtomicExchange,            atomic_exchange)
-   OP(AtomicCompareExchange,     atomic_comp_swap)
-   OP(AtomicCompareExchangeWeak, atomic_comp_swap)
-   OP(AtomicIIncrement,          atomic_add)
-   OP(AtomicIDecrement,          atomic_add)
-   OP(AtomicIAdd,                atomic_add)
-   OP(AtomicISub,                atomic_add)
-   OP(AtomicSMin,                atomic_imin)
-   OP(AtomicUMin,                atomic_umin)
-   OP(AtomicSMax,                atomic_imax)
-   OP(AtomicUMax,                atomic_umax)
-   OP(AtomicAnd,                 atomic_and)
-   OP(AtomicOr,                  atomic_or)
-   OP(AtomicXor,                 atomic_xor)
-#undef OP
-   default:
-      vtn_fail_with_opcode("Invalid shared atomic", opcode);
-   }
-}
-
 static nir_intrinsic_op
 get_deref_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
 {
@@ -2842,15 +2744,9 @@ vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode,
       nir_ssa_def *offset, *index;
       offset = vtn_pointer_to_offset(b, ptr, &index);
 
-      nir_intrinsic_op op;
-      if (ptr->mode == vtn_variable_mode_ssbo) {
-         op = get_ssbo_nir_atomic_op(b, opcode);
-      } else {
-         vtn_assert(ptr->mode == vtn_variable_mode_workgroup &&
-                    b->options->lower_workgroup_access_to_offsets);
-         op = get_shared_nir_atomic_op(b, opcode);
-      }
+      assert(ptr->mode == vtn_variable_mode_ssbo);
 
+      nir_intrinsic_op op  = get_ssbo_nir_atomic_op(b, opcode);
       atomic = nir_intrinsic_instr_create(b->nb.shader, op);
 
       int src = 0;
diff --git a/src/compiler/spirv/vtn_variables.c b/src/compiler/spirv/vtn_variables.c
index 843801cae5a1565c83558092beddfcb00906d58b..52802ba5162aa70c01531060b6fbcf95817a3fa4 100644
@@ -97,9 +97,7 @@ vtn_mode_uses_ssa_offset(struct vtn_builder *b,
    return ((mode == vtn_variable_mode_ubo ||
             mode == vtn_variable_mode_ssbo) &&
            b->options->lower_ubo_ssbo_access_to_offsets) ||
-          mode == vtn_variable_mode_push_constant ||
-          (mode == vtn_variable_mode_workgroup &&
-           b->options->lower_workgroup_access_to_offsets);
+          mode == vtn_variable_mode_push_constant;
 }
 
 static bool
@@ -109,9 +107,7 @@ vtn_pointer_is_external_block(struct vtn_builder *b,
    return ptr->mode == vtn_variable_mode_ssbo ||
           ptr->mode == vtn_variable_mode_ubo ||
           ptr->mode == vtn_variable_mode_phys_ssbo ||
-          ptr->mode == vtn_variable_mode_push_constant ||
-          (ptr->mode == vtn_variable_mode_workgroup &&
-           b->options->lower_workgroup_access_to_offsets);
+          ptr->mode == vtn_variable_mode_push_constant;
 }
 
 static nir_ssa_def *
@@ -1752,9 +1748,7 @@ var_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
           */
          vtn_assert(vtn_var->mode == vtn_variable_mode_ubo ||
                     vtn_var->mode == vtn_variable_mode_ssbo ||
-                    vtn_var->mode == vtn_variable_mode_push_constant ||
-                    (vtn_var->mode == vtn_variable_mode_workgroup &&
-                     b->options->lower_workgroup_access_to_offsets));
+                    vtn_var->mode == vtn_variable_mode_push_constant);
       }
    }
 }
@@ -2211,19 +2205,15 @@ vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
       break;
 
    case vtn_variable_mode_workgroup:
-      if (b->options->lower_workgroup_access_to_offsets) {
-         var->shared_location = -1;
-      } else {
-         /* Create the variable normally */
-         var->var = rzalloc(b->shader, nir_variable);
-         var->var->name = ralloc_strdup(var->var, val->name);
-         /* Workgroup variables don't have any explicit layout but some
-          * layouts may have leaked through due to type deduplication in the
-          * SPIR-V.
-          */
-         var->var->type = var->type->type;
-         var->var->data.mode = nir_var_mem_shared;
-      }
+      /* Create the variable normally */
+      var->var = rzalloc(b->shader, nir_variable);
+      var->var->name = ralloc_strdup(var->var, val->name);
+      /* Workgroup variables don't have any explicit layout but some
+       * layouts may have leaked through due to type deduplication in the
+       * SPIR-V.
+       */
+      var->var->type = var->type->type;
+      var->var->data.mode = nir_var_mem_shared;
       break;
 
    case vtn_variable_mode_input:
diff --git a/src/freedreno/vulkan/tu_shader.c b/src/freedreno/vulkan/tu_shader.c
index d87aa1dbf7129f7d5e7d75ce5450624ee6690857..f6e13d7c42fa5550ecd66c5dffc63232c013afb5 100644
@@ -38,7 +38,6 @@ tu_spirv_to_nir(struct ir3_compiler *compiler,
 {
    /* TODO these are made-up */
    const struct spirv_to_nir_options spirv_options = {
-      .lower_workgroup_access_to_offsets = true,
       .lower_ubo_ssbo_access_to_offsets = true,
       .caps = { false },
    };
diff --git a/src/gallium/drivers/freedreno/ir3/ir3_cmdline.c b/src/gallium/drivers/freedreno/ir3/ir3_cmdline.c
index f8acc480d69ac9f06862928bab6849d624801d7c..246bafdf490d32455437a3d140df54bafcbc887d 100644
@@ -236,7 +236,6 @@ load_spirv(const char *filename, const char *entry, gl_shader_stage stage)
                        .int64 = true,
                        .variable_pointers = true,
                },
-               .lower_workgroup_access_to_offsets = true,
                .lower_ubo_ssbo_access_to_offsets = true,
                .debug = {
                        .func = debug_func,