spirv/nir: add atomic counter support on vtn_handle_ssbo_or_shared_atomic
diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c
index b7c07064d9d238766ec4f5bd58e38715321792d7..270f263d0471c5017d9cb75ca330668d1b3c6f72 100644
--- a/src/compiler/spirv/spirv_to_nir.c
+++ b/src/compiler/spirv/spirv_to_nir.c
@@ -29,6 +29,7 @@
 #include "nir/nir_vla.h"
 #include "nir/nir_control_flow.h"
 #include "nir/nir_constant_expressions.h"
+#include "nir/nir_deref.h"
 #include "spirv_info.h"
 
 #include <stdio.h>
@@ -129,6 +130,18 @@ _vtn_warn(struct vtn_builder *b, const char *file, unsigned line,
    va_end(args);
 }
 
+void
+_vtn_err(struct vtn_builder *b, const char *file, unsigned line,
+         const char *fmt, ...)
+{
+   va_list args;
+
+   va_start(args, fmt);
+   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V ERROR:\n",
+               file, line, fmt, args);
+   va_end(args);
+}
+
 void
 _vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
           const char *fmt, ...)
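
The vtn_err() spelling used later in this patch implies a wrapper macro
alongside the existing vtn_warn()/vtn_fail() ones in vtn_private.h; a
minimal sketch, assuming it follows the same pattern (not part of this
diff):

    /* Hypothetical companion macro, modeled on vtn_warn()/vtn_fail():
     * it captures the call site so the log points at the right line.
     */
    #define vtn_err(...) _vtn_err(b, __FILE__, __LINE__, __VA_ARGS__)
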
@@ -1169,6 +1182,13 @@ vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
          val->type->type = glsl_vector_type(GLSL_TYPE_UINT, 2);
       }
 
+      if (storage_class == SpvStorageClassPushConstant) {
+         /* These can actually be stored to nir_variables and used as SSA
+          * values so they need a real glsl_type.
+          */
+         val->type->type = glsl_uint_type();
+      }
+
       if (storage_class == SpvStorageClassWorkgroup &&
           b->options->lower_workgroup_access_to_offsets) {
          uint32_t size, align;
@@ -1775,39 +1795,54 @@ vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
    vtn_callee->referenced = true;
 
    nir_call_instr *call = nir_call_instr_create(b->nb.shader, callee);
-   for (unsigned i = 0; i < call->num_params; i++) {
-      unsigned arg_id = w[4 + i];
-      struct vtn_value *arg = vtn_untyped_value(b, arg_id);
-      if (arg->value_type == vtn_value_type_pointer &&
-          arg->pointer->ptr_type->type == NULL) {
-         nir_deref_var *d = vtn_pointer_to_deref(b, arg->pointer);
-         call->params[i] = nir_deref_var_clone(d, call);
-      } else {
-         struct vtn_ssa_value *arg_ssa = vtn_ssa_value(b, arg_id);
 
-         /* Make a temporary to store the argument in */
-         nir_variable *tmp =
-            nir_local_variable_create(b->nb.impl, arg_ssa->type, "arg_tmp");
-         call->params[i] = nir_deref_var_create(call, tmp);
+   unsigned param_idx = 0;
 
-         vtn_local_store(b, arg_ssa, call->params[i]);
-      }
+   nir_deref_instr *ret_deref = NULL;
+   struct vtn_type *ret_type = vtn_callee->type->return_type;
+   if (ret_type->base_type != vtn_base_type_void) {
+      nir_variable *ret_tmp =
+         nir_local_variable_create(b->nb.impl, ret_type->type, "return_tmp");
+      ret_deref = nir_build_deref_var(&b->nb, ret_tmp);
+      call->params[param_idx++] = nir_src_for_ssa(&ret_deref->dest.ssa);
    }
 
-   nir_variable *out_tmp = NULL;
-   vtn_assert(res_type->type == callee->return_type);
-   if (!glsl_type_is_void(callee->return_type)) {
-      out_tmp = nir_local_variable_create(b->nb.impl, callee->return_type,
-                                          "out_tmp");
-      call->return_deref = nir_deref_var_create(call, out_tmp);
+   for (unsigned i = 0; i < vtn_callee->type->length; i++) {
+      struct vtn_type *arg_type = vtn_callee->type->params[i];
+      unsigned arg_id = w[4 + i];
+
+      if (arg_type->base_type == vtn_base_type_sampled_image) {
+         struct vtn_sampled_image *sampled_image =
+            vtn_value(b, arg_id, vtn_value_type_sampled_image)->sampled_image;
+
+         call->params[param_idx++] =
+            nir_src_for_ssa(&sampled_image->image->deref->dest.ssa);
+         call->params[param_idx++] =
+            nir_src_for_ssa(&sampled_image->sampler->deref->dest.ssa);
+      } else if (arg_type->base_type == vtn_base_type_pointer ||
+                 arg_type->base_type == vtn_base_type_image ||
+                 arg_type->base_type == vtn_base_type_sampler) {
+         struct vtn_pointer *pointer =
+            vtn_value(b, arg_id, vtn_value_type_pointer)->pointer;
+         call->params[param_idx++] =
+            nir_src_for_ssa(vtn_pointer_to_ssa(b, pointer));
+      } else {
+         /* This is a regular SSA value and we need a temporary */
+         nir_variable *tmp =
+            nir_local_variable_create(b->nb.impl, arg_type->type, "arg_tmp");
+         nir_deref_instr *tmp_deref = nir_build_deref_var(&b->nb, tmp);
+         vtn_local_store(b, vtn_ssa_value(b, arg_id), tmp_deref);
+         call->params[param_idx++] = nir_src_for_ssa(&tmp_deref->dest.ssa);
+      }
    }
+   assert(param_idx == call->num_params);
 
    nir_builder_instr_insert(&b->nb, &call->instr);
 
-   if (glsl_type_is_void(callee->return_type)) {
+   if (ret_type->base_type == vtn_base_type_void) {
       vtn_push_value(b, w[2], vtn_value_type_undef);
    } else {
-      vtn_push_ssa(b, w[2], res_type, vtn_local_load(b, call->return_deref));
+      vtn_push_ssa(b, w[2], res_type, vtn_local_load(b, ret_deref));
    }
 }
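
With this rework every call argument becomes a nir_src: derefs for the
return slot and plain SSA values, SSA pointers for image, sampler and
pointer arguments. A sketch of the NIR this now emits for a by-value
call (illustrative, not taken from a real shader):

    /* SPIR-V:  %y = OpFunctionCall %uint %f %three
     *
     * NIR (roughly):
     *
     *   decl_var uint return_tmp
     *   decl_var uint arg_tmp
     *   deref0 = deref_var &return_tmp
     *   deref1 = deref_var &arg_tmp
     *   store_deref deref1, 3
     *   call f (deref0, deref1)       // params[0] is the return deref
     *   y = load_deref deref0
     */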
 
@@ -1908,7 +1943,6 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
    const struct glsl_type *image_type = sampled.type->type;
    const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image_type);
    const bool is_array = glsl_sampler_type_is_array(image_type);
-   const bool is_shadow = glsl_sampler_type_is_shadow(image_type);
 
    /* Figure out the base texture operation */
    nir_texop texop;
@@ -1961,9 +1995,41 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
       vtn_fail("Unhandled opcode");
    }
 
-   nir_tex_src srcs[8]; /* 8 should be enough */
+   nir_tex_src srcs[10]; /* 10 should be enough */
    nir_tex_src *p = srcs;
 
+   nir_deref_instr *sampler = vtn_pointer_to_deref(b, sampled.sampler);
+   nir_deref_instr *texture =
+      sampled.image ? vtn_pointer_to_deref(b, sampled.image) : sampler;
+
+   p->src = nir_src_for_ssa(&texture->dest.ssa);
+   p->src_type = nir_tex_src_texture_deref;
+   p++;
+
+   switch (texop) {
+   case nir_texop_tex:
+   case nir_texop_txb:
+   case nir_texop_txl:
+   case nir_texop_txd:
+   case nir_texop_tg4:
+      /* These operations require a sampler */
+      p->src = nir_src_for_ssa(&sampler->dest.ssa);
+      p->src_type = nir_tex_src_sampler_deref;
+      p++;
+      break;
+   case nir_texop_txf:
+   case nir_texop_txf_ms:
+   case nir_texop_txs:
+   case nir_texop_lod:
+   case nir_texop_query_levels:
+   case nir_texop_texture_samples:
+   case nir_texop_samples_identical:
+      /* These don't */
+      break;
+   case nir_texop_txf_ms_mcs:
+      vtn_fail("unexpected nir_texop_txf_ms_mcs");
+   }
+
    unsigned idx = 4;
 
    struct nir_ssa_def *coord;
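
The texture and sampler now travel as ordinary nir_tex_src entries
instead of dedicated deref pointers on the instruction. A sketch of the
resulting source list for a sampling op (illustrative layout):

    /* e.g. OpImageSampleImplicitLod on a sampled image:
     *
     *   srcs[0] = { nir_tex_src_texture_deref, &texture->dest.ssa }
     *   srcs[1] = { nir_tex_src_sampler_deref, &sampler->dest.ssa }
     *   srcs[2] = { nir_tex_src_coord,         coord              }
     *   ...      lod/bias/comparator/offset as the opcode requires
     *
     * Fetch/query ops (txf, txs, query_levels, ...) simply omit the
     * sampler source, matching the switch above.
     */
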
@@ -2032,6 +2098,7 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
       break;
    }
 
+   bool is_shadow = false;
    unsigned gather_component = 0;
    switch (opcode) {
    case SpvOpImageSampleDrefImplicitLod:
@@ -2040,6 +2107,7 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
    case SpvOpImageSampleProjDrefExplicitLod:
    case SpvOpImageDrefGather:
       /* These all have an explicit depth value as their next source */
+      is_shadow = true;
       (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
       break;
 
@@ -2122,40 +2190,6 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
       vtn_fail("Invalid base type for sampler result");
    }
 
-   nir_deref_var *sampler = vtn_pointer_to_deref(b, sampled.sampler);
-   nir_deref_var *texture;
-   if (sampled.image) {
-      nir_deref_var *image = vtn_pointer_to_deref(b, sampled.image);
-      texture = image;
-   } else {
-      texture = sampler;
-   }
-
-   instr->texture = nir_deref_var_clone(texture, instr);
-
-   switch (instr->op) {
-   case nir_texop_tex:
-   case nir_texop_txb:
-   case nir_texop_txl:
-   case nir_texop_txd:
-   case nir_texop_tg4:
-      /* These operations require a sampler */
-      instr->sampler = nir_deref_var_clone(sampler, instr);
-      break;
-   case nir_texop_txf:
-   case nir_texop_txf_ms:
-   case nir_texop_txs:
-   case nir_texop_lod:
-   case nir_texop_query_levels:
-   case nir_texop_texture_samples:
-   case nir_texop_samples_identical:
-      /* These don't */
-      instr->sampler = NULL;
-      break;
-   case nir_texop_txf_ms_mcs:
-      vtn_fail("unexpected nir_texop_txf_ms_mcs");
-   }
-
    nir_ssa_dest_init(&instr->instr, &instr->dest,
                      nir_tex_instr_dest_size(instr), 32, NULL);
 
@@ -2180,8 +2214,6 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
          instrs[i]->is_new_style_shadow = instr->is_new_style_shadow;
          instrs[i]->component = instr->component;
          instrs[i]->dest_type = instr->dest_type;
-         instrs[i]->texture = nir_deref_var_clone(texture, instrs[i]);
-         instrs[i]->sampler = NULL;
 
          memcpy(instrs[i]->src, srcs, instr->num_srcs * sizeof(*instr->src));
 
@@ -2356,7 +2388,7 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
 
    nir_intrinsic_op op;
    switch (opcode) {
-#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_var_##N; break;
+#define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_deref_##N; break;
    OP(ImageQuerySize,         size)
    OP(ImageRead,              load)
    OP(ImageWrite,             store)
@@ -2382,8 +2414,8 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
 
    nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
 
-   nir_deref_var *image_deref = vtn_pointer_to_deref(b, image.image);
-   intrin->variables[0] = nir_deref_var_clone(image_deref, intrin);
+   nir_deref_instr *image_deref = vtn_pointer_to_deref(b, image.image);
+   intrin->src[0] = nir_src_for_ssa(&image_deref->dest.ssa);
 
    /* ImageQuerySize doesn't take any extra parameters */
    if (opcode != SpvOpImageQuerySize) {
@@ -2393,9 +2425,9 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
       unsigned swiz[4];
       for (unsigned i = 0; i < 4; i++)
          swiz[i] = i < image.coord->num_components ? i : 0;
-      intrin->src[0] = nir_src_for_ssa(nir_swizzle(&b->nb, image.coord,
+      intrin->src[1] = nir_src_for_ssa(nir_swizzle(&b->nb, image.coord,
                                                    swiz, 4, false));
-      intrin->src[1] = nir_src_for_ssa(image.sample);
+      intrin->src[2] = nir_src_for_ssa(image.sample);
    }
 
    switch (opcode) {
@@ -2404,10 +2436,10 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
    case SpvOpImageRead:
       break;
    case SpvOpAtomicStore:
-      intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
+      intrin->src[3] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
       break;
    case SpvOpImageWrite:
-      intrin->src[2] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
+      intrin->src[3] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
       break;
 
    case SpvOpAtomicCompareExchange:
@@ -2423,7 +2455,7 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
    case SpvOpAtomicAnd:
    case SpvOpAtomicOr:
    case SpvOpAtomicXor:
-      fill_common_atomic_sources(b, opcode, w, &intrin->src[2]);
+      fill_common_atomic_sources(b, opcode, w, &intrin->src[3]);
       break;
 
    default:
@@ -2435,7 +2467,7 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
       struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
 
       unsigned dest_components = nir_intrinsic_dest_components(intrin);
-      if (intrin->intrinsic == nir_intrinsic_image_var_size) {
+      if (intrin->intrinsic == nir_intrinsic_image_deref_size) {
          dest_components = intrin->num_components =
             glsl_get_vector_elements(type->type);
       }
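
Summarizing the operand shift for the image intrinsics: the image deref
moves into src[0] and everything else slides down by one (sketch):

    /* nir_intrinsic_image_deref_* operand layout after this change:
     *
     *   src[0] = image deref             (replaces intrin->variables[0])
     *   src[1] = coordinate, padded out to 4 components
     *   src[2] = sample index
     *   src[3] = data for stores/atomics (was src[2])
     *
     * image_deref_size only consumes src[0].
     */
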
@@ -2478,6 +2510,35 @@ get_ssbo_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
    }
 }
 
+static nir_intrinsic_op
+get_uniform_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
+{
+   switch (opcode) {
+#define OP(S, N) case SpvOp##S: return nir_intrinsic_atomic_counter_ ##N;
+   OP(AtomicLoad,             read_deref)
+   OP(AtomicExchange,         exchange)
+   OP(AtomicCompareExchange,  comp_swap)
+   OP(AtomicIIncrement,       inc_deref)
+   OP(AtomicIDecrement,       dec_deref)
+   OP(AtomicIAdd,             add_deref)
+   OP(AtomicISub,             add_deref)
+   OP(AtomicUMin,             min_deref)
+   OP(AtomicUMax,             max_deref)
+   OP(AtomicAnd,              and_deref)
+   OP(AtomicOr,               or_deref)
+   OP(AtomicXor,              xor_deref)
+#undef OP
+   default:
+      /* We left out AtomicStore, AtomicSMin and AtomicSMax: right now
+       * there are no NIR intrinsics for them. At the moment atomic
+       * counter support is only needed for ARB_gl_spirv, so we only need
+       * to handle GLSL atomic counters, which are uints and don't allow
+       * direct storage.
+       */
+      unreachable("Invalid uniform atomic");
+   }
+}
+
 static nir_intrinsic_op
 get_shared_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
 {
@@ -2505,12 +2566,12 @@ get_shared_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
 }
 
 static nir_intrinsic_op
-get_var_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
+get_deref_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
 {
    switch (opcode) {
-   case SpvOpAtomicLoad:      return nir_intrinsic_load_var;
-   case SpvOpAtomicStore:     return nir_intrinsic_store_var;
-#define OP(S, N) case SpvOp##S: return nir_intrinsic_var_##N;
+   case SpvOpAtomicLoad:      return nir_intrinsic_load_deref;
+   case SpvOpAtomicStore:     return nir_intrinsic_store_deref;
+#define OP(S, N) case SpvOp##S: return nir_intrinsic_deref_##N;
    OP(AtomicExchange,         atomic_exchange)
    OP(AtomicCompareExchange,  atomic_comp_swap)
    OP(AtomicIIncrement,       atomic_add)
@@ -2530,9 +2591,12 @@ get_var_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
    }
 }
 
+/*
+ * Handles shared atomics, SSBO atomics and atomic counters.
+ */
 static void
-vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
-                                 const uint32_t *w, unsigned count)
+vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode,
+                   const uint32_t *w, unsigned count)
 {
    struct vtn_pointer *ptr;
    nir_intrinsic_instr *atomic;
@@ -2569,13 +2633,59 @@ vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
    SpvMemorySemanticsMask semantics = w[5];
    */
 
-   if (ptr->mode == vtn_variable_mode_workgroup &&
-       !b->options->lower_workgroup_access_to_offsets) {
-      nir_deref_var *deref = vtn_pointer_to_deref(b, ptr);
-      const struct glsl_type *deref_type = nir_deref_tail(&deref->deref)->type;
-      nir_intrinsic_op op = get_var_nir_atomic_op(b, opcode);
+   /* A "uniform" pointer in this path can only be an atomic counter uniform */
+   if (ptr->mode == vtn_variable_mode_uniform) {
+      nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
+      const struct glsl_type *deref_type = deref->type;
+      nir_intrinsic_op op = get_uniform_nir_atomic_op(b, opcode);
+      atomic = nir_intrinsic_instr_create(b->nb.shader, op);
+      atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);
+
+      /* The SSBO path has to set up index/offset sources; here we don't
+       * need to, as that info is already stored on the ptr->var->var
+       * nir_variable (see vtn_create_variable).
+       */
+
+      switch (opcode) {
+      case SpvOpAtomicLoad:
+         atomic->num_components = glsl_get_vector_elements(deref_type);
+         break;
+
+      case SpvOpAtomicStore:
+         atomic->num_components = glsl_get_vector_elements(deref_type);
+         nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
+         break;
+
+      case SpvOpAtomicExchange:
+      case SpvOpAtomicCompareExchange:
+      case SpvOpAtomicCompareExchangeWeak:
+      case SpvOpAtomicIIncrement:
+      case SpvOpAtomicIDecrement:
+      case SpvOpAtomicIAdd:
+      case SpvOpAtomicISub:
+      case SpvOpAtomicSMin:
+      case SpvOpAtomicUMin:
+      case SpvOpAtomicSMax:
+      case SpvOpAtomicUMax:
+      case SpvOpAtomicAnd:
+      case SpvOpAtomicOr:
+      case SpvOpAtomicXor:
+         /* Nothing to do: unlike the SSBO path, we don't need to call
+          * fill_common_atomic_sources here, as the atomic counter
+          * intrinsics don't take extra sources.
+          */
+         break;
+
+      default:
+         unreachable("Invalid SPIR-V atomic");
+      }
+   } else if (ptr->mode == vtn_variable_mode_workgroup &&
+              !b->options->lower_workgroup_access_to_offsets) {
+      nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
+      const struct glsl_type *deref_type = deref->type;
+      nir_intrinsic_op op = get_deref_nir_atomic_op(b, opcode);
       atomic = nir_intrinsic_instr_create(b->nb.shader, op);
-      atomic->variables[0] = nir_deref_var_clone(deref, atomic);
+      atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);
 
       switch (opcode) {
       case SpvOpAtomicLoad:
@@ -2585,7 +2695,7 @@ vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
       case SpvOpAtomicStore:
          atomic->num_components = glsl_get_vector_elements(deref_type);
          nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
-         atomic->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
+         atomic->src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
          break;
 
       case SpvOpAtomicExchange:
@@ -2602,7 +2712,7 @@ vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
       case SpvOpAtomicAnd:
       case SpvOpAtomicOr:
       case SpvOpAtomicXor:
-         fill_common_atomic_sources(b, opcode, w, &atomic->src[0]);
+         fill_common_atomic_sources(b, opcode, w, &atomic->src[1]);
          break;
 
       default:
@@ -2611,7 +2721,7 @@ vtn_handle_ssbo_or_shared_atomic(struct vtn_builder *b, SpvOp opcode,
       }
    } else {
       nir_ssa_def *offset, *index;
-      offset = vtn_pointer_to_offset(b, ptr, &index, NULL);
+      offset = vtn_pointer_to_offset(b, ptr, &index);
 
       nir_intrinsic_op op;
       if (ptr->mode == vtn_variable_mode_ssbo) {
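
Putting the new uniform path together, a GLSL-style atomic counter now
lowers to a deref-based counter intrinsic. A sketch, assuming an
ARB_gl_spirv-style producer (the GLSL side is illustrative):

    /* GLSL:   layout(binding = 0) uniform atomic_uint counter;
     *         uint v = atomicCounterIncrement(counter);
     *
     * SPIR-V: %v = OpAtomicIIncrement %uint %counter_ptr ...
     *
     * NIR:    deref0 = deref_var &counter
     *         v      = atomic_counter_inc_deref deref0
     *
     * The binding and offset live on the counter's nir_variable, which
     * is why the uniform path above adds no index/offset sources.
     */
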
@@ -3280,7 +3390,6 @@ vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
       case SpvCapabilityFloat16:
       case SpvCapabilityInt64Atomics:
       case SpvCapabilityAtomicStorage:
-      case SpvCapabilityInt16:
       case SpvCapabilityStorageImageMultisample:
       case SpvCapabilityInt8:
       case SpvCapabilitySparseResidency:
@@ -3296,6 +3405,9 @@ vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
       case SpvCapabilityInt64:
          spv_check_supported(int64, cap);
          break;
+      case SpvCapabilityInt16:
+         spv_check_supported(int16, cap);
+         break;
 
       case SpvCapabilityAddresses:
       case SpvCapabilityKernel:
@@ -3360,10 +3472,12 @@ vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
 
       case SpvCapabilityGroupNonUniformQuad:
          spv_check_supported(subgroup_quad, cap);
+         break;
 
       case SpvCapabilityGroupNonUniformArithmetic:
       case SpvCapabilityGroupNonUniformClustered:
          spv_check_supported(subgroup_arithmetic, cap);
+         break;
 
       case SpvCapabilityVariablePointersStorageBuffer:
       case SpvCapabilityVariablePointers:
@@ -3381,6 +3495,20 @@ vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
          spv_check_supported(shader_viewport_index_layer, cap);
          break;
 
+      case SpvCapabilityInputAttachmentArrayDynamicIndexingEXT:
+      case SpvCapabilityUniformTexelBufferArrayDynamicIndexingEXT:
+      case SpvCapabilityStorageTexelBufferArrayDynamicIndexingEXT:
+         spv_check_supported(descriptor_array_dynamic_indexing, cap);
+         break;
+
+      case SpvCapabilityRuntimeDescriptorArrayEXT:
+         spv_check_supported(runtime_descriptor_array, cap);
+         break;
+
+      case SpvCapabilityStencilExportEXT:
+         spv_check_supported(stencil_export, cap);
+         break;
+
       default:
          vtn_fail("Unhandled capability");
       }
@@ -3558,6 +3686,10 @@ vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
    case SpvExecutionModeContractionOff:
       break; /* OpenCL */
 
+   case SpvExecutionModeStencilRefReplacingEXT:
+      vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
+      break;
+
    default:
       vtn_fail("Unhandled execution mode");
    }
@@ -3707,10 +3839,10 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
    case SpvOpImageQuerySize: {
       struct vtn_pointer *image =
          vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
-      if (image->mode == vtn_variable_mode_image) {
+      if (glsl_type_is_image(image->type->type)) {
          vtn_handle_image(b, opcode, w, count);
       } else {
-         vtn_assert(image->mode == vtn_variable_mode_sampler);
+         vtn_assert(glsl_type_is_sampler(image->type->type));
          vtn_handle_texture(b, opcode, w, count);
       }
       break;
@@ -3736,7 +3868,7 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
          vtn_handle_image(b, opcode, w, count);
       } else {
          vtn_assert(pointer->value_type == vtn_value_type_pointer);
-         vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
+         vtn_handle_atomics(b, opcode, w, count);
       }
       break;
    }
@@ -3747,7 +3879,7 @@ vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
          vtn_handle_image(b, opcode, w, count);
       } else {
          vtn_assert(pointer->value_type == vtn_value_type_pointer);
-         vtn_handle_ssbo_or_shared_atomic(b, opcode, w, count);
+         vtn_handle_atomics(b, opcode, w, count);
       }
       break;
    }
@@ -3996,19 +4128,36 @@ vtn_create_builder(const uint32_t *words, size_t word_count,
    b->entry_point_name = entry_point_name;
    b->options = options;
 
-   /* Handle the SPIR-V header (first 4 dwords)  */
-   vtn_assert(word_count > 5);
+   /*
+    * Handle the SPIR-V header (first 5 dwords).
+    * Can't use vtn_assert() as the setjmp(3) target isn't initialized yet.
+    */
+   if (word_count <= 5)
+      goto fail;
+
+   if (words[0] != SpvMagicNumber) {
+      vtn_err("words[0] was 0x%x, want 0x%x", words[0], SpvMagicNumber);
+      goto fail;
+   }
+   if (words[1] < 0x10000) {
+      vtn_err("words[1] was 0x%x, want >= 0x10000", words[1]);
+      goto fail;
+   }
 
-   vtn_assert(words[0] == SpvMagicNumber);
-   vtn_assert(words[1] >= 0x10000);
    /* words[2] == generator magic */
    unsigned value_id_bound = words[3];
-   vtn_assert(words[4] == 0);
+   if (words[4] != 0) {
+      vtn_err("words[4] was %u, want 0", words[4]);
+      goto fail;
+   }
 
    b->value_id_bound = value_id_bound;
    b->values = rzalloc_array(b, struct vtn_value, value_id_bound);
 
    return b;
+ fail:
+   ralloc_free(b);
+   return NULL;
 }
 
 nir_function *
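
For reference, the five words being validated here are the standard
SPIR-V module header (values per the SPIR-V specification):

    /* words[0]  magic number, 0x07230203 (SpvMagicNumber)
     * words[1]  version, e.g. 0x00010000 for SPIR-V 1.0
     * words[2]  generator magic number (informational, not checked)
     * words[3]  bound: every result <id> in the module is < this
     * words[4]  schema, required to be 0
     */
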
@@ -4085,6 +4234,11 @@ spirv_to_nir(const uint32_t *words, size_t word_count,
       }
    } while (progress);
 
+   /* We sometimes generate bogus derefs that, while never used, give the
+    * validator a bit of heartburn.  Run dead-code elimination to get rid
+    * of them.
+    */
+   nir_opt_dce(b->shader);
+
    vtn_assert(b->entry_point->value_type == vtn_value_type_function);
    nir_function *entry_point = b->entry_point->func->impl->function;
    vtn_assert(entry_point);
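
The sort of leftover this pass cleans up is a deref chain whose result
is never loaded or stored; since deref instructions are pure SSA,
nir_opt_dce() can delete them outright (illustrative):

    /* e.g. built while walking an OpAccessChain that is never used:
     *
     *   deref0 = deref_var &block
     *   deref1 = deref_struct &deref0->field   // no readers
     *
     * Both instructions are dead and get removed before validation.
     */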