#include "nir/nir_constant_expressions.h"
#include "spirv_info.h"
+#include <stdio.h>
+#include <stdlib.h> /* getenv() */
+
void
vtn_log(struct vtn_builder *b, enum nir_spirv_debug_level level,
size_t spirv_offset, const char *message)
ralloc_free(msg);
}
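+/* Dump the raw SPIR-V words for this shader to "<path>/<prefix>-<N>.spirv" so a failing module can be inspected offline. */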
+static void
+vtn_dump_shader(struct vtn_builder *b, const char *path, const char *prefix)
+{
+ static int idx = 0;
+
+ char filename[1024];
+ int len = snprintf(filename, sizeof(filename), "%s/%s-%d.spirv",
+ path, prefix, idx++);
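+ /* snprintf() returns the untruncated length, so treat an error or a truncated path as "don't dump". */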
+ if (len < 0 || len >= sizeof(filename))
+ return;
+
+ FILE *f = fopen(filename, "w");
+ if (f == NULL)
+ return;
+
+ fwrite(b->spirv, sizeof(*b->spirv), b->spirv_word_count, f);
+ fclose(f);
+
+ vtn_info("SPIR-V shader dumped to %s", filename);
+}
+
void
_vtn_warn(struct vtn_builder *b, const char *file, unsigned line,
const char *fmt, ...)
file, line, fmt, args);
va_end(args);
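+ /* If MESA_SPIRV_FAIL_DUMP_PATH is set, dump the offending SPIR-V binary before unwinding. */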
+ const char *dump_path = getenv("MESA_SPIRV_FAIL_DUMP_PATH");
+ if (dump_path)
+ vtn_dump_shader(b, dump_path, "fail");
+
longjmp(b->fail_jump, 1);
}
if (dec->scope == VTN_DEC_DECORATION) {
member = parent_member;
} else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
- vtn_assert(parent_member == -1);
+ vtn_fail_if(value->value_type != vtn_value_type_type ||
+ value->type->base_type != vtn_base_type_struct,
+ "OpMemberDecorate and OpGroupMemberDecorate are only "
+ "allowed on OpTypeStruct");
+ /* This means we haven't recursed yet */
+ assert(value == base_value);
+
member = dec->scope - VTN_DEC_STRUCT_MEMBER0;
+
+ vtn_fail_if(member >= base_value->type->length,
+ "OpMemberDecorate specifies member %d but the "
+ "OpTypeStruct has only %u members",
+ member, base_value->type->length);
} else {
/* Not a decoration */
+ assert(dec->scope == VTN_DEC_EXECUTION_MODE);
continue;
}
if (dec->group) {
- vtn_assert(dec->group->value_type == vtn_value_type_decoration_group);
+ assert(dec->group->value_type == vtn_value_type_decoration_group);
_foreach_decoration_helper(b, base_value, member, dec->group,
cb, data);
} else {
if (dec->scope != VTN_DEC_EXECUTION_MODE)
continue;
- vtn_assert(dec->group == NULL);
+ assert(dec->group == NULL);
cb(b, value, dec, data);
}
}
case SpvOpDecorate:
case SpvOpMemberDecorate:
case SpvOpExecutionMode: {
- struct vtn_value *val = &b->values[target];
+ struct vtn_value *val = vtn_untyped_value(b, target);
struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
switch (opcode) {
break;
case SpvOpMemberDecorate:
dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
+ vtn_fail_if(dec->scope < VTN_DEC_STRUCT_MEMBER0, /* overflow */
+ "Member argument of OpMemberDecorate too large");
break;
case SpvOpExecutionMode:
dec->scope = VTN_DEC_EXECUTION_MODE;
break;
default:
- vtn_fail("Invalid decoration opcode");
+ unreachable("Invalid decoration opcode");
}
dec->decoration = *(w++);
dec->literals = w;
dec->scope = VTN_DEC_DECORATION;
} else {
dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
+ vtn_fail_if(dec->scope < 0, /* Check for overflow */
+ "Member argument of OpGroupMemberDecorate too large");
}
/* Link into the list */
}
default:
- vtn_fail("Unhandled opcode");
+ unreachable("Unhandled opcode");
}
}
struct vtn_type *type;
};
+/** Returns true if two types are "compatible", i.e. you can do an OpLoad,
+ * OpStore, or OpCopyMemory between them without breaking anything.
+ * Technically, the SPIR-V rules require the exact same type ID but this lets
+ * us internally be a bit looser.
+ */
+bool
+vtn_types_compatible(struct vtn_builder *b,
+ struct vtn_type *t1, struct vtn_type *t2)
+{
+ if (t1->id == t2->id)
+ return true;
+
+ if (t1->base_type != t2->base_type)
+ return false;
+
+ switch (t1->base_type) {
+ case vtn_base_type_void:
+ case vtn_base_type_scalar:
+ case vtn_base_type_vector:
+ case vtn_base_type_matrix:
+ case vtn_base_type_image:
+ case vtn_base_type_sampler:
+ case vtn_base_type_sampled_image:
+ return t1->type == t2->type;
+
+ case vtn_base_type_array:
+ return t1->length == t2->length &&
+ vtn_types_compatible(b, t1->array_element, t2->array_element);
+
+ case vtn_base_type_pointer:
+ return vtn_types_compatible(b, t1->deref, t2->deref);
+
+ case vtn_base_type_struct:
+ if (t1->length != t2->length)
+ return false;
+
+ for (unsigned i = 0; i < t1->length; i++) {
+ if (!vtn_types_compatible(b, t1->members[i], t2->members[i]))
+ return false;
+ }
+ return true;
+
+ case vtn_base_type_function:
+ /* This case shouldn't get hit since you can't copy around function
+ * types. Just require them to be identical.
+ */
+ return false;
+ }
+
+ vtn_fail("Invalid base type");
+}
+
/* does a shallow copy of a vtn_type */
static struct vtn_type *
if (member < 0)
return;
- vtn_assert(member < ctx->num_fields);
+ assert(member < ctx->num_fields);
switch (dec->decoration) {
case SpvDecorationNonWritable:
{
if (dec->decoration != SpvDecorationMatrixStride)
return;
- vtn_assert(member >= 0);
+
+ vtn_fail_if(member < 0,
+ "The MatrixStride decoration is only allowed on members "
+ "of OpTypeStruct");
struct member_decoration_ctx *ctx = void_ctx;
{
struct vtn_type *type = val->type;
- if (member != -1)
+ if (member != -1) {
+ /* This should have been handled by OpTypeStruct */
+ assert(val->type->base_type == vtn_base_type_struct);
+ assert(member >= 0 && member < val->type->length);
return;
+ }
switch (dec->decoration) {
case SpvDecorationArrayStride:
struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type);
val->type = rzalloc(b, struct vtn_type);
- val->type->val = val;
+ val->type->id = w[1];
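+ /* Stash the result <id> so vtn_types_compatible() can short-circuit on identical IDs. */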
switch (opcode) {
case SpvOpTypeVoid:
case SpvOpTypeImage: {
val->type->base_type = vtn_base_type_image;
- const struct glsl_type *sampled_type =
- vtn_value(b, w[2], vtn_value_type_type)->type->type;
+ const struct vtn_type *sampled_type =
+ vtn_value(b, w[2], vtn_value_type_type)->type;
- vtn_assert(glsl_type_is_vector_or_scalar(sampled_type));
+ vtn_fail_if(sampled_type->base_type != vtn_base_type_scalar ||
+ glsl_get_bit_size(sampled_type->type) != 32,
+ "Sampled type of OpTypeImage must be a 32-bit scalar");
enum glsl_sampler_dim dim;
switch ((SpvDim)w[3]) {
case SpvDimBuffer: dim = GLSL_SAMPLER_DIM_BUF; break;
case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
default:
- vtn_fail("Invalid SPIR-V Sampler dimension");
+ vtn_fail("Invalid SPIR-V image dimensionality");
}
bool is_shadow = w[4];
val->type->image_format = translate_image_format(b, format);
+ enum glsl_base_type sampled_base_type =
+ glsl_get_base_type(sampled_type->type);
if (sampled == 1) {
val->type->sampled = true;
val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
- glsl_get_base_type(sampled_type));
+ sampled_base_type);
} else if (sampled == 2) {
vtn_assert(!is_shadow);
val->type->sampled = false;
- val->type->type = glsl_image_type(dim, is_array,
- glsl_get_base_type(sampled_type));
+ val->type->type = glsl_image_type(dim, is_array, sampled_base_type);
} else {
vtn_fail("We need to know if the image will be sampled");
}
}
case SpvOpConstant: {
- vtn_assert(glsl_type_is_scalar(val->type->type));
+ vtn_fail_if(val->type->base_type != vtn_base_type_scalar,
+ "Result type of %s must be a scalar",
+ spirv_op_to_string(opcode));
int bit_size = glsl_get_bit_size(val->type->type);
switch (bit_size) {
case 64:
}
break;
}
+
case SpvOpSpecConstant: {
- vtn_assert(glsl_type_is_scalar(val->type->type));
- val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
+ vtn_fail_if(val->type->base_type != vtn_base_type_scalar,
+ "Result type of %s must be a scalar",
+ spirv_op_to_string(opcode));
int bit_size = glsl_get_bit_size(val->type->type);
switch (bit_size) {
case 64:
}
break;
}
+
case SpvOpSpecConstantComposite:
case SpvOpConstantComposite: {
unsigned elem_count = count - 3;
+ vtn_fail_if(elem_count != val->type->length,
+ "%s has %u constituents, expected %u",
+ spirv_op_to_string(opcode), elem_count, val->type->length);
+
nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
for (unsigned i = 0; i < elem_count; i++)
elems[i] = vtn_value(b, w[i + 3], vtn_value_type_constant)->constant;
- switch (glsl_get_base_type(val->type->type)) {
- case GLSL_TYPE_UINT:
- case GLSL_TYPE_INT:
- case GLSL_TYPE_UINT16:
- case GLSL_TYPE_INT16:
- case GLSL_TYPE_UINT64:
- case GLSL_TYPE_INT64:
- case GLSL_TYPE_FLOAT:
- case GLSL_TYPE_FLOAT16:
- case GLSL_TYPE_BOOL:
- case GLSL_TYPE_DOUBLE: {
+ switch (val->type->base_type) {
+ case vtn_base_type_vector: {
+ assert(glsl_type_is_vector(val->type->type));
int bit_size = glsl_get_bit_size(val->type->type);
- if (glsl_type_is_matrix(val->type->type)) {
- vtn_assert(glsl_get_matrix_columns(val->type->type) == elem_count);
- for (unsigned i = 0; i < elem_count; i++)
- val->constant->values[i] = elems[i]->values[0];
- } else {
- vtn_assert(glsl_type_is_vector(val->type->type));
- vtn_assert(glsl_get_vector_elements(val->type->type) == elem_count);
- for (unsigned i = 0; i < elem_count; i++) {
- switch (bit_size) {
- case 64:
- val->constant->values[0].u64[i] = elems[i]->values[0].u64[0];
- break;
- case 32:
- val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
- break;
- case 16:
- val->constant->values[0].u16[i] = elems[i]->values[0].u16[0];
- break;
- default:
- vtn_fail("Invalid SpvOpConstantComposite bit size");
- }
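+ /* A vector constant stores all of its components in values[0], one entry per component. */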
+ for (unsigned i = 0; i < elem_count; i++) {
+ switch (bit_size) {
+ case 64:
+ val->constant->values[0].u64[i] = elems[i]->values[0].u64[0];
+ break;
+ case 32:
+ val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
+ break;
+ case 16:
+ val->constant->values[0].u16[i] = elems[i]->values[0].u16[0];
+ break;
+ default:
+ vtn_fail("Invalid SpvOpConstantComposite bit size");
}
}
- ralloc_free(elems);
break;
}
- case GLSL_TYPE_STRUCT:
- case GLSL_TYPE_ARRAY:
+
+ case vtn_base_type_matrix:
+ assert(glsl_type_is_matrix(val->type->type));
+ for (unsigned i = 0; i < elem_count; i++)
+ val->constant->values[i] = elems[i]->values[0];
+ break;
+
+ case vtn_base_type_struct:
+ case vtn_base_type_array:
ralloc_steal(val->constant, elems);
val->constant->num_elements = elem_count;
val->constant->elements = elems;
break;
default:
- vtn_fail("Unsupported type for constants");
+ vtn_fail("Result type of %s must be a composite type",
+ spirv_op_to_string(opcode));
}
break;
}
int elem = -1;
int col = 0;
- const struct glsl_type *type = comp->type->type;
+ const struct vtn_type *type = comp->type;
for (unsigned i = deref_start; i < count; i++) {
- switch (glsl_get_base_type(type)) {
- case GLSL_TYPE_UINT:
- case GLSL_TYPE_INT:
- case GLSL_TYPE_UINT16:
- case GLSL_TYPE_INT16:
- case GLSL_TYPE_UINT64:
- case GLSL_TYPE_INT64:
- case GLSL_TYPE_FLOAT:
- case GLSL_TYPE_FLOAT16:
- case GLSL_TYPE_DOUBLE:
- case GLSL_TYPE_BOOL:
- /* If we hit this granularity, we're picking off an element */
- if (glsl_type_is_matrix(type)) {
- vtn_assert(col == 0 && elem == -1);
- col = w[i];
- elem = 0;
- type = glsl_get_column_type(type);
- } else {
- vtn_assert(elem <= 0 && glsl_type_is_vector(type));
- elem = w[i];
- type = glsl_scalar_type(glsl_get_base_type(type));
- }
- continue;
-
- case GLSL_TYPE_ARRAY:
+ vtn_fail_if(w[i] >= type->length,
+ "%uth index of %s is %u but the type has only "
+ "%u elements", i - deref_start,
+ spirv_op_to_string(opcode), w[i], type->length);
+
+ switch (type->base_type) {
+ case vtn_base_type_vector:
+ elem = w[i];
+ type = type->array_element;
+ break;
+
+ case vtn_base_type_matrix:
+ assert(col == 0 && elem == -1);
+ col = w[i];
+ elem = 0;
+ type = type->array_element;
+ break;
+
+ case vtn_base_type_array:
c = &(*c)->elements[w[i]];
- type = glsl_get_array_element(type);
- continue;
+ type = type->array_element;
+ break;
- case GLSL_TYPE_STRUCT:
+ case vtn_base_type_struct:
c = &(*c)->elements[w[i]];
- type = glsl_get_struct_field(type, w[i]);
- continue;
+ type = type->members[w[i]];
+ break;
default:
- vtn_fail("Invalid constant type");
+ vtn_fail("%s must only index into composite types",
+ spirv_op_to_string(opcode));
}
}
if (elem == -1) {
val->constant = *c;
} else {
- unsigned num_components = glsl_get_vector_elements(type);
- unsigned bit_size = glsl_get_bit_size(type);
+ unsigned num_components = type->length;
+ unsigned bit_size = glsl_get_bit_size(type->type);
for (unsigned i = 0; i < num_components; i++)
switch(bit_size) {
case 64:
} else {
struct vtn_value *insert =
vtn_value(b, w[4], vtn_value_type_constant);
- vtn_assert(insert->type->type == type);
+ vtn_assert(insert->type == type);
if (elem == -1) {
*c = insert->constant;
} else {
- unsigned num_components = glsl_get_vector_elements(type);
- unsigned bit_size = glsl_get_bit_size(type);
+ unsigned num_components = type->length;
+ unsigned bit_size = glsl_get_bit_size(type->type);
for (unsigned i = 0; i < num_components; i++)
switch (bit_size) {
case 64:
}
}
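+/* Emit a zero-operand barrier intrinsic at the current builder cursor. */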
+static void
+vtn_emit_barrier(struct vtn_builder *b, nir_intrinsic_op op)
+{
+ nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
+ nir_builder_instr_insert(&b->nb, &intrin->instr);
+}
+
+static void
+vtn_emit_memory_barrier(struct vtn_builder *b, SpvScope scope,
+ SpvMemorySemanticsMask semantics)
+{
+ static const SpvMemorySemanticsMask all_memory_semantics =
+ SpvMemorySemanticsUniformMemoryMask |
+ SpvMemorySemanticsWorkgroupMemoryMask |
+ SpvMemorySemanticsAtomicCounterMemoryMask |
+ SpvMemorySemanticsImageMemoryMask;
+
+ /* If we're not actually doing a memory barrier, bail */
+ if (!(semantics & all_memory_semantics))
+ return;
+
+ /* GL and Vulkan don't have these */
+ vtn_assert(scope != SpvScopeCrossDevice);
+
+ if (scope == SpvScopeSubgroup)
+ return; /* Nothing to do here */
+
+ if (scope == SpvScopeWorkgroup) {
+ vtn_emit_barrier(b, nir_intrinsic_group_memory_barrier);
+ return;
+ }
+
+ /* Only the Invocation and Device scopes are left at this point */
+ vtn_assert(scope == SpvScopeInvocation || scope == SpvScopeDevice);
+
+ if ((semantics & all_memory_semantics) == all_memory_semantics) {
+ vtn_emit_barrier(b, nir_intrinsic_memory_barrier);
+ return;
+ }
+
+ /* Issue a bunch of more specific barriers */
+ uint32_t bits = semantics;
+ while (bits) {
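+ /* u_bit_scan() consumes and returns the index of the lowest set bit. */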
+ SpvMemorySemanticsMask semantic = 1 << u_bit_scan(&bits);
+ switch (semantic) {
+ case SpvMemorySemanticsUniformMemoryMask:
+ vtn_emit_barrier(b, nir_intrinsic_memory_barrier_buffer);
+ break;
+ case SpvMemorySemanticsWorkgroupMemoryMask:
+ vtn_emit_barrier(b, nir_intrinsic_memory_barrier_shared);
+ break;
+ case SpvMemorySemanticsAtomicCounterMemoryMask:
+ vtn_emit_barrier(b, nir_intrinsic_memory_barrier_atomic_counter);
+ break;
+ case SpvMemorySemanticsImageMemoryMask:
+ vtn_emit_barrier(b, nir_intrinsic_memory_barrier_image);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
static void
vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
const uint32_t *w, unsigned count)
{
- nir_intrinsic_op intrinsic_op;
switch (opcode) {
case SpvOpEmitVertex:
case SpvOpEmitStreamVertex:
- intrinsic_op = nir_intrinsic_emit_vertex;
- break;
case SpvOpEndPrimitive:
- case SpvOpEndStreamPrimitive:
- intrinsic_op = nir_intrinsic_end_primitive;
- break;
- case SpvOpMemoryBarrier:
- intrinsic_op = nir_intrinsic_memory_barrier;
- break;
- case SpvOpControlBarrier:
- intrinsic_op = nir_intrinsic_barrier;
+ case SpvOpEndStreamPrimitive: {
+ nir_intrinsic_op intrinsic_op;
+ switch (opcode) {
+ case SpvOpEmitVertex:
+ case SpvOpEmitStreamVertex:
+ intrinsic_op = nir_intrinsic_emit_vertex;
+ break;
+ case SpvOpEndPrimitive:
+ case SpvOpEndStreamPrimitive:
+ intrinsic_op = nir_intrinsic_end_primitive;
+ break;
+ default:
+ unreachable("Invalid opcode");
+ }
+
+ nir_intrinsic_instr *intrin =
+ nir_intrinsic_instr_create(b->shader, intrinsic_op);
+
+ switch (opcode) {
+ case SpvOpEmitStreamVertex:
+ case SpvOpEndStreamPrimitive:
+ nir_intrinsic_set_stream_id(intrin, w[1]);
+ break;
+ default:
+ break;
+ }
+
+ nir_builder_instr_insert(&b->nb, &intrin->instr);
break;
- default:
- vtn_fail("unknown barrier instruction");
}
- nir_intrinsic_instr *intrin =
- nir_intrinsic_instr_create(b->shader, intrinsic_op);
+ case SpvOpMemoryBarrier: {
+ SpvScope scope = vtn_constant_value(b, w[1])->values[0].u32[0];
+ SpvMemorySemanticsMask semantics =
+ vtn_constant_value(b, w[2])->values[0].u32[0];
+ vtn_emit_memory_barrier(b, scope, semantics);
+ return;
+ }
- if (opcode == SpvOpEmitStreamVertex || opcode == SpvOpEndStreamPrimitive)
- nir_intrinsic_set_stream_id(intrin, w[1]);
+ case SpvOpControlBarrier: {
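+ /* OpControlBarrier carries both an execution scope and memory semantics: emit the execution barrier (for Workgroup scope) first, then any requested memory barrier. */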
+ SpvScope execution_scope =
+ vtn_constant_value(b, w[1])->values[0].u32[0];
+ if (execution_scope == SpvScopeWorkgroup)
+ vtn_emit_barrier(b, nir_intrinsic_barrier);
- nir_builder_instr_insert(&b->nb, &intrin->instr);
+ SpvScope memory_scope =
+ vtn_constant_value(b, w[2])->values[0].u32[0];
+ SpvMemorySemanticsMask memory_semantics =
+ vtn_constant_value(b, w[3])->values[0].u32[0];
+ vtn_emit_memory_barrier(b, memory_scope, memory_semantics);
+ break;
+ }
+
+ default:
+ unreachable("unknown barrier instruction");
+ }
}
static unsigned
case SpvOpSourceExtension:
case SpvOpSourceContinued:
case SpvOpExtension:
+ case SpvOpModuleProcessed:
/* Unhandled, but these are for debug so that's ok. */
break;
spv_check_supported(image_write_without_format, cap);
break;
+ case SpvCapabilityDeviceGroup:
+ spv_check_supported(device_group, cap);
+ break;
+
case SpvCapabilityMultiView:
spv_check_supported(multiview, cap);
break;
+ case SpvCapabilityGroupNonUniform:
+ spv_check_supported(subgroup_basic, cap);
+ break;
+
+ case SpvCapabilityGroupNonUniformVote:
+ spv_check_supported(subgroup_vote, cap);
+ break;
+
+ case SpvCapabilitySubgroupBallotKHR:
+ case SpvCapabilityGroupNonUniformBallot:
+ spv_check_supported(subgroup_ballot, cap);
+ break;
+
+ case SpvCapabilityGroupNonUniformShuffle:
+ case SpvCapabilityGroupNonUniformShuffleRelative:
+ spv_check_supported(subgroup_shuffle, cap);
+ break;
+
+ case SpvCapabilityGroupNonUniformQuad:
+ spv_check_supported(subgroup_quad, cap);
+ break;
+
case SpvCapabilityVariablePointersStorageBuffer:
case SpvCapabilityVariablePointers:
spv_check_supported(variable_pointers, cap);
spv_check_supported(storage_16bit, cap);
break;
+ case SpvCapabilityShaderViewportIndexLayerEXT:
+ spv_check_supported(shader_viewport_index_layer, cap);
+ break;
+
default:
vtn_fail("Unhandled capability");
}
vtn_handle_barrier(b, opcode, w, count);
break;
+ case SpvOpGroupNonUniformElect:
+ case SpvOpGroupNonUniformAll:
+ case SpvOpGroupNonUniformAny:
+ case SpvOpGroupNonUniformAllEqual:
+ case SpvOpGroupNonUniformBroadcast:
+ case SpvOpGroupNonUniformBroadcastFirst:
+ case SpvOpGroupNonUniformBallot:
+ case SpvOpGroupNonUniformInverseBallot:
+ case SpvOpGroupNonUniformBallotBitExtract:
+ case SpvOpGroupNonUniformBallotBitCount:
+ case SpvOpGroupNonUniformBallotFindLSB:
+ case SpvOpGroupNonUniformBallotFindMSB:
+ case SpvOpGroupNonUniformShuffle:
+ case SpvOpGroupNonUniformShuffleXor:
+ case SpvOpGroupNonUniformShuffleUp:
+ case SpvOpGroupNonUniformShuffleDown:
+ case SpvOpGroupNonUniformIAdd:
+ case SpvOpGroupNonUniformFAdd:
+ case SpvOpGroupNonUniformIMul:
+ case SpvOpGroupNonUniformFMul:
+ case SpvOpGroupNonUniformSMin:
+ case SpvOpGroupNonUniformUMin:
+ case SpvOpGroupNonUniformFMin:
+ case SpvOpGroupNonUniformSMax:
+ case SpvOpGroupNonUniformUMax:
+ case SpvOpGroupNonUniformFMax:
+ case SpvOpGroupNonUniformBitwiseAnd:
+ case SpvOpGroupNonUniformBitwiseOr:
+ case SpvOpGroupNonUniformBitwiseXor:
+ case SpvOpGroupNonUniformLogicalAnd:
+ case SpvOpGroupNonUniformLogicalOr:
+ case SpvOpGroupNonUniformLogicalXor:
+ case SpvOpGroupNonUniformQuadBroadcast:
+ case SpvOpGroupNonUniformQuadSwap:
+ vtn_handle_subgroup(b, opcode, w, count);
+ break;
+
default:
vtn_fail("Unhandled opcode");
}
/* Initialize the vtn_builder object */
struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
b->spirv = words;
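+ /* Stash the word count so vtn_dump_shader() knows how much to write. */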
+ b->spirv_word_count = word_count;
b->file = NULL;
b->line = -1;
b->col = -1;