+ /* Private variables don't have any explicit layout but some layouts
+ * may have leaked through due to type deduplication in the SPIR-V.
+ */
+ var->var->type = var->type->type;
+ }
+ var->var->data.mode = nir_mode;
+ var->var->data.location = -1;
+ var->var->interface_type = NULL;
+ break;
+
+ case vtn_variable_mode_ubo:
+ case vtn_variable_mode_ssbo:
+ var->var = rzalloc(b->shader, nir_variable);
+ var->var->name = ralloc_strdup(var->var, val->name);
+
+ var->var->type = var->type->type;
+ var->var->interface_type = var->type->type;
+
+ var->var->data.mode = nir_mode;
+ var->var->data.location = -1;
+
+ break;
+
+ case vtn_variable_mode_workgroup:
+ /* Create the variable normally */
+ var->var = rzalloc(b->shader, nir_variable);
+ var->var->name = ralloc_strdup(var->var, val->name);
+ /* Workgroup variables don't have any explicit layout but some
+ * layouts may have leaked through due to type deduplication in the
+ * SPIR-V.
+ */
+ var->var->type = var->type->type;
+ var->var->data.mode = nir_var_mem_shared;
+ break;
+
+ case vtn_variable_mode_input:
+ case vtn_variable_mode_output: {
+ /* In order to know whether or not we're a per-vertex inout, we need
+ * the patch qualifier. This means walking the variable decorations
+ * early before we actually create any variables. Not a big deal.
+ *
+ * GLSLang really likes to place decorations in the most interior
+ * thing it possibly can. In particular, if you have a struct, it
+ * will place the patch decorations on the struct members. This
+ * should be handled by the variable splitting below just fine.
+ *
+ * If you have an array-of-struct, things get even more weird as it
+ * will place the patch decorations on the struct even though it's
+ * inside an array and some of the members being patch and others not
+ * makes no sense whatsoever. Since the only sensible thing is for
+ * it to be all or nothing, we'll call it patch if any of the members
+ * are declared patch.
+ */
+ var->patch = false;
+ vtn_foreach_decoration(b, val, var_is_patch_cb, &var->patch);
+ if (glsl_type_is_array(var->type->type) &&
+ glsl_type_is_struct_or_ifc(without_array->type)) {
+ vtn_foreach_decoration(b, vtn_value(b, without_array->id,
+ vtn_value_type_type),
+ var_is_patch_cb, &var->patch);
+ }
+
+ /* For inputs and outputs, we immediately split structures. This
+ * is for a couple of reasons. For one, builtins may all come in
+ * a struct and we really want those split out into separate
+ * variables. For another, interpolation qualifiers can be
+          * applied to members of the top-level struct and we need to be
+ * able to preserve that information.
+ */
+
+ struct vtn_type *per_vertex_type = var->type;
+ if (is_per_vertex_inout(var, b->shader->info.stage)) {
+ /* In Geometry shaders (and some tessellation), inputs come
+ * in per-vertex arrays. However, some builtins come in
+ * non-per-vertex, hence the need for the is_array check. In
+ * any case, there are no non-builtin arrays allowed so this
+ * check should be sufficient.
+ */
+ per_vertex_type = var->type->array_element;
+ }
+
+ var->var = rzalloc(b->shader, nir_variable);
+ var->var->name = ralloc_strdup(var->var, val->name);
+ /* In Vulkan, shader I/O variables don't have any explicit layout but
+ * some layouts may have leaked through due to type deduplication in
+ * the SPIR-V. We do, however, keep the layouts in the variable's
+ * interface_type because we need offsets for XFB arrays of blocks.
+ */
+ var->var->type = var->type->type;
+ var->var->data.mode = nir_mode;
+ var->var->data.patch = var->patch;
+
+ /* Figure out the interface block type. */
+ struct vtn_type *iface_type = per_vertex_type;
+ if (var->mode == vtn_variable_mode_output &&
+ (b->shader->info.stage == MESA_SHADER_VERTEX ||
+ b->shader->info.stage == MESA_SHADER_TESS_EVAL ||
+ b->shader->info.stage == MESA_SHADER_GEOMETRY)) {
+ /* For vertex data outputs, we can end up with arrays of blocks for
+ * transform feedback where each array element corresponds to a
+ * different XFB output buffer.
+ */
+ while (iface_type->base_type == vtn_base_type_array)
+ iface_type = iface_type->array_element;
+ }
+ if (iface_type->base_type == vtn_base_type_struct && iface_type->block)
+ var->var->interface_type = iface_type->type;
+
+ if (per_vertex_type->base_type == vtn_base_type_struct &&
+ per_vertex_type->block) {
+ /* It's a struct. Set it up as per-member. */
+ var->var->num_members = glsl_get_length(per_vertex_type->type);
+ var->var->members = rzalloc_array(var->var, struct nir_variable_data,
+ var->var->num_members);
+
+ for (unsigned i = 0; i < var->var->num_members; i++) {
+ var->var->members[i].mode = nir_mode;
+ var->var->members[i].patch = var->patch;
+ var->var->members[i].location = -1;
+ }
+ }
+
+ /* For inputs and outputs, we need to grab locations and builtin
+ * information from the per-vertex type.
+ */
+ vtn_foreach_decoration(b, vtn_value(b, per_vertex_type->id,
+ vtn_value_type_type),
+ var_decoration_cb, var);
+ break;
+ }
+
+ case vtn_variable_mode_push_constant:
+ case vtn_variable_mode_cross_workgroup:
+ /* These don't need actual variables. */
+ break;
+
+ case vtn_variable_mode_image:
+ case vtn_variable_mode_phys_ssbo:
+ unreachable("Should have been caught before");
+ }
+
+ /* We can only have one type of initializer */
+ assert(!(const_initializer && var_initializer));
+ if (const_initializer) {
+ var->var->constant_initializer =
+ nir_constant_clone(const_initializer, var->var);
+ }
+ if (var_initializer)
+ var->var->pointer_initializer = var_initializer;
+
+ vtn_foreach_decoration(b, val, var_decoration_cb, var);
+ vtn_foreach_decoration(b, val, ptr_decoration_cb, val->pointer);
+
+ /* Propagate access flags from the OpVariable decorations. */
+ val->pointer->access |= var->access;
+
+ if ((var->mode == vtn_variable_mode_input ||
+ var->mode == vtn_variable_mode_output) &&
+ var->var->members) {
+ assign_missing_member_locations(var);
+ }
+
+ if (var->mode == vtn_variable_mode_uniform ||
+ var->mode == vtn_variable_mode_ubo ||
+ var->mode == vtn_variable_mode_ssbo) {
+ /* XXX: We still need the binding information in the nir_variable
+ * for these. We should fix that.
+ */
+ var->var->data.binding = var->binding;
+ var->var->data.explicit_binding = var->explicit_binding;
+ var->var->data.descriptor_set = var->descriptor_set;
+ var->var->data.index = var->input_attachment_index;
+ var->var->data.offset = var->offset;
+
+ if (glsl_type_is_image(without_array->type))
+ var->var->data.image.format = without_array->image_format;
+ }
+
+ if (var->mode == vtn_variable_mode_function) {
+ vtn_assert(var->var != NULL && var->var->members == NULL);
+ nir_function_impl_add_variable(b->nb.impl, var->var);
+ } else if (var->var) {
+ nir_shader_add_variable(b->shader, var->var);
+ } else {
+ vtn_assert(vtn_pointer_is_external_block(b, val->pointer));
+ }
+}
+
+static void
+vtn_assert_types_equal(struct vtn_builder *b, SpvOp opcode,
+ struct vtn_type *dst_type,
+ struct vtn_type *src_type)
+{
+ if (dst_type->id == src_type->id)
+ return;
+
+ if (vtn_types_compatible(b, dst_type, src_type)) {
+ /* Early versions of GLSLang would re-emit types unnecessarily and you
+ * would end up with OpLoad, OpStore, or OpCopyMemory opcodes which have
+ * mismatched source and destination types.
+ *
+ * https://github.com/KhronosGroup/glslang/issues/304
+ * https://github.com/KhronosGroup/glslang/issues/307
+ * https://bugs.freedesktop.org/show_bug.cgi?id=104338
+ * https://bugs.freedesktop.org/show_bug.cgi?id=104424
+ */
+ vtn_warn("Source and destination types of %s do not have the same "
+ "ID (but are compatible): %u vs %u",
+ spirv_op_to_string(opcode), dst_type->id, src_type->id);
+ return;
+ }
+
+ vtn_fail("Source and destination types of %s do not match: %s vs. %s",
+ spirv_op_to_string(opcode),
+ glsl_get_type_name(dst_type->type),
+ glsl_get_type_name(src_type->type));
+}
+
+static nir_ssa_def *
+nir_shrink_zero_pad_vec(nir_builder *b, nir_ssa_def *val,
+ unsigned num_components)
+{
+ if (val->num_components == num_components)
+ return val;
+
+ nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
+ for (unsigned i = 0; i < num_components; i++) {
+ if (i < val->num_components)
+ comps[i] = nir_channel(b, val, i);
+ else
+ comps[i] = nir_imm_intN_t(b, 0, val->bit_size);
+ }
+ return nir_vec(b, comps, num_components);
+}
+
+static nir_ssa_def *
+nir_sloppy_bitcast(nir_builder *b, nir_ssa_def *val,
+ const struct glsl_type *type)
+{
+ const unsigned num_components = glsl_get_vector_elements(type);
+ const unsigned bit_size = glsl_get_bit_size(type);
+
+ /* First, zero-pad to ensure that the value is big enough that when we
+ * bit-cast it, we don't loose anything.
+ */
+ if (val->bit_size < bit_size) {
+ const unsigned src_num_components_needed =
+ vtn_align_u32(val->num_components, bit_size / val->bit_size);
+ val = nir_shrink_zero_pad_vec(b, val, src_num_components_needed);
+ }
+
+ val = nir_bitcast_vector(b, val, bit_size);
+
+ return nir_shrink_zero_pad_vec(b, val, num_components);
+}
+
+void
+vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
+ const uint32_t *w, unsigned count)
+{
+ switch (opcode) {
+ case SpvOpUndef: {
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
+ val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
+ break;
+ }
+
+ case SpvOpVariable: {
+ struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;
+
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);
+
+ SpvStorageClass storage_class = w[3];
+ nir_constant *const_initializer = NULL;
+ nir_variable *var_initializer = NULL;
+ if (count > 4) {
+ struct vtn_value *init = vtn_untyped_value(b, w[4]);
+ switch (init->value_type) {
+ case vtn_value_type_constant:
+ const_initializer = init->constant;
+ break;
+ case vtn_value_type_pointer:
+ var_initializer = init->pointer->var->var;
+ break;
+ default:
+ vtn_fail("SPIR-V variable initializer %u must be constant or pointer",
+ w[4]);
+ }