/* Rebuild the (possibly nested) array shape of "array_type" around "type":
 * the innermost element of the result becomes "type", while every array
 * level's length and explicit stride are copied from "array_type".  If
 * "array_type" is not an array at all, "type" is returned unchanged.
 */
static const struct glsl_type *
wrap_type_in_array(const struct glsl_type *type,
                   const struct glsl_type *array_type)
{
   if (glsl_type_is_array(array_type)) {
      /* Recurse to the innermost element first, then re-wrap outward. */
      const struct glsl_type *wrapped =
         wrap_type_in_array(type, glsl_get_array_element(array_type));
      return glsl_array_type(wrapped, glsl_get_length(array_type),
                             glsl_get_explicit_stride(array_type));
   }

   return type;
}
+
+static bool
+vtn_type_needs_explicit_layout(struct vtn_builder *b, enum vtn_variable_mode mode)
+{
+ /* For OpenCL we never want to strip the info from the types, and it makes
+ * type comparisons easier in later stages.
+ */
+ if (b->options->environment == NIR_SPIRV_OPENCL)
+ return true;
+
+ switch (mode) {
+ case vtn_variable_mode_input:
+ case vtn_variable_mode_output:
+ /* Layout decorations kept because we need offsets for XFB arrays of
+ * blocks.
+ */
+ return b->shader->info.has_transform_feedback_varyings;
+
+ case vtn_variable_mode_ssbo:
+ case vtn_variable_mode_phys_ssbo:
+ case vtn_variable_mode_ubo:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
/* Translate a vtn_type into the glsl_type that the NIR variable for "mode"
 * should carry.  Usually this is just type->type, but some storage classes
 * need adjustment:
 *
 *  - Atomic counters: SPIR-V declares them as (arrays of) uint; here the
 *    scalar is swapped for the dedicated atomic_uint type, re-wrapped in
 *    the original array dimensions.
 *
 *  - Uniform (opaque) variables: images and samplers use the glsl image /
 *    bare-sampler types recorded on the vtn_type, applied recursively
 *    through arrays and structs.
 *
 *  - All other modes: explicit layout decorations are dropped via
 *    glsl_get_bare_type() unless the mode requires them (see
 *    vtn_type_needs_explicit_layout).
 */
const struct glsl_type *
vtn_type_get_nir_type(struct vtn_builder *b, struct vtn_type *type,
                      enum vtn_variable_mode mode)
{
   if (mode == vtn_variable_mode_atomic_counter) {
      vtn_fail_if(glsl_without_array(type->type) != glsl_uint_type(),
                  "Variables in the AtomicCounter storage class should be "
                  "(possibly arrays of arrays of) uint.");
      /* Replace the innermost uint with atomic_uint, keeping array shape. */
      return wrap_type_in_array(glsl_atomic_uint_type(), type->type);
   }

   if (mode == vtn_variable_mode_uniform) {
      switch (type->base_type) {
      case vtn_base_type_array: {
         /* Recurse so arrays of images/samplers get their element type
          * rewritten; length and explicit stride stay as declared.
          */
         const struct glsl_type *elem_type =
            vtn_type_get_nir_type(b, type->array_element, mode);

         return glsl_array_type(elem_type, type->length,
                                glsl_get_explicit_stride(type->type));
      }

      case vtn_base_type_struct: {
         bool need_new_struct = false;
         const uint32_t num_fields = type->length;
         NIR_VLA(struct glsl_struct_field, fields, num_fields);
         for (unsigned i = 0; i < num_fields; i++) {
            /* Copy the field metadata wholesale, then recurse on the
             * field's type; flag if anything actually changed.
             */
            fields[i] = *glsl_get_struct_field_data(type->type, i);
            const struct glsl_type *field_nir_type =
               vtn_type_get_nir_type(b, type->members[i], mode);
            if (fields[i].type != field_nir_type) {
               fields[i].type = field_nir_type;
               need_new_struct = true;
            }
         }
         /* Only build a fresh struct/interface type when a field changed,
          * so unchanged types keep pointer equality with the original.
          */
         if (need_new_struct) {
            if (glsl_type_is_interface(type->type)) {
               return glsl_interface_type(fields, num_fields,
                                          /* packing */ 0, false,
                                          glsl_get_type_name(type->type));
            } else {
               return glsl_struct_type(fields, num_fields,
                                       glsl_get_type_name(type->type),
                                       glsl_struct_type_is_packed(type->type));
            }
         } else {
            /* No changes, just pass it on */
            return type->type;
         }
      }

      case vtn_base_type_image:
         return type->glsl_image;

      case vtn_base_type_sampler:
         return glsl_bare_sampler_type();

      case vtn_base_type_sampled_image:
         /* NOTE(review): the combined sampled-image maps to just its image
          * half here; presumably the sampler carries no extra NIR type
          * information — confirm against how sampled images are lowered.
          */
         return type->image->glsl_image;

      default:
         return type->type;
      }
   }

   /* Layout decorations are allowed but ignored in certain conditions,
    * to allow SPIR-V generators perform type deduplication. Discard
    * unnecessary ones when passing to NIR.
    */
   if (!vtn_type_needs_explicit_layout(b, mode))
      return glsl_get_bare_type(type->type);

   return type->type;
}
+