   return dest;
}
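
+/* Wraps the given type in the same (possibly nested) array dimensions and
+ * explicit strides as array_type.  If array_type is not an array, the type
+ * is returned unchanged.
+ */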
+static const struct glsl_type *
+wrap_type_in_array(const struct glsl_type *type,
+                   const struct glsl_type *array_type)
+{
+   if (!glsl_type_is_array(array_type))
+      return type;
+
+   const struct glsl_type *elem_type =
+      wrap_type_in_array(type, glsl_get_array_element(array_type));
+   return glsl_array_type(elem_type, glsl_get_length(array_type),
+                          glsl_get_explicit_stride(array_type));
+}
+
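+/* Returns the NIR-level glsl_type for a vtn_type in the given variable mode.
+ * The storage class is not known when the type is parsed in
+ * vtn_handle_type(), so the uint -> atomic_uint fixup for AtomicCounter
+ * variables is applied here, based on the mode, instead.
+ */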
+const struct glsl_type *
+vtn_type_get_nir_type(struct vtn_builder *b, struct vtn_type *type,
+                      enum vtn_variable_mode mode)
+{
+   if (mode == vtn_variable_mode_atomic_counter) {
+      vtn_fail_if(glsl_without_array(type->type) != glsl_uint_type(),
+                  "Variables in the AtomicCounter storage class should be "
+                  "(possibly arrays of arrays of) uint.");
+      return wrap_type_in_array(glsl_atomic_uint_type(), type->type);
+   }
+
+   return type->type;
+}
+
static struct vtn_type *
mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
{
      nir_variable_mode nir_mode =
         base->mode == vtn_variable_mode_ssbo ? nir_var_mem_ssbo : nir_var_mem_ubo;
-      tail = nir_build_deref_cast(&b->nb, desc, nir_mode, type->type,
+      tail = nir_build_deref_cast(&b->nb, desc, nir_mode,
+                                  vtn_type_get_nir_type(b, type, base->mode),
                                   base->ptr_type->stride);
   } else {
      assert(base->var && base->var->var);
   }
}
-/* Returns an atomic_uint type based on the original uint type. The returned
- * type will be equivalent to the original one but will have an atomic_uint
- * type as leaf instead of an uint.
- *
- * Manages uint scalars, arrays, and arrays of arrays of any nested depth.
- */
-static const struct glsl_type *
-repair_atomic_type(const struct glsl_type *type)
-{
-   assert(glsl_get_base_type(glsl_without_array(type)) == GLSL_TYPE_UINT);
-   assert(glsl_type_is_scalar(glsl_without_array(type)));
-
-   if (glsl_type_is_array(type)) {
-      const struct glsl_type *atomic =
-         repair_atomic_type(glsl_get_array_element(type));
-
-      return glsl_array_type(atomic, glsl_get_length(type),
-                             glsl_get_explicit_stride(type));
-   } else {
-      return glsl_atomic_uint_type();
-   }
-}
-
nir_deref_instr *
vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
{
         ptr->offset = ssa;
      }
   } else {
-      const struct glsl_type *deref_type = ptr_type->deref->type;
+      const struct glsl_type *deref_type =
+         vtn_type_get_nir_type(b, ptr_type->deref, ptr->mode);
      if (!vtn_pointer_is_external_block(b, ptr)) {
         ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
                                           deref_type, ptr_type->stride);
          * storage class with Block can be used.
          */
         ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
-                                           ptr_type->deref->type,
-                                           ptr_type->stride);
+                                           deref_type, ptr_type->stride);
         ptr->deref->dest.ssa.num_components =
            glsl_get_vector_elements(ptr_type->type);
         ptr->deref->dest.ssa.bit_size = glsl_get_bit_size(ptr_type->type);
      /* For these, we create the variable normally */
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);
-
-      if (var->mode == vtn_variable_mode_atomic_counter) {
-         /* Need to tweak the nir type here as at vtn_handle_type we don't
-          * have the access to storage_class, that is the one that points us
-          * that is an atomic uint.
-          */
-         var->var->type = repair_atomic_type(var->type->type);
-      } else {
-         /* Private variables don't have any explicit layout but some layouts
-          * may have leaked through due to type deduplication in the SPIR-V.
-          */
-         var->var->type = var->type->type;
-      }
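+      /* vtn_type_get_nir_type() rewrites uint to atomic_uint for
+       * AtomicCounter variables; for the other modes handled here it
+       * returns the type unchanged.
+       */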
+      var->var->type = vtn_type_get_nir_type(b, var->type, var->mode);
      var->var->data.mode = nir_mode;
      var->var->data.location = -1;
      var->var->interface_type = NULL;
         var->var = rzalloc(b->shader, nir_variable);
         var->var->name = ralloc_strdup(var->var, val->name);
-         var->var->type = var->type->type;
-         var->var->interface_type = var->type->type;
+         var->var->type = vtn_type_get_nir_type(b, var->type, var->mode);
+         var->var->interface_type = var->var->type;
         var->var->data.mode = nir_mode;
         var->var->data.location = -1;
       * layouts may have leaked through due to type deduplication in the
       * SPIR-V.
       */
-      var->var->type = var->type->type;
+      var->var->type = vtn_type_get_nir_type(b, var->type, var->mode);
      var->var->data.mode = nir_var_mem_shared;
      break;
       * the SPIR-V. We do, however, keep the layouts in the variable's
       * interface_type because we need offsets for XFB arrays of blocks.
       */
-      var->var->type = var->type->type;
+      var->var->type = vtn_type_get_nir_type(b, var->type, var->mode);
      var->var->data.mode = nir_mode;
      var->var->data.patch = var->patch;
         iface_type = iface_type->array_element;
      }
      if (iface_type->base_type == vtn_base_type_struct && iface_type->block)
-         var->var->interface_type = iface_type->type;
+         var->var->interface_type = vtn_type_get_nir_type(b, iface_type,
+                                                          var->mode);
      if (per_vertex_type->base_type == vtn_base_type_struct &&
          per_vertex_type->block) {