+ assert(base->var && base->var->var);
+ tail = nir_build_deref_var(&b->nb, base->var->var);
+ if (base->ptr_type && base->ptr_type->type) {
+ tail->dest.ssa.num_components =
+ glsl_get_vector_elements(base->ptr_type->type);
+ tail->dest.ssa.bit_size = glsl_get_bit_size(base->ptr_type->type);
+ }
+ }
+
+ if (idx == 0 && deref_chain->ptr_as_array) {
+ /* We start with a deref cast to get the stride. Hopefully, we'll be
+ * able to delete that cast eventually.
+ */
+ tail = nir_build_deref_cast(&b->nb, &tail->dest.ssa, tail->mode,
+ tail->type, base->ptr_type->stride);
+
+ nir_ssa_def *index = vtn_access_link_as_ssa(b, deref_chain->link[0], 1,
+ tail->dest.ssa.bit_size);
+ tail = nir_build_deref_ptr_as_array(&b->nb, tail, index);
+ idx++;
+ }
+
+ for (; idx < deref_chain->length; idx++) {
+ if (glsl_type_is_struct_or_ifc(type->type)) {
+ vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
+ unsigned field = deref_chain->link[idx].id;
+ tail = nir_build_deref_struct(&b->nb, tail, field);
+ type = type->members[field];
+ } else {
+ nir_ssa_def *arr_index =
+ vtn_access_link_as_ssa(b, deref_chain->link[idx], 1,
+ tail->dest.ssa.bit_size);
+ tail = nir_build_deref_array(&b->nb, tail, arr_index);
+ type = type->array_element;
+ }
+
+ access |= type->access;
+ }
+
+ struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
+ ptr->mode = base->mode;
+ ptr->type = type;
+ ptr->var = base->var;
+ ptr->deref = tail;
+ ptr->access = access;
+
+ return ptr;
+}
+
+static struct vtn_pointer *
+vtn_ssa_offset_pointer_dereference(struct vtn_builder *b,
+ struct vtn_pointer *base,
+ struct vtn_access_chain *deref_chain)
+{
+ nir_ssa_def *block_index = base->block_index;
+ nir_ssa_def *offset = base->offset;
+ struct vtn_type *type = base->type;
+ enum gl_access_qualifier access = base->access;
+
+ unsigned idx = 0;
+ if (base->mode == vtn_variable_mode_ubo ||
+ base->mode == vtn_variable_mode_ssbo) {
+ if (!block_index) {
+ vtn_assert(base->var && base->type);
+ nir_ssa_def *desc_arr_idx;
+ if (glsl_type_is_array(type->type)) {
+ if (deref_chain->length >= 1) {
+ desc_arr_idx =
+ vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
+ idx++;
+ /* This consumes a level of type */
+ type = type->array_element;
+ access |= type->access;
+ } else {
+ /* This is annoying. We've been asked for a pointer to the
+               * array of UBOs/SSBOs and not a specific buffer. Return a
+ * pointer with a descriptor index of 0 and we'll have to do
+ * a reindex later to adjust it to the right thing.
+ */
+ desc_arr_idx = nir_imm_int(&b->nb, 0);
+ }
+ } else if (deref_chain->ptr_as_array) {
+ /* You can't have a zero-length OpPtrAccessChain */
+ vtn_assert(deref_chain->length >= 1);
+ desc_arr_idx = vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
+ } else {
+ /* We have a regular non-array SSBO. */
+ desc_arr_idx = NULL;
+ }
+ block_index = vtn_variable_resource_index(b, base->var, desc_arr_idx);
+ } else if (deref_chain->ptr_as_array &&
+ type->base_type == vtn_base_type_struct && type->block) {
+ /* We are doing an OpPtrAccessChain on a pointer to a struct that is
+ * decorated block. This is an interesting corner in the SPIR-V
+       * spec. One interpretation would be that the client is clearly
+ * trying to treat that block as if it's an implicit array of blocks
+ * repeated in the buffer. However, the SPIR-V spec for the
+ * OpPtrAccessChain says:
+ *
+ * "Base is treated as the address of the first element of an
+ * array, and the Element element’s address is computed to be the
+ * base for the Indexes, as per OpAccessChain."
+ *
+ * Taken literally, that would mean that your struct type is supposed
+ * to be treated as an array of such a struct and, since it's
+ * decorated block, that means an array of blocks which corresponds
+ * to an array descriptor. Therefore, we need to do a reindex
+ * operation to add the index from the first link in the access chain
+       * to the index we received.
+ *
+ * The downside to this interpretation (there always is one) is that
+ * this might be somewhat surprising behavior to apps if they expect
+ * the implicit array behavior described above.
+ */
+ vtn_assert(deref_chain->length >= 1);
+ nir_ssa_def *offset_index =
+ vtn_access_link_as_ssa(b, deref_chain->link[0], 1, 32);
+ idx++;
+
+ block_index = vtn_resource_reindex(b, base->mode,
+ block_index, offset_index);
+ }
+ }
+
+ if (!offset) {
+ if (base->mode == vtn_variable_mode_workgroup) {
+ /* SLM doesn't need nor have a block index */
+ vtn_assert(!block_index);
+
+ /* We need the variable for the base offset */
+ vtn_assert(base->var);
+
+ /* We need ptr_type for size and alignment */
+ vtn_assert(base->ptr_type);
+
+ /* Assign location on first use so that we don't end up bloating SLM
+ * address space for variables which are never statically used.
+ */
+ if (base->var->shared_location < 0) {
+ vtn_assert(base->ptr_type->length > 0 && base->ptr_type->align > 0);
+ b->shader->num_shared = vtn_align_u32(b->shader->num_shared,
+ base->ptr_type->align);
+ base->var->shared_location = b->shader->num_shared;
+ b->shader->num_shared += base->ptr_type->length;
+ }
+
+ offset = nir_imm_int(&b->nb, base->var->shared_location);
+ } else if (base->mode == vtn_variable_mode_push_constant) {
+ /* Push constants neither need nor have a block index */
+ vtn_assert(!block_index);
+
+         /* Start off at the start of the push constant block. */
+ offset = nir_imm_int(&b->nb, 0);
+ } else {
+ /* The code above should have ensured a block_index when needed. */
+ vtn_assert(block_index);
+
+         /* Start off at the start of the buffer. */
+ offset = nir_imm_int(&b->nb, 0);
+ }