{
return ptr->mode == vtn_variable_mode_ubo ||
ptr->mode == vtn_variable_mode_ssbo ||
+ ptr->mode == vtn_variable_mode_push_constant ||
(ptr->mode == vtn_variable_mode_workgroup &&
b->options->lower_workgroup_access_to_offsets);
}
ptr->mode = base->mode;
ptr->type = type;
ptr->var = base->var;
+ ptr->deref = base->deref;
ptr->chain = chain;
return ptr;
}
offset = nir_imm_int(&b->nb, base->var->shared_location);
+ } else if (base->mode == vtn_variable_mode_push_constant) {
+ /* Push constants neither need nor have a block index */
+ vtn_assert(!block_index);
+
+ /* Start off with at the start of the push constant block. */
+ offset = nir_imm_int(&b->nb, 0);
} else {
/* The code above should have ensured a block_index when needed. */
vtn_assert(block_index);
}
}
-/* Crawls a chain of array derefs and rewrites the types so that the
- * lengths stay the same but the terminal type is the one given by
- * tail_type. This is useful for split structures.
- */
-static const struct glsl_type *
-rewrite_deref_var(struct vtn_builder *b, nir_deref_instr *deref,
- struct nir_variable *var)
-{
- /* Always set the mode */
- deref->mode = var->data.mode;
-
- if (deref->deref_type == nir_deref_type_var) {
- assert(deref->var == NULL);
- deref->var = var;
- deref->type = var->type;
- } else {
- assert(deref->deref_type == nir_deref_type_array);
- assert(deref->parent.is_ssa);
- nir_deref_instr *parent =
- nir_instr_as_deref(deref->parent.ssa->parent_instr);
- deref->type = rewrite_deref_var(b, parent, var);
- assert(deref->type);
- }
-
- /* Return of the child type of this deref*/
- if (glsl_type_is_array(deref->type))
- return glsl_get_array_element(deref->type);
- else
- return NULL;
-}
-
struct vtn_pointer *
vtn_pointer_for_variable(struct vtn_builder *b,
struct vtn_variable *var, struct vtn_type *ptr_type)
return pointer;
}
+/* Returns an atomic_uint type based on the original uint type. The returned
+ * type will be equivalent to the original one but will have an atomic_uint
+ * type as leaf instead of an uint.
+ *
+ * Manages uint scalars, arrays, and arrays of arrays of any nested depth.
+ */
+static const struct glsl_type *
+repair_atomic_type(const struct glsl_type *type)
+{
+ assert(glsl_get_base_type(glsl_without_array(type)) == GLSL_TYPE_UINT);
+ assert(glsl_type_is_scalar(glsl_without_array(type)));
+
+ if (glsl_type_is_array(type)) {
+ const struct glsl_type *atomic =
+ repair_atomic_type(glsl_get_array_element(type));
+
+ return glsl_array_type(atomic, glsl_get_length(type));
+ } else {
+ return glsl_atomic_uint_type();
+ }
+}
+
nir_deref_instr *
vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
{
/* Do on-the-fly copy propagation for samplers. */
- if (ptr->var->copy_prop_sampler)
+ if (ptr->var && ptr->var->copy_prop_sampler)
return vtn_pointer_to_deref(b, ptr->var->copy_prop_sampler);
- nir_deref_instr *deref_var =
- nir_deref_instr_create(b->nb.shader, nir_deref_type_var);
- nir_ssa_dest_init(&deref_var->instr, &deref_var->dest, 1, 32, NULL);
- nir_builder_instr_insert(&b->nb, &deref_var->instr);
-
- if (ptr->var->var) {
- deref_var->mode = ptr->var->var->data.mode;
- deref_var->type = ptr->var->var->type;
- deref_var->var = ptr->var->var;
- /* Raw variable access */
- if (!ptr->chain)
- return deref_var;
+ nir_deref_instr *tail;
+ if (ptr->deref) {
+ tail = ptr->deref;
} else {
- vtn_assert(ptr->var->members);
- /* We'll fill out the rest of the deref_var later */
- deref_var->type = ptr->var->type->type;
+ assert(ptr->var && ptr->var->var);
+ tail = nir_build_deref_var(&b->nb, ptr->var->var);
}
+ /* Raw variable access */
+ if (!ptr->chain)
+ return tail;
+
struct vtn_access_chain *chain = ptr->chain;
vtn_assert(chain);
- struct vtn_type *deref_type = ptr->var->type;
- nir_deref_instr *tail = deref_var;
- nir_variable **members = ptr->var->members;
-
for (unsigned i = 0; i < chain->length; i++) {
- enum glsl_base_type base_type = glsl_get_base_type(deref_type->type);
- switch (base_type) {
- case GLSL_TYPE_UINT:
- case GLSL_TYPE_INT:
- case GLSL_TYPE_UINT16:
- case GLSL_TYPE_INT16:
- case GLSL_TYPE_UINT8:
- case GLSL_TYPE_INT8:
- case GLSL_TYPE_UINT64:
- case GLSL_TYPE_INT64:
- case GLSL_TYPE_FLOAT:
- case GLSL_TYPE_FLOAT16:
- case GLSL_TYPE_DOUBLE:
- case GLSL_TYPE_BOOL:
- case GLSL_TYPE_ARRAY: {
- deref_type = deref_type->array_element;
-
+ if (glsl_type_is_struct(tail->type)) {
+ vtn_assert(chain->link[i].mode == vtn_access_mode_literal);
+ unsigned idx = chain->link[i].id;
+ tail = nir_build_deref_struct(&b->nb, tail, idx);
+ } else {
nir_ssa_def *index;
if (chain->link[i].mode == vtn_access_mode_literal) {
index = nir_imm_int(&b->nb, chain->link[i].id);
index = vtn_ssa_value(b, chain->link[i].id)->def;
}
tail = nir_build_deref_array(&b->nb, tail, index);
- break;
- }
-
- case GLSL_TYPE_STRUCT: {
- vtn_assert(chain->link[i].mode == vtn_access_mode_literal);
- unsigned idx = chain->link[i].id;
- deref_type = deref_type->members[idx];
- if (members) {
- rewrite_deref_var(b, tail, members[idx]);
- assert(tail->type == deref_type->type);
- members = NULL;
- } else {
- tail = nir_build_deref_struct(&b->nb, tail, idx);
- }
- break;
- }
- default:
- vtn_fail("Invalid type for deref");
}
}
- vtn_assert(members == NULL);
return tail;
}
-nir_deref_var *
-vtn_pointer_to_deref_var(struct vtn_builder *b, struct vtn_pointer *ptr)
-{
- return nir_deref_instr_to_deref(vtn_pointer_to_deref(b, ptr), b);
-}
-
static void
_vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_instr *deref,
struct vtn_ssa_value *inout)
/* Returns the byte offset (as a 32-bit SSA value) of an offset-based
 * pointer, and stores its block index (or NULL when the mode has none,
 * e.g. workgroup or push constants) in *index_out.
 *
 * Only valid for pointers that use the SSA-offset representation; deref
 * based pointers must go through vtn_pointer_to_deref instead.
 */
nir_ssa_def *
vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
                      nir_ssa_def **index_out)
{
   assert(vtn_pointer_uses_ssa_offset(b, ptr));
   if (!ptr->offset) {
      /* No offset yet means we point at the start of the variable; an
       * empty dereference computes block_index/offset for us.
       */
      struct vtn_access_chain chain = {
         .length = 0,
      };
      ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
   }
   *index_out = ptr->block_index;
   return ptr->offset;
}
/* Tries to compute the size of an interface block based on the strides and
}
}
-static void
-vtn_access_chain_get_offset_size(struct vtn_builder *b,
- struct vtn_access_chain *chain,
- struct vtn_type *type,
- unsigned *access_offset,
- unsigned *access_size)
-{
- *access_offset = 0;
-
- for (unsigned i = 0; i < chain->length; i++) {
- if (chain->link[i].mode != vtn_access_mode_literal)
- break;
-
- if (glsl_type_is_struct(type->type)) {
- *access_offset += type->offsets[chain->link[i].id];
- type = type->members[chain->link[i].id];
- } else {
- *access_offset += type->stride * chain->link[i].id;
- type = type->array_element;
- }
- }
-
- *access_size = vtn_type_block_size(b, type);
-}
-
static void
_vtn_load_store_tail(struct vtn_builder *b, nir_intrinsic_op op, bool load,
nir_ssa_def *index, nir_ssa_def *offset,
_vtn_block_load_store(struct vtn_builder *b, nir_intrinsic_op op, bool load,
nir_ssa_def *index, nir_ssa_def *offset,
unsigned access_offset, unsigned access_size,
- struct vtn_access_chain *chain, unsigned chain_idx,
struct vtn_type *type, struct vtn_ssa_value **inout)
{
- if (chain && chain_idx >= chain->length)
- chain = NULL;
-
- if (load && chain == NULL && *inout == NULL)
+ if (load && *inout == NULL)
*inout = vtn_create_ssa_value(b, type->type);
enum glsl_base_type base_type = glsl_get_base_type(type->type);
nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, i * type->stride));
_vtn_block_load_store(b, op, load, index, elem_off,
access_offset, access_size,
- NULL, 0,
type->array_element, &(*inout)->elems[i]);
}
return;
nir_iadd(&b->nb, offset, nir_imm_int(&b->nb, type->offsets[i]));
_vtn_block_load_store(b, op, load, index, elem_off,
access_offset, access_size,
- NULL, 0,
type->members[i], &(*inout)->elems[i]);
}
return;
break;
case vtn_variable_mode_push_constant:
op = nir_intrinsic_load_push_constant;
- vtn_access_chain_get_offset_size(b, src->chain, src->var->type,
- &access_offset, &access_size);
+ access_size = b->shader->num_uniforms;
break;
case vtn_variable_mode_workgroup:
op = nir_intrinsic_load_shared;
}
nir_ssa_def *offset, *index = NULL;
- unsigned chain_idx;
- offset = vtn_pointer_to_offset(b, src, &index, &chain_idx);
+ offset = vtn_pointer_to_offset(b, src, &index);
struct vtn_ssa_value *value = NULL;
_vtn_block_load_store(b, op, true, index, offset,
access_offset, access_size,
- src->chain, chain_idx, src->type, &value);
+ src->type, &value);
return value;
}
}
nir_ssa_def *offset, *index = NULL;
- unsigned chain_idx;
- offset = vtn_pointer_to_offset(b, dst, &index, &chain_idx);
+ offset = vtn_pointer_to_offset(b, dst, &index);
_vtn_block_load_store(b, op, false, index, offset,
- 0, 0, dst->chain, chain_idx, dst->type, &src);
+ 0, 0, dst->type, &src);
}
static void
*location = FRAG_RESULT_STENCIL;
vtn_assert(*mode == nir_var_shader_out);
break;
+ case SpvBuiltInWorkDim:
+ *location = SYSTEM_VALUE_WORK_DIM;
+ set_mode_system_value(b, mode);
+ break;
+ case SpvBuiltInGlobalSize:
+ *location = SYSTEM_VALUE_GLOBAL_GROUP_SIZE;
+ set_mode_system_value(b, mode);
+ break;
default:
- vtn_fail("unsupported builtin");
+ vtn_fail("unsupported builtin: %u", builtin);
}
}
case SpvDecorationAliased:
case SpvDecorationUniform:
case SpvDecorationStream:
- case SpvDecorationOffset:
case SpvDecorationLinkageAttributes:
break; /* Do nothing with these here */
break;
case SpvDecorationXfbBuffer:
+ var_data->explicit_xfb_buffer = true;
+ var_data->xfb_buffer = dec->literals[0];
+ break;
case SpvDecorationXfbStride:
- vtn_warn("Vulkan does not have transform feedback: %s",
- spirv_decoration_to_string(dec->decoration));
+ var_data->explicit_xfb_stride = true;
+ var_data->xfb_stride = dec->literals[0];
+ break;
+ case SpvDecorationOffset:
+ var_data->explicit_offset = true;
+ var_data->offset = dec->literals[0];
break;
case SpvDecorationCPacked:
case SpvDecorationPatch:
vtn_var->patch = true;
break;
+ case SpvDecorationOffset:
+ vtn_var->offset = dec->literals[0];
+ break;
default:
break;
}
*/
if (dec->decoration == SpvDecorationLocation) {
unsigned location = dec->literals[0];
- bool is_vertex_input;
+ bool is_vertex_input = false;
if (b->shader->info.stage == MESA_SHADER_FRAGMENT &&
vtn_var->mode == vtn_variable_mode_output) {
- is_vertex_input = false;
location += FRAG_RESULT_DATA0;
} else if (b->shader->info.stage == MESA_SHADER_VERTEX &&
vtn_var->mode == vtn_variable_mode_input) {
location += VERT_ATTRIB_GENERIC0;
} else if (vtn_var->mode == vtn_variable_mode_input ||
vtn_var->mode == vtn_variable_mode_output) {
- is_vertex_input = false;
location += vtn_var->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0;
} else if (vtn_var->mode != vtn_variable_mode_uniform) {
vtn_warn("Location must be on input, output, uniform, sampler or "
return;
}
- if (vtn_var->var) {
+ if (vtn_var->var->num_members == 0) {
/* This handles the member and lone variable cases */
vtn_var->var->data.location = location;
} else {
/* This handles the structure member case */
- assert(vtn_var->members);
- unsigned length =
- glsl_get_length(glsl_without_array(vtn_var->type->type));
- for (unsigned i = 0; i < length; i++) {
- vtn_var->members[i]->data.location = location;
- location +=
- glsl_count_attribute_slots(vtn_var->members[i]->interface_type,
- is_vertex_input);
+ assert(vtn_var->var->members);
+ for (unsigned i = 0; i < vtn_var->var->num_members; i++) {
+ vtn_var->var->members[i].location = location;
+ const struct glsl_type *member_type =
+ glsl_get_struct_field(vtn_var->var->interface_type, i);
+ location += glsl_count_attribute_slots(member_type,
+ is_vertex_input);
}
}
return;
} else {
if (vtn_var->var) {
- assert(member == -1);
- apply_var_decoration(b, &vtn_var->var->data, dec);
- } else if (vtn_var->members) {
- if (member >= 0) {
+ if (vtn_var->var->num_members == 0) {
+ assert(member == -1);
+ apply_var_decoration(b, &vtn_var->var->data, dec);
+ } else if (member >= 0) {
/* Member decorations must come from a type */
assert(val->value_type == vtn_value_type_type);
- apply_var_decoration(b, &vtn_var->members[member]->data, dec);
+ apply_var_decoration(b, &vtn_var->var->members[member], dec);
} else {
unsigned length =
glsl_get_length(glsl_without_array(vtn_var->type->type));
for (unsigned i = 0; i < length; i++)
- apply_var_decoration(b, &vtn_var->members[i]->data, dec);
+ apply_var_decoration(b, &vtn_var->var->members[i], dec);
}
} else {
/* A few variables, those with external storage, have no actual
mode = vtn_variable_mode_workgroup;
nir_mode = nir_var_shared;
break;
+ case SpvStorageClassAtomicCounter:
+ mode = vtn_variable_mode_uniform;
+ nir_mode = nir_var_uniform;
+ break;
case SpvStorageClassCrossWorkgroup:
case SpvStorageClassGeneric:
- case SpvStorageClassAtomicCounter:
default:
vtn_fail("Unhandled variable storage class");
}
nir_ssa_def *
vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr)
{
- /* This pointer needs to have a pointer type with actual storage */
- vtn_assert(ptr->ptr_type);
- vtn_assert(ptr->ptr_type->type);
+ if (vtn_pointer_uses_ssa_offset(b, ptr)) {
+ /* This pointer needs to have a pointer type with actual storage */
+ vtn_assert(ptr->ptr_type);
+ vtn_assert(ptr->ptr_type->type);
- if (!ptr->offset) {
- /* If we don't have an offset then we must be a pointer to the variable
- * itself.
- */
- vtn_assert(!ptr->offset && !ptr->block_index);
+ if (!ptr->offset) {
+ /* If we don't have an offset then we must be a pointer to the variable
+ * itself.
+ */
+ vtn_assert(!ptr->offset && !ptr->block_index);
- struct vtn_access_chain chain = {
- .length = 0,
- };
- ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
- }
+ struct vtn_access_chain chain = {
+ .length = 0,
+ };
+ ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
+ }
- vtn_assert(ptr->offset);
- if (ptr->block_index) {
- vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
- ptr->mode == vtn_variable_mode_ssbo);
- return nir_vec2(&b->nb, ptr->block_index, ptr->offset);
+ vtn_assert(ptr->offset);
+ if (ptr->block_index) {
+ vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
+ ptr->mode == vtn_variable_mode_ssbo);
+ return nir_vec2(&b->nb, ptr->block_index, ptr->offset);
+ } else {
+ vtn_assert(ptr->mode == vtn_variable_mode_workgroup);
+ return ptr->offset;
+ }
} else {
- vtn_assert(ptr->mode == vtn_variable_mode_workgroup);
- return ptr->offset;
+ return &vtn_pointer_to_deref(b, ptr)->dest.ssa;
}
}
{
vtn_assert(ssa->num_components <= 2 && ssa->bit_size == 32);
vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
- vtn_assert(ptr_type->deref->base_type != vtn_base_type_pointer);
- /* This pointer type needs to have actual storage */
- vtn_assert(ptr_type->type);
+
+ struct vtn_type *interface_type = ptr_type->deref;
+ while (interface_type->base_type == vtn_base_type_array)
+ interface_type = interface_type->array_element;
struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
+ nir_variable_mode nir_mode;
ptr->mode = vtn_storage_class_to_mode(b, ptr_type->storage_class,
- ptr_type, NULL);
+ interface_type, &nir_mode);
ptr->type = ptr_type->deref;
ptr->ptr_type = ptr_type;
- if (ssa->num_components > 1) {
+ if (ptr->mode == vtn_variable_mode_ubo ||
+ ptr->mode == vtn_variable_mode_ssbo) {
+ /* This pointer type needs to have actual storage */
+ vtn_assert(ptr_type->type);
vtn_assert(ssa->num_components == 2);
- vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
- ptr->mode == vtn_variable_mode_ssbo);
ptr->block_index = nir_channel(&b->nb, ssa, 0);
ptr->offset = nir_channel(&b->nb, ssa, 1);
- } else {
+ } else if (ptr->mode == vtn_variable_mode_workgroup ||
+ ptr->mode == vtn_variable_mode_push_constant) {
+ /* This pointer type needs to have actual storage */
+ vtn_assert(ptr_type->type);
vtn_assert(ssa->num_components == 1);
- vtn_assert(ptr->mode == vtn_variable_mode_workgroup);
ptr->block_index = NULL;
ptr->offset = ssa;
+ } else {
+ ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
+ ptr_type->deref->type);
}
return ptr;
/* For these, we create the variable normally */
var->var = rzalloc(b->shader, nir_variable);
var->var->name = ralloc_strdup(var->var, val->name);
- var->var->type = var->type->type;
+
+ /* Need to tweak the nir type here as at vtn_handle_type we don't have
+ * the access to storage_class, that is the one that points us that is
+ * an atomic uint.
+ */
+ if (storage_class == SpvStorageClassAtomicCounter) {
+ var->var->type = repair_atomic_type(var->type->type);
+ } else {
+ var->var->type = var->type->type;
+ }
var->var->data.mode = nir_mode;
var->var->data.location = -1;
var->var->interface_type = NULL;
* able to preserve that information.
*/
- int array_length = -1;
struct vtn_type *interface_type = var->type;
if (is_per_vertex_inout(var, b->shader->info.stage)) {
/* In Geometry shaders (and some tessellation), inputs come
* check should be sufficient.
*/
interface_type = var->type->array_element;
- array_length = glsl_get_length(var->type->type);
}
+ var->var = rzalloc(b->shader, nir_variable);
+ var->var->name = ralloc_strdup(var->var, val->name);
+ var->var->type = var->type->type;
+ var->var->interface_type = interface_type->type;
+ var->var->data.mode = nir_mode;
+ var->var->data.patch = var->patch;
+
if (glsl_type_is_struct(interface_type->type)) {
- /* It's a struct. Split it. */
- unsigned num_members = glsl_get_length(interface_type->type);
- var->members = ralloc_array(b, nir_variable *, num_members);
-
- for (unsigned i = 0; i < num_members; i++) {
- const struct glsl_type *mtype = interface_type->members[i]->type;
- if (array_length >= 0)
- mtype = glsl_array_type(mtype, array_length);
-
- var->members[i] = rzalloc(b->shader, nir_variable);
- var->members[i]->name =
- ralloc_asprintf(var->members[i], "%s.%d", val->name, i);
- var->members[i]->type = mtype;
- var->members[i]->interface_type =
- interface_type->members[i]->type;
- var->members[i]->data.mode = nir_mode;
- var->members[i]->data.patch = var->patch;
-
- if (initializer) {
- assert(i < initializer->num_elements);
- var->members[i]->constant_initializer =
- nir_constant_clone(initializer->elements[i], var->members[i]);
- }
+ /* It's a struct. Set it up as per-member. */
+ var->var->num_members = glsl_get_length(interface_type->type);
+ var->var->members = rzalloc_array(var->var, struct nir_variable_data,
+ var->var->num_members);
+
+ for (unsigned i = 0; i < var->var->num_members; i++) {
+ var->var->members[i].mode = nir_mode;
+ var->var->members[i].patch = var->patch;
}
-
- initializer = NULL;
- } else {
- var->var = rzalloc(b->shader, nir_variable);
- var->var->name = ralloc_strdup(var->var, val->name);
- var->var->type = var->type->type;
- var->var->interface_type = interface_type->type;
- var->var->data.mode = nir_mode;
- var->var->data.patch = var->patch;
}
/* For inputs and outputs, we need to grab locations and builtin
break;
}
- case vtn_variable_mode_param:
- vtn_fail("Not created through OpVariable");
-
case vtn_variable_mode_ubo:
case vtn_variable_mode_ssbo:
case vtn_variable_mode_push_constant:
var->var->data.explicit_binding = var->explicit_binding;
var->var->data.descriptor_set = var->descriptor_set;
var->var->data.index = var->input_attachment_index;
+ var->var->data.offset = var->offset;
if (glsl_type_is_image(without_array->type))
var->var->data.image.format = without_array->image_format;
}
if (var->mode == vtn_variable_mode_local) {
- vtn_assert(var->members == NULL && var->var != NULL);
+ vtn_assert(var->var != NULL && var->var->members == NULL);
nir_function_impl_add_variable(b->nb.impl, var->var);
} else if (var->var) {
nir_shader_add_variable(b->shader, var->var);
- } else if (var->members) {
- unsigned count = glsl_get_length(without_array->type);
- for (unsigned i = 0; i < count; i++) {
- vtn_assert(var->members[i]->data.mode != nir_var_local);
- nir_shader_add_variable(b->shader, var->members[i]);
- }
} else {
vtn_assert(vtn_pointer_is_external_block(b, val->pointer));
}