ptr->mode = base->mode;
ptr->type = type;
ptr->var = base->var;
+ ptr->deref = base->deref;
ptr->chain = chain;
return ptr;
return pointer;
}
+/* Returns an atomic_uint type based on the original uint type. The returned
+ * type will be equivalent to the original one but will have an atomic_uint
+ * type as leaf instead of an uint.
+ *
+ * Manages uint scalars, arrays, and arrays of arrays of any nested depth.
+ */
+static const struct glsl_type *
+repair_atomic_type(const struct glsl_type *type)
+{
+ assert(glsl_get_base_type(glsl_without_array(type)) == GLSL_TYPE_UINT);
+ assert(glsl_type_is_scalar(glsl_without_array(type)));
+
+ if (glsl_type_is_array(type)) {
+ const struct glsl_type *atomic =
+ repair_atomic_type(glsl_get_array_element(type));
+
+ return glsl_array_type(atomic, glsl_get_length(type));
+ } else {
+ return glsl_atomic_uint_type();
+ }
+}
+
nir_deref_instr *
vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
{
/* Do on-the-fly copy propagation for samplers. */
- if (ptr->var->copy_prop_sampler)
+ if (ptr->var && ptr->var->copy_prop_sampler)
return vtn_pointer_to_deref(b, ptr->var->copy_prop_sampler);
- nir_deref_instr *deref_var =
- nir_deref_instr_create(b->nb.shader, nir_deref_type_var);
- nir_ssa_dest_init(&deref_var->instr, &deref_var->dest, 1, 32, NULL);
- nir_builder_instr_insert(&b->nb, &deref_var->instr);
+ nir_deref_instr *tail;
+ if (ptr->deref) {
+ tail = ptr->deref;
+ } else {
+ assert(ptr->var && ptr->var->var);
+ tail = nir_build_deref_var(&b->nb, ptr->var->var);
+ }
- assert(ptr->var->var);
- deref_var->mode = ptr->var->var->data.mode;
- deref_var->type = ptr->var->var->type;
- deref_var->var = ptr->var->var;
/* Raw variable access */
if (!ptr->chain)
- return deref_var;
+ return tail;
struct vtn_access_chain *chain = ptr->chain;
vtn_assert(chain);
- struct vtn_type *deref_type = ptr->var->type;
- nir_deref_instr *tail = deref_var;
-
for (unsigned i = 0; i < chain->length; i++) {
- enum glsl_base_type base_type = glsl_get_base_type(deref_type->type);
- switch (base_type) {
- case GLSL_TYPE_UINT:
- case GLSL_TYPE_INT:
- case GLSL_TYPE_UINT16:
- case GLSL_TYPE_INT16:
- case GLSL_TYPE_UINT8:
- case GLSL_TYPE_INT8:
- case GLSL_TYPE_UINT64:
- case GLSL_TYPE_INT64:
- case GLSL_TYPE_FLOAT:
- case GLSL_TYPE_FLOAT16:
- case GLSL_TYPE_DOUBLE:
- case GLSL_TYPE_BOOL:
- case GLSL_TYPE_ARRAY: {
- deref_type = deref_type->array_element;
-
+ if (glsl_type_is_struct(tail->type)) {
+ vtn_assert(chain->link[i].mode == vtn_access_mode_literal);
+ unsigned idx = chain->link[i].id;
+ tail = nir_build_deref_struct(&b->nb, tail, idx);
+ } else {
nir_ssa_def *index;
if (chain->link[i].mode == vtn_access_mode_literal) {
index = nir_imm_int(&b->nb, chain->link[i].id);
index = vtn_ssa_value(b, chain->link[i].id)->def;
}
tail = nir_build_deref_array(&b->nb, tail, index);
- break;
- }
-
- case GLSL_TYPE_STRUCT: {
- vtn_assert(chain->link[i].mode == vtn_access_mode_literal);
- unsigned idx = chain->link[i].id;
- deref_type = deref_type->members[idx];
- tail = nir_build_deref_struct(&b->nb, tail, idx);
- break;
- }
- default:
- vtn_fail("Invalid type for deref");
}
}
return tail;
}
-nir_deref_var *
-vtn_pointer_to_deref_var(struct vtn_builder *b, struct vtn_pointer *ptr)
-{
- return nir_deref_instr_to_deref(vtn_pointer_to_deref(b, ptr), b);
-}
-
static void
_vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_instr *deref,
struct vtn_ssa_value *inout)
*location = FRAG_RESULT_STENCIL;
vtn_assert(*mode == nir_var_shader_out);
break;
+ case SpvBuiltInWorkDim:
+ *location = SYSTEM_VALUE_WORK_DIM;
+ set_mode_system_value(b, mode);
+ break;
+ case SpvBuiltInGlobalSize:
+ *location = SYSTEM_VALUE_GLOBAL_GROUP_SIZE;
+ set_mode_system_value(b, mode);
+ break;
default:
- vtn_fail("unsupported builtin");
+ vtn_fail("unsupported builtin: %u", builtin);
}
}
case SpvDecorationAliased:
case SpvDecorationUniform:
case SpvDecorationStream:
- case SpvDecorationOffset:
case SpvDecorationLinkageAttributes:
break; /* Do nothing with these here */
break;
case SpvDecorationXfbBuffer:
+ var_data->explicit_xfb_buffer = true;
+ var_data->xfb_buffer = dec->literals[0];
+ break;
case SpvDecorationXfbStride:
- vtn_warn("Vulkan does not have transform feedback: %s",
- spirv_decoration_to_string(dec->decoration));
+ var_data->explicit_xfb_stride = true;
+ var_data->xfb_stride = dec->literals[0];
+ break;
+ case SpvDecorationOffset:
+ var_data->explicit_offset = true;
+ var_data->offset = dec->literals[0];
break;
case SpvDecorationCPacked:
case SpvDecorationPatch:
vtn_var->patch = true;
break;
+ case SpvDecorationOffset:
+ vtn_var->offset = dec->literals[0];
+ break;
default:
break;
}
*/
if (dec->decoration == SpvDecorationLocation) {
unsigned location = dec->literals[0];
- bool is_vertex_input;
+ bool is_vertex_input = false;
if (b->shader->info.stage == MESA_SHADER_FRAGMENT &&
vtn_var->mode == vtn_variable_mode_output) {
- is_vertex_input = false;
location += FRAG_RESULT_DATA0;
} else if (b->shader->info.stage == MESA_SHADER_VERTEX &&
vtn_var->mode == vtn_variable_mode_input) {
location += VERT_ATTRIB_GENERIC0;
} else if (vtn_var->mode == vtn_variable_mode_input ||
vtn_var->mode == vtn_variable_mode_output) {
- is_vertex_input = false;
location += vtn_var->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0;
} else if (vtn_var->mode != vtn_variable_mode_uniform) {
vtn_warn("Location must be on input, output, uniform, sampler or "
mode = vtn_variable_mode_workgroup;
nir_mode = nir_var_shared;
break;
+ case SpvStorageClassAtomicCounter:
+ mode = vtn_variable_mode_uniform;
+ nir_mode = nir_var_uniform;
+ break;
case SpvStorageClassCrossWorkgroup:
case SpvStorageClassGeneric:
- case SpvStorageClassAtomicCounter:
default:
vtn_fail("Unhandled variable storage class");
}
nir_ssa_def *
vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr)
{
- /* This pointer needs to have a pointer type with actual storage */
- vtn_assert(ptr->ptr_type);
- vtn_assert(ptr->ptr_type->type);
-
- if (!ptr->offset) {
- /* If we don't have an offset then we must be a pointer to the variable
- * itself.
- */
- vtn_assert(!ptr->offset && !ptr->block_index);
+ if (vtn_pointer_uses_ssa_offset(b, ptr)) {
+ /* This pointer needs to have a pointer type with actual storage */
+ vtn_assert(ptr->ptr_type);
+ vtn_assert(ptr->ptr_type->type);
+
+ if (!ptr->offset) {
+ /* If we don't have an offset then we must be a pointer to the variable
+ * itself.
+ */
+ vtn_assert(!ptr->offset && !ptr->block_index);
- struct vtn_access_chain chain = {
- .length = 0,
- };
- ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
- }
+ struct vtn_access_chain chain = {
+ .length = 0,
+ };
+ ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
+ }
- vtn_assert(ptr->offset);
- if (ptr->block_index) {
- vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
- ptr->mode == vtn_variable_mode_ssbo);
- return nir_vec2(&b->nb, ptr->block_index, ptr->offset);
+ vtn_assert(ptr->offset);
+ if (ptr->block_index) {
+ vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
+ ptr->mode == vtn_variable_mode_ssbo);
+ return nir_vec2(&b->nb, ptr->block_index, ptr->offset);
+ } else {
+ vtn_assert(ptr->mode == vtn_variable_mode_workgroup);
+ return ptr->offset;
+ }
} else {
- vtn_assert(ptr->mode == vtn_variable_mode_workgroup);
- return ptr->offset;
+ return &vtn_pointer_to_deref(b, ptr)->dest.ssa;
}
}
{
vtn_assert(ssa->num_components <= 2 && ssa->bit_size == 32);
vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
- vtn_assert(ptr_type->deref->base_type != vtn_base_type_pointer);
- /* This pointer type needs to have actual storage */
- vtn_assert(ptr_type->type);
+
+ struct vtn_type *interface_type = ptr_type->deref;
+ while (interface_type->base_type == vtn_base_type_array)
+ interface_type = interface_type->array_element;
struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
+ nir_variable_mode nir_mode;
ptr->mode = vtn_storage_class_to_mode(b, ptr_type->storage_class,
- ptr_type, NULL);
+ interface_type, &nir_mode);
ptr->type = ptr_type->deref;
ptr->ptr_type = ptr_type;
- if (ssa->num_components > 1) {
+ if (ptr->mode == vtn_variable_mode_ubo ||
+ ptr->mode == vtn_variable_mode_ssbo) {
+ /* This pointer type needs to have actual storage */
+ vtn_assert(ptr_type->type);
vtn_assert(ssa->num_components == 2);
- vtn_assert(ptr->mode == vtn_variable_mode_ubo ||
- ptr->mode == vtn_variable_mode_ssbo);
ptr->block_index = nir_channel(&b->nb, ssa, 0);
ptr->offset = nir_channel(&b->nb, ssa, 1);
- } else {
+ } else if (ptr->mode == vtn_variable_mode_workgroup ||
+ ptr->mode == vtn_variable_mode_push_constant) {
+ /* This pointer type needs to have actual storage */
+ vtn_assert(ptr_type->type);
vtn_assert(ssa->num_components == 1);
- vtn_assert(ptr->mode == vtn_variable_mode_workgroup ||
- ptr->mode == vtn_variable_mode_push_constant);
ptr->block_index = NULL;
ptr->offset = ssa;
+ } else {
+ ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
+ ptr_type->deref->type);
}
return ptr;
/* For these, we create the variable normally */
var->var = rzalloc(b->shader, nir_variable);
var->var->name = ralloc_strdup(var->var, val->name);
- var->var->type = var->type->type;
+
+   /* We need to adjust the NIR type here because at vtn_handle_type we do
+    * not have access to the storage class, which is what tells us that
+    * this is actually an atomic uint.
+    */
+ if (storage_class == SpvStorageClassAtomicCounter) {
+ var->var->type = repair_atomic_type(var->type->type);
+ } else {
+ var->var->type = var->type->type;
+ }
var->var->data.mode = nir_mode;
var->var->data.location = -1;
var->var->interface_type = NULL;
break;
}
- case vtn_variable_mode_param:
- vtn_fail("Not created through OpVariable");
-
case vtn_variable_mode_ubo:
case vtn_variable_mode_ssbo:
case vtn_variable_mode_push_constant:
var->var->data.explicit_binding = var->explicit_binding;
var->var->data.descriptor_set = var->descriptor_set;
var->var->data.index = var->input_attachment_index;
+ var->var->data.offset = var->offset;
if (glsl_type_is_image(without_array->type))
var->var->data.image.format = without_array->image_format;