#include "nir_deref.h"
#include <vulkan/vulkan_core.h>
+static void
+ptr_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
+ const struct vtn_decoration *dec, void *void_ptr)
+{
+ struct vtn_pointer *ptr = void_ptr;
+
+ switch (dec->decoration) {
+ case SpvDecorationNonUniformEXT:
+ ptr->access |= ACCESS_NON_UNIFORM;
+ break;
+
+ default:
+ break;
+ }
+}
+
+static struct vtn_pointer*
+vtn_decorate_pointer(struct vtn_builder *b, struct vtn_value *val,
+ struct vtn_pointer *ptr)
+{
+ struct vtn_pointer dummy = { .access = 0 };
+ vtn_foreach_decoration(b, val, ptr_decoration_cb, &dummy);
+
+ /* If we're adding access flags, make a copy of the pointer. We could
+ * probably just OR them in without doing so but this prevents us from
+ * leaking them any further than actually specified in the SPIR-V.
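+ *
+ * For example, if two SPIR-V result ids share this vtn_pointer and only
+ * one of them is decorated NonUniformEXT, copying here keeps
+ * ACCESS_NON_UNIFORM on that id's value without leaking it to the other.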
+ */
+ if (dummy.access & ~ptr->access) {
+ struct vtn_pointer *copy = ralloc(b, struct vtn_pointer);
+ *copy = *ptr;
+ copy->access |= dummy.access;
+ return copy;
+ }
+
+ return ptr;
+}
+
+struct vtn_value *
+vtn_push_pointer(struct vtn_builder *b, uint32_t value_id,
+ struct vtn_pointer *ptr)
+{
+ struct vtn_value *val = vtn_push_value(b, value_id, vtn_value_type_pointer);
+ val->pointer = vtn_decorate_pointer(b, val, ptr);
+ return val;
+}
+
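+/* This implements value copying for instructions such as OpCopyObject: the
+ * destination id receives the source's contents but keeps its own name,
+ * type, and decorations, which are re-applied to pointers below.
+ */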
+void
+vtn_copy_value(struct vtn_builder *b, uint32_t src_value_id,
+ uint32_t dst_value_id)
+{
+ struct vtn_value *src = vtn_untyped_value(b, src_value_id);
+ struct vtn_value *dst = vtn_untyped_value(b, dst_value_id);
+ struct vtn_value src_copy = *src;
+
+ vtn_fail_if(dst->value_type != vtn_value_type_invalid,
+ "SPIR-V id %u has already been written by another instruction",
+ dst_value_id);
+
+ vtn_fail_if(dst->type->id != src->type->id,
+ "Result Type must equal Operand type");
+
+ src_copy.name = dst->name;
+ src_copy.decoration = dst->decoration;
+ src_copy.type = dst->type;
+ *dst = src_copy;
+
+ if (dst->value_type == vtn_value_type_pointer)
+ dst->pointer = vtn_decorate_pointer(b, dst, dst->pointer);
+}
+
static struct vtn_access_chain *
vtn_access_chain_create(struct vtn_builder *b, unsigned length)
{
}
bool
-vtn_pointer_uses_ssa_offset(struct vtn_builder *b,
- struct vtn_pointer *ptr)
+vtn_mode_uses_ssa_offset(struct vtn_builder *b,
+ enum vtn_variable_mode mode)
{
- return ((ptr->mode == vtn_variable_mode_ubo ||
- ptr->mode == vtn_variable_mode_ssbo) &&
+ return ((mode == vtn_variable_mode_ubo ||
+ mode == vtn_variable_mode_ssbo) &&
b->options->lower_ubo_ssbo_access_to_offsets) ||
- ptr->mode == vtn_variable_mode_push_constant ||
- (ptr->mode == vtn_variable_mode_workgroup &&
- b->options->lower_workgroup_access_to_offsets);
+ mode == vtn_variable_mode_push_constant;
+}
+
+static bool
+vtn_mode_is_cross_invocation(struct vtn_builder *b,
+ enum vtn_variable_mode mode)
+{
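+ /* Storage for these modes is shared across invocations rather than
+ * private to a single invocation; see the race-condition comment in
+ * _vtn_variable_load_store for why this matters for loads and stores.
+ */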
+ return mode == vtn_variable_mode_ssbo ||
+ mode == vtn_variable_mode_ubo ||
+ mode == vtn_variable_mode_phys_ssbo ||
+ mode == vtn_variable_mode_push_constant ||
+ mode == vtn_variable_mode_workgroup ||
+ mode == vtn_variable_mode_cross_workgroup;
}
static bool
vtn_pointer_is_external_block(struct vtn_builder *b, struct vtn_pointer *ptr)
{
return ptr->mode == vtn_variable_mode_ssbo ||
ptr->mode == vtn_variable_mode_ubo ||
- ptr->mode == vtn_variable_mode_push_constant ||
- (ptr->mode == vtn_variable_mode_workgroup &&
- b->options->lower_workgroup_access_to_offsets);
+ ptr->mode == vtn_variable_mode_phys_ssbo ||
+ ptr->mode == vtn_variable_mode_push_constant;
}
static nir_ssa_def *
nir_ssa_def *ssa = vtn_ssa_value(b, link.id)->def;
if (ssa->bit_size != bit_size)
ssa = nir_i2i(&b->nb, ssa, bit_size);
- if (stride != 1)
- ssa = nir_imul_imm(&b->nb, ssa, stride);
- return ssa;
+ return nir_imul_imm(&b->nb, ssa, stride);
}
}
vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
nir_ssa_def *desc_array_index)
{
+ vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);
+
if (!desc_array_index) {
- vtn_assert(glsl_type_is_struct(var->type->type));
+ vtn_assert(glsl_type_is_struct_or_ifc(var->type->type));
desc_array_index = nir_imm_int(&b->nb, 0);
}
nir_intrinsic_set_binding(instr, var->binding);
nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, var->mode));
- nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
+ vtn_fail_if(var->mode != vtn_variable_mode_ubo &&
+ var->mode != vtn_variable_mode_ssbo,
+ "Invalid mode for vulkan_resource_index");
+
+ nir_address_format addr_format = vtn_mode_to_address_format(b, var->mode);
+ const struct glsl_type *index_type =
+ b->options->lower_ubo_ssbo_access_to_offsets ?
+ glsl_uint_type() : nir_address_format_to_glsl_type(addr_format);
+
+ instr->num_components = glsl_get_vector_elements(index_type);
+ nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
+ glsl_get_bit_size(index_type), NULL);
nir_builder_instr_insert(&b->nb, &instr->instr);
return &instr->dest.ssa;
vtn_resource_reindex(struct vtn_builder *b, enum vtn_variable_mode mode,
nir_ssa_def *base_index, nir_ssa_def *offset_index)
{
+ vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);
+
nir_intrinsic_instr *instr =
nir_intrinsic_instr_create(b->nb.shader,
nir_intrinsic_vulkan_resource_reindex);
instr->src[1] = nir_src_for_ssa(offset_index);
nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, mode));
- nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
+ vtn_fail_if(mode != vtn_variable_mode_ubo && mode != vtn_variable_mode_ssbo,
+ "Invalid mode for vulkan_resource_reindex");
+
+ nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
+ const struct glsl_type *index_type =
+ b->options->lower_ubo_ssbo_access_to_offsets ?
+ glsl_uint_type() : nir_address_format_to_glsl_type(addr_format);
+
+ instr->num_components = glsl_get_vector_elements(index_type);
+ nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
+ glsl_get_bit_size(index_type), NULL);
nir_builder_instr_insert(&b->nb, &instr->instr);
return &instr->dest.ssa;
static nir_ssa_def *
vtn_descriptor_load(struct vtn_builder *b, enum vtn_variable_mode mode,
- const struct glsl_type *desc_type, nir_ssa_def *desc_index)
+ nir_ssa_def *desc_index)
{
+ vtn_assert(b->options->environment == NIR_SPIRV_VULKAN);
+
nir_intrinsic_instr *desc_load =
nir_intrinsic_instr_create(b->nb.shader,
nir_intrinsic_load_vulkan_descriptor);
desc_load->src[0] = nir_src_for_ssa(desc_index);
- desc_load->num_components = glsl_get_vector_elements(desc_type);
nir_intrinsic_set_desc_type(desc_load, vk_desc_type_for_mode(b, mode));
+
+ vtn_fail_if(mode != vtn_variable_mode_ubo && mode != vtn_variable_mode_ssbo,
+ "Invalid mode for load_vulkan_descriptor");
+
+ nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
+ const struct glsl_type *ptr_type =
+ nir_address_format_to_glsl_type(addr_format);
+
+ desc_load->num_components = glsl_get_vector_elements(ptr_type);
nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
desc_load->num_components,
- glsl_get_bit_size(desc_type), NULL);
+ glsl_get_bit_size(ptr_type), NULL);
nir_builder_instr_insert(&b->nb, &desc_load->instr);
return &desc_load->dest.ssa;
struct vtn_access_chain *deref_chain)
{
struct vtn_type *type = base->type;
- enum gl_access_qualifier access = base->access;
+ enum gl_access_qualifier access = base->access | deref_chain->access;
unsigned idx = 0;
nir_deref_instr *tail;
if (base->deref) {
tail = base->deref;
- } else if (vtn_pointer_is_external_block(b, base)) {
+ } else if (b->options->environment == NIR_SPIRV_VULKAN &&
+ vtn_pointer_is_external_block(b, base)) {
nir_ssa_def *block_index = base->block_index;
/* We're dereferencing an external block pointer. The pointer's
* block_index already identifies the final block. Insert a descriptor
* load and cast to a deref to start the deref chain.
*/
- nir_ssa_def *desc =
- vtn_descriptor_load(b, base->mode, base->ptr_type->type, block_index);
+ nir_ssa_def *desc = vtn_descriptor_load(b, base->mode, block_index);
assert(base->mode == vtn_variable_mode_ssbo ||
base->mode == vtn_variable_mode_ubo);
nir_variable_mode nir_mode =
- base->mode == vtn_variable_mode_ssbo ? nir_var_ssbo : nir_var_mem_ubo;
+ base->mode == vtn_variable_mode_ssbo ? nir_var_mem_ssbo : nir_var_mem_ubo;
- tail = nir_build_deref_cast(&b->nb, desc, nir_mode, type->type,
+ tail = nir_build_deref_cast(&b->nb, desc, nir_mode,
+ vtn_type_get_nir_type(b, type, base->mode),
base->ptr_type->stride);
} else {
assert(base->var && base->var->var);
}
for (; idx < deref_chain->length; idx++) {
- if (glsl_type_is_struct(type->type)) {
+ if (glsl_type_is_struct_or_ifc(type->type)) {
vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
unsigned field = deref_chain->link[idx].id;
tail = nir_build_deref_struct(&b->nb, tail, field);
break;
}
+ case GLSL_TYPE_INTERFACE:
case GLSL_TYPE_STRUCT: {
vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
unsigned member = deref_chain->link[idx].id;
}
}
-struct vtn_pointer *
-vtn_pointer_for_variable(struct vtn_builder *b,
- struct vtn_variable *var, struct vtn_type *ptr_type)
-{
- struct vtn_pointer *pointer = rzalloc(b, struct vtn_pointer);
-
- pointer->mode = var->mode;
- pointer->type = var->type;
- vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
- vtn_assert(ptr_type->deref->type == var->type->type);
- pointer->ptr_type = ptr_type;
- pointer->var = var;
- pointer->access = var->access | var->type->access;
-
- return pointer;
-}
-
-/* Returns an atomic_uint type based on the original uint type. The returned
- * type will be equivalent to the original one but will have an atomic_uint
- * type as leaf instead of an uint.
- *
- * Manages uint scalars, arrays, and arrays of arrays of any nested depth.
- */
-static const struct glsl_type *
-repair_atomic_type(const struct glsl_type *type)
-{
- assert(glsl_get_base_type(glsl_without_array(type)) == GLSL_TYPE_UINT);
- assert(glsl_type_is_scalar(glsl_without_array(type)));
-
- if (glsl_type_is_array(type)) {
- const struct glsl_type *atomic =
- repair_atomic_type(glsl_get_array_element(type));
-
- return glsl_array_type(atomic, glsl_get_length(type),
- glsl_get_explicit_stride(type));
- } else {
- return glsl_atomic_uint_type();
- }
-}
-
nir_deref_instr *
vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
{
- if (b->wa_glslang_179) {
- /* Do on-the-fly copy propagation for samplers. */
- if (ptr->var && ptr->var->copy_prop_sampler)
- return vtn_pointer_to_deref(b, ptr->var->copy_prop_sampler);
- }
-
vtn_assert(!vtn_pointer_uses_ssa_offset(b, ptr));
if (!ptr->deref) {
struct vtn_access_chain chain = {
static void
_vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_instr *deref,
- struct vtn_ssa_value *inout)
+ struct vtn_ssa_value *inout,
+ enum gl_access_qualifier access)
{
if (glsl_type_is_vector_or_scalar(deref->type)) {
if (load) {
- inout->def = nir_load_deref(&b->nb, deref);
+ inout->def = nir_load_deref_with_access(&b->nb, deref, access);
} else {
- nir_store_deref(&b->nb, deref, inout->def, ~0);
+ nir_store_deref_with_access(&b->nb, deref, inout->def, ~0, access);
}
} else if (glsl_type_is_array(deref->type) ||
glsl_type_is_matrix(deref->type)) {
unsigned elems = glsl_get_length(deref->type);
for (unsigned i = 0; i < elems; i++) {
nir_deref_instr *child =
- nir_build_deref_array(&b->nb, deref, nir_imm_int(&b->nb, i));
- _vtn_local_load_store(b, load, child, inout->elems[i]);
+ nir_build_deref_array_imm(&b->nb, deref, i);
+ _vtn_local_load_store(b, load, child, inout->elems[i], access);
}
} else {
- vtn_assert(glsl_type_is_struct(deref->type));
+ vtn_assert(glsl_type_is_struct_or_ifc(deref->type));
unsigned elems = glsl_get_length(deref->type);
for (unsigned i = 0; i < elems; i++) {
nir_deref_instr *child = nir_build_deref_struct(&b->nb, deref, i);
- _vtn_local_load_store(b, load, child, inout->elems[i]);
+ _vtn_local_load_store(b, load, child, inout->elems[i], access);
}
}
}
}
struct vtn_ssa_value *
-vtn_local_load(struct vtn_builder *b, nir_deref_instr *src)
+vtn_local_load(struct vtn_builder *b, nir_deref_instr *src,
+ enum gl_access_qualifier access)
{
nir_deref_instr *src_tail = get_deref_tail(src);
struct vtn_ssa_value *val = vtn_create_ssa_value(b, src_tail->type);
- _vtn_local_load_store(b, true, src_tail, val);
+ _vtn_local_load_store(b, true, src_tail, val, access);
if (src_tail != src) {
val->type = src->type;
- if (nir_src_is_const(src->arr.index))
- val->def = vtn_vector_extract(b, val->def,
- nir_src_as_uint(src->arr.index));
- else
- val->def = vtn_vector_extract_dynamic(b, val->def, src->arr.index.ssa);
+ val->def = nir_vector_extract(&b->nb, val->def, src->arr.index.ssa);
}
return val;
void
vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
- nir_deref_instr *dest)
+ nir_deref_instr *dest, enum gl_access_qualifier access)
{
nir_deref_instr *dest_tail = get_deref_tail(dest);
if (dest_tail != dest) {
struct vtn_ssa_value *val = vtn_create_ssa_value(b, dest_tail->type);
- _vtn_local_load_store(b, true, dest_tail, val);
+ _vtn_local_load_store(b, true, dest_tail, val, access);
- if (nir_src_is_const(dest->arr.index))
- val->def = vtn_vector_insert(b, val->def, src->def,
- nir_src_as_uint(dest->arr.index));
- else
- val->def = vtn_vector_insert_dynamic(b, val->def, src->def,
- dest->arr.index.ssa);
- _vtn_local_load_store(b, false, dest_tail, val);
+ val->def = nir_vector_insert(&b->nb, val->def, src->def,
+ dest->arr.index.ssa);
+ _vtn_local_load_store(b, false, dest_tail, val, access);
} else {
- _vtn_local_load_store(b, false, dest_tail, src);
+ _vtn_local_load_store(b, false, dest_tail, src, access);
}
}
nir_intrinsic_set_range(instr, access_size);
}
- if (op == nir_intrinsic_load_ssbo ||
+ if (op == nir_intrinsic_load_ubo ||
+ op == nir_intrinsic_load_ssbo ||
op == nir_intrinsic_store_ssbo) {
nir_intrinsic_set_access(instr, access);
}
struct vtn_type *type, enum gl_access_qualifier access,
struct vtn_ssa_value **inout)
{
- if (load && *inout == NULL)
- *inout = vtn_create_ssa_value(b, type->type);
-
enum glsl_base_type base_type = glsl_get_base_type(type->type);
switch (base_type) {
case GLSL_TYPE_UINT:
return;
}
+ case GLSL_TYPE_INTERFACE:
case GLSL_TYPE_STRUCT: {
unsigned elems = glsl_get_length(type->type);
for (unsigned i = 0; i < elems; i++) {
nir_ssa_def *offset, *index = NULL;
offset = vtn_pointer_to_offset(b, src, &index);
- struct vtn_ssa_value *value = NULL;
+ struct vtn_ssa_value *value = vtn_create_ssa_value(b, src->type->type);
_vtn_block_load_store(b, op, true, index, offset,
access_offset, access_size,
src->type, src->access, &value);
static void
_vtn_variable_load_store(struct vtn_builder *b, bool load,
struct vtn_pointer *ptr,
+ enum gl_access_qualifier access,
struct vtn_ssa_value **inout)
{
+ if (ptr->mode == vtn_variable_mode_uniform) {
+ if (ptr->type->base_type == vtn_base_type_image ||
+ ptr->type->base_type == vtn_base_type_sampler) {
+ /* See also our handling of OpTypeSampler and OpTypeImage */
+ vtn_assert(load);
+ (*inout)->def = vtn_pointer_to_ssa(b, ptr);
+ return;
+ } else if (ptr->type->base_type == vtn_base_type_sampled_image) {
+ /* See also our handling of OpTypeSampledImage */
+ vtn_assert(load);
+ struct vtn_sampled_image si = {
+ .image = vtn_pointer_to_deref(b, ptr),
+ .sampler = vtn_pointer_to_deref(b, ptr),
+ };
+ (*inout)->def = vtn_sampled_image_to_nir_ssa(b, si);
+ return;
+ }
+ }
+
enum glsl_base_type base_type = glsl_get_base_type(ptr->type->type);
switch (base_type) {
case GLSL_TYPE_UINT:
if (glsl_type_is_vector_or_scalar(ptr->type->type)) {
/* We hit a vector or scalar; go ahead and emit the load[s] */
nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
- if (vtn_pointer_is_external_block(b, ptr)) {
- /* If it's external, we call nir_load/store_deref directly. The
- * vtn_local_load/store helpers are too clever and do magic to
- * avoid array derefs of vectors. That magic is both less
- * efficient than the direct load/store and, in the case of
+ if (vtn_mode_is_cross_invocation(b, ptr->mode)) {
+ /* If it's cross-invocation, we call nir_load/store_deref
+ * directly. The vtn_local_load/store helpers are too clever and
+ * do magic to avoid array derefs of vectors. That magic is both
+ * less efficient than the direct load/store and, in the case of
* stores, is broken because it creates a race condition if two
* threads are writing to different components of the same vector
* due to the load+insert+store it uses to emulate the array
* deref.
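*
* Concretely: for a shared vec4 where thread 0 writes component x and
* thread 1 writes component y, each emulated store reads and rewrites
* all four components, so one thread can clobber the other's component
* with a stale value.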
*/
if (load) {
- *inout = vtn_create_ssa_value(b, ptr->type->type);
- (*inout)->def = nir_load_deref(&b->nb, deref);
+ (*inout)->def = nir_load_deref_with_access(&b->nb, deref,
+ ptr->type->access | access);
} else {
- nir_store_deref(&b->nb, deref, (*inout)->def, ~0);
+ nir_store_deref_with_access(&b->nb, deref, (*inout)->def, ~0,
+ ptr->type->access | access);
}
} else {
if (load) {
- *inout = vtn_local_load(b, deref);
+ *inout = vtn_local_load(b, deref, ptr->type->access | access);
} else {
- vtn_local_store(b, *inout, deref);
+ vtn_local_store(b, *inout, deref, ptr->type->access | access);
}
}
return;
}
/* Fall through */
+ case GLSL_TYPE_INTERFACE:
case GLSL_TYPE_ARRAY:
case GLSL_TYPE_STRUCT: {
unsigned elems = glsl_get_length(ptr->type->type);
- if (load) {
- vtn_assert(*inout == NULL);
- *inout = rzalloc(b, struct vtn_ssa_value);
- (*inout)->type = ptr->type->type;
- (*inout)->elems = rzalloc_array(b, struct vtn_ssa_value *, elems);
- }
-
struct vtn_access_chain chain = {
.length = 1,
.link = {
for (unsigned i = 0; i < elems; i++) {
chain.link[0].id = i;
struct vtn_pointer *elem = vtn_pointer_dereference(b, ptr, &chain);
- _vtn_variable_load_store(b, load, elem, &(*inout)->elems[i]);
+ _vtn_variable_load_store(b, load, elem, ptr->type->access | access,
+ &(*inout)->elems[i]);
}
return;
}
if (vtn_pointer_uses_ssa_offset(b, src)) {
return vtn_block_load(b, src);
} else {
- struct vtn_ssa_value *val = NULL;
- _vtn_variable_load_store(b, true, src, &val);
+ struct vtn_ssa_value *val = vtn_create_ssa_value(b, src->type->type);
+ _vtn_variable_load_store(b, true, src, src->access, &val);
return val;
}
}
dest->mode == vtn_variable_mode_workgroup);
vtn_block_store(b, src, dest);
} else {
- _vtn_variable_load_store(b, false, dest, &src);
+ _vtn_variable_load_store(b, false, dest, dest->access, &src);
}
}
_vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
struct vtn_pointer *src)
{
- vtn_assert(src->type->type == dest->type->type);
+ vtn_assert(glsl_get_bare_type(src->type->type) ==
+ glsl_get_bare_type(dest->type->type));
enum glsl_base_type base_type = glsl_get_base_type(src->type->type);
switch (base_type) {
case GLSL_TYPE_UINT:
vtn_variable_store(b, vtn_variable_load(b, src), dest);
return;
+ case GLSL_TYPE_INTERFACE:
case GLSL_TYPE_ARRAY:
case GLSL_TYPE_STRUCT: {
struct vtn_access_chain chain = {
set_mode_system_value(b, mode);
break;
case SpvBuiltInFragCoord:
- *location = VARYING_SLOT_POS;
vtn_assert(*mode == nir_var_shader_in);
+ if (b->options && b->options->frag_coord_is_sysval) {
+ *mode = nir_var_system_value;
+ *location = SYSTEM_VALUE_FRAG_COORD;
+ } else {
+ *location = VARYING_SLOT_POS;
+ }
break;
case SpvBuiltInPointCoord:
*location = VARYING_SLOT_PNTC;
*location = SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
set_mode_system_value(b, mode);
break;
+ case SpvBuiltInGlobalLinearId:
+ *location = SYSTEM_VALUE_GLOBAL_INVOCATION_INDEX;
+ set_mode_system_value(b, mode);
+ break;
+ case SpvBuiltInGlobalOffset:
+ *location = SYSTEM_VALUE_BASE_GLOBAL_INVOCATION_ID;
+ set_mode_system_value(b, mode);
+ break;
case SpvBuiltInBaseVertex:
/* OpenGL gl_BaseVertex (SYSTEM_VALUE_BASE_VERTEX) is not the same
- * semantic as SPIR-V BaseVertex (SYSTEM_VALUE_FIRST_VERTEX).
+ * semantic as Vulkan BaseVertex (SYSTEM_VALUE_FIRST_VERTEX).
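+ * GL's gl_BaseVertex is zero for non-indexed draws, whereas Vulkan's
+ * BaseVertex is the draw's firstVertex/vertexOffset parameter.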
*/
- *location = SYSTEM_VALUE_FIRST_VERTEX;
+ if (b->options->environment == NIR_SPIRV_OPENGL)
+ *location = SYSTEM_VALUE_BASE_VERTEX;
+ else
+ *location = SYSTEM_VALUE_FIRST_VERTEX;
set_mode_system_value(b, mode);
break;
case SpvBuiltInBaseInstance:
set_mode_system_value(b, mode);
break;
case SpvBuiltInViewIndex:
- *location = SYSTEM_VALUE_VIEW_INDEX;
- set_mode_system_value(b, mode);
+ if (b->options && b->options->view_index_is_input) {
+ *location = VARYING_SLOT_VIEW_INDEX;
+ vtn_assert(*mode == nir_var_shader_in);
+ } else {
+ *location = SYSTEM_VALUE_VIEW_INDEX;
+ set_mode_system_value(b, mode);
+ }
break;
case SpvBuiltInSubgroupEqMask:
*location = SYSTEM_VALUE_SUBGROUP_EQ_MASK,
*location = SYSTEM_VALUE_GLOBAL_GROUP_SIZE;
set_mode_system_value(b, mode);
break;
+ case SpvBuiltInBaryCoordNoPerspAMD:
+ *location = SYSTEM_VALUE_BARYCENTRIC_LINEAR_PIXEL;
+ set_mode_system_value(b, mode);
+ break;
+ case SpvBuiltInBaryCoordNoPerspCentroidAMD:
+ *location = SYSTEM_VALUE_BARYCENTRIC_LINEAR_CENTROID;
+ set_mode_system_value(b, mode);
+ break;
+ case SpvBuiltInBaryCoordNoPerspSampleAMD:
+ *location = SYSTEM_VALUE_BARYCENTRIC_LINEAR_SAMPLE;
+ set_mode_system_value(b, mode);
+ break;
+ case SpvBuiltInBaryCoordSmoothAMD:
+ *location = SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL;
+ set_mode_system_value(b, mode);
+ break;
+ case SpvBuiltInBaryCoordSmoothCentroidAMD:
+ *location = SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID;
+ set_mode_system_value(b, mode);
+ break;
+ case SpvBuiltInBaryCoordSmoothSampleAMD:
+ *location = SYSTEM_VALUE_BARYCENTRIC_PERSP_SAMPLE;
+ set_mode_system_value(b, mode);
+ break;
+ case SpvBuiltInBaryCoordPullModelAMD:
+ *location = SYSTEM_VALUE_BARYCENTRIC_PULL_MODEL;
+ set_mode_system_value(b, mode);
+ break;
default:
- vtn_fail("unsupported builtin: %u", builtin);
+ vtn_fail("Unsupported builtin: %s (%u)",
+ spirv_builtin_to_string(builtin), builtin);
}
}
case SpvDecorationFlat:
var_data->interpolation = INTERP_MODE_FLAT;
break;
+ case SpvDecorationExplicitInterpAMD:
+ var_data->interpolation = INTERP_MODE_EXPLICIT;
+ break;
case SpvDecorationCentroid:
var_data->centroid = true;
break;
var_data->read_only = true;
break;
case SpvDecorationNonReadable:
- var_data->image.access |= ACCESS_NON_READABLE;
+ var_data->access |= ACCESS_NON_READABLE;
break;
case SpvDecorationNonWritable:
var_data->read_only = true;
- var_data->image.access |= ACCESS_NON_WRITEABLE;
+ var_data->access |= ACCESS_NON_WRITEABLE;
break;
case SpvDecorationRestrict:
- var_data->image.access |= ACCESS_RESTRICT;
+ var_data->access |= ACCESS_RESTRICT;
+ break;
+ case SpvDecorationAliased:
+ var_data->access &= ~ACCESS_RESTRICT;
break;
case SpvDecorationVolatile:
- var_data->image.access |= ACCESS_VOLATILE;
+ var_data->access |= ACCESS_VOLATILE;
break;
case SpvDecorationCoherent:
- var_data->image.access |= ACCESS_COHERENT;
+ var_data->access |= ACCESS_COHERENT;
break;
case SpvDecorationComponent:
- var_data->location_frac = dec->literals[0];
+ var_data->location_frac = dec->operands[0];
break;
case SpvDecorationIndex:
- var_data->index = dec->literals[0];
+ var_data->index = dec->operands[0];
break;
case SpvDecorationBuiltIn: {
- SpvBuiltIn builtin = dec->literals[0];
+ SpvBuiltIn builtin = dec->operands[0];
nir_variable_mode mode = var_data->mode;
vtn_get_builtin_location(b, builtin, &var_data->location, &mode);
switch (builtin) {
case SpvBuiltInTessLevelOuter:
case SpvBuiltInTessLevelInner:
+ case SpvBuiltInClipDistance:
+ case SpvBuiltInCullDistance:
var_data->compact = true;
break;
- case SpvBuiltInFragCoord:
- var_data->pixel_center_integer = b->pixel_center_integer;
- /* fallthrough */
- case SpvBuiltInSamplePosition:
- var_data->origin_upper_left = b->origin_upper_left;
- break;
default:
break;
}
case SpvDecorationRowMajor:
case SpvDecorationColMajor:
case SpvDecorationMatrixStride:
- case SpvDecorationAliased:
case SpvDecorationUniform:
+ case SpvDecorationUniformId:
case SpvDecorationLinkageAttributes:
break; /* Do nothing with these here */
case SpvDecorationXfbBuffer:
var_data->explicit_xfb_buffer = true;
- var_data->xfb_buffer = dec->literals[0];
+ var_data->xfb.buffer = dec->operands[0];
var_data->always_active_io = true;
break;
case SpvDecorationXfbStride:
var_data->explicit_xfb_stride = true;
- var_data->xfb_stride = dec->literals[0];
+ var_data->xfb.stride = dec->operands[0];
break;
case SpvDecorationOffset:
var_data->explicit_offset = true;
- var_data->offset = dec->literals[0];
+ var_data->offset = dec->operands[0];
break;
case SpvDecorationStream:
- var_data->stream = dec->literals[0];
+ var_data->stream = dec->operands[0];
break;
case SpvDecorationCPacked:
case SpvDecorationFPRoundingMode:
case SpvDecorationFPFastMathMode:
case SpvDecorationAlignment:
- vtn_warn("Decoration only allowed for CL-style kernels: %s",
- spirv_decoration_to_string(dec->decoration));
+ if (b->shader->info.stage != MESA_SHADER_KERNEL) {
+ vtn_warn("Decoration only allowed for CL-style kernels: %s",
+ spirv_decoration_to_string(dec->decoration));
+ }
+ break;
+
+ case SpvDecorationUserSemantic:
+ case SpvDecorationUserTypeGOOGLE:
+ /* User semantic decorations can safely be ignored by the driver. */
break;
- case SpvDecorationHlslSemanticGOOGLE:
- /* HLSL semantic decorations can safely be ignored by the driver. */
+ case SpvDecorationRestrictPointerEXT:
+ case SpvDecorationAliasedPointerEXT:
+ /* TODO: We should actually plumb alias information through NIR. */
break;
default:
- vtn_fail("Unhandled decoration");
+ vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
}
}
/* Handle decorations that apply to a vtn_variable as a whole */
switch (dec->decoration) {
case SpvDecorationBinding:
- vtn_var->binding = dec->literals[0];
+ vtn_var->binding = dec->operands[0];
vtn_var->explicit_binding = true;
return;
case SpvDecorationDescriptorSet:
- vtn_var->descriptor_set = dec->literals[0];
+ vtn_var->descriptor_set = dec->operands[0];
return;
case SpvDecorationInputAttachmentIndex:
- vtn_var->input_attachment_index = dec->literals[0];
+ vtn_var->input_attachment_index = dec->operands[0];
return;
case SpvDecorationPatch:
vtn_var->patch = true;
break;
case SpvDecorationOffset:
- vtn_var->offset = dec->literals[0];
+ vtn_var->offset = dec->operands[0];
break;
case SpvDecorationNonWritable:
vtn_var->access |= ACCESS_NON_WRITEABLE;
case SpvDecorationCoherent:
vtn_var->access |= ACCESS_COHERENT;
break;
- case SpvDecorationHlslCounterBufferGOOGLE:
- /* HLSL semantic decorations can safely be ignored by the driver. */
- break;
+ case SpvDecorationCounterBuffer:
+ /* Counter buffer decorations can safely be ignored by the driver. */
+ return;
default:
break;
}
* special case.
*/
if (dec->decoration == SpvDecorationLocation) {
- unsigned location = dec->literals[0];
- bool is_vertex_input = false;
+ unsigned location = dec->operands[0];
if (b->shader->info.stage == MESA_SHADER_FRAGMENT &&
vtn_var->mode == vtn_variable_mode_output) {
location += FRAG_RESULT_DATA0;
} else if (b->shader->info.stage == MESA_SHADER_VERTEX &&
vtn_var->mode == vtn_variable_mode_input) {
- is_vertex_input = true;
location += VERT_ATTRIB_GENERIC0;
} else if (vtn_var->mode == vtn_variable_mode_input ||
vtn_var->mode == vtn_variable_mode_output) {
} else {
/* This handles the structure member case */
assert(vtn_var->var->members);
- for (unsigned i = 0; i < vtn_var->var->num_members; i++) {
- vtn_var->var->members[i].location = location;
- const struct glsl_type *member_type =
- glsl_get_struct_field(vtn_var->var->interface_type, i);
- location += glsl_count_attribute_slots(member_type,
- is_vertex_input);
- }
+
+ if (member == -1)
+ vtn_var->base_location = location;
+ else
+ vtn_var->var->members[member].location = location;
}
+
return;
} else {
if (vtn_var->var) {
if (vtn_var->var->num_members == 0) {
- assert(member == -1);
- apply_var_decoration(b, &vtn_var->var->data, dec);
+ /* We call this function on types as well as variables and not all
+ * struct types get split so we can end up having stray member
+ * decorations; just ignore them.
+ */
+ if (member == -1)
+ apply_var_decoration(b, &vtn_var->var->data, dec);
} else if (member >= 0) {
/* Member decorations must come from a type */
assert(val->value_type == vtn_value_type_type);
*/
vtn_assert(vtn_var->mode == vtn_variable_mode_ubo ||
vtn_var->mode == vtn_variable_mode_ssbo ||
- vtn_var->mode == vtn_variable_mode_push_constant ||
- (vtn_var->mode == vtn_variable_mode_workgroup &&
- b->options->lower_workgroup_access_to_offsets));
+ vtn_var->mode == vtn_variable_mode_push_constant);
}
}
}
-static enum vtn_variable_mode
+enum vtn_variable_mode
vtn_storage_class_to_mode(struct vtn_builder *b,
SpvStorageClass class,
struct vtn_type *interface_type,
nir_variable_mode nir_mode;
switch (class) {
case SpvStorageClassUniform:
- if (interface_type->block) {
+ /* Assume it's an UBO if we lack the interface_type. */
+ if (!interface_type || interface_type->block) {
mode = vtn_variable_mode_ubo;
nir_mode = nir_var_mem_ubo;
} else if (interface_type->buffer_block) {
mode = vtn_variable_mode_ssbo;
- nir_mode = nir_var_ssbo;
+ nir_mode = nir_var_mem_ssbo;
} else {
/* Default-block uniforms, coming from gl_spirv */
mode = vtn_variable_mode_uniform;
break;
case SpvStorageClassStorageBuffer:
mode = vtn_variable_mode_ssbo;
- nir_mode = nir_var_ssbo;
+ nir_mode = nir_var_mem_ssbo;
+ break;
+ case SpvStorageClassPhysicalStorageBuffer:
+ mode = vtn_variable_mode_phys_ssbo;
+ nir_mode = nir_var_mem_global;
break;
case SpvStorageClassUniformConstant:
- mode = vtn_variable_mode_uniform;
- nir_mode = nir_var_uniform;
+ if (b->shader->info.stage == MESA_SHADER_KERNEL) {
+ if (b->options->constant_as_global) {
+ mode = vtn_variable_mode_cross_workgroup;
+ nir_mode = nir_var_mem_global;
+ } else {
+ mode = vtn_variable_mode_ubo;
+ nir_mode = nir_var_mem_ubo;
+ }
+ } else {
+ mode = vtn_variable_mode_uniform;
+ nir_mode = nir_var_uniform;
+ }
break;
case SpvStorageClassPushConstant:
mode = vtn_variable_mode_push_constant;
break;
case SpvStorageClassWorkgroup:
mode = vtn_variable_mode_workgroup;
- nir_mode = nir_var_shared;
+ nir_mode = nir_var_mem_shared;
break;
case SpvStorageClassAtomicCounter:
- mode = vtn_variable_mode_uniform;
+ mode = vtn_variable_mode_atomic_counter;
nir_mode = nir_var_uniform;
break;
case SpvStorageClassCrossWorkgroup:
+ mode = vtn_variable_mode_cross_workgroup;
+ nir_mode = nir_var_mem_global;
+ break;
+ case SpvStorageClassImage:
+ mode = vtn_variable_mode_image;
+ nir_mode = nir_var_mem_ubo;
+ break;
case SpvStorageClassGeneric:
default:
- vtn_fail("Unhandled variable storage class");
+ vtn_fail("Unhandled variable storage class: %s (%u)",
+ spirv_storageclass_to_string(class), class);
}
if (nir_mode_out)
return mode;
}
+nir_address_format
+vtn_mode_to_address_format(struct vtn_builder *b, enum vtn_variable_mode mode)
+{
+ switch (mode) {
+ case vtn_variable_mode_ubo:
+ return b->options->ubo_addr_format;
+
+ case vtn_variable_mode_ssbo:
+ return b->options->ssbo_addr_format;
+
+ case vtn_variable_mode_phys_ssbo:
+ return b->options->phys_ssbo_addr_format;
+
+ case vtn_variable_mode_push_constant:
+ return b->options->push_const_addr_format;
+
+ case vtn_variable_mode_workgroup:
+ return b->options->shared_addr_format;
+
+ case vtn_variable_mode_cross_workgroup:
+ return b->options->global_addr_format;
+
+ case vtn_variable_mode_function:
+ if (b->physical_ptrs)
+ return b->options->temp_addr_format;
+ /* Fall through. */
+
+ case vtn_variable_mode_private:
+ case vtn_variable_mode_uniform:
+ case vtn_variable_mode_atomic_counter:
+ case vtn_variable_mode_input:
+ case vtn_variable_mode_output:
+ case vtn_variable_mode_image:
+ return nir_address_format_logical;
+ }
+
+ unreachable("Invalid variable mode");
+}
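+
+/* For illustration only: a Vulkan driver that does not lower UBO/SSBO
+ * access to offsets might configure something like
+ *
+ *    .ubo_addr_format = nir_address_format_32bit_index_offset,
+ *    .ssbo_addr_format = nir_address_format_32bit_index_offset,
+ *    .phys_ssbo_addr_format = nir_address_format_64bit_global,
+ *    .push_const_addr_format = nir_address_format_32bit_offset,
+ *    .shared_addr_format = nir_address_format_32bit_offset,
+ *
+ * but the exact formats are a driver choice, not mandated here.
+ */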
+
nir_ssa_def *
vtn_pointer_to_ssa(struct vtn_builder *b, struct vtn_pointer *ptr)
{
}
} else {
if (vtn_pointer_is_external_block(b, ptr) &&
- vtn_type_contains_block(b, ptr->type)) {
- const unsigned bit_size = glsl_get_bit_size(ptr->ptr_type->type);
- const unsigned num_components =
- glsl_get_vector_elements(ptr->ptr_type->type);
-
+ vtn_type_contains_block(b, ptr->type) &&
+ ptr->mode != vtn_variable_mode_phys_ssbo) {
/* In this case, we're looking for a block index and not an actual
* deref.
+ *
+ * For PhysicalStorageBuffer pointers, we don't have a block index
+ * at all because we get the pointer directly from the client. This
+ * assumes that there will never be an SSBO binding variable using the
+ * PhysicalStorageBuffer storage class. This assumption appears to be
+ * correct per the Vulkan spec: in the table "Shader Resource and
+ * Storage Class Correspondence," only the Uniform storage class with
+ * BufferBlock or the StorageBuffer storage class with Block can be
+ * used.
*/
if (!ptr->block_index) {
/* If we don't have a block_index then we must be a pointer to the
ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
}
- /* A block index is just a 32-bit value but the pointer has some
- * other dimensionality. Cram it in there and we'll unpack it later
- * in vtn_pointer_from_ssa.
- */
- const unsigned swiz[4] = { 0, };
- return nir_swizzle(&b->nb, nir_u2u(&b->nb, ptr->block_index, bit_size),
- swiz, num_components, false);
+ return ptr->block_index;
} else {
return &vtn_pointer_to_deref(b, ptr)->dest.ssa;
}
vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa,
struct vtn_type *ptr_type)
{
- vtn_assert(ssa->num_components <= 2 && ssa->bit_size == 32);
vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
- struct vtn_type *interface_type = ptr_type->deref;
- while (interface_type->base_type == vtn_base_type_array)
- interface_type = interface_type->array_element;
-
struct vtn_pointer *ptr = rzalloc(b, struct vtn_pointer);
+ struct vtn_type *without_array =
+ vtn_type_without_array(ptr_type->deref);
+
nir_variable_mode nir_mode;
ptr->mode = vtn_storage_class_to_mode(b, ptr_type->storage_class,
- interface_type, &nir_mode);
+ without_array, &nir_mode);
ptr->type = ptr_type->deref;
ptr->ptr_type = ptr_type;
- if (b->wa_glslang_179) {
- /* To work around https://github.com/KhronosGroup/glslang/issues/179 we
- * need to whack the mode because it creates a function parameter with
- * the Function storage class even though it's a pointer to a sampler.
- * If we don't do this, then NIR won't get rid of the deref_cast for us.
- */
- if (ptr->mode == vtn_variable_mode_function &&
- (ptr->type->base_type == vtn_base_type_sampler ||
- ptr->type->base_type == vtn_base_type_sampled_image)) {
- ptr->mode = vtn_variable_mode_uniform;
- nir_mode = nir_var_uniform;
- }
- }
-
if (vtn_pointer_uses_ssa_offset(b, ptr)) {
/* This pointer type needs to have actual storage */
vtn_assert(ptr_type->type);
ptr->offset = ssa;
}
} else {
- const struct glsl_type *deref_type = ptr_type->deref->type;
+ const struct glsl_type *deref_type =
+ vtn_type_get_nir_type(b, ptr_type->deref, ptr->mode);
if (!vtn_pointer_is_external_block(b, ptr)) {
- assert(ssa->bit_size == 32 && ssa->num_components == 1);
ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
- glsl_get_bare_type(deref_type), 0);
- } else if (vtn_type_contains_block(b, ptr->type)) {
+ deref_type, ptr_type->stride);
+ } else if (vtn_type_contains_block(b, ptr->type) &&
+ ptr->mode != vtn_variable_mode_phys_ssbo) {
/* This is a pointer to somewhere in an array of blocks, not a
- * pointer to somewhere inside the block. We squashed it into a
- * random vector type before so just pick off the first channel and
- * cast it back to 32 bits.
+ * pointer to somewhere inside the block. Set the block index
+ * instead of making a cast.
*/
- ptr->block_index = nir_u2u32(&b->nb, nir_channel(&b->nb, ssa, 0));
+ ptr->block_index = ssa;
} else {
/* This is a pointer to something internal or a pointer inside a
* block. It's just a regular cast.
+ *
+ * For PhysicalStorageBuffer pointers, we don't have a block index
+ * at all because we get the pointer directly from the client. This
+ * assumes that there will never be an SSBO binding variable using the
+ * PhysicalStorageBuffer storage class. This assumption appears to be
+ * correct per the Vulkan spec: in the table "Shader Resource and
+ * Storage Class Correspondence," only the Uniform storage class with
+ * BufferBlock or the StorageBuffer storage class with Block can be
+ * used.
*/
ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
- ptr_type->deref->type,
- ptr_type->stride);
+ deref_type, ptr_type->stride);
ptr->deref->dest.ssa.num_components =
glsl_get_vector_elements(ptr_type->type);
ptr->deref->dest.ssa.bit_size = glsl_get_bit_size(ptr_type->type);
return false;
}
+static void
+assign_missing_member_locations(struct vtn_variable *var)
+{
+ unsigned length =
+ glsl_get_length(glsl_without_array(var->type->type));
+ int location = var->base_location;
+
+ for (unsigned i = 0; i < length; i++) {
+ /* From the Vulkan spec:
+ *
+ * “If the structure type is a Block but without a Location, then each
+ * of its members must have a Location decoration.”
+ *
+ */
+ if (var->type->block) {
+ assert(var->base_location != -1 ||
+ var->var->members[i].location != -1);
+ }
+
+ /* From the Vulkan spec:
+ *
+ * “Any member with its own Location decoration is assigned that
+ * location. Each remaining member is assigned the location after the
+ * immediately preceding member in declaration order.”
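+ *
+ * For example, members (vec4, vec4[2], float) with an explicit Location
+ * of 5 only on the first are assigned 5, 6, and 8, since the vec4[2]
+ * consumes two slots.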
+ */
+ if (var->var->members[i].location != -1)
+ location = var->var->members[i].location;
+ else
+ var->var->members[i].location = location;
+
+ /* Below we use type instead of interface_type, because interface_type
+ * is only available when the variable is a Block. This code also
+ * supports inputs/outputs that are plain structs.
+ */
+ const struct glsl_type *member_type =
+ glsl_get_struct_field(glsl_without_array(var->type->type), i);
+
+ location +=
+ glsl_count_attribute_slots(member_type,
+ false /* is_gl_vertex_input */);
+ }
+}
+
static void
vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
struct vtn_type *ptr_type, SpvStorageClass storage_class,
- nir_constant *initializer)
+ nir_constant *const_initializer, nir_variable *var_initializer)
{
vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
struct vtn_type *type = ptr_type->deref;
- struct vtn_type *without_array = type;
- while(glsl_type_is_array(without_array->type))
- without_array = without_array->array_element;
+ struct vtn_type *without_array = vtn_type_without_array(ptr_type->deref);
enum vtn_variable_mode mode;
nir_variable_mode nir_mode;
b->shader->info.num_ssbos++;
break;
case vtn_variable_mode_uniform:
- if (glsl_type_is_image(without_array->type))
- b->shader->info.num_images++;
- else if (glsl_type_is_sampler(without_array->type))
- b->shader->info.num_textures++;
+ if (without_array->base_type == vtn_base_type_image) {
+ if (glsl_type_is_image(without_array->glsl_image))
+ b->shader->info.num_images++;
+ else if (glsl_type_is_sampler(without_array->glsl_image))
+ b->shader->info.num_textures++;
+ }
break;
case vtn_variable_mode_push_constant:
b->shader->num_uniforms = vtn_type_block_size(b, type);
break;
+
+ case vtn_variable_mode_image:
+ vtn_fail("Cannot create a variable with the Image storage class");
+ break;
+
+ case vtn_variable_mode_phys_ssbo:
+ vtn_fail("Cannot create a variable with the "
+ "PhysicalStorageBuffer storage class");
+ break;
+
default:
/* No tallying is needed */
break;
struct vtn_variable *var = rzalloc(b, struct vtn_variable);
var->type = type;
var->mode = mode;
+ var->base_location = -1;
- vtn_assert(val->value_type == vtn_value_type_pointer);
- val->pointer = vtn_pointer_for_variable(b, var, ptr_type);
+ val->pointer = rzalloc(b, struct vtn_pointer);
+ val->pointer->mode = var->mode;
+ val->pointer->type = var->type;
+ val->pointer->ptr_type = ptr_type;
+ val->pointer->var = var;
+ val->pointer->access = var->type->access;
switch (var->mode) {
case vtn_variable_mode_function:
case vtn_variable_mode_private:
case vtn_variable_mode_uniform:
+ case vtn_variable_mode_atomic_counter:
/* For these, we create the variable normally */
var->var = rzalloc(b->shader, nir_variable);
var->var->name = ralloc_strdup(var->var, val->name);
-
- if (storage_class == SpvStorageClassAtomicCounter) {
- /* Need to tweak the nir type here as at vtn_handle_type we don't
- * have the access to storage_class, that is the one that points us
- * that is an atomic uint.
- */
- var->var->type = repair_atomic_type(var->type->type);
- } else {
- /* Private variables don't have any explicit layout but some layouts
- * may have leaked through due to type deduplication in the SPIR-V.
- */
- var->var->type = glsl_get_bare_type(var->type->type);
- }
+ var->var->type = vtn_type_get_nir_type(b, var->type, var->mode);
var->var->data.mode = nir_mode;
var->var->data.location = -1;
var->var->interface_type = NULL;
break;
+ case vtn_variable_mode_ubo:
+ case vtn_variable_mode_ssbo:
+ var->var = rzalloc(b->shader, nir_variable);
+ var->var->name = ralloc_strdup(var->var, val->name);
+
+ var->var->type = vtn_type_get_nir_type(b, var->type, var->mode);
+ var->var->interface_type = var->var->type;
+
+ var->var->data.mode = nir_mode;
+ var->var->data.location = -1;
+
+ break;
+
case vtn_variable_mode_workgroup:
- if (b->options->lower_workgroup_access_to_offsets) {
- var->shared_location = -1;
- } else {
- /* Create the variable normally */
- var->var = rzalloc(b->shader, nir_variable);
- var->var->name = ralloc_strdup(var->var, val->name);
- /* Workgroup variables don't have any explicit layout but some
- * layouts may have leaked through due to type deduplication in the
- * SPIR-V.
- */
- var->var->type = glsl_get_bare_type(var->type->type);
- var->var->data.mode = nir_var_shared;
- }
+ /* Create the variable normally */
+ var->var = rzalloc(b->shader, nir_variable);
+ var->var->name = ralloc_strdup(var->var, val->name);
+ var->var->type = vtn_type_get_nir_type(b, var->type, var->mode);
+ var->var->data.mode = nir_var_mem_shared;
break;
case vtn_variable_mode_input:
var->patch = false;
vtn_foreach_decoration(b, val, var_is_patch_cb, &var->patch);
if (glsl_type_is_array(var->type->type) &&
- glsl_type_is_struct(without_array->type)) {
+ glsl_type_is_struct_or_ifc(without_array->type)) {
vtn_foreach_decoration(b, vtn_value(b, without_array->id,
vtn_value_type_type),
var_is_patch_cb, &var->patch);
* able to preserve that information.
*/
- struct vtn_type *interface_type = var->type;
+ struct vtn_type *per_vertex_type = var->type;
if (is_per_vertex_inout(var, b->shader->info.stage)) {
/* In Geometry shaders (and some tessellation), inputs come
* in per-vertex arrays. However, some builtins come in
* any case, there are no non-builtin arrays allowed so this
* check should be sufficient.
*/
- interface_type = var->type->array_element;
+ per_vertex_type = var->type->array_element;
}
var->var = rzalloc(b->shader, nir_variable);
var->var->name = ralloc_strdup(var->var, val->name);
- /* In Vulkan, shader I/O variables don't have any explicit layout but
- * some layouts may have leaked through due to type deduplication in
- * the SPIR-V.
- */
- var->var->type = glsl_get_bare_type(var->type->type);
- var->var->interface_type = interface_type->type;
+ var->var->type = vtn_type_get_nir_type(b, var->type, var->mode);
var->var->data.mode = nir_mode;
var->var->data.patch = var->patch;
- if (glsl_type_is_struct(interface_type->type)) {
+ /* Figure out the interface block type. */
+ struct vtn_type *iface_type = per_vertex_type;
+ if (var->mode == vtn_variable_mode_output &&
+ (b->shader->info.stage == MESA_SHADER_VERTEX ||
+ b->shader->info.stage == MESA_SHADER_TESS_EVAL ||
+ b->shader->info.stage == MESA_SHADER_GEOMETRY)) {
+ /* For vertex data outputs, we can end up with arrays of blocks for
+ * transform feedback where each array element corresponds to a
+ * different XFB output buffer.
+ */
+ while (iface_type->base_type == vtn_base_type_array)
+ iface_type = iface_type->array_element;
+ }
+ if (iface_type->base_type == vtn_base_type_struct && iface_type->block)
+ var->var->interface_type = vtn_type_get_nir_type(b, iface_type,
+ var->mode);
+
+ if (per_vertex_type->base_type == vtn_base_type_struct &&
+ per_vertex_type->block) {
/* It's a struct. Set it up as per-member. */
- var->var->num_members = glsl_get_length(interface_type->type);
+ var->var->num_members = glsl_get_length(per_vertex_type->type);
var->var->members = rzalloc_array(var->var, struct nir_variable_data,
var->var->num_members);
for (unsigned i = 0; i < var->var->num_members; i++) {
var->var->members[i].mode = nir_mode;
var->var->members[i].patch = var->patch;
+ var->var->members[i].location = -1;
}
}
/* For inputs and outputs, we need to grab locations and builtin
- * information from the interface type.
+ * information from the per-vertex type.
*/
- vtn_foreach_decoration(b, vtn_value(b, interface_type->id,
+ vtn_foreach_decoration(b, vtn_value(b, per_vertex_type->id,
vtn_value_type_type),
var_decoration_cb, var);
break;
}
- case vtn_variable_mode_ubo:
- case vtn_variable_mode_ssbo:
case vtn_variable_mode_push_constant:
+ case vtn_variable_mode_cross_workgroup:
/* These don't need actual variables. */
break;
+
+ case vtn_variable_mode_image:
+ case vtn_variable_mode_phys_ssbo:
+ unreachable("Should have been caught before");
}
- if (initializer) {
+ /* We can only have one type of initializer */
+ assert(!(const_initializer && var_initializer));
+ if (const_initializer) {
var->var->constant_initializer =
- nir_constant_clone(initializer, var->var);
+ nir_constant_clone(const_initializer, var->var);
+ }
+ if (var_initializer)
+ var->var->pointer_initializer = var_initializer;
+
+ if (var->mode == vtn_variable_mode_uniform ||
+ var->mode == vtn_variable_mode_ssbo) {
+ /* SSBOs and images are assumed to not alias in the Simple, GLSL and
+ * Vulkan memory models.
+ */
+ var->var->data.access |=
+ b->mem_model != SpvMemoryModelOpenCL ? ACCESS_RESTRICT : 0;
}
vtn_foreach_decoration(b, val, var_decoration_cb, var);
+ vtn_foreach_decoration(b, val, ptr_decoration_cb, val->pointer);
- if (var->mode == vtn_variable_mode_uniform) {
+ /* Propagate access flags from the OpVariable decorations. */
+ val->pointer->access |= var->access;
+
+ if ((var->mode == vtn_variable_mode_input ||
+ var->mode == vtn_variable_mode_output) &&
+ var->var->members) {
+ assign_missing_member_locations(var);
+ }
+
+ if (var->mode == vtn_variable_mode_uniform ||
+ var->mode == vtn_variable_mode_ubo ||
+ var->mode == vtn_variable_mode_ssbo ||
+ var->mode == vtn_variable_mode_atomic_counter) {
/* XXX: We still need the binding information in the nir_variable
* for these. We should fix that.
*/
var->var->data.index = var->input_attachment_index;
var->var->data.offset = var->offset;
- if (glsl_type_is_image(without_array->type))
+ if (glsl_type_is_image(glsl_without_array(var->var->type)))
var->var->data.image.format = without_array->image_format;
}
glsl_get_type_name(src_type->type));
}
+static nir_ssa_def *
+nir_shrink_zero_pad_vec(nir_builder *b, nir_ssa_def *val,
+ unsigned num_components)
+{
+ if (val->num_components == num_components)
+ return val;
+
+ nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
+ for (unsigned i = 0; i < num_components; i++) {
+ if (i < val->num_components)
+ comps[i] = nir_channel(b, val, i);
+ else
+ comps[i] = nir_imm_intN_t(b, 0, val->bit_size);
+ }
+ return nir_vec(b, comps, num_components);
+}
+
+static nir_ssa_def *
+nir_sloppy_bitcast(nir_builder *b, nir_ssa_def *val,
+ const struct glsl_type *type)
+{
+ const unsigned num_components = glsl_get_vector_elements(type);
+ const unsigned bit_size = glsl_get_bit_size(type);
+
+ /* First, zero-pad to ensure that the value is big enough that when we
+ * bit-cast it, we don't lose anything.
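+ *
+ * For example, bitcasting a 3-component 32-bit value to a 64-bit scalar
+ * zero-pads to 4 components (vtn_align_u32(3, 2) == 4), bitcasts to a
+ * 2-component 64-bit vector, and finally shrinks to the single component
+ * the destination type wants.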
+ */
+ if (val->bit_size < bit_size) {
+ const unsigned src_num_components_needed =
+ vtn_align_u32(val->num_components, bit_size / val->bit_size);
+ val = nir_shrink_zero_pad_vec(b, val, src_num_components_needed);
+ }
+
+ val = nir_bitcast_vector(b, val, bit_size);
+
+ return nir_shrink_zero_pad_vec(b, val, num_components);
+}
+
+static bool
+vtn_get_mem_operands(struct vtn_builder *b, const uint32_t *w, unsigned count,
+ unsigned *idx, SpvMemoryAccessMask *access, unsigned *alignment,
+ SpvScope *dest_scope, SpvScope *src_scope)
+{
+ *access = 0;
+ *alignment = 0;
+ if (*idx >= count)
+ return false;
+
+ *access = w[(*idx)++];
+ if (*access & SpvMemoryAccessAlignedMask) {
+ vtn_assert(*idx < count);
+ *alignment = w[(*idx)++];
+ }
+
+ if (*access & SpvMemoryAccessMakePointerAvailableMask) {
+ vtn_assert(*idx < count);
+ vtn_assert(dest_scope);
+ *dest_scope = vtn_constant_uint(b, w[(*idx)++]);
+ }
+
+ if (*access & SpvMemoryAccessMakePointerVisibleMask) {
+ vtn_assert(*idx < count);
+ vtn_assert(src_scope);
+ *src_scope = vtn_constant_uint(b, w[(*idx)++]);
+ }
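+
+ /* For example, "OpLoad %f32 %res %ptr Aligned|MakePointerVisible 4 %scope"
+ * yields w[*idx] = the mask, w[*idx+1] = 4 (the alignment literal), and
+ * w[*idx+2] = the scope id, in exactly the order consumed above.
+ */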
+
+ return true;
+}
+
+SpvMemorySemanticsMask
+vtn_mode_to_memory_semantics(enum vtn_variable_mode mode)
+{
+ switch (mode) {
+ case vtn_variable_mode_ssbo:
+ case vtn_variable_mode_phys_ssbo:
+ return SpvMemorySemanticsUniformMemoryMask;
+ case vtn_variable_mode_workgroup:
+ return SpvMemorySemanticsWorkgroupMemoryMask;
+ case vtn_variable_mode_cross_workgroup:
+ return SpvMemorySemanticsCrossWorkgroupMemoryMask;
+ case vtn_variable_mode_atomic_counter:
+ return SpvMemorySemanticsAtomicCounterMemoryMask;
+ case vtn_variable_mode_image:
+ return SpvMemorySemanticsImageMemoryMask;
+ case vtn_variable_mode_output:
+ return SpvMemorySemanticsOutputMemoryMask;
+ default:
+ return SpvMemorySemanticsMaskNone;
+ }
+}
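+
+/* Example: the OpStore handling below ORs this into
+ * SpvMemorySemanticsMakeAvailableMask, so a MakePointerAvailable store to
+ * an SSBO emits a barrier with UniformMemory | MakeAvailable semantics.
+ */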
+
void
vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
const uint32_t *w, unsigned count)
switch (opcode) {
case SpvOpUndef: {
struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
- val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
+ val->type = vtn_get_type(b, w[1]);
break;
}
case SpvOpVariable: {
- struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;
+ struct vtn_type *ptr_type = vtn_get_type(b, w[1]);
struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);
SpvStorageClass storage_class = w[3];
- nir_constant *initializer = NULL;
- if (count > 4)
- initializer = vtn_value(b, w[4], vtn_value_type_constant)->constant;
+ nir_constant *const_initializer = NULL;
+ nir_variable *var_initializer = NULL;
+ if (count > 4) {
+ struct vtn_value *init = vtn_untyped_value(b, w[4]);
+ switch (init->value_type) {
+ case vtn_value_type_constant:
+ const_initializer = init->constant;
+ break;
+ case vtn_value_type_pointer:
+ var_initializer = init->pointer->var->var;
+ break;
+ default:
+ vtn_fail("SPIR-V variable initializer %u must be constant or pointer",
+ w[4]);
+ }
+ }
+
+ vtn_create_variable(b, val, ptr_type, storage_class, const_initializer, var_initializer);
+
+ break;
+ }
+
+ case SpvOpConstantSampler: {
+ /* Synthesize a pointer-to-sampler type, create a variable of that type,
+ * and give the variable a constant initializer with the sampler params */
+ struct vtn_type *sampler_type = vtn_value(b, w[1], vtn_value_type_type)->type;
+ struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);
+
+ struct vtn_type *ptr_type = rzalloc(b, struct vtn_type);
+ ptr_type->base_type = vtn_base_type_pointer;
+ ptr_type->deref = sampler_type;
+ ptr_type->storage_class = SpvStorageClassUniform;
+
+ ptr_type->type = nir_address_format_to_glsl_type(
+ vtn_mode_to_address_format(b, vtn_variable_mode_function));
+
+ vtn_create_variable(b, val, ptr_type, ptr_type->storage_class, NULL, NULL);
+
+ nir_variable *nir_var = val->pointer->var->var;
+ nir_var->data.sampler.is_inline_sampler = true;
+ nir_var->data.sampler.addressing_mode = w[3];
+ nir_var->data.sampler.normalized_coordinates = w[4];
+ nir_var->data.sampler.filter_mode = w[5];
- vtn_create_variable(b, val, ptr_type, storage_class, initializer);
break;
}
case SpvOpAccessChain:
case SpvOpPtrAccessChain:
- case SpvOpInBoundsAccessChain: {
+ case SpvOpInBoundsAccessChain:
+ case SpvOpInBoundsPtrAccessChain: {
struct vtn_access_chain *chain = vtn_access_chain_create(b, count - 4);
- chain->ptr_as_array = (opcode == SpvOpPtrAccessChain);
+ enum gl_access_qualifier access = 0;
+ chain->ptr_as_array = (opcode == SpvOpPtrAccessChain ||
+ opcode == SpvOpInBoundsPtrAccessChain);
unsigned idx = 0;
for (int i = 4; i < count; i++) {
struct vtn_value *link_val = vtn_untyped_value(b, w[i]);
if (link_val->value_type == vtn_value_type_constant) {
chain->link[idx].mode = vtn_access_mode_literal;
- switch (glsl_get_bit_size(link_val->type->type)) {
- case 8:
- chain->link[idx].id = link_val->constant->values[0].i8[0];
- break;
- case 16:
- chain->link[idx].id = link_val->constant->values[0].i16[0];
- break;
- case 32:
- chain->link[idx].id = link_val->constant->values[0].i32[0];
- break;
- case 64:
- chain->link[idx].id = link_val->constant->values[0].i64[0];
- break;
- default:
- vtn_fail("Invalid bit size");
- }
+ chain->link[idx].id = vtn_constant_int(b, w[i]);
} else {
chain->link[idx].mode = vtn_access_mode_id;
chain->link[idx].id = w[i];
-
}
idx++;
}
- struct vtn_type *ptr_type = vtn_value(b, w[1], vtn_value_type_type)->type;
- struct vtn_value *base_val = vtn_untyped_value(b, w[3]);
- if (base_val->value_type == vtn_value_type_sampled_image) {
- /* This is rather insane. SPIR-V allows you to use OpSampledImage
- * to combine an array of images with a single sampler to get an
- * array of sampled images that all share the same sampler.
- * Fortunately, this means that we can more-or-less ignore the
- * sampler when crawling the access chain, but it does leave us
- * with this rather awkward little special-case.
- */
- struct vtn_value *val =
- vtn_push_value(b, w[2], vtn_value_type_sampled_image);
- val->sampled_image = ralloc(b, struct vtn_sampled_image);
- val->sampled_image->type = base_val->sampled_image->type;
- val->sampled_image->image =
- vtn_pointer_dereference(b, base_val->sampled_image->image, chain);
- val->sampled_image->sampler = base_val->sampled_image->sampler;
- } else {
- vtn_assert(base_val->value_type == vtn_value_type_pointer);
- struct vtn_value *val =
- vtn_push_value(b, w[2], vtn_value_type_pointer);
- val->pointer = vtn_pointer_dereference(b, base_val->pointer, chain);
- val->pointer->ptr_type = ptr_type;
- }
+ struct vtn_type *ptr_type = vtn_get_type(b, w[1]);
+ struct vtn_pointer *base =
+ vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
+ struct vtn_pointer *ptr = vtn_pointer_dereference(b, base, chain);
+ ptr->ptr_type = ptr_type;
+ ptr->access |= access;
+ vtn_push_pointer(b, w[2], ptr);
break;
}
}
case SpvOpLoad: {
- struct vtn_type *res_type =
- vtn_value(b, w[1], vtn_value_type_type)->type;
+ struct vtn_type *res_type = vtn_get_type(b, w[1]);
struct vtn_value *src_val = vtn_value(b, w[3], vtn_value_type_pointer);
struct vtn_pointer *src = src_val->pointer;
vtn_assert_types_equal(b, opcode, res_type, src_val->type->deref);
- if (glsl_type_is_image(res_type->type) ||
- glsl_type_is_sampler(res_type->type)) {
- vtn_push_value(b, w[2], vtn_value_type_pointer)->pointer = src;
- return;
+ unsigned idx = 4, alignment;
+ SpvMemoryAccessMask access;
+ SpvScope scope;
+ vtn_get_mem_operands(b, w, count, &idx, &access, &alignment, NULL, &scope);
+ if (access & SpvMemoryAccessMakePointerVisibleMask) {
+ SpvMemorySemanticsMask semantics =
+ SpvMemorySemanticsMakeVisibleMask |
+ vtn_mode_to_memory_semantics(src->mode);
+ vtn_emit_memory_barrier(b, scope, semantics);
}
- vtn_push_ssa(b, w[2], res_type, vtn_variable_load(b, src));
+ vtn_push_ssa_value(b, w[2], vtn_variable_load(b, src));
break;
}
vtn_assert_types_equal(b, opcode, dest_val->type->deref, src_val->type);
- if (glsl_type_is_sampler(dest->type->type)) {
- if (b->wa_glslang_179) {
- vtn_warn("OpStore of a sampler detected. Doing on-the-fly copy "
- "propagation to workaround the problem.");
- vtn_assert(dest->var->copy_prop_sampler == NULL);
- dest->var->copy_prop_sampler =
- vtn_value(b, w[2], vtn_value_type_pointer)->pointer;
- } else {
- vtn_fail("Vulkan does not allow OpStore of a sampler or image.");
- }
- break;
- }
+ unsigned idx = 3, alignment;
+ SpvMemoryAccessMask access;
+ SpvScope scope;
+ vtn_get_mem_operands(b, w, count, &idx, &access, &alignment, &scope, NULL);
struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
vtn_variable_store(b, src, dest);
+
+ if (access & SpvMemoryAccessMakePointerAvailableMask) {
+ SpvMemorySemanticsMask semantics =
+ SpvMemorySemanticsMakeAvailableMask |
+ vtn_mode_to_memory_semantics(dest->mode);
+ vtn_emit_memory_barrier(b, scope, semantics);
+ }
break;
}
case SpvOpArrayLength: {
struct vtn_pointer *ptr =
vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
+ const uint32_t field = w[4];
+
+ vtn_fail_if(ptr->type->base_type != vtn_base_type_struct,
+ "OpArrayLength must take a pointer to a structure type");
+ vtn_fail_if(field != ptr->type->length - 1 ||
+ ptr->type->members[field]->base_type != vtn_base_type_array,
+ "OpArrayLength must reference the last memeber of the "
+ "structure and that must be an array");
- const uint32_t offset = ptr->var->type->offsets[w[4]];
- const uint32_t stride = ptr->var->type->members[w[4]]->stride;
+ const uint32_t offset = ptr->type->offsets[field];
+ const uint32_t stride = ptr->type->members[field]->stride;
if (!ptr->block_index) {
struct vtn_access_chain chain = {
.length = 0,
};
- ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
+ ptr = vtn_pointer_dereference(b, ptr, &chain);
vtn_assert(ptr->block_index);
}
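/* array_length = max(buffer_size - offset, 0) / stride */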
nir_imm_int(&b->nb, 0u)),
nir_imm_int(&b->nb, stride));
- struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
- val->ssa = vtn_create_ssa_value(b, glsl_uint_type());
- val->ssa->def = array_length;
+ vtn_push_nir_ssa(b, w[2], array_length);
+ break;
+ }
+
+ case SpvOpConvertPtrToU: {
+ struct vtn_type *u_type = vtn_get_type(b, w[1]);
+ struct vtn_type *ptr_type = vtn_get_value_type(b, w[3]);
+
+ vtn_fail_if(ptr_type->base_type != vtn_base_type_pointer ||
+ ptr_type->type == NULL,
+ "OpConvertPtrToU can only be used on physical pointers");
+
+ vtn_fail_if(u_type->base_type != vtn_base_type_vector &&
+ u_type->base_type != vtn_base_type_scalar,
+ "OpConvertPtrToU can only be used to cast to a vector or "
+ "scalar type");
+
+ /* The pointer will be converted to an SSA value automatically */
+ nir_ssa_def *ptr = vtn_get_nir_ssa(b, w[3]);
+ nir_ssa_def *u = nir_sloppy_bitcast(&b->nb, ptr, u_type->type);
+ vtn_push_nir_ssa(b, w[2], u);
+ break;
+ }
+
+ case SpvOpConvertUToPtr: {
+ struct vtn_type *ptr_type = vtn_get_type(b, w[1]);
+ struct vtn_type *u_type = vtn_get_value_type(b, w[3]);
+
+ vtn_fail_if(ptr_type->base_type != vtn_base_type_pointer ||
+ ptr_type->type == NULL,
+ "OpConvertUToPtr can only be used on physical pointers");
+
+ vtn_fail_if(u_type->base_type != vtn_base_type_vector &&
+ u_type->base_type != vtn_base_type_scalar,
+ "OpConvertUToPtr can only be used to cast from a vector or "
+ "scalar type");
+
+ nir_ssa_def *u = vtn_get_nir_ssa(b, w[3]);
+ nir_ssa_def *ptr = nir_sloppy_bitcast(&b->nb, u, ptr_type->type);
+ vtn_push_pointer(b, w[2], vtn_pointer_from_ssa(b, ptr, ptr_type));
break;
}
case SpvOpCopyMemorySized:
default:
- vtn_fail("Unhandled opcode");
+ vtn_fail_with_opcode("Unhandled opcode", opcode);
}
}