{
return ptr->mode == vtn_variable_mode_ssbo ||
ptr->mode == vtn_variable_mode_ubo ||
+ ptr->mode == vtn_variable_mode_phys_ssbo ||
ptr->mode == vtn_variable_mode_push_constant ||
(ptr->mode == vtn_variable_mode_workgroup &&
b->options->lower_workgroup_access_to_offsets);
nir_ssa_def *ssa = vtn_ssa_value(b, link.id)->def;
if (ssa->bit_size != bit_size)
ssa = nir_i2i(&b->nb, ssa, bit_size);
- if (stride != 1)
- ssa = nir_imul_imm(&b->nb, ssa, stride);
- return ssa;
+ return nir_imul_imm(&b->nb, ssa, stride);
}
}
}
}
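+/* Returns the glsl_type used to represent a resource index/pointer for the
+ * given variable mode.  The UBO and SSBO pointer types are supplied by the
+ * driver through the spirv_to_nir options; any other mode is invalid here.
+ */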
+static const struct glsl_type *
+vtn_ptr_type_for_mode(struct vtn_builder *b, enum vtn_variable_mode mode)
+{
+ switch (mode) {
+ case vtn_variable_mode_ubo:
+ return b->options->ubo_ptr_type;
+ case vtn_variable_mode_ssbo:
+ return b->options->ssbo_ptr_type;
+ default:
+ vtn_fail("Invalid mode for vulkan_resource_index");
+ }
+}
+
static nir_ssa_def *
vtn_variable_resource_index(struct vtn_builder *b, struct vtn_variable *var,
nir_ssa_def *desc_array_index)
{
if (!desc_array_index) {
- vtn_assert(glsl_type_is_struct(var->type->type));
+ vtn_assert(glsl_type_is_struct_or_ifc(var->type->type));
desc_array_index = nir_imm_int(&b->nb, 0);
}
nir_intrinsic_set_binding(instr, var->binding);
nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, var->mode));
- nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
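+   /* When UBO/SSBO access is lowered to offsets, the resource index is a
+    * plain 32-bit uint.  Otherwise it is a full pointer whose component
+    * count and bit size come from the driver-provided pointer type.
+    */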
+ const struct glsl_type *index_type =
+ b->options->lower_ubo_ssbo_access_to_offsets ?
+ glsl_uint_type() : vtn_ptr_type_for_mode(b, var->mode);
+
+ instr->num_components = glsl_get_vector_elements(index_type);
+ nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
+ glsl_get_bit_size(index_type), NULL);
nir_builder_instr_insert(&b->nb, &instr->instr);
return &instr->dest.ssa;
instr->src[1] = nir_src_for_ssa(offset_index);
nir_intrinsic_set_desc_type(instr, vk_desc_type_for_mode(b, mode));
- nir_ssa_dest_init(&instr->instr, &instr->dest, 1, 32, NULL);
+ const struct glsl_type *index_type =
+ b->options->lower_ubo_ssbo_access_to_offsets ?
+ glsl_uint_type() : vtn_ptr_type_for_mode(b, mode);
+
+ instr->num_components = glsl_get_vector_elements(index_type);
+ nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
+ glsl_get_bit_size(index_type), NULL);
nir_builder_instr_insert(&b->nb, &instr->instr);
return &instr->dest.ssa;
static nir_ssa_def *
vtn_descriptor_load(struct vtn_builder *b, enum vtn_variable_mode mode,
- const struct glsl_type *desc_type, nir_ssa_def *desc_index)
+ nir_ssa_def *desc_index)
{
nir_intrinsic_instr *desc_load =
nir_intrinsic_instr_create(b->nb.shader,
nir_intrinsic_load_vulkan_descriptor);
desc_load->src[0] = nir_src_for_ssa(desc_index);
- desc_load->num_components = glsl_get_vector_elements(desc_type);
nir_intrinsic_set_desc_type(desc_load, vk_desc_type_for_mode(b, mode));
+
+ const struct glsl_type *ptr_type = vtn_ptr_type_for_mode(b, mode);
+
+ desc_load->num_components = glsl_get_vector_elements(ptr_type);
nir_ssa_dest_init(&desc_load->instr, &desc_load->dest,
desc_load->num_components,
- glsl_get_bit_size(desc_type), NULL);
+ glsl_get_bit_size(ptr_type), NULL);
nir_builder_instr_insert(&b->nb, &desc_load->instr);
return &desc_load->dest.ssa;
* final block index. Insert a descriptor load and cast to a deref to
* start the deref chain.
*/
- nir_ssa_def *desc =
- vtn_descriptor_load(b, base->mode, base->ptr_type->type, block_index);
+ nir_ssa_def *desc = vtn_descriptor_load(b, base->mode, block_index);
assert(base->mode == vtn_variable_mode_ssbo ||
base->mode == vtn_variable_mode_ubo);
nir_variable_mode nir_mode =
- base->mode == vtn_variable_mode_ssbo ? nir_var_ssbo : nir_var_ubo;
+ base->mode == vtn_variable_mode_ssbo ? nir_var_mem_ssbo : nir_var_mem_ubo;
tail = nir_build_deref_cast(&b->nb, desc, nir_mode, type->type,
base->ptr_type->stride);
}
for (; idx < deref_chain->length; idx++) {
- if (glsl_type_is_struct(type->type)) {
+ if (glsl_type_is_struct_or_ifc(type->type)) {
vtn_assert(deref_chain->link[idx].mode == vtn_access_mode_literal);
unsigned field = deref_chain->link[idx].id;
tail = nir_build_deref_struct(&b->nb, tail, field);
nir_deref_instr *
vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
{
- /* Do on-the-fly copy propagation for samplers. */
- if (ptr->var && ptr->var->copy_prop_sampler)
- return vtn_pointer_to_deref(b, ptr->var->copy_prop_sampler);
+ if (b->wa_glslang_179) {
+ /* Do on-the-fly copy propagation for samplers. */
+ if (ptr->var && ptr->var->copy_prop_sampler)
+ return vtn_pointer_to_deref(b, ptr->var->copy_prop_sampler);
+ }
vtn_assert(!vtn_pointer_uses_ssa_offset(b, ptr));
if (!ptr->deref) {
unsigned elems = glsl_get_length(deref->type);
for (unsigned i = 0; i < elems; i++) {
nir_deref_instr *child =
- nir_build_deref_array(&b->nb, deref, nir_imm_int(&b->nb, i));
+ nir_build_deref_array_imm(&b->nb, deref, i);
_vtn_local_load_store(b, load, child, inout->elems[i]);
}
} else {
- vtn_assert(glsl_type_is_struct(deref->type));
+ vtn_assert(glsl_type_is_struct_or_ifc(deref->type));
unsigned elems = glsl_get_length(deref->type);
for (unsigned i = 0; i < elems; i++) {
nir_deref_instr *child = nir_build_deref_struct(&b->nb, deref, i);
*location = SYSTEM_VALUE_GLOBAL_INVOCATION_ID;
set_mode_system_value(b, mode);
break;
+ case SpvBuiltInGlobalLinearId:
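+      /* A linearized form of GlobalInvocationId, used by OpenCL kernels. */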
+ *location = SYSTEM_VALUE_GLOBAL_INVOCATION_INDEX;
+ set_mode_system_value(b, mode);
+ break;
case SpvBuiltInBaseVertex:
/* OpenGL gl_BaseVertex (SYSTEM_VALUE_BASE_VERTEX) is not the same
* semantic as SPIR-V BaseVertex (SYSTEM_VALUE_FIRST_VERTEX).
switch (builtin) {
case SpvBuiltInTessLevelOuter:
case SpvBuiltInTessLevelInner:
+ case SpvBuiltInClipDistance:
+ case SpvBuiltInCullDistance:
var_data->compact = true;
break;
- case SpvBuiltInFragCoord:
- var_data->pixel_center_integer = b->pixel_center_integer;
- /* fallthrough */
- case SpvBuiltInSamplePosition:
- var_data->origin_upper_left = b->origin_upper_left;
- break;
default:
break;
}
case SpvDecorationFPRoundingMode:
case SpvDecorationFPFastMathMode:
case SpvDecorationAlignment:
- vtn_warn("Decoration only allowed for CL-style kernels: %s",
- spirv_decoration_to_string(dec->decoration));
+ if (b->shader->info.stage != MESA_SHADER_KERNEL) {
+ vtn_warn("Decoration only allowed for CL-style kernels: %s",
+ spirv_decoration_to_string(dec->decoration));
+ }
break;
case SpvDecorationHlslSemanticGOOGLE:
/* HLSL semantic decorations can safely be ignored by the driver. */
break;
+ case SpvDecorationRestrictPointerEXT:
+ case SpvDecorationAliasedPointerEXT:
+ /* TODO: We should actually plumb alias information through NIR. */
+ break;
+
default:
vtn_fail("Unhandled decoration");
}
*/
if (dec->decoration == SpvDecorationLocation) {
unsigned location = dec->literals[0];
- bool is_vertex_input = false;
if (b->shader->info.stage == MESA_SHADER_FRAGMENT &&
vtn_var->mode == vtn_variable_mode_output) {
location += FRAG_RESULT_DATA0;
} else if (b->shader->info.stage == MESA_SHADER_VERTEX &&
vtn_var->mode == vtn_variable_mode_input) {
- is_vertex_input = true;
location += VERT_ATTRIB_GENERIC0;
} else if (vtn_var->mode == vtn_variable_mode_input ||
vtn_var->mode == vtn_variable_mode_output) {
} else {
/* This handles the structure member case */
assert(vtn_var->var->members);
- for (unsigned i = 0; i < vtn_var->var->num_members; i++) {
- vtn_var->var->members[i].location = location;
- const struct glsl_type *member_type =
- glsl_get_struct_field(vtn_var->var->interface_type, i);
- location += glsl_count_attribute_slots(member_type,
- is_vertex_input);
- }
+
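+         /* Remember a variable-level Location as the base; members that
+          * lack their own Location are assigned one from it later in
+          * assign_missing_member_locations().
+          */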
+ if (member == -1)
+ vtn_var->base_location = location;
+ else
+ vtn_var->var->members[member].location = location;
}
+
return;
} else {
if (vtn_var->var) {
if (vtn_var->var->num_members == 0) {
- assert(member == -1);
- apply_var_decoration(b, &vtn_var->var->data, dec);
+         /* We call this function on types as well as variables, and not all
+          * struct types get split, so we can end up with stray member
+          * decorations; just ignore them.
+ */
+ if (member == -1)
+ apply_var_decoration(b, &vtn_var->var->data, dec);
} else if (member >= 0) {
/* Member decorations must come from a type */
assert(val->value_type == vtn_value_type_type);
case SpvStorageClassUniform:
if (interface_type->block) {
mode = vtn_variable_mode_ubo;
- nir_mode = nir_var_ubo;
+ nir_mode = nir_var_mem_ubo;
} else if (interface_type->buffer_block) {
mode = vtn_variable_mode_ssbo;
- nir_mode = nir_var_ssbo;
+ nir_mode = nir_var_mem_ssbo;
} else {
/* Default-block uniforms, coming from gl_spirv */
mode = vtn_variable_mode_uniform;
break;
case SpvStorageClassStorageBuffer:
mode = vtn_variable_mode_ssbo;
- nir_mode = nir_var_ssbo;
+ nir_mode = nir_var_mem_ssbo;
+ break;
+ case SpvStorageClassPhysicalStorageBufferEXT:
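+      /* The client hands us raw pointer values for this storage class, so
+       * it maps to NIR's untyped global memory mode.
+       */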
+ mode = vtn_variable_mode_phys_ssbo;
+ nir_mode = nir_var_mem_global;
break;
case SpvStorageClassUniformConstant:
mode = vtn_variable_mode_uniform;
break;
case SpvStorageClassPrivate:
mode = vtn_variable_mode_private;
- nir_mode = nir_var_private;
+ nir_mode = nir_var_shader_temp;
break;
case SpvStorageClassFunction:
mode = vtn_variable_mode_function;
- nir_mode = nir_var_function;
+ nir_mode = nir_var_function_temp;
break;
case SpvStorageClassWorkgroup:
mode = vtn_variable_mode_workgroup;
- nir_mode = nir_var_shared;
+ nir_mode = nir_var_mem_shared;
break;
case SpvStorageClassAtomicCounter:
mode = vtn_variable_mode_uniform;
nir_mode = nir_var_uniform;
break;
case SpvStorageClassCrossWorkgroup:
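+      /* CrossWorkgroup is OpenCL's global memory; it also maps to NIR's
+       * untyped global memory mode.
+       */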
+ mode = vtn_variable_mode_cross_workgroup;
+ nir_mode = nir_var_mem_global;
+ break;
case SpvStorageClassGeneric:
default:
vtn_fail("Unhandled variable storage class");
}
} else {
if (vtn_pointer_is_external_block(b, ptr) &&
- vtn_type_contains_block(b, ptr->type)) {
- const unsigned bit_size = glsl_get_bit_size(ptr->ptr_type->type);
- const unsigned num_components =
- glsl_get_vector_elements(ptr->ptr_type->type);
-
+ vtn_type_contains_block(b, ptr->type) &&
+ ptr->mode != vtn_variable_mode_phys_ssbo) {
/* In this case, we're looking for a block index and not an actual
* deref.
+ *
+ * For PhysicalStorageBufferEXT pointers, we don't have a block index
+ * at all because we get the pointer directly from the client. This
+          * assumes that there will never be an SSBO binding variable using
+          * the PhysicalStorageBufferEXT storage class.  This assumption
+          * appears to be correct according to the Vulkan spec: in the table
+          * "Shader Resource and Storage Class Correspondence," only the
+          * Uniform storage class with BufferBlock or the StorageBuffer
+          * storage class with Block may be used for SSBO bindings.
*/
if (!ptr->block_index) {
/* If we don't have a block_index then we must be a pointer to the
ptr = vtn_nir_deref_pointer_dereference(b, ptr, &chain);
}
- /* A block index is just a 32-bit value but the pointer has some
- * other dimensionality. Cram it in there and we'll unpack it later
- * in vtn_pointer_from_ssa.
- */
- const unsigned swiz[4] = { 0, };
- return nir_swizzle(&b->nb, nir_u2u(&b->nb, ptr->block_index, bit_size),
- swiz, num_components, false);
+ return ptr->block_index;
} else {
return &vtn_pointer_to_deref(b, ptr)->dest.ssa;
}
vtn_pointer_from_ssa(struct vtn_builder *b, nir_ssa_def *ssa,
struct vtn_type *ptr_type)
{
- vtn_assert(ssa->num_components <= 2 && ssa->bit_size == 32);
vtn_assert(ptr_type->base_type == vtn_base_type_pointer);
struct vtn_type *interface_type = ptr_type->deref;
ptr->type = ptr_type->deref;
ptr->ptr_type = ptr_type;
+ if (b->wa_glslang_179) {
+ /* To work around https://github.com/KhronosGroup/glslang/issues/179 we
+       * need to whack the mode because glslang creates a function parameter
+       * with the Function storage class even though it's a pointer to a
+       * sampler.
+ * If we don't do this, then NIR won't get rid of the deref_cast for us.
+ */
+ if (ptr->mode == vtn_variable_mode_function &&
+ (ptr->type->base_type == vtn_base_type_sampler ||
+ ptr->type->base_type == vtn_base_type_sampled_image)) {
+ ptr->mode = vtn_variable_mode_uniform;
+ nir_mode = nir_var_uniform;
+ }
+ }
+
if (vtn_pointer_uses_ssa_offset(b, ptr)) {
/* This pointer type needs to have actual storage */
vtn_assert(ptr_type->type);
} else {
const struct glsl_type *deref_type = ptr_type->deref->type;
if (!vtn_pointer_is_external_block(b, ptr)) {
- assert(ssa->bit_size == 32 && ssa->num_components == 1);
ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
- glsl_get_bare_type(deref_type), 0);
- } else if (vtn_type_contains_block(b, ptr->type)) {
+ deref_type, 0);
+ } else if (vtn_type_contains_block(b, ptr->type) &&
+ ptr->mode != vtn_variable_mode_phys_ssbo) {
/* This is a pointer to somewhere in an array of blocks, not a
- * pointer to somewhere inside the block. We squashed it into a
- * random vector type before so just pick off the first channel and
- * cast it back to 32 bits.
+ * pointer to somewhere inside the block. Set the block index
+ * instead of making a cast.
*/
- ptr->block_index = nir_u2u32(&b->nb, nir_channel(&b->nb, ssa, 0));
+ ptr->block_index = ssa;
} else {
/* This is a pointer to something internal or a pointer inside a
* block. It's just a regular cast.
+ *
+ * For PhysicalStorageBufferEXT pointers, we don't have a block index
+ * at all because we get the pointer directly from the client. This
+          * assumes that there will never be an SSBO binding variable using
+          * the PhysicalStorageBufferEXT storage class.  This assumption
+          * appears to be correct according to the Vulkan spec: in the table
+          * "Shader Resource and Storage Class Correspondence," only the
+          * Uniform storage class with BufferBlock or the StorageBuffer
+          * storage class with Block may be used for SSBO bindings.
*/
ptr->deref = nir_build_deref_cast(&b->nb, ssa, nir_mode,
ptr_type->deref->type,
return false;
}
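+/* Assigns locations to any input/output struct members that were not
+ * explicitly decorated, following the Vulkan rules quoted in the loop
+ * below.
+ */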
+static void
+assign_missing_member_locations(struct vtn_variable *var)
+{
+ unsigned length =
+ glsl_get_length(glsl_without_array(var->type->type));
+ int location = var->base_location;
+
+ for (unsigned i = 0; i < length; i++) {
+ /* From the Vulkan spec:
+ *
+ * “If the structure type is a Block but without a Location, then each
+ * of its members must have a Location decoration.”
+       */
+ if (var->type->block) {
+ assert(var->base_location != -1 ||
+ var->var->members[i].location != -1);
+ }
+
+ /* From the Vulkan spec:
+ *
+ * “Any member with its own Location decoration is assigned that
+ * location. Each remaining member is assigned the location after the
+ * immediately preceding member in declaration order.”
+ */
+ if (var->var->members[i].location != -1)
+ location = var->var->members[i].location;
+ else
+ var->var->members[i].location = location;
+
+ /* Below we use type instead of interface_type, because interface_type
+       * is only available when it is a Block.  This code also supports
+       * inputs/outputs that are just structs.
+ */
+ const struct glsl_type *member_type =
+ glsl_get_struct_field(glsl_without_array(var->type->type), i);
+
+ location +=
+ glsl_count_attribute_slots(member_type,
+ false /* is_gl_vertex_input */);
+ }
+}
+
static void
vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
struct vtn_type *ptr_type, SpvStorageClass storage_class,
case vtn_variable_mode_push_constant:
b->shader->num_uniforms = vtn_type_block_size(b, type);
break;
+
+ case vtn_variable_mode_phys_ssbo:
+ vtn_fail("Cannot create a variable with the "
+ "PhysicalStorageBufferEXT storage class");
+ break;
+
default:
/* No tallying is needed */
break;
struct vtn_variable *var = rzalloc(b, struct vtn_variable);
var->type = type;
var->mode = mode;
+ var->base_location = -1;
vtn_assert(val->value_type == vtn_value_type_pointer);
val->pointer = vtn_pointer_for_variable(b, var, ptr_type);
/* Private variables don't have any explicit layout but some layouts
* may have leaked through due to type deduplication in the SPIR-V.
*/
- var->var->type = glsl_get_bare_type(var->type->type);
+ var->var->type = var->type->type;
}
var->var->data.mode = nir_mode;
var->var->data.location = -1;
* layouts may have leaked through due to type deduplication in the
* SPIR-V.
*/
- var->var->type = glsl_get_bare_type(var->type->type);
- var->var->data.mode = nir_var_shared;
+ var->var->type = var->type->type;
+ var->var->data.mode = nir_var_mem_shared;
}
break;
var->patch = false;
vtn_foreach_decoration(b, val, var_is_patch_cb, &var->patch);
if (glsl_type_is_array(var->type->type) &&
- glsl_type_is_struct(without_array->type)) {
+ glsl_type_is_struct_or_ifc(without_array->type)) {
vtn_foreach_decoration(b, vtn_value(b, without_array->id,
vtn_value_type_type),
var_is_patch_cb, &var->patch);
* able to preserve that information.
*/
- struct vtn_type *interface_type = var->type;
+ struct vtn_type *per_vertex_type = var->type;
if (is_per_vertex_inout(var, b->shader->info.stage)) {
/* In Geometry shaders (and some tessellation), inputs come
* in per-vertex arrays. However, some builtins come in
* any case, there are no non-builtin arrays allowed so this
* check should be sufficient.
*/
- interface_type = var->type->array_element;
+ per_vertex_type = var->type->array_element;
}
var->var = rzalloc(b->shader, nir_variable);
var->var->name = ralloc_strdup(var->var, val->name);
/* In Vulkan, shader I/O variables don't have any explicit layout but
* some layouts may have leaked through due to type deduplication in
- * the SPIR-V.
+ * the SPIR-V. We do, however, keep the layouts in the variable's
+ * interface_type because we need offsets for XFB arrays of blocks.
*/
- var->var->type = glsl_get_bare_type(var->type->type);
- var->var->interface_type = interface_type->type;
+ var->var->type = var->type->type;
var->var->data.mode = nir_mode;
var->var->data.patch = var->patch;
- if (glsl_type_is_struct(interface_type->type)) {
+ /* Figure out the interface block type. */
+ struct vtn_type *iface_type = per_vertex_type;
+ if (var->mode == vtn_variable_mode_output &&
+ (b->shader->info.stage == MESA_SHADER_VERTEX ||
+ b->shader->info.stage == MESA_SHADER_TESS_EVAL ||
+ b->shader->info.stage == MESA_SHADER_GEOMETRY)) {
+ /* For vertex data outputs, we can end up with arrays of blocks for
+ * transform feedback where each array element corresponds to a
+ * different XFB output buffer.
+ */
+ while (iface_type->base_type == vtn_base_type_array)
+ iface_type = iface_type->array_element;
+ }
+ if (iface_type->base_type == vtn_base_type_struct && iface_type->block)
+ var->var->interface_type = iface_type->type;
+
+ if (per_vertex_type->base_type == vtn_base_type_struct &&
+ per_vertex_type->block) {
/* It's a struct. Set it up as per-member. */
- var->var->num_members = glsl_get_length(interface_type->type);
+ var->var->num_members = glsl_get_length(per_vertex_type->type);
var->var->members = rzalloc_array(var->var, struct nir_variable_data,
var->var->num_members);
for (unsigned i = 0; i < var->var->num_members; i++) {
var->var->members[i].mode = nir_mode;
var->var->members[i].patch = var->patch;
+ var->var->members[i].location = -1;
}
}
/* For inputs and outputs, we need to grab locations and builtin
- * information from the interface type.
+ * information from the per-vertex type.
*/
- vtn_foreach_decoration(b, vtn_value(b, interface_type->id,
+ vtn_foreach_decoration(b, vtn_value(b, per_vertex_type->id,
vtn_value_type_type),
var_decoration_cb, var);
break;
case vtn_variable_mode_ubo:
case vtn_variable_mode_ssbo:
case vtn_variable_mode_push_constant:
+ case vtn_variable_mode_cross_workgroup:
/* These don't need actual variables. */
break;
+
+ case vtn_variable_mode_phys_ssbo:
+ unreachable("Should have been caught before");
}
if (initializer) {
vtn_foreach_decoration(b, val, var_decoration_cb, var);
+ if ((var->mode == vtn_variable_mode_input ||
+ var->mode == vtn_variable_mode_output) &&
+ var->var->members) {
+ assign_missing_member_locations(var);
+ }
+
if (var->mode == vtn_variable_mode_uniform) {
/* XXX: We still need the binding information in the nir_variable
* for these. We should fix that.
glsl_get_type_name(src_type->type));
}
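+/* Returns a value with exactly num_components components: extra source
+ * components are dropped and missing ones are filled with zero.  Despite
+ * the name, this can grow a vector as well as shrink it.
+ */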
+static nir_ssa_def *
+nir_shrink_zero_pad_vec(nir_builder *b, nir_ssa_def *val,
+ unsigned num_components)
+{
+ if (val->num_components == num_components)
+ return val;
+
+ nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
+ for (unsigned i = 0; i < num_components; i++) {
+ if (i < val->num_components)
+ comps[i] = nir_channel(b, val, i);
+ else
+ comps[i] = nir_imm_intN_t(b, 0, val->bit_size);
+ }
+ return nir_vec(b, comps, num_components);
+}
+
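+/* Bit-casts a value to the vector shape of the given type.  If the source
+ * bit size is smaller, the source is zero-padded first so the cast drops
+ * no bits, then the result is trimmed to the destination component count.
+ * For example, casting a vec3 of 32-bit values to a 64-bit scalar pads to
+ * a vec4, bit-casts to two 64-bit components, and keeps only the first.
+ */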
+static nir_ssa_def *
+nir_sloppy_bitcast(nir_builder *b, nir_ssa_def *val,
+ const struct glsl_type *type)
+{
+ const unsigned num_components = glsl_get_vector_elements(type);
+ const unsigned bit_size = glsl_get_bit_size(type);
+
+ /* First, zero-pad to ensure that the value is big enough that when we
+    * bit-cast it, we don't lose anything.
+ */
+ if (val->bit_size < bit_size) {
+ const unsigned src_num_components_needed =
+ vtn_align_u32(val->num_components, bit_size / val->bit_size);
+ val = nir_shrink_zero_pad_vec(b, val, src_num_components_needed);
+ }
+
+ val = nir_bitcast_vector(b, val, bit_size);
+
+ return nir_shrink_zero_pad_vec(b, val, num_components);
+}
+
void
vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
const uint32_t *w, unsigned count)
case SpvOpAccessChain:
case SpvOpPtrAccessChain:
- case SpvOpInBoundsAccessChain: {
+ case SpvOpInBoundsAccessChain:
+ case SpvOpInBoundsPtrAccessChain: {
struct vtn_access_chain *chain = vtn_access_chain_create(b, count - 4);
- chain->ptr_as_array = (opcode == SpvOpPtrAccessChain);
+      chain->ptr_as_array = (opcode == SpvOpPtrAccessChain ||
+                             opcode == SpvOpInBoundsPtrAccessChain);
unsigned idx = 0;
for (int i = 4; i < count; i++) {
vtn_assert_types_equal(b, opcode, dest_val->type->deref, src_val->type);
if (glsl_type_is_sampler(dest->type->type)) {
- vtn_warn("OpStore of a sampler detected. Doing on-the-fly copy "
- "propagation to workaround the problem.");
- vtn_assert(dest->var->copy_prop_sampler == NULL);
- dest->var->copy_prop_sampler =
- vtn_value(b, w[2], vtn_value_type_pointer)->pointer;
+ if (b->wa_glslang_179) {
+ vtn_warn("OpStore of a sampler detected. Doing on-the-fly copy "
+ "propagation to workaround the problem.");
+ vtn_assert(dest->var->copy_prop_sampler == NULL);
+ dest->var->copy_prop_sampler =
+ vtn_value(b, w[2], vtn_value_type_pointer)->pointer;
+ } else {
+ vtn_fail("Vulkan does not allow OpStore of a sampler or image.");
+ }
break;
}
case SpvOpArrayLength: {
struct vtn_pointer *ptr =
vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
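+      /* The member index of the run-time array being queried */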
+ const uint32_t field = w[4];
+
+ vtn_fail_if(ptr->type->base_type != vtn_base_type_struct,
+ "OpArrayLength must take a pointer to a structure type");
+ vtn_fail_if(field != ptr->type->length - 1 ||
+ ptr->type->members[field]->base_type != vtn_base_type_array,
+                  "OpArrayLength must reference the last member of the "
+ "structure and that must be an array");
- const uint32_t offset = ptr->var->type->offsets[w[4]];
- const uint32_t stride = ptr->var->type->members[w[4]]->stride;
+ const uint32_t offset = ptr->type->offsets[field];
+ const uint32_t stride = ptr->type->members[field]->stride;
if (!ptr->block_index) {
struct vtn_access_chain chain = {
.length = 0,
};
- ptr = vtn_ssa_offset_pointer_dereference(b, ptr, &chain);
+ ptr = vtn_pointer_dereference(b, ptr, &chain);
vtn_assert(ptr->block_index);
}
break;
}
+ case SpvOpConvertPtrToU: {
+ struct vtn_value *u_val = vtn_push_value(b, w[2], vtn_value_type_ssa);
+
+ vtn_fail_if(u_val->type->base_type != vtn_base_type_vector &&
+ u_val->type->base_type != vtn_base_type_scalar,
+ "OpConvertPtrToU can only be used to cast to a vector or "
+ "scalar type");
+
+ /* The pointer will be converted to an SSA value automatically */
+ nir_ssa_def *ptr_ssa = vtn_ssa_value(b, w[3])->def;
+
+ u_val->ssa = vtn_create_ssa_value(b, u_val->type->type);
+ u_val->ssa->def = nir_sloppy_bitcast(&b->nb, ptr_ssa, u_val->type->type);
+ break;
+ }
+
+ case SpvOpConvertUToPtr: {
+ struct vtn_value *ptr_val =
+ vtn_push_value(b, w[2], vtn_value_type_pointer);
+ struct vtn_value *u_val = vtn_value(b, w[3], vtn_value_type_ssa);
+
+ vtn_fail_if(ptr_val->type->type == NULL,
+ "OpConvertUToPtr can only be used on physical pointers");
+
+ vtn_fail_if(u_val->type->base_type != vtn_base_type_vector &&
+ u_val->type->base_type != vtn_base_type_scalar,
+ "OpConvertUToPtr can only be used to cast from a vector or "
+ "scalar type");
+
+ nir_ssa_def *ptr_ssa = nir_sloppy_bitcast(&b->nb, u_val->ssa->def,
+ ptr_val->type->type);
+ ptr_val->pointer = vtn_pointer_from_ssa(b, ptr_ssa, ptr_val->type);
+ break;
+ }
+
case SpvOpCopyMemorySized:
default:
vtn_fail("Unhandled opcode");