          mode == vtn_variable_mode_push_constant;
}
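+/* Modes whose storage is visible to other invocations (buffer,
+ * workgroup, and similar shared memory), as opposed to storage that is
+ * private to a single invocation.
+ */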
+static bool
+vtn_mode_is_cross_invocation(struct vtn_builder *b,
+                             enum vtn_variable_mode mode)
+{
+   return mode == vtn_variable_mode_ssbo ||
+          mode == vtn_variable_mode_ubo ||
+          mode == vtn_variable_mode_phys_ssbo ||
+          mode == vtn_variable_mode_push_constant ||
+          mode == vtn_variable_mode_workgroup ||
+          mode == vtn_variable_mode_cross_workgroup;
+}
+
static bool
vtn_pointer_is_external_block(struct vtn_builder *b,
                              struct vtn_pointer *ptr)
nir_deref_instr *
vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
{
-   if (b->wa_glslang_179) {
-      /* Do on-the-fly copy propagation for samplers. */
-      if (ptr->var && ptr->var->copy_prop_sampler)
-         return vtn_pointer_to_deref(b, ptr->var->copy_prop_sampler);
-   }
-
   vtn_assert(!vtn_pointer_uses_ssa_offset(b, ptr));

   if (!ptr->deref) {
      struct vtn_access_chain chain = {
                         enum gl_access_qualifier access,
                         struct vtn_ssa_value **inout)
{
+   if (ptr->mode == vtn_variable_mode_uniform) {
+      if (ptr->type->base_type == vtn_base_type_image ||
+          ptr->type->base_type == vtn_base_type_sampler) {
+         /* See also our handling of OpTypeSampler and OpTypeImage */
+         vtn_assert(load);
+         (*inout)->def = vtn_pointer_to_ssa(b, ptr);
+         return;
+      } else if (ptr->type->base_type == vtn_base_type_sampled_image) {
+         /* See also our handling of OpTypeSampledImage */
+         vtn_assert(load);
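+         /* A sampled image combines the texture and the sampler in one
+          * variable, so the same deref serves as both the image and the
+          * sampler.
+          */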
+         struct vtn_sampled_image si = {
+            .image = vtn_pointer_to_deref(b, ptr),
+            .sampler = vtn_pointer_to_deref(b, ptr),
+         };
+         (*inout)->def = vtn_sampled_image_to_nir_ssa(b, si);
+         return;
+      }
+   }
+
   enum glsl_base_type base_type = glsl_get_base_type(ptr->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
      if (glsl_type_is_vector_or_scalar(ptr->type->type)) {
         /* We hit a vector or scalar; go ahead and emit the load[s] */
         nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
-         if (vtn_pointer_is_external_block(b, ptr)) {
-            /* If it's external, we call nir_load/store_deref directly. The
-             * vtn_local_load/store helpers are too clever and do magic to
-             * avoid array derefs of vectors. That magic is both less
-             * efficient than the direct load/store and, in the case of
+         if (vtn_mode_is_cross_invocation(b, ptr->mode)) {
+            /* If it's cross-invocation, we call nir_load/store_deref
+             * directly. The vtn_local_load/store helpers are too clever and
+             * do magic to avoid array derefs of vectors. That magic is both
+             * less efficient than the direct load/store and, in the case of
             * stores, is broken because it creates a race condition if two
             * threads are writing to different components of the same vector
             * due to the load+insert+store it uses to emulate the array
_vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
                   struct vtn_pointer *src)
{
-   vtn_assert(src->type->type == dest->type->type);
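+   /* The types need only match up to layout decorations; mismatched
+    * layouts may have leaked through due to type deduplication in the
+    * SPIR-V, so compare the bare types.
+    */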
+   vtn_assert(glsl_get_bare_type(src->type->type) ==
+              glsl_get_bare_type(dest->type->type));
   enum glsl_base_type base_type = glsl_get_base_type(src->type->type);
   switch (base_type) {
   case GLSL_TYPE_UINT:
      *location = SYSTEM_VALUE_GLOBAL_INVOCATION_INDEX;
      set_mode_system_value(b, mode);
      break;
+   case SpvBuiltInGlobalOffset:
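+      /* OpenCL's get_global_offset() */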
+      *location = SYSTEM_VALUE_BASE_GLOBAL_INVOCATION_ID;
+      set_mode_system_value(b, mode);
+      break;
   case SpvBuiltInBaseVertex:
      /* OpenGL gl_BaseVertex (SYSTEM_VALUE_BASE_VERTEX) is not the same
       * semantic as Vulkan BaseVertex (SYSTEM_VALUE_FIRST_VERTEX).
      set_mode_system_value(b, mode);
      break;
   case SpvBuiltInViewIndex:
-      *location = SYSTEM_VALUE_VIEW_INDEX;
-      set_mode_system_value(b, mode);
+      if (b->options && b->options->view_index_is_input) {
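+         /* Some drivers consume the view index as an ordinary shader
+          * input rather than as a system value.
+          */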
+         *location = VARYING_SLOT_VIEW_INDEX;
+         vtn_assert(*mode == nir_var_shader_in);
+      } else {
+         *location = SYSTEM_VALUE_VIEW_INDEX;
+         set_mode_system_value(b, mode);
+      }
      break;
   case SpvBuiltInSubgroupEqMask:
      *location = SYSTEM_VALUE_SUBGROUP_EQ_MASK;
   ptr->type = ptr_type->deref;
   ptr->ptr_type = ptr_type;
-   if (b->wa_glslang_179) {
-      /* To work around https://github.com/KhronosGroup/glslang/issues/179 we
-       * need to whack the mode because it creates a function parameter with
-       * the Function storage class even though it's a pointer to a sampler.
-       * If we don't do this, then NIR won't get rid of the deref_cast for us.
-       */
-      if (ptr->mode == vtn_variable_mode_function &&
-          (ptr->type->base_type == vtn_base_type_sampler ||
-           ptr->type->base_type == vtn_base_type_sampled_image)) {
-         ptr->mode = vtn_variable_mode_uniform;
-         nir_mode = nir_var_uniform;
-      }
-   }
-
   if (vtn_pointer_uses_ssa_offset(b, ptr)) {
      /* This pointer type needs to have actual storage */
      vtn_assert(ptr_type->type);
      b->shader->info.num_ssbos++;
      break;
   case vtn_variable_mode_uniform:
-      if (glsl_type_is_image(without_array->type))
-         b->shader->info.num_images++;
-      else if (glsl_type_is_sampler(without_array->type))
-         b->shader->info.num_textures++;
+      if (without_array->base_type == vtn_base_type_image) {
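+         /* glsl_image holds an image type for storage images and a
+          * sampler type for sampled images, which tells us which counter
+          * to bump.
+          */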
+         if (glsl_type_is_image(without_array->glsl_image))
+            b->shader->info.num_images++;
+         else if (glsl_type_is_sampler(without_array->glsl_image))
+            b->shader->info.num_textures++;
+      }
      break;
   case vtn_variable_mode_push_constant:
      b->shader->num_uniforms = vtn_type_block_size(b, type);
      /* Create the variable normally */
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);
-      /* Workgroup variables don't have any explicit layout but some
-       * layouts may have leaked through due to type deduplication in the
-       * SPIR-V.
-       */
      var->var->type = vtn_type_get_nir_type(b, var->type, var->mode);
      var->var->data.mode = nir_var_mem_shared;
      break;
      var->var = rzalloc(b->shader, nir_variable);
      var->var->name = ralloc_strdup(var->var, val->name);
-      /* In Vulkan, shader I/O variables don't have any explicit layout but
-       * some layouts may have leaked through due to type deduplication in
-       * the SPIR-V. We do, however, keep the layouts in the variable's
-       * interface_type because we need offsets for XFB arrays of blocks.
-       */
      var->var->type = vtn_type_get_nir_type(b, var->type, var->mode);
      var->var->data.mode = nir_mode;
      var->var->data.patch = var->patch;
   if (var->mode == vtn_variable_mode_uniform ||
       var->mode == vtn_variable_mode_ubo ||
-       var->mode == vtn_variable_mode_ssbo) {
+       var->mode == vtn_variable_mode_ssbo ||
+       var->mode == vtn_variable_mode_atomic_counter) {
      /* XXX: We still need the binding information in the nir_variable
       * for these. We should fix that.
       */
      var->var->data.index = var->input_attachment_index;
      var->var->data.offset = var->offset;

-      if (glsl_type_is_image(without_array->type))
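+      /* Only image types carry an image format; samplers do not. */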
+      if (glsl_type_is_image(glsl_without_array(var->var->type)))
         var->var->data.image.format = without_array->image_format;
   }
}
      struct vtn_type *ptr_type = vtn_get_type(b, w[1]);
-      struct vtn_value *base_val = vtn_untyped_value(b, w[3]);
-      if (base_val->value_type == vtn_value_type_sampled_image) {
-         /* This is rather insane. SPIR-V allows you to use OpSampledImage
-          * to combine an array of images with a single sampler to get an
-          * array of sampled images that all share the same sampler.
-          * Fortunately, this means that we can more-or-less ignore the
-          * sampler when crawling the access chain, but it does leave us
-          * with this rather awkward little special-case.
-          */
-         struct vtn_value *val =
-            vtn_push_value(b, w[2], vtn_value_type_sampled_image);
-         val->sampled_image = ralloc(b, struct vtn_sampled_image);
-         val->sampled_image->image =
-            vtn_pointer_dereference(b, base_val->sampled_image->image, chain);
-         val->sampled_image->sampler = base_val->sampled_image->sampler;
-         val->sampled_image->image =
-            vtn_decorate_pointer(b, val, val->sampled_image->image);
-         val->sampled_image->sampler =
-            vtn_decorate_pointer(b, val, val->sampled_image->sampler);
-      } else {
-         vtn_assert(base_val->value_type == vtn_value_type_pointer);
-         struct vtn_pointer *ptr =
-            vtn_pointer_dereference(b, base_val->pointer, chain);
-         ptr->ptr_type = ptr_type;
-         ptr->access |= access;
-         vtn_push_pointer(b, w[2], ptr);
-      }
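+      /* The base of an access chain is always a regular pointer value,
+       * even for image and sampler variables, so no special case is
+       * needed here.
+       */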
+      struct vtn_pointer *base =
+         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
+      struct vtn_pointer *ptr = vtn_pointer_dereference(b, base, chain);
+      ptr->ptr_type = ptr_type;
+      ptr->access |= access;
+      vtn_push_pointer(b, w[2], ptr);
      break;
   }
      vtn_assert_types_equal(b, opcode, res_type, src_val->type->deref);

-      if (res_type->base_type == vtn_base_type_image ||
-          res_type->base_type == vtn_base_type_sampler) {
-         vtn_push_pointer(b, w[2], src);
-         return;
-      } else if (res_type->base_type == vtn_base_type_sampled_image) {
-         struct vtn_value *val =
-            vtn_push_value(b, w[2], vtn_value_type_sampled_image);
-         val->sampled_image = ralloc(b, struct vtn_sampled_image);
-         val->sampled_image->image = val->sampled_image->sampler =
-            vtn_decorate_pointer(b, val, src);
-         return;
-      }
-
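+      /* Image, sampler, and sampled-image loads are handled by the
+       * vtn_variable_mode_uniform path in the generic variable-load
+       * code above.
+       */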
      if (count > 4) {
         unsigned idx = 5;
         SpvMemoryAccessMask access = w[4];
      vtn_assert_types_equal(b, opcode, dest_val->type->deref, src_val->type);

-      if (glsl_type_is_sampler(dest->type->type)) {
-         if (b->wa_glslang_179) {
-            vtn_warn("OpStore of a sampler detected. Doing on-the-fly copy "
-                     "propagation to workaround the problem.");
-            vtn_assert(dest->var->copy_prop_sampler == NULL);
-
-            struct vtn_value *v = vtn_untyped_value(b, w[2]);
-            if (v->value_type == vtn_value_type_sampled_image) {
-               dest->var->copy_prop_sampler = v->sampled_image->sampler;
-            } else {
-               vtn_assert(v->value_type == vtn_value_type_pointer);
-               dest->var->copy_prop_sampler = v->pointer;
-            }
-         } else {
-            vtn_fail("Vulkan does not allow OpStore of a sampler or image.");
-         }
-         break;
-      }
-
      struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
      vtn_variable_store(b, src, dest);