return idx;
}
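+/* vtn_foreach_decoration() callback: ORs ACCESS_NON_UNIFORM into the
+ * gl_access_qualifier bitmask passed through void_ctx when the decorated
+ * value carries SpvDecorationNonUniformEXT.
+ */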
+static void
+non_uniform_decoration_cb(struct vtn_builder *b,
+                          struct vtn_value *val, int member,
+                          const struct vtn_decoration *dec, void *void_ctx)
+{
+   enum gl_access_qualifier *access = void_ctx;
+   switch (dec->decoration) {
+   case SpvDecorationNonUniformEXT:
+      *access |= ACCESS_NON_UNIFORM;
+      break;
+
+   default:
+      break;
+   }
+}
+
static void
vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
const uint32_t *w, unsigned count)
is_shadow && glsl_get_components(ret_type->type) == 1;
instr->component = gather_component;
-   if (image && (image->access & ACCESS_NON_UNIFORM))
+   /* The Vulkan spec says:
+    *
+    *    "If an instruction loads from or stores to a resource (including
+    *     atomics and image instructions) and the resource descriptor being
+    *     accessed is not dynamically uniform, then the operand corresponding
+    *     to that resource (e.g. the pointer or sampled image operand) must be
+    *     decorated with NonUniform."
+    *
+    * It's very careful to specify that the exact operand must be decorated
+    * NonUniform. The SPIR-V parser is not expected to chase through long
+    * chains to find the NonUniform decoration. It's either right there or we
+    * can assume it doesn't exist.
+    */
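+   /* As an illustrative sketch (the IDs and types below are hypothetical,
+    * not taken from this patch), the decoration sits directly on the loaded
+    * sampled image that feeds the sampling instruction:
+    *
+    *    OpDecorate %si NonUniform
+    *    ...
+    *    %si  = OpLoad %sampled_image_type %ptr
+    *    %res = OpImageSampleImplicitLod %v4float %si %coord
+    */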
+   enum gl_access_qualifier access = 0;
+   vtn_foreach_decoration(b, sampled_val, non_uniform_decoration_cb, &access);
+
+   if (image && (access & ACCESS_NON_UNIFORM))
instr->texture_non_uniform = true;
-   if (sampler && (sampler->access & ACCESS_NON_UNIFORM))
+   if (sampler && (access & ACCESS_NON_UNIFORM))
instr->sampler_non_uniform = true;
/* for non-query ops, get dest_type from sampler type */
SpvScope scope = SpvScopeInvocation;
SpvMemorySemanticsMask semantics = 0;
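+   /* Keep the SPIR-V value for the resource operand around so it can be
+    * checked for a NonUniform decoration once the operands are unpacked.
+    */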
+   struct vtn_value *res_val;
switch (opcode) {
case SpvOpAtomicExchange:
case SpvOpAtomicCompareExchange:
case SpvOpAtomicOr:
case SpvOpAtomicXor:
case SpvOpAtomicFAddEXT:
-      image = *vtn_value(b, w[3], vtn_value_type_image_pointer)->image;
+      res_val = vtn_value(b, w[3], vtn_value_type_image_pointer);
+      image = *res_val->image;
scope = vtn_constant_uint(b, w[4]);
semantics = vtn_constant_uint(b, w[5]);
break;
case SpvOpAtomicStore:
-      image = *vtn_value(b, w[1], vtn_value_type_image_pointer)->image;
+      res_val = vtn_value(b, w[1], vtn_value_type_image_pointer);
+      image = *res_val->image;
scope = vtn_constant_uint(b, w[2]);
semantics = vtn_constant_uint(b, w[3]);
break;
case SpvOpImageQuerySize:
-      image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
+      res_val = vtn_value(b, w[3], vtn_value_type_pointer);
+      image.image = res_val->pointer;
image.coord = NULL;
image.sample = NULL;
image.lod = NULL;
break;
case SpvOpImageRead: {
-      image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
+      res_val = vtn_value(b, w[3], vtn_value_type_pointer);
+      image.image = res_val->pointer;
image.coord = get_image_coord(b, w[4]);
const SpvImageOperandsMask operands =
}
case SpvOpImageWrite: {
-      image.image = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
+      res_val = vtn_value(b, w[1], vtn_value_type_pointer);
+      image.image = res_val->pointer;
image.coord = get_image_coord(b, w[2]);
/* texel = w[3] */
intrin->src[2] = nir_src_for_ssa(image.sample);
}
-   nir_intrinsic_set_access(intrin, image.image->access);
+   /* The Vulkan spec says:
+    *
+    *    "If an instruction loads from or stores to a resource (including
+    *     atomics and image instructions) and the resource descriptor being
+    *     accessed is not dynamically uniform, then the operand corresponding
+    *     to that resource (e.g. the pointer or sampled image operand) must be
+    *     decorated with NonUniform."
+    *
+    * It's very careful to specify that the exact operand must be decorated
+    * NonUniform. The SPIR-V parser is not expected to chase through long
+    * chains to find the NonUniform decoration. It's either right there or we
+    * can assume it doesn't exist.
+    */
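+   /* Same rationale as the texture path above: only the decorations on
+    * res_val itself determine the access qualifiers.
+    */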
+   enum gl_access_qualifier access = 0;
+   vtn_foreach_decoration(b, res_val, non_uniform_decoration_cb, &access);
+   nir_intrinsic_set_access(intrin, access);
switch (opcode) {
case SpvOpAtomicLoad:
struct vtn_ssa_value *transposed;
const struct glsl_type *type;
-
-   /* Access qualifiers */
-   enum gl_access_qualifier access;
};
enum vtn_base_type {
}
}
-static inline enum gl_access_qualifier vtn_value_access(struct vtn_value *value)
-{
-   switch (value->value_type) {
-   case vtn_value_type_invalid:
-   case vtn_value_type_undef:
-   case vtn_value_type_string:
-   case vtn_value_type_decoration_group:
-   case vtn_value_type_constant:
-   case vtn_value_type_function:
-   case vtn_value_type_block:
-   case vtn_value_type_extension:
-      return 0;
-   case vtn_value_type_type:
-      return value->type->access;
-   case vtn_value_type_pointer:
-      return value->pointer->access;
-   case vtn_value_type_ssa:
-      return value->ssa->access;
-   case vtn_value_type_image_pointer:
-      return value->image->image->access;
-   case vtn_value_type_sampled_image:
-      return value->sampled_image->image->access |
-             value->sampled_image->sampler->access;
-   }
-
-   unreachable("invalid type");
-}
-
struct vtn_ssa_value *vtn_ssa_value(struct vtn_builder *b, uint32_t value_id);
struct vtn_value *vtn_push_value_pointer(struct vtn_builder *b,
return val;
}
-static void
-ssa_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
-                  const struct vtn_decoration *dec, void *void_ctx)
-{
-   switch (dec->decoration) {
-   case SpvDecorationNonUniformEXT:
-      if (val->value_type == vtn_value_type_ssa) {
-         val->ssa->access |= ACCESS_NON_UNIFORM;
-      } else if (val->value_type == vtn_value_type_pointer) {
-         val->pointer->access |= ACCESS_NON_UNIFORM;
-      } else if (val->value_type == vtn_value_type_sampled_image) {
-         val->sampled_image->image->access |= ACCESS_NON_UNIFORM;
-      } else if (val->value_type == vtn_value_type_image_pointer) {
-         val->image->image->access |= ACCESS_NON_UNIFORM;
-      }
-      break;
-
-   default:
-      break;
-   }
-}
-
struct vtn_value *
vtn_push_ssa(struct vtn_builder *b, uint32_t value_id,
struct vtn_type *type, struct vtn_ssa_value *ssa)
} else {
val = vtn_push_value(b, value_id, vtn_value_type_ssa);
val->ssa = ssa;
-      vtn_foreach_decoration(b, val, ssa_decoration_cb, NULL);
}
return val;
}
src_copy.type = dst->type;
*dst = src_copy;
-   vtn_foreach_decoration(b, dst, ssa_decoration_cb, NULL);
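+   /* Access qualifiers no longer live on SSA values; instead, re-run the
+    * pointer decoration handling so a copied pointer picks up decorations
+    * (e.g. NonUniform) on the destination value.
+    */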
+   if (dst->value_type == vtn_value_type_pointer)
+      dst->pointer = vtn_decorate_pointer(b, dst, dst->pointer);
}
static struct vtn_access_chain *
chain->link[idx].mode = vtn_access_mode_id;
chain->link[idx].id = w[i];
}
-      access |= vtn_value_access(link_val);
idx++;
}
u_val->ssa = vtn_create_ssa_value(b, u_val->type->type);
u_val->ssa->def = nir_sloppy_bitcast(&b->nb, ptr_ssa->def, u_val->type->type);
-      u_val->ssa->access |= ptr_ssa->access;
break;
}
ptr_val->type->type);
ptr_val->pointer = vtn_pointer_from_ssa(b, ptr_ssa, ptr_val->type);
vtn_foreach_decoration(b, ptr_val, ptr_decoration_cb, ptr_val->pointer);
-      ptr_val->pointer->access |= u_val->ssa->access;
break;
}