glsl_get_explicit_stride(array_type));
}
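+/* Whether the NIR type produced for a variable in the given mode should keep
+ * the explicit layout information (offsets/strides) from the SPIR-V; when it
+ * is not needed, the caller strips it with glsl_get_bare_type().
+ */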
+static bool
+vtn_type_needs_explicit_layout(struct vtn_builder *b, enum vtn_variable_mode mode)
+{
+ /* For OpenCL we never want to strip the layout information from the types,
+ * and keeping it makes type comparisons easier in later stages.
+ */
+ if (b->options->environment == NIR_SPIRV_OPENCL)
+ return true;
+
+ switch (mode) {
+ case vtn_variable_mode_input:
+ case vtn_variable_mode_output:
+ /* Layout decorations are kept because we need offsets for XFB arrays of
+ * blocks.
+ */
+ return b->shader->info.has_transform_feedback_varyings;
+
+ case vtn_variable_mode_ssbo:
+ case vtn_variable_mode_phys_ssbo:
+ case vtn_variable_mode_ubo:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
const struct glsl_type *
vtn_type_get_nir_type(struct vtn_builder *b, struct vtn_type *type,
enum vtn_variable_mode mode)
}
}
+ /* Layout decorations are allowed but ignored in certain conditions so that
+ * SPIR-V generators can perform type deduplication. Discard the
+ * unnecessary ones when passing the type to NIR.
+ */
+ if (!vtn_type_needs_explicit_layout(b, mode))
+ return glsl_get_bare_type(type->type);
+
return type->type;
}
SpvScope scope = SpvScopeInvocation;
SpvMemorySemanticsMask semantics = 0;
+ enum gl_access_qualifier access = 0;
+
struct vtn_value *res_val;
switch (opcode) {
case SpvOpAtomicExchange:
image = *res_val->image;
scope = vtn_constant_uint(b, w[4]);
semantics = vtn_constant_uint(b, w[5]);
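+ /* Atomic operations on images are always marked coherent. */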
+ access |= ACCESS_COHERENT;
break;
case SpvOpAtomicStore:
image = *res_val->image;
scope = vtn_constant_uint(b, w[2]);
semantics = vtn_constant_uint(b, w[3]);
+ access |= ACCESS_COHERENT;
break;
case SpvOpImageQuerySize:
* chains to find the NonUniform decoration. It's either right there or we
* can assume it doesn't exist.
*/
- enum gl_access_qualifier access = 0;
vtn_foreach_decoration(b, res_val, non_uniform_decoration_cb, &access);
nir_intrinsic_set_access(intrin, access);
nir_intrinsic_op op = get_ssbo_nir_atomic_op(b, opcode);
atomic = nir_intrinsic_instr_create(b->nb.shader, op);
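+ /* SSBO atomics are always marked coherent. */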
+ nir_intrinsic_set_access(atomic, ACCESS_COHERENT);
+
int src = 0;
switch (opcode) {
case SpvOpAtomicLoad:
atomic = nir_intrinsic_instr_create(b->nb.shader, op);
atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);
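+ /* Atomics are marked coherent except on workgroup (shared) memory. */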
+ if (ptr->mode != vtn_variable_mode_workgroup)
+ nir_intrinsic_set_access(atomic, ACCESS_COHERENT);
+
switch (opcode) {
case SpvOpAtomicLoad:
atomic->num_components = glsl_get_vector_elements(deref_type);
if (entry_point->num_params && b->shader->info.stage == MESA_SHADER_KERNEL)
entry_point = vtn_emit_kernel_entry_point_wrapper(b, entry_point);
+ /* structurize the CFG */
+ nir_lower_goto_ifs(b->shader);
+
entry_point->is_entrypoint = true;
/* When multiple shader stages exist in the same SPIR-V module, we