glsl_get_explicit_stride(array_type));
}
+/* Returns whether NIR types for variables in the given storage mode must
+ * keep their explicit layout information (offsets/strides) rather than
+ * being stripped down to bare types.
+ */
+static bool
+vtn_type_needs_explicit_layout(struct vtn_builder *b, enum vtn_variable_mode mode)
+{
+ /* For OpenCL we never want to strip the info from the types, and it makes
+ * type comparisons easier in later stages.
+ */
+ if (b->options->environment == NIR_SPIRV_OPENCL)
+ return true;
+
+ switch (mode) {
+ case vtn_variable_mode_input:
+ case vtn_variable_mode_output:
+ /* Layout decorations kept because we need offsets for XFB arrays of
+ * blocks.
+ */
+ return b->shader->info.has_transform_feedback_varyings;
+
+ case vtn_variable_mode_ssbo:
+ case vtn_variable_mode_phys_ssbo:
+ case vtn_variable_mode_ubo:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
const struct glsl_type *
vtn_type_get_nir_type(struct vtn_builder *b, struct vtn_type *type,
enum vtn_variable_mode mode)
}
if (mode == vtn_variable_mode_uniform) {
- struct vtn_type *tail = vtn_type_without_array(type);
- if (tail->base_type == vtn_base_type_image) {
- return wrap_type_in_array(tail->glsl_image, type->type);
- } else if (tail->base_type == vtn_base_type_sampler) {
- return wrap_type_in_array(glsl_bare_sampler_type(), type->type);
- } else if (tail->base_type == vtn_base_type_sampled_image) {
- return wrap_type_in_array(tail->image->glsl_image, type->type);
+ switch (type->base_type) {
+ case vtn_base_type_array: {
+ const struct glsl_type *elem_type =
+ vtn_type_get_nir_type(b, type->array_element, mode);
+
+ return glsl_array_type(elem_type, type->length,
+ glsl_get_explicit_stride(type->type));
+ }
+
+ case vtn_base_type_struct: {
+ bool need_new_struct = false;
+ const uint32_t num_fields = type->length;
+ NIR_VLA(struct glsl_struct_field, fields, num_fields);
+ for (unsigned i = 0; i < num_fields; i++) {
+ fields[i] = *glsl_get_struct_field_data(type->type, i);
+ const struct glsl_type *field_nir_type =
+ vtn_type_get_nir_type(b, type->members[i], mode);
+ if (fields[i].type != field_nir_type) {
+ fields[i].type = field_nir_type;
+ need_new_struct = true;
+ }
+ }
+ if (need_new_struct) {
+ if (glsl_type_is_interface(type->type)) {
+ return glsl_interface_type(fields, num_fields,
+ /* packing */ 0, false,
+ glsl_get_type_name(type->type));
+ } else {
+ return glsl_struct_type(fields, num_fields,
+ glsl_get_type_name(type->type),
+ glsl_struct_type_is_packed(type->type));
+ }
+ } else {
+ /* No changes, just pass it on */
+ return type->type;
+ }
+ }
+
+ case vtn_base_type_image:
+ return type->glsl_image;
+
+ case vtn_base_type_sampler:
+ return glsl_bare_sampler_type();
+
+ case vtn_base_type_sampled_image:
+ return type->image->glsl_image;
+
+ default:
+ return type->type;
}
}
+ /* Layout decorations are allowed but ignored in certain conditions,
+ * to allow SPIR-V generators to perform type deduplication. Discard
+ * unnecessary ones when passing to NIR.
+ */
+ if (!vtn_type_needs_explicit_layout(b, mode))
+ return glsl_get_bare_type(type->type);
+
return type->type;
}
SpvScope scope = SpvScopeInvocation;
SpvMemorySemanticsMask semantics = 0;
+ enum gl_access_qualifier access = 0;
+
struct vtn_value *res_val;
switch (opcode) {
case SpvOpAtomicExchange:
image = *res_val->image;
scope = vtn_constant_uint(b, w[4]);
semantics = vtn_constant_uint(b, w[5]);
+ access |= ACCESS_COHERENT;
break;
case SpvOpAtomicStore:
image = *res_val->image;
scope = vtn_constant_uint(b, w[2]);
semantics = vtn_constant_uint(b, w[3]);
+ access |= ACCESS_COHERENT;
break;
case SpvOpImageQuerySize:
* chains to find the NonUniform decoration. It's either right there or we
* can assume it doesn't exist.
*/
- enum gl_access_qualifier access = 0;
vtn_foreach_decoration(b, res_val, non_uniform_decoration_cb, &access);
nir_intrinsic_set_access(intrin, access);
nir_intrinsic_op op = get_ssbo_nir_atomic_op(b, opcode);
atomic = nir_intrinsic_instr_create(b->nb.shader, op);
+ nir_intrinsic_set_access(atomic, ACCESS_COHERENT);
+
int src = 0;
switch (opcode) {
case SpvOpAtomicLoad:
atomic = nir_intrinsic_instr_create(b->nb.shader, op);
atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);
+ if (ptr->mode != vtn_variable_mode_workgroup)
+ nir_intrinsic_set_access(atomic, ACCESS_COHERENT);
+
switch (opcode) {
case SpvOpAtomicLoad:
atomic->num_components = glsl_get_vector_elements(deref_type);
b->shader->info.cs.local_size[2] = mode->operands[2];
break;
- case SpvExecutionModeLocalSizeId:
- b->shader->info.cs.local_size[0] = vtn_constant_uint(b, mode->operands[0]);
- b->shader->info.cs.local_size[1] = vtn_constant_uint(b, mode->operands[1]);
- b->shader->info.cs.local_size[2] = vtn_constant_uint(b, mode->operands[2]);
- break;
-
case SpvExecutionModeLocalSizeHint:
- case SpvExecutionModeLocalSizeHintId:
break; /* Nothing to do with this */
case SpvExecutionModeOutputVertices:
case SpvExecutionModeDenormFlushToZero:
case SpvExecutionModeSignedZeroInfNanPreserve:
case SpvExecutionModeRoundingModeRTE:
- case SpvExecutionModeRoundingModeRTZ:
- /* Already handled in vtn_handle_rounding_mode_in_execution_mode() */
+ case SpvExecutionModeRoundingModeRTZ: {
+ unsigned execution_mode = 0;
+ switch (mode->exec_mode) {
+ case SpvExecutionModeDenormPreserve:
+ switch (mode->operands[0]) {
+ case 16: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP16; break;
+ case 32: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP32; break;
+ case 64: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP64; break;
+ default: vtn_fail("Floating point type not supported");
+ }
+ break;
+ case SpvExecutionModeDenormFlushToZero:
+ switch (mode->operands[0]) {
+ case 16: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16; break;
+ case 32: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32; break;
+ case 64: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64; break;
+ default: vtn_fail("Floating point type not supported");
+ }
+ break;
+ case SpvExecutionModeSignedZeroInfNanPreserve:
+ switch (mode->operands[0]) {
+ case 16: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16; break;
+ case 32: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32; break;
+ case 64: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP64; break;
+ default: vtn_fail("Floating point type not supported");
+ }
+ break;
+ case SpvExecutionModeRoundingModeRTE:
+ switch (mode->operands[0]) {
+ case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16; break;
+ case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32; break;
+ case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64; break;
+ default: vtn_fail("Floating point type not supported");
+ }
+ break;
+ case SpvExecutionModeRoundingModeRTZ:
+ switch (mode->operands[0]) {
+ case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16; break;
+ case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32; break;
+ case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64; break;
+ default: vtn_fail("Floating point type not supported");
+ }
+ break;
+ default:
+ break;
+ }
+
+ b->shader->info.float_controls_execution_mode |= execution_mode;
+ break;
+ }
+
+ case SpvExecutionModeLocalSizeId:
+ case SpvExecutionModeLocalSizeHintId:
+ /* Handled later by vtn_handle_execution_mode_id(). */
break;
default:
}
+/* Handles execution modes whose operands are SPIR-V <id>s (e.g.
+ * LocalSizeId); these must run after constants have been parsed, unlike
+ * the literal modes handled by vtn_handle_execution_mode().
+ */
static void
-vtn_handle_rounding_mode_in_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
-                             const struct vtn_decoration *mode, void *data)
+vtn_handle_execution_mode_id(struct vtn_builder *b, struct vtn_value *entry_point,
+                             const struct vtn_decoration *mode, UNUSED void *data)
{
-   vtn_assert(b->entry_point == entry_point);
-   unsigned execution_mode = 0;
+   vtn_assert(b->entry_point == entry_point);
-   switch(mode->exec_mode) {
-   case SpvExecutionModeDenormPreserve:
-      switch (mode->operands[0]) {
-      case 16: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP16; break;
-      case 32: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP32; break;
-      case 64: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP64; break;
-      default: vtn_fail("Floating point type not supported");
-      }
-      break;
-   case SpvExecutionModeDenormFlushToZero:
-      switch (mode->operands[0]) {
-      case 16: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16; break;
-      case 32: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32; break;
-      case 64: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64; break;
-      default: vtn_fail("Floating point type not supported");
-      }
-      break;
-   case SpvExecutionModeSignedZeroInfNanPreserve:
-      switch (mode->operands[0]) {
-      case 16: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16; break;
-      case 32: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32; break;
-      case 64: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP64; break;
-      default: vtn_fail("Floating point type not supported");
-      }
-      break;
-   case SpvExecutionModeRoundingModeRTE:
-      switch (mode->operands[0]) {
-      case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16; break;
-      case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32; break;
-      case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64; break;
-      default: vtn_fail("Floating point type not supported");
-      }
+   switch (mode->exec_mode) {
+   case SpvExecutionModeLocalSizeId:
+      b->shader->info.cs.local_size[0] = vtn_constant_uint(b, mode->operands[0]);
+      b->shader->info.cs.local_size[1] = vtn_constant_uint(b, mode->operands[1]);
+      b->shader->info.cs.local_size[2] = vtn_constant_uint(b, mode->operands[2]);
       break;
-   case SpvExecutionModeRoundingModeRTZ:
-      switch (mode->operands[0]) {
-      case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16; break;
-      case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32; break;
-      case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64; break;
-      default: vtn_fail("Floating point type not supported");
-      }
+
+   case SpvExecutionModeLocalSizeHintId:
+      /* Nothing to do with this hint. */
       break;
   default:
+      /* Nothing to do.  Literal execution modes already handled by
+       * vtn_handle_execution_mode(). */
      break;
   }
-
-   b->shader->info.float_controls_execution_mode |= execution_mode;
}
static bool
if (stage == MESA_SHADER_GEOMETRY)
b->shader->info.gs.invocations = 1;
- /* Parse rounding mode execution modes. This has to happen earlier than
- * other changes in the execution modes since they can affect, for example,
- * the result of the floating point constants.
- */
+ /* Parse execution modes. */
vtn_foreach_execution_mode(b, b->entry_point,
- vtn_handle_rounding_mode_in_execution_mode, NULL);
+ vtn_handle_execution_mode, NULL);
b->specializations = spec;
b->num_specializations = num_spec;
words = vtn_foreach_instruction(b, words, word_end,
vtn_handle_variable_or_type_instruction);
- /* Parse execution modes */
+ /* Parse execution modes that depend on IDs. Must happen after we have
+ * constants parsed.
+ */
vtn_foreach_execution_mode(b, b->entry_point,
- vtn_handle_execution_mode, NULL);
+ vtn_handle_execution_mode_id, NULL);
if (b->workgroup_size_builtin) {
vtn_assert(b->workgroup_size_builtin->type->type ==
if (entry_point->num_params && b->shader->info.stage == MESA_SHADER_KERNEL)
entry_point = vtn_emit_kernel_entry_point_wrapper(b, entry_point);
+ /* Structurize the CFG. */
+ nir_lower_goto_ifs(b->shader);
+
entry_point->is_entrypoint = true;
/* When multiple shader stages exist in the same SPIR-V module, we