+SpvMemorySemanticsMask
+vtn_storage_class_to_memory_semantics(SpvStorageClass sc)
+{
+ switch (sc) {
+ case SpvStorageClassStorageBuffer:
+ case SpvStorageClassPhysicalStorageBuffer:
+ return SpvMemorySemanticsUniformMemoryMask;
+ case SpvStorageClassWorkgroup:
+ return SpvMemorySemanticsWorkgroupMemoryMask;
+ default:
+ return SpvMemorySemanticsMaskNone;
+ }
+}
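+
+/* A sketch of one plausible caller (not part of this patch, names are
+ * illustrative): a load through a coherent StorageBuffer pointer could
+ * combine MakeVisible with the storage-class mapping above:
+ *
+ *    SpvMemorySemanticsMask semantics =
+ *       SpvMemorySemanticsMakeVisibleMask |
+ *       vtn_storage_class_to_memory_semantics(SpvStorageClassStorageBuffer);
+ */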
+
+static void
+vtn_split_barrier_semantics(struct vtn_builder *b,
+ SpvMemorySemanticsMask semantics,
+ SpvMemorySemanticsMask *before,
+ SpvMemorySemanticsMask *after)
+{
+ /* For memory semantics embedded in operations, we split them into up to
+ * two barriers, to be added before and after the operation. This is less
+ * precise than propagating the semantics all the way to the final backend
+ * stage, but it still results in correct execution.
+ *
+ * A further improvement would be to pipe this information (and use it!)
+ * through the next compiler layers, at the expense of making the handling
+ * of barriers more complicated.
+ */
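+
+ /* As a concrete illustration of the split below: an atomic carrying
+ * AcquireRelease | WorkgroupMemory semantics ends up with
+ * *before = Release | WorkgroupMemory and *after = Acquire | WorkgroupMemory,
+ * i.e. a release barrier is emitted just before the operation and an
+ * acquire barrier just after it.
+ */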
+
+ *before = SpvMemorySemanticsMaskNone;
+ *after = SpvMemorySemanticsMaskNone;
+
+ SpvMemorySemanticsMask order_semantics =
+ semantics & (SpvMemorySemanticsAcquireMask |
+ SpvMemorySemanticsReleaseMask |
+ SpvMemorySemanticsAcquireReleaseMask |
+ SpvMemorySemanticsSequentiallyConsistentMask);
+
+ if (util_bitcount(order_semantics) > 1) {
+ /* Old GLSLang versions incorrectly set all the ordering bits. This was
+ * fixed in commit c51287d744fb6e7e9ccc09f6f8451e6c64b1dad6 of the glslang
+ * repository, and has been in GLSLang since revision "SPIRV99.1321"
+ * (from Jul-2016).
+ */
+ vtn_warn("Multiple memory ordering semantics specified, "
+ "assuming AcquireRelease.");
+ order_semantics = SpvMemorySemanticsAcquireReleaseMask;
+ }
+
+ const SpvMemorySemanticsMask av_vis_semantics =
+ semantics & (SpvMemorySemanticsMakeAvailableMask |
+ SpvMemorySemanticsMakeVisibleMask);
+
+ const SpvMemorySemanticsMask storage_semantics =
+ semantics & (SpvMemorySemanticsUniformMemoryMask |
+ SpvMemorySemanticsSubgroupMemoryMask |
+ SpvMemorySemanticsWorkgroupMemoryMask |
+ SpvMemorySemanticsCrossWorkgroupMemoryMask |
+ SpvMemorySemanticsAtomicCounterMemoryMask |
+ SpvMemorySemanticsImageMemoryMask |
+ SpvMemorySemanticsOutputMemoryMask);
+
+ const SpvMemorySemanticsMask other_semantics =
+ semantics & ~(order_semantics | av_vis_semantics | storage_semantics);
+
+ if (other_semantics)
+ vtn_warn("Ignoring unhandled memory semantics: %u\n", other_semantics);
+
+ /* SequentiallyConsistent is treated as AcquireRelease. */
+
+ /* The RELEASE barrier happens BEFORE the operation and is usually
+ * associated with a Store. Write operations with matching semantics
+ * will not be reordered after the Store.
+ */
+ if (order_semantics & (SpvMemorySemanticsReleaseMask |
+ SpvMemorySemanticsAcquireReleaseMask |
+ SpvMemorySemanticsSequentiallyConsistentMask)) {
+ *before |= SpvMemorySemanticsReleaseMask | storage_semantics;
+ }
+
+ /* The ACQUIRE barrier happens AFTER the operation and is usually
+ * associated with a Load. Operations with matching semantics will
+ * not be reordered before the Load.
+ */
+ if (order_semantics & (SpvMemorySemanticsAcquireMask |
+ SpvMemorySemanticsAcquireReleaseMask |
+ SpvMemorySemanticsSequentiallyConsistentMask)) {
+ *after |= SpvMemorySemanticsAcquireMask | storage_semantics;
+ }
+
+ if (av_vis_semantics & SpvMemorySemanticsMakeVisibleMask)
+ *before |= SpvMemorySemanticsMakeVisibleMask | storage_semantics;
+
+ if (av_vis_semantics & SpvMemorySemanticsMakeAvailableMask)
+ *after |= SpvMemorySemanticsMakeAvailableMask | storage_semantics;
+}
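+
+/* A sketch of the intended usage (the call sites are not part of this hunk):
+ * an operation with embedded memory semantics would be wrapped by two scoped
+ * barriers built from the split masks, roughly
+ *
+ *    SpvMemorySemanticsMask before, after;
+ *    vtn_split_barrier_semantics(b, semantics, &before, &after);
+ *    vtn_emit_scoped_memory_barrier(b, scope, before);
+ *    ... emit the load/store/atomic itself ...
+ *    vtn_emit_scoped_memory_barrier(b, scope, after);
+ */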
+
+static void
+vtn_emit_scoped_memory_barrier(struct vtn_builder *b, SpvScope scope,
+ SpvMemorySemanticsMask semantics)
+{
+ nir_memory_semantics nir_semantics = 0;
+
+ SpvMemorySemanticsMask order_semantics =
+ semantics & (SpvMemorySemanticsAcquireMask |
+ SpvMemorySemanticsReleaseMask |
+ SpvMemorySemanticsAcquireReleaseMask |
+ SpvMemorySemanticsSequentiallyConsistentMask);
+
+ if (util_bitcount(order_semantics) > 1) {
+ /* Old GLSLang versions incorrectly set all the ordering bits. This was
+ * fixed in commit c51287d744fb6e7e9ccc09f6f8451e6c64b1dad6 of the glslang
+ * repository, and has been in GLSLang since revision "SPIRV99.1321"
+ * (from Jul-2016).
+ */
+ vtn_warn("Multiple memory ordering semantics bits specified, "
+ "assuming AcquireRelease.");
+ order_semantics = SpvMemorySemanticsAcquireReleaseMask;
+ }
+
+ switch (order_semantics) {
+ case 0:
+ /* Not an ordering barrier. */
+ break;
+
+ case SpvMemorySemanticsAcquireMask:
+ nir_semantics = NIR_MEMORY_ACQUIRE;
+ break;
+
+ case SpvMemorySemanticsReleaseMask:
+ nir_semantics = NIR_MEMORY_RELEASE;
+ break;
+
+ case SpvMemorySemanticsSequentiallyConsistentMask:
+ /* Fall through. Treated as AcquireRelease in Vulkan. */
+ case SpvMemorySemanticsAcquireReleaseMask:
+ nir_semantics = NIR_MEMORY_ACQUIRE | NIR_MEMORY_RELEASE;
+ break;
+
+ default:
+ unreachable("Invalid memory order semantics");
+ }
+
+ if (semantics & SpvMemorySemanticsMakeAvailableMask) {
+ vtn_fail_if(!b->options->caps.vk_memory_model,
+ "To use MakeAvailable memory semantics the VulkanMemoryModel "
+ "capability must be declared.");
+ nir_semantics |= NIR_MEMORY_MAKE_AVAILABLE;
+ }
+
+ if (semantics & SpvMemorySemanticsMakeVisibleMask) {
+ vtn_fail_if(!b->options->caps.vk_memory_model,
+ "To use MakeVisible memory semantics the VulkanMemoryModel "
+ "capability must be declared.");
+ nir_semantics |= NIR_MEMORY_MAKE_VISIBLE;
+ }
+
+ /* Vulkan Environment for SPIR-V says "SubgroupMemory, CrossWorkgroupMemory,
+ * and AtomicCounterMemory are ignored".
+ */
+ semantics &= ~(SpvMemorySemanticsSubgroupMemoryMask |
+ SpvMemorySemanticsCrossWorkgroupMemoryMask |
+ SpvMemorySemanticsAtomicCounterMemoryMask);
+
+ /* TODO: Consider adding nir_var_mem_image mode to NIR so it can be used
+ * for SpvMemorySemanticsImageMemoryMask.
+ */
+
+ nir_variable_mode modes = 0;
+ if (semantics & (SpvMemorySemanticsUniformMemoryMask |
+ SpvMemorySemanticsImageMemoryMask)) {
+ modes |= nir_var_uniform |
+ nir_var_mem_ubo |
+ nir_var_mem_ssbo |
+ nir_var_mem_global;
+ }
+ if (semantics & SpvMemorySemanticsWorkgroupMemoryMask)
+ modes |= nir_var_mem_shared;
+ if (semantics & SpvMemorySemanticsOutputMemoryMask) {
+ modes |= nir_var_shader_out;
+ }
+
+ /* No barrier to add. */
+ if (nir_semantics == 0 || modes == 0)
+ return;
+
+ nir_scope nir_scope;
+ switch (scope) {
+ case SpvScopeDevice:
+ vtn_fail_if(b->options->caps.vk_memory_model &&
+ !b->options->caps.vk_memory_model_device_scope,
+ "If the Vulkan memory model is declared and any instruction "
+ "uses Device scope, the VulkanMemoryModelDeviceScope "
+ "capability must be declared.");
+ nir_scope = NIR_SCOPE_DEVICE;
+ break;
+
+ case SpvScopeQueueFamily:
+ vtn_fail_if(!b->options->caps.vk_memory_model,
+ "To use Queue Family scope, the VulkanMemoryModel capability "
+ "must be declared.");
+ nir_scope = NIR_SCOPE_QUEUE_FAMILY;
+ break;
+
+ case SpvScopeWorkgroup:
+ nir_scope = NIR_SCOPE_WORKGROUP;
+ break;
+
+ case SpvScopeSubgroup:
+ nir_scope = NIR_SCOPE_SUBGROUP;
+ break;
+
+ case SpvScopeInvocation:
+ nir_scope = NIR_SCOPE_INVOCATION;
+ break;
+
+ default:
+ vtn_fail("Invalid memory scope");
+ }
+
+ nir_intrinsic_instr *intrin =
+ nir_intrinsic_instr_create(b->shader, nir_intrinsic_scoped_memory_barrier);
+ nir_intrinsic_set_memory_semantics(intrin, nir_semantics);
+
+ nir_intrinsic_set_memory_modes(intrin, modes);
+ nir_intrinsic_set_memory_scope(intrin, nir_scope);
+ nir_builder_instr_insert(&b->nb, &intrin->instr);
+}
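+
+/* For example, an OpMemoryBarrier with Workgroup scope and
+ * AcquireRelease | WorkgroupMemory semantics routed through this helper
+ * becomes a single scoped_memory_barrier intrinsic with
+ * NIR_MEMORY_ACQUIRE | NIR_MEMORY_RELEASE semantics, nir_var_mem_shared
+ * modes and NIR_SCOPE_WORKGROUP scope.
+ */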
+