+ VINTRP = 1 << 12,
+ DPP = 1 << 13,
+ SDWA = 1 << 14,
+};
+
+/* Bitmask classifying which kind of memory an access touches; used to decide
+ * which accesses a barrier or reordering pass must keep ordered. */
+enum storage_class : uint8_t {
+ storage_none = 0x0, /* no synchronization and can be reordered around aliasing stores */
+ storage_buffer = 0x1, /* SSBOs and global memory */
+ storage_atomic_counter = 0x2, /* not used for Vulkan */
+ storage_image = 0x4,
+ storage_shared = 0x8, /* or TCS output */
+ storage_vmem_output = 0x10, /* GS or TCS output stores using VMEM */
+ storage_scratch = 0x20,
+ storage_vgpr_spill = 0x40,
+ /* NOTE(review): only 7 flag bits (0x1..0x40) are defined above — confirm
+  * whether 8 is intentional (e.g. reserved bit or array sizing). */
+ storage_count = 8,
+};
+
+/* Bitmask describing the ordering and aliasing guarantees of a memory access
+ * or barrier; acquire/release control what may be moved across it. */
+enum memory_semantics : uint8_t {
+ semantic_none = 0x0,
+ /* for loads: don't move any access after this load to before this load (even other loads)
+ * for barriers: don't move any access after the barrier to before any
+ * atomics/control_barriers/sendmsg_gs_done before the barrier */
+ semantic_acquire = 0x1,
+ /* for stores: don't move any access before this store to after this store
+ * for barriers: don't move any access before the barrier to after any
+ * atomics/control_barriers/sendmsg_gs_done after the barrier */
+ semantic_release = 0x2,
+
+ /* the rest are for load/stores/atomics only */
+ /* cannot be DCE'd or CSE'd */
+ semantic_volatile = 0x4,
+ /* does not interact with barriers and assumes this lane is the only lane
+ * accessing this memory */
+ semantic_private = 0x8,
+ /* this operation can be reordered around operations of the same storage. says nothing about barriers */
+ semantic_can_reorder = 0x10,
+ /* this is an atomic instruction (may only read or write memory) */
+ semantic_atomic = 0x20,
+ /* this instruction both reads and writes memory */
+ semantic_rmw = 0x40,
+
+ /* convenience combination: full acquire-release ordering */
+ semantic_acqrel = semantic_acquire | semantic_release,
+ /* convenience combination: a volatile atomic read-modify-write */
+ semantic_atomicrmw = semantic_volatile | semantic_atomic | semantic_rmw,
+};
+
+enum sync_scope : uint8_t {
+ scope_invocation = 0,
+ scope_subgroup = 1,
+ scope_workgroup = 2,
+ scope_queuefamily = 3,
+ scope_device = 4,