+/* Ordering/visibility flags attached to memory operations and barriers.
+ * Values are bit flags and may be OR'd together. */
+enum memory_semantics : uint8_t {
+ semantic_none = 0x0,
+ /* for loads: don't move any access after this load to before this load (even other loads)
+ * for barriers: don't move any access after the barrier to before any
+ * atomics/control_barriers/sendmsg_gs_done before the barrier */
+ semantic_acquire = 0x1,
+ /* for stores: don't move any access before this store to after this store
+ * for barriers: don't move any access before the barrier to after any
+ * atomics/control_barriers/sendmsg_gs_done after the barrier */
+ semantic_release = 0x2,
+
+ /* the rest are for loads/stores/atomics only */
+ /* cannot be DCE'd or CSE'd */
+ semantic_volatile = 0x4,
+ /* does not interact with barriers and assumes this lane is the only lane
+ * accessing this memory */
+ semantic_private = 0x8,
+ /* this operation can be reordered around operations of the same storage. says nothing about barriers */
+ semantic_can_reorder = 0x10,
+ /* this is an atomic instruction (may only read or write memory) */
+ semantic_atomic = 0x20,
+ /* this instruction both reads and writes memory */
+ semantic_rmw = 0x40,
+
+ /* convenience combinations of the flags above */
+ semantic_acqrel = semantic_acquire | semantic_release,
+ semantic_atomicrmw = semantic_volatile | semantic_atomic | semantic_rmw,
+};
+
+/* Set of invocations a memory operation or barrier synchronizes with,
+ * ordered from narrowest (a single invocation) to widest (the whole
+ * device). Numeric ordering is relied upon for scope comparisons. */
+enum sync_scope : uint8_t {
+ scope_invocation = 0,
+ scope_subgroup = 1,
+ scope_workgroup = 2,
+ scope_queuefamily = 3,
+ scope_device = 4,
+};
+
+/* Compact (3-byte) description of how a memory access synchronizes:
+ * which storage classes it touches, its ordering semantics and scope. */
+struct memory_sync_info {
+ /* Default: no storage, no ordering requirements, invocation scope. */
+ memory_sync_info() : storage(storage_none), semantics(semantic_none), scope(scope_invocation) {}
+ memory_sync_info(int storage, int semantics=0, sync_scope scope=scope_invocation)
+ : storage((storage_class)storage), semantics((memory_semantics)semantics), scope(scope) {}
+
+ /* 8-bit bitfields keep the struct at exactly three bytes (see the
+ * static_assert below). */
+ storage_class storage:8;
+ memory_semantics semantics:8;
+ sync_scope scope:8;
+
+ bool operator == (const memory_sync_info& rhs) const {
+ return scope == rhs.scope &&
+ storage == rhs.storage &&
+ semantics == rhs.semantics;
+ }
+
+ /* Whether this access may be moved past other accesses of the same
+ * storage. */
+ bool can_reorder() const {
+ /* acquire/release ordering pins the access in place */
+ if (semantics & semantic_acqrel)
+ return false;
+ /* volatile accesses must never be reordered */
+ if (semantics & semantic_volatile)
+ return false;
+ /* an empty storage set (e.g. a zero-initialized memory_sync_info)
+ * is always reorderable; otherwise it must be opted in explicitly */
+ return !storage || (semantics & semantic_can_reorder);
+ }
+};
+static_assert(sizeof(memory_sync_info) == 3, "Unexpected padding");
+