} nir_memory_semantics;
 
 typedef enum {
+   NIR_SCOPE_NONE,
    NIR_SCOPE_INVOCATION,
    NIR_SCOPE_SUBGROUP,
    NIR_SCOPE_WORKGROUP,
     */
    NIR_INTRINSIC_MEMORY_SCOPE,
 
+   /**
+    * Value of nir_scope.  The execution scope of a scoped_barrier;
+    * NIR_SCOPE_NONE means the barrier has no control-barrier component.
+    */
+   NIR_INTRINSIC_EXECUTION_SCOPE,
+
    NIR_INTRINSIC_NUM_INDEX_FLAGS,
 
 } nir_intrinsic_index_flag;
 INTRINSIC_IDX_ACCESSORS(memory_semantics, MEMORY_SEMANTICS, nir_memory_semantics)
 INTRINSIC_IDX_ACCESSORS(memory_modes, MEMORY_MODES, nir_variable_mode)
 INTRINSIC_IDX_ACCESSORS(memory_scope, MEMORY_SCOPE, nir_scope)
+INTRINSIC_IDX_ACCESSORS(execution_scope, EXECUTION_SCOPE, nir_scope)
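
For reference, each INTRINSIC_IDX_ACCESSORS line above expands into a typed
getter/setter pair, which is how later hunks in this patch use the new index.
A sketch of the generated shape (not the exact macro expansion):

   static inline nir_scope
   nir_intrinsic_execution_scope(const nir_intrinsic_instr *instr);

   static inline void
   nir_intrinsic_set_execution_scope(nir_intrinsic_instr *instr, nir_scope val);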
 
 static inline void
 nir_intrinsic_set_align(nir_intrinsic_instr *intrin,
     * to imul with masked inputs and iadd */
    bool has_umad24;
 
-   /* Whether to generate only scoped_memory_barrier intrinsics instead of the
-    * set of memory barrier intrinsics based on GLSL.
+   /* Whether to generate only scoped_barrier intrinsics instead of the
+    * GLSL-style set of memory and control barrier intrinsics.
     */
-   bool use_scoped_memory_barrier;
+   bool use_scoped_barrier;
 
    /**
     * Is this the Intel vec4 backend?
 
    unreachable("bad compare func");
 }
 
+static inline void
+nir_scoped_barrier(nir_builder *b,
+                   nir_scope exec_scope,
+                   nir_scope mem_scope,
+                   nir_memory_semantics mem_semantics,
+                   nir_variable_mode mem_modes)
+{
+   nir_intrinsic_instr *intrin =
+      nir_intrinsic_instr_create(b->shader, nir_intrinsic_scoped_barrier);
+   nir_intrinsic_set_execution_scope(intrin, exec_scope);
+   nir_intrinsic_set_memory_scope(intrin, mem_scope);
+   nir_intrinsic_set_memory_semantics(intrin, mem_semantics);
+   nir_intrinsic_set_memory_modes(intrin, mem_modes);
+   nir_builder_instr_insert(b, &intrin->instr);
+}
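
As an illustration of the new helper (the nir_scoped_barrier call is real,
the wrapper function is hypothetical): a lowering pass can now emit the
equivalent of GLSL's barrier() -- a workgroup-scoped control barrier that
also orders shared memory -- in a single call:

   /* Hypothetical caller, assuming a nir_builder already pointing at the
    * right place in the shader. */
   static void
   emit_glsl_barrier(nir_builder *b)
   {
      nir_scoped_barrier(b, NIR_SCOPE_WORKGROUP, NIR_SCOPE_WORKGROUP,
                         NIR_MEMORY_ACQ_REL, nir_var_mem_shared);
   }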
+
 static inline void
 nir_scoped_memory_barrier(nir_builder *b,
                           nir_scope scope,
                           nir_memory_semantics semantics,
                           nir_variable_mode modes)
 {
-   nir_intrinsic_instr *intrin =
-      nir_intrinsic_instr_create(b->shader, nir_intrinsic_scoped_memory_barrier);
-   nir_intrinsic_set_memory_scope(intrin, scope);
-   nir_intrinsic_set_memory_semantics(intrin, semantics);
-   nir_intrinsic_set_memory_modes(intrin, modes);
-   nir_builder_instr_insert(b, &intrin->instr);
+   nir_scoped_barrier(b, NIR_SCOPE_NONE, scope, semantics, modes);
 }
 
 #endif /* NIR_BUILDER_H */
 
 MEMORY_MODES = "NIR_INTRINSIC_MEMORY_MODES"
 # Scope of a memory operation
 MEMORY_SCOPE = "NIR_INTRINSIC_MEMORY_SCOPE"
+# Scope of a control barrier
+EXECUTION_SCOPE = "NIR_INTRINSIC_EXECUTION_SCOPE"
 
 #
 # Possible flags:
 # intrinsic.
 barrier("memory_barrier")
 
-# Memory barrier with explicit scope.  Follows the semantics of SPIR-V
-# OpMemoryBarrier, used to implement Vulkan Memory Model.  Storage that the
-# barrierr applies is represented using NIR variable modes.
-intrinsic("scoped_memory_barrier",
-          indices=[MEMORY_SEMANTICS, MEMORY_MODES, MEMORY_SCOPE])
+# Control/Memory barrier with explicit scope.  Follows the semantics of SPIR-V
+# OpMemoryBarrier and OpControlBarrier, and is used to implement the Vulkan
+# Memory Model.  The storage that the barrier applies to is represented using
+# NIR variable modes.  For a pure OpMemoryBarrier, set EXECUTION_SCOPE to
+# NIR_SCOPE_NONE.
+intrinsic("scoped_barrier",
+          indices=[EXECUTION_SCOPE, MEMORY_SEMANTICS, MEMORY_MODES, MEMORY_SCOPE])
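
To make the intended mapping concrete (a sketch only; the real conversion
lives in spirv_to_nir.c and appears later in this patch -- b, exec_scope,
mem_scope, semantics and modes stand for values already translated from the
SPIR-V instruction):

   /* OpMemoryBarrier: no execution synchronization, memory ordering only. */
   nir_scoped_barrier(b, NIR_SCOPE_NONE, mem_scope, semantics, modes);

   /* OpControlBarrier: execution synchronization, plus whatever memory
    * ordering the instruction requested. */
   nir_scoped_barrier(b, exec_scope, mem_scope, semantics, modes);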
 
 # Shader clock intrinsic with semantics analogous to the clock2x32ARB()
 # GLSL intrinsic.
 
       state->image_barriers = true;
       break;
 
-   case nir_intrinsic_scoped_memory_barrier:
+   case nir_intrinsic_scoped_barrier:
       /* TODO: Could be more granular if we had nir_var_mem_image. */
       if (nir_intrinsic_memory_modes(instr) & (nir_var_mem_ubo |
                                                nir_var_mem_ssbo |
 
          }
 
          nir_intrinsic_instr *current = nir_instr_as_intrinsic(instr);
-         if (current->intrinsic != nir_intrinsic_scoped_memory_barrier) {
+         if (current->intrinsic != nir_intrinsic_scoped_barrier ||
+             nir_intrinsic_execution_scope(current) != NIR_SCOPE_NONE) {
             prev = NULL;
             continue;
          }
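
Concretely (an informal sketch of what the pass may fold): two back-to-back
pure memory barriers such as

   nir_scoped_barrier(b, NIR_SCOPE_NONE, NIR_SCOPE_DEVICE,
                      NIR_MEMORY_ACQ_REL, nir_var_mem_ssbo);
   nir_scoped_barrier(b, NIR_SCOPE_NONE, NIR_SCOPE_DEVICE,
                      NIR_MEMORY_ACQ_REL, nir_var_mem_shared);

can merge into a single barrier covering nir_var_mem_ssbo | nir_var_mem_shared,
while any barrier with a real execution scope resets the match (prev = NULL)
and is left alone.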
 
          combine_stores_with_modes(state, nir_var_shader_out);
          break;
 
-      case nir_intrinsic_scoped_memory_barrier:
+      case nir_intrinsic_scoped_barrier:
          if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_RELEASE) {
             combine_stores_with_modes(state,
                                       nir_intrinsic_memory_modes(intrin));
 
                               nir_var_mem_global;
             break;
 
-         case nir_intrinsic_scoped_memory_barrier:
+         case nir_intrinsic_scoped_barrier:
             if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_ACQUIRE)
                written->modes |= nir_intrinsic_memory_modes(intrin);
             break;
          apply_barrier_for_modes(copies, nir_var_shader_out);
          break;
 
-      case nir_intrinsic_scoped_memory_barrier:
+      case nir_intrinsic_scoped_barrier:
          if (debug) dump_instr(instr);
 
          if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_ACQUIRE)
 
          clear_unused_for_modes(&unused_writes, nir_var_shader_out);
          break;
 
-      case nir_intrinsic_scoped_memory_barrier: {
+      case nir_intrinsic_scoped_barrier: {
          if (nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_RELEASE) {
             clear_unused_for_modes(&unused_writes,
                                    nir_intrinsic_memory_modes(intrin));
 
       case nir_intrinsic_memory_barrier_shared:
          modes = nir_var_mem_shared;
          break;
-      case nir_intrinsic_scoped_memory_barrier:
+      case nir_intrinsic_scoped_barrier:
+         if (nir_intrinsic_memory_scope(intrin) == NIR_SCOPE_NONE)
+            break;
+
          modes = nir_intrinsic_memory_modes(intrin);
          acquire = nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_ACQUIRE;
          release = nir_intrinsic_memory_semantics(intrin) & NIR_MEMORY_RELEASE;
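
The practical effect of the early break above, informally: a pure control
barrier (mem_scope == NIR_SCOPE_NONE) skips the mode/semantics handling and
so no longer acts as a memory fence for the vectorizer, while a barrier that
does carry memory semantics, e.g.

   nir_scoped_barrier(b, NIR_SCOPE_WORKGROUP, NIR_SCOPE_WORKGROUP,
                      NIR_MEMORY_ACQ_REL, nir_var_mem_ssbo);

still keeps ssbo loads/stores from being combined across it.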
 
       [NIR_INTRINSIC_MEMORY_SEMANTICS] = "mem_semantics",
       [NIR_INTRINSIC_MEMORY_MODES] = "mem_modes",
       [NIR_INTRINSIC_MEMORY_SCOPE] = "mem_scope",
+      [NIR_INTRINSIC_EXECUTION_SCOPE] = "exec_scope",
    };
    for (unsigned idx = 1; idx < NIR_INTRINSIC_NUM_INDEX_FLAGS; idx++) {
       if (!info->index_map[idx])
          break;
       }
 
+      case NIR_INTRINSIC_EXECUTION_SCOPE:
       case NIR_INTRINSIC_MEMORY_SCOPE: {
-         fprintf(fp, " mem_scope=");
+         fprintf(fp, " %s=", index_name[idx]);
-         switch (nir_intrinsic_memory_scope(instr)) {
+         nir_scope scope = idx == NIR_INTRINSIC_MEMORY_SCOPE
+            ? nir_intrinsic_memory_scope(instr)
+            : nir_intrinsic_execution_scope(instr);
+         switch (scope) {
+         case NIR_SCOPE_NONE:         fprintf(fp, "NONE");         break;
          case NIR_SCOPE_DEVICE:       fprintf(fp, "DEVICE");       break;
          case NIR_SCOPE_QUEUE_FAMILY: fprintf(fp, "QUEUE_FAMILY"); break;
          case NIR_SCOPE_WORKGROUP:    fprintf(fp, "WORKGROUP");    break;
 
    if (nir_semantics == 0 || modes == 0)
       return;
 
-   nir_scope nir_scope = vtn_scope_to_nir_scope(b, scope);
-   nir_intrinsic_instr *intrin =
-      nir_intrinsic_instr_create(b->shader, nir_intrinsic_scoped_memory_barrier);
-   nir_intrinsic_set_memory_semantics(intrin, nir_semantics);
-
-   nir_intrinsic_set_memory_modes(intrin, modes);
-   nir_intrinsic_set_memory_scope(intrin, nir_scope);
-   nir_builder_instr_insert(&b->nb, &intrin->instr);
+   nir_scope nir_mem_scope = vtn_scope_to_nir_scope(b, scope);
+   nir_scoped_barrier(&b->nb, NIR_SCOPE_NONE, nir_mem_scope, nir_semantics,
+                      modes);
 }
 
 struct vtn_ssa_value *
 vtn_emit_memory_barrier(struct vtn_builder *b, SpvScope scope,
                         SpvMemorySemanticsMask semantics)
 {
-   if (b->shader->options->use_scoped_memory_barrier) {
+   if (b->shader->options->use_scoped_barrier) {
       vtn_emit_scoped_memory_barrier(b, scope, semantics);
       return;
    }
 
    .use_interpolated_input_intrinsics = true,                                 \
    .vertex_id_zero_based = true,                                              \
    .lower_base_vertex = true,                                                 \
-   .use_scoped_memory_barrier = true,                                         \
+   .use_scoped_barrier = true,                                                \
    .support_8bit_alu = true,                                                  \
    .support_16bit_alu = true
 
 
       break;
    }
 
-   case nir_intrinsic_scoped_memory_barrier:
+   case nir_intrinsic_scoped_barrier:
+      assert(nir_intrinsic_execution_scope(instr) == NIR_SCOPE_NONE);
+      /* Fall through. */
    case nir_intrinsic_group_memory_barrier:
    case nir_intrinsic_memory_barrier_shared:
    case nir_intrinsic_memory_barrier_buffer:
          SHADER_OPCODE_INTERLOCK : SHADER_OPCODE_MEMORY_FENCE;
 
       switch (instr->intrinsic) {
-      case nir_intrinsic_scoped_memory_barrier: {
+      case nir_intrinsic_scoped_barrier: {
          nir_variable_mode modes = nir_intrinsic_memory_modes(instr);
          l3_fence = modes & (nir_var_shader_out |
                              nir_var_mem_ssbo |
 
       break;
    }
 
-   case nir_intrinsic_memory_barrier:
-   case nir_intrinsic_scoped_memory_barrier: {
+   case nir_intrinsic_scoped_barrier:
+      assert(nir_intrinsic_execution_scope(instr) == NIR_SCOPE_NONE);
+      /* Fall through. */
+   case nir_intrinsic_memory_barrier: {
       const vec4_builder bld =
          vec4_builder(this).at_end().annotate(current_annotation, base_ir);
       const dst_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD);