spirv: parse unstructured CFG
author    Karol Herbst <kherbst@redhat.com>
          Fri, 15 May 2020 10:47:53 +0000 (12:47 +0200)
committer Marge Bot <eric+marge@anholt.net>
          Fri, 14 Aug 2020 20:35:37 +0000 (20:35 +0000)
v2 (Boris Brezillon): handle functions with return values
v3: call structurizer
v4: entire rewrite
v5: fix handling of already visited default branches
v6 (Jason Ekstrand): Stop walking hash tables

Signed-off-by: Karol Herbst <kherbst@redhat.com>
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Tested-by: Jesse Natalie <jenatali@microsoft.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/2401>

src/compiler/nir/nir.c
src/compiler/spirv/spirv_to_nir.c
src/compiler/spirv/vtn_cfg.c
src/compiler/spirv/vtn_private.h
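
How the pieces fit together: for OpenCL kernels (MESA_SHADER_KERNEL), vtn no
longer tries to recover structured control flow from SPIR-V merge
instructions. It emits one nir_block per SPIR-V block, connected only by
goto/goto_if terminators, and then runs nir_lower_goto_ifs() so everything
downstream of spirv_to_nir() still sees structured NIR. A minimal sketch of
the two terminators an unstructured block can end with, using only builder
helpers that appear in the diff below (the function name is illustrative, not
vtn code):

   #include "nir_builder.h"

   /* End the block under the builder cursor the way the unstructured
    * emitter does: an unconditional jump (OpBranch) or a two-way
    * conditional jump (OpBranchConditional).  nir_lower_goto_ifs()
    * later rebuilds ifs and loops from exactly these two terminators. */
   static void
   end_block_sketch(nir_builder *b, nir_ssa_def *cond,
                    nir_block *then_blk, nir_block *else_blk)
   {
      if (then_blk == else_blk)
         nir_goto(b, then_blk);
      else
         nir_goto_if(b, then_blk, nir_src_for_ssa(cond), else_blk);
   }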

diff --git a/src/compiler/nir/nir.c b/src/compiler/nir/nir.c
index d1db749423eb38cbff82b03afd10d96c71a83455..d8ff265d9f3f8440e7033dd4fa6c714d8be0152f 100644
@@ -732,8 +732,6 @@ reduce_cursor(nir_cursor cursor)
 {
    switch (cursor.option) {
    case nir_cursor_before_block:
-      assert(nir_cf_node_prev(&cursor.block->cf_node) == NULL ||
-             nir_cf_node_prev(&cursor.block->cf_node)->type != nir_cf_node_block);
       if (exec_list_is_empty(&cursor.block->instr_list)) {
          /* Empty block.  After is as good as before. */
          cursor.option = nir_cursor_after_block;
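
The dropped assert encoded a structured-NIR invariant: in a structured CF
list, blocks alternate with ifs and loops, so a block is never directly
preceded by another block. The unstructured emitter added below appends bare
blocks back to back into impl->body, so the invariant no longer holds and the
assert has to go; the empty-block handling underneath it remains valid either
way.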
diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c
index 2ef9b8c3673c9efbe9ce1ca577f83500b5fcb677..747d1a0cdcce5e929984a34ce794f406e718ce1b 100644
@@ -5594,6 +5594,9 @@ spirv_to_nir(const uint32_t *words, size_t word_count,
    if (entry_point->num_params && b->shader->info.stage == MESA_SHADER_KERNEL)
       entry_point = vtn_emit_kernel_entry_point_wrapper(b, entry_point);
 
+   /* structurize the CFG */
+   nir_lower_goto_ifs(b->shader);
+
    entry_point->is_entrypoint = true;
 
    /* When multiple shader stages exist in the same SPIR-V module, we
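
Note on placement: the structurizer runs once per shader, right after the
kernel entry-point wrapper is emitted, so callers of spirv_to_nir() receive
structured NIR regardless of which path built the CFG. For graphics stages
nir_lower_goto_ifs() is effectively a no-op, since those paths never emit
gotos.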
diff --git a/src/compiler/spirv/vtn_cfg.c b/src/compiler/spirv/vtn_cfg.c
index 023b2decf7fec41039c0f1f677edd384a90fbbdc..9dc2a842da65ed859c52fe3c00b74444b8cb1aaf 100644
@@ -22,6 +22,7 @@
  */
 
 #include "vtn_private.h"
+#include "spirv_info.h"
 #include "nir/nir_vla.h"
 
 static struct vtn_block *
@@ -817,6 +818,9 @@ vtn_build_cfg(struct vtn_builder *b, const uint32_t *words, const uint32_t *end)
    vtn_foreach_instruction(b, words, end,
                            vtn_cfg_handle_prepass_instruction);
 
+   if (b->shader->info.stage == MESA_SHADER_KERNEL)
+      return;
+
    vtn_foreach_cf_node(func_node, &b->functions) {
       struct vtn_function *func = vtn_cf_node_as_function(func_node);
 
@@ -1186,6 +1190,141 @@ vtn_emit_cf_list_structured(struct vtn_builder *b, struct list_head *cf_list,
    }
 }
 
+static struct nir_block *
+vtn_new_unstructured_block(struct vtn_builder *b, struct vtn_function *func)
+{
+   struct nir_block *n = nir_block_create(b->shader);
+   exec_list_push_tail(&func->impl->body, &n->cf_node.node);
+   n->cf_node.parent = &func->impl->cf_node;
+   return n;
+}
+
+static void
+vtn_add_unstructured_block(struct vtn_builder *b,
+                           struct vtn_function *func,
+                           struct list_head *work_list,
+                           struct vtn_block *block)
+{
+   if (!block->block) {
+      block->block = vtn_new_unstructured_block(b, func);
+      list_addtail(&block->node.link, work_list);
+   }
+}
+
+static void
+vtn_emit_cf_func_unstructured(struct vtn_builder *b, struct vtn_function *func,
+                              vtn_instruction_handler handler)
+{
+   struct list_head work_list;
+   list_inithead(&work_list);
+
+   func->start_block->block = nir_start_block(func->impl);
+   list_addtail(&func->start_block->node.link, &work_list);
+   while (!list_is_empty(&work_list)) {
+      struct vtn_block *block =
+         list_first_entry(&work_list, struct vtn_block, node.link);
+      list_del(&block->node.link);
+
+      vtn_assert(block->block);
+
+      const uint32_t *block_start = block->label;
+      const uint32_t *block_end = block->branch;
+
+      b->nb.cursor = nir_after_block(block->block);
+      block_start = vtn_foreach_instruction(b, block_start, block_end,
+                                            vtn_handle_phis_first_pass);
+      vtn_foreach_instruction(b, block_start, block_end, handler);
+      block->end_nop = nir_intrinsic_instr_create(b->nb.shader,
+                                                  nir_intrinsic_nop);
+      nir_builder_instr_insert(&b->nb, &block->end_nop->instr);
+
+      SpvOp op = *block_end & SpvOpCodeMask;
+      switch (op) {
+      case SpvOpBranch: {
+         struct vtn_block *branch_block = vtn_block(b, block->branch[1]);
+         vtn_add_unstructured_block(b, func, &work_list, branch_block);
+         nir_goto(&b->nb, branch_block->block);
+         break;
+      }
+
+      case SpvOpBranchConditional: {
+         nir_ssa_def *cond = vtn_ssa_value(b, block->branch[1])->def;
+         struct vtn_block *then_block = vtn_block(b, block->branch[2]);
+         struct vtn_block *else_block = vtn_block(b, block->branch[3]);
+
+         vtn_add_unstructured_block(b, func, &work_list, then_block);
+         if (then_block == else_block) {
+            nir_goto(&b->nb, then_block->block);
+         } else {
+            vtn_add_unstructured_block(b, func, &work_list, else_block);
+            nir_goto_if(&b->nb, then_block->block, nir_src_for_ssa(cond),
+                        else_block->block);
+         }
+
+         break;
+      }
+
+      case SpvOpSwitch: {
+         struct list_head cases;
+         list_inithead(&cases);
+         vtn_parse_switch(b, NULL, block->branch, &cases);
+
+         nir_ssa_def *sel = vtn_get_nir_ssa(b, block->branch[1]);
+
+         struct vtn_case *def = NULL;
+         vtn_foreach_cf_node(case_node, &cases) {
+            struct vtn_case *cse = vtn_cf_node_as_case(case_node);
+            if (cse->is_default) {
+               assert(def == NULL);
+               def = cse;
+               continue;
+            }
+
+            nir_ssa_def *cond = nir_imm_false(&b->nb);
+            util_dynarray_foreach(&cse->values, uint64_t, val) {
+               nir_ssa_def *imm = nir_imm_intN_t(&b->nb, *val, sel->bit_size);
+               cond = nir_ior(&b->nb, cond, nir_ieq(&b->nb, sel, imm));
+            }
+
+            /* block for the next check */
+            nir_block *e = vtn_new_unstructured_block(b, func);
+            vtn_add_unstructured_block(b, func, &work_list, cse->block);
+
+            /* add branching */
+            nir_goto_if(&b->nb, cse->block->block, nir_src_for_ssa(cond), e);
+            b->nb.cursor = nir_after_block(e);
+         }
+
+         vtn_assert(def != NULL);
+         vtn_add_unstructured_block(b, func, &work_list, def->block);
+
+         /* now that all cases are handled, branch into the default block */
+         nir_goto(&b->nb, def->block->block);
+         break;
+      }
+
+      case SpvOpKill: {
+         nir_intrinsic_instr *discard =
+            nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_discard);
+         nir_builder_instr_insert(&b->nb, &discard->instr);
+         nir_goto(&b->nb, b->func->impl->end_block);
+         break;
+      }
+
+      case SpvOpUnreachable:
+      case SpvOpReturn:
+      case SpvOpReturnValue: {
+         vtn_emit_ret_store(b, block);
+         nir_goto(&b->nb, b->func->impl->end_block);
+         break;
+      }
+
+      default:
+         vtn_fail("Unhandled opcode %s", spirv_op_to_string(op));
+      }
+   }
+}
+
 void
 vtn_function_emit(struct vtn_builder *b, struct vtn_function *func,
                   vtn_instruction_handler instruction_handler)
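
The OpSwitch case above is the subtle part: unstructured NIR has no n-way
terminator, so a switch on sel becomes a chain of two-way checks. Each
non-default case gets a condition (one ieq per literal, ORed together for
multi-literal cases), a goto_if into the case block on match, and a fresh
block holding the next check on mismatch; after the last case the chain ends
with an unconditional goto into the default block. A self-contained sketch of
the resulting shape for "switch (sel) { case 1: A; case 2: B; default: D; }",
built from the same helpers the diff uses (function names are illustrative,
not vtn code):

   #include "nir_builder.h"

   /* Append a bare block to the function body, mirroring
    * vtn_new_unstructured_block() above. */
   static nir_block *
   new_body_block(nir_builder *b, nir_function_impl *impl)
   {
      nir_block *n = nir_block_create(b->shader);
      exec_list_push_tail(&impl->body, &n->cf_node.node);
      n->cf_node.parent = &impl->cf_node;
      return n;
   }

   static void
   switch_as_goto_chain(nir_builder *b, nir_function_impl *impl,
                        nir_ssa_def *sel, nir_block *case_a,
                        nir_block *case_b, nir_block *def_blk)
   {
      /* case 1: a match jumps to A, a mismatch falls through to the
       * block holding the next check */
      nir_block *next = new_body_block(b, impl);
      nir_ssa_def *is_a = nir_ieq(b, sel, nir_imm_intN_t(b, 1, sel->bit_size));
      nir_goto_if(b, case_a, nir_src_for_ssa(is_a), next);

      /* case 2, emitted into the fall-through block */
      b->cursor = nir_after_block(next);
      nir_block *tail = new_body_block(b, impl);
      nir_ssa_def *is_b = nir_ieq(b, sel, nir_imm_intN_t(b, 2, sel->bit_size));
      nir_goto_if(b, case_b, nir_src_for_ssa(is_b), tail);

      /* every case has been tested: fall into the default block */
      b->cursor = nir_after_block(tail);
      nir_goto(b, def_blk);
   }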
@@ -1197,7 +1336,13 @@ vtn_function_emit(struct vtn_builder *b, struct vtn_function *func,
    b->has_loop_continue = false;
    b->phi_table = _mesa_pointer_hash_table_create(b);
 
-   vtn_emit_cf_list_structured(b, &func->body, NULL, NULL, instruction_handler);
+   if (b->shader->info.stage == MESA_SHADER_KERNEL) {
+      b->func->impl->structured = false;
+      vtn_emit_cf_func_unstructured(b, func, instruction_handler);
+   } else {
+      vtn_emit_cf_list_structured(b, &func->body, NULL, NULL,
+                                  instruction_handler);
+   }
 
    vtn_foreach_instruction(b, func->start_block->label, func->end,
                            vtn_handle_phi_second_pass);
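
Flipping impl->structured to false before emitting anything is what licenses
the goto terminators: goto jumps are only legal in an impl marked
unstructured, and nir_lower_goto_ifs() restores the flag once it has rebuilt
ifs and loops. Graphics stages take the structured path unchanged.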
diff --git a/src/compiler/spirv/vtn_private.h b/src/compiler/spirv/vtn_private.h
index 8ee4f7be70b8d7ca4788e56e273270988f815bda..6fc4f2e4f3eab67d55cb3be54351a79191e86a4f 100644
@@ -242,6 +242,9 @@ struct vtn_block {
 
    /** Every block ends in a nop intrinsic so that we can find it again */
    nir_intrinsic_instr *end_nop;
+
+   /** nir_block attached while emitting an unstructured CFG */
+   struct nir_block *block;
 };
 
 struct vtn_function {
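
The new block pointer doubles as the worklist's visited marker:
vtn_add_unstructured_block() only creates the nir_block and enqueues the
vtn_block while the pointer is still NULL, so each SPIR-V block is emitted
exactly once no matter how many predecessors branch to it.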