nir: add nir_var_shader_storage
[mesa.git] / src / glsl / nir / nir_from_ssa.c
index b644eb506a9698468d087db02b82245b93544615..1fd8b24d33dd07e0e435b9a6c2c014d18aca111b 100644 (file)
@@ -26,6 +26,7 @@
  */
 
 #include "nir.h"
+#include "nir_vla.h"
 
 /*
  * This file implements an out-of-SSA pass as described in "Revisiting
@@ -36,7 +37,7 @@
 struct from_ssa_state {
    void *mem_ctx;
    void *dead_ctx;
-   struct hash_table *ssa_table;
+   bool phi_webs_only;
    struct hash_table *merge_node_table;
    nir_instr *instr;
    nir_function_impl *impl;
@@ -54,13 +55,8 @@ ssa_def_dominates(nir_ssa_def *a, nir_ssa_def *b)
    } else if (a->parent_instr->block == b->parent_instr->block) {
       return a->live_index <= b->live_index;
    } else {
-      nir_block *block = b->parent_instr->block;
-      while (block->imm_dom != NULL) {
-         if (block->imm_dom == a->parent_instr->block)
-            return true;
-         block = block->imm_dom;
-      }
-      return false;
+      return nir_block_dominates(a->parent_instr->block,
+                                 b->parent_instr->block);
    }
 }
 
@@ -186,7 +182,7 @@ merge_merge_sets(merge_set *a, merge_set *b)
 static bool
 merge_sets_interfere(merge_set *a, merge_set *b)
 {
-   merge_node *dom[a->size + b->size];
+   NIR_VLA(merge_node *, dom, a->size + b->size);
    int dom_idx = -1;
 
    struct exec_node *an = exec_list_get_head(&a->nodes);
@@ -227,48 +223,90 @@ merge_sets_interfere(merge_set *a, merge_set *b)
    return false;
 }
 
-static nir_parallel_copy_instr *
-block_get_parallel_copy_at_end(nir_block *block, void *mem_ctx)
+static bool
+add_parallel_copy_to_end_of_block(nir_block *block, void *void_state)
 {
-   nir_instr *last_instr = nir_block_last_instr(block);
+   struct from_ssa_state *state = void_state;
 
-   /* First we try and find a parallel copy if it already exists.  If the
-    * last instruction is a jump, it will be right before the jump;
-    * otherwise, it will be the last instruction.
-    */
-   nir_instr *pcopy_instr;
-   if (last_instr != NULL && last_instr->type == nir_instr_type_jump)
-      pcopy_instr = nir_instr_prev(last_instr);
-   else
-      pcopy_instr = last_instr;
+   bool need_end_copy = false;
+   if (block->successors[0]) {
+      nir_instr *instr = nir_block_first_instr(block->successors[0]);
+      if (instr && instr->type == nir_instr_type_phi)
+         need_end_copy = true;
+   }
 
-   if (pcopy_instr != NULL &&
-       pcopy_instr->type == nir_instr_type_parallel_copy) {
-      /* A parallel copy already exists. */
-      nir_parallel_copy_instr *pcopy = nir_instr_as_parallel_copy(pcopy_instr);
+   if (block->successors[1]) {
+      nir_instr *instr = nir_block_first_instr(block->successors[1]);
+      if (instr && instr->type == nir_instr_type_phi)
+         need_end_copy = true;
+   }
 
-      /* This parallel copy may be the copy for the beginning of some
-       * block, so we need to check for that before we return it.
+   if (need_end_copy) {
+      /* If one of our successors has at least one phi node, we need to
+       * create a parallel copy at the end of the block but before the jump
+       * (if there is one).
        */
-      if (pcopy->at_end)
-         return pcopy;
+      nir_parallel_copy_instr *pcopy =
+         nir_parallel_copy_instr_create(state->dead_ctx);
+
+      nir_instr *last_instr = nir_block_last_instr(block);
+      if (last_instr && last_instr->type == nir_instr_type_jump) {
+         nir_instr_insert_before(last_instr, &pcopy->instr);
+      } else {
+         nir_instr_insert_after_block(block, &pcopy->instr);
+      }
    }
 
-   /* At this point, we haven't found a suitable parallel copy, so we
-    * have to create one.
-    */
-   nir_parallel_copy_instr *pcopy = nir_parallel_copy_instr_create(mem_ctx);
-   pcopy->at_end = true;
+   return true;
+}
 
-   if (last_instr && last_instr->type == nir_instr_type_jump) {
-      nir_instr_insert_before(last_instr, &pcopy->instr);
-   } else {
-      nir_instr_insert_after_block(block, &pcopy->instr);
-   }
+static nir_parallel_copy_instr *
+get_parallel_copy_at_end_of_block(nir_block *block)
+{
+   nir_instr *last_instr = nir_block_last_instr(block);
+   if (last_instr == NULL)
+      return NULL;
 
-   return pcopy;
+   /* The last instruction may be a jump in which case the parallel copy is
+    * right before it.
+    */
+   if (last_instr->type == nir_instr_type_jump)
+      last_instr = nir_instr_prev(last_instr);
+
+   if (last_instr && last_instr->type == nir_instr_type_parallel_copy)
+      return nir_instr_as_parallel_copy(last_instr);
+   else
+      return NULL;
 }
 
+/** Isolate phi nodes with parallel copies
+ *
+ * In order to solve the dependency problems with the sources and
+ * destinations of phi nodes, we first isolate them by adding parallel
+ * copies to the beginnings and ends of basic blocks.  For every block with
+ * phi nodes, we add a parallel copy immediately following the last phi
+ * node that copies the destinations of all of the phi nodes to new SSA
+ * values.  We also add a parallel copy to the end of every block that has
+ * a successor with phi nodes that, for each phi node in each successor,
+ * copies the corresponding source of the phi node and adjusts the phi to
+ * use the destination of the parallel copy.
+ *
+ * In SSA form, each value has exactly one definition.  What this does is
+ * ensure that each value used in a phi also has exactly one use.  The
+ * destinations of phis are only used by the parallel copy immediately
+ * following the phi nodes.  Thanks to the parallel copy at the end of
+ * the predecessor block, the sources of phi nodes are the only use of
+ * that value.  This allows us to immediately assign all the sources and
+ * destinations of any given phi node to the same register without worrying
+ * about interference at all.  We do coalescing to get rid of the parallel
+ * copies where possible.
+ *
+ * Before this pass can be run, we have to iterate over the blocks with
+ * add_parallel_copy_to_end_of_block to ensure that the parallel copies at
+ * the ends of blocks exist.  We can create the ones at the beginnings as
+ * we go, but the ones at the ends of blocks need to be created ahead of
+ * time because of potential back-edges in the CFG.
+ */
 static bool
 isolate_phi_nodes_block(nir_block *block, void *void_state)
 {
@@ -301,58 +339,36 @@ isolate_phi_nodes_block(nir_block *block, void *void_state)
 
       nir_phi_instr *phi = nir_instr_as_phi(instr);
       assert(phi->dest.is_ssa);
-      foreach_list_typed(nir_phi_src, src, node, &phi->srcs) {
+      nir_foreach_phi_src(phi, src) {
          nir_parallel_copy_instr *pcopy =
-            block_get_parallel_copy_at_end(src->pred, state->dead_ctx);
-
-         nir_parallel_copy_copy *copy = ralloc(state->dead_ctx,
-                                               nir_parallel_copy_copy);
-         exec_list_push_tail(&pcopy->copies, &copy->node);
-
-         copy->src = nir_src_copy(src->src, state->dead_ctx);
-         _mesa_set_add(src->src.ssa->uses,
-                       _mesa_hash_pointer(&pcopy->instr), &pcopy->instr);
-
-         copy->dest.is_ssa = true;
-         nir_ssa_def_init(&pcopy->instr, &copy->dest.ssa,
-                          phi->dest.ssa.num_components, src->src.ssa->name);
-
-         struct set_entry *entry = _mesa_set_search(src->src.ssa->uses,
-                                                    _mesa_hash_pointer(instr),
-                                                    instr);
-         if (entry)
-            /* It is possible that a phi node can use the same source twice
-             * but for different basic blocks.  If that happens, entry will
-             * be NULL because we already deleted it.  This is safe
-             * because, by the time the loop is done, we will have deleted
-             * all of the sources of the phi from their respective use sets
-             * and moved them to the parallel copy definitions.
-             */
-            _mesa_set_remove(src->src.ssa->uses, entry);
+            get_parallel_copy_at_end_of_block(src->pred);
+         assert(pcopy);
+
+         nir_parallel_copy_entry *entry = rzalloc(state->dead_ctx,
+                                                  nir_parallel_copy_entry);
+         nir_ssa_dest_init(&pcopy->instr, &entry->dest,
+                           phi->dest.ssa.num_components, src->src.ssa->name);
+         exec_list_push_tail(&pcopy->entries, &entry->node);
+
+         assert(src->src.is_ssa);
+         nir_instr_rewrite_src(&pcopy->instr, &entry->src, src->src);
 
-         src->src.ssa = &copy->dest.ssa;
-         _mesa_set_add(copy->dest.ssa.uses, _mesa_hash_pointer(instr), instr);
+         nir_instr_rewrite_src(&phi->instr, &src->src,
+                               nir_src_for_ssa(&entry->dest.ssa));
       }
 
-      nir_parallel_copy_copy *copy = ralloc(state->dead_ctx,
-                                            nir_parallel_copy_copy);
-      exec_list_push_tail(&block_pcopy->copies, &copy->node);
-
-      copy->dest.is_ssa = true;
-      nir_ssa_def_init(&block_pcopy->instr, &copy->dest.ssa,
-                       phi->dest.ssa.num_components, phi->dest.ssa.name);
-
-      nir_src copy_dest_src = {
-         .ssa = &copy->dest.ssa,
-         .is_ssa = true,
-      };
-      nir_ssa_def_rewrite_uses(&phi->dest.ssa, copy_dest_src, state->mem_ctx);
-
-      copy->src.is_ssa = true;
-      copy->src.ssa = &phi->dest.ssa;
-      _mesa_set_add(phi->dest.ssa.uses,
-                    _mesa_hash_pointer(&block_pcopy->instr),
-                    &block_pcopy->instr);
+      nir_parallel_copy_entry *entry = rzalloc(state->dead_ctx,
+                                               nir_parallel_copy_entry);
+      nir_ssa_dest_init(&block_pcopy->instr, &entry->dest,
+                        phi->dest.ssa.num_components, phi->dest.ssa.name);
+      exec_list_push_tail(&block_pcopy->entries, &entry->node);
+
+      nir_ssa_def_rewrite_uses(&phi->dest.ssa,
+                               nir_src_for_ssa(&entry->dest.ssa),
+                               state->mem_ctx);
+
+      nir_instr_rewrite_src(&block_pcopy->instr, &entry->src,
+                            nir_src_for_ssa(&phi->dest.ssa));
    }
 
    return true;
@@ -373,7 +389,7 @@ coalesce_phi_nodes_block(nir_block *block, void *void_state)
       assert(phi->dest.is_ssa);
       merge_node *dest_node = get_merge_node(&phi->dest.ssa, state);
 
-      foreach_list_typed(nir_phi_src, src, node, &phi->srcs) {
+      nir_foreach_phi_src(phi, src) {
          assert(src->src.is_ssa);
          merge_node *src_node = get_merge_node(src->src.ssa, state);
          if (src_node->set != dest_node->set)
@@ -385,25 +401,25 @@ coalesce_phi_nodes_block(nir_block *block, void *void_state)
 }
 
 static void
-agressive_coalesce_parallel_copy(nir_parallel_copy_instr *pcopy,
+aggressive_coalesce_parallel_copy(nir_parallel_copy_instr *pcopy,
                                  struct from_ssa_state *state)
 {
-   foreach_list_typed_safe(nir_parallel_copy_copy, copy, node, &pcopy->copies) {
-      if (!copy->src.is_ssa)
+   nir_foreach_parallel_copy_entry(pcopy, entry) {
+      if (!entry->src.is_ssa)
          continue;
 
       /* Since load_const instructions are SSA only, we can't replace their
        * destinations with registers and, therefore, can't coalesce them.
        */
-      if (copy->src.ssa->parent_instr->type == nir_instr_type_load_const)
+      if (entry->src.ssa->parent_instr->type == nir_instr_type_load_const)
          continue;
 
       /* Don't try and coalesce these */
-      if (copy->dest.ssa.num_components != copy->src.ssa->num_components)
+      if (entry->dest.ssa.num_components != entry->src.ssa->num_components)
          continue;
 
-      merge_node *src_node = get_merge_node(copy->src.ssa, state);
-      merge_node *dest_node = get_merge_node(&copy->dest.ssa, state);
+      merge_node *src_node = get_merge_node(entry->src.ssa, state);
+      merge_node *dest_node = get_merge_node(&entry->dest.ssa, state);
 
       if (src_node->set == dest_node->set)
          continue;
@@ -414,43 +430,44 @@ agressive_coalesce_parallel_copy(nir_parallel_copy_instr *pcopy,
 }
 
 static bool
-agressive_coalesce_block(nir_block *block, void *void_state)
+aggressive_coalesce_block(nir_block *block, void *void_state)
 {
    struct from_ssa_state *state = void_state;
 
+   nir_parallel_copy_instr *start_pcopy = NULL;
    nir_foreach_instr(block, instr) {
       /* Phi nodes only ever come at the start of a block */
       if (instr->type != nir_instr_type_phi) {
          if (instr->type != nir_instr_type_parallel_copy)
             break; /* The parallel copy must be right after the phis */
 
-         nir_parallel_copy_instr *pcopy = nir_instr_as_parallel_copy(instr);
+         start_pcopy = nir_instr_as_parallel_copy(instr);
 
-         agressive_coalesce_parallel_copy(pcopy, state);
-
-         if (pcopy->at_end)
-            return true;
+         aggressive_coalesce_parallel_copy(start_pcopy, state);
 
          break;
       }
    }
 
-   nir_instr *last_instr = nir_block_last_instr(block);
-   if (last_instr && last_instr->type == nir_instr_type_parallel_copy) {
-      nir_parallel_copy_instr *pcopy = nir_instr_as_parallel_copy(last_instr);
-      if (pcopy->at_end)
-         agressive_coalesce_parallel_copy(pcopy, state);
-   }
+   nir_parallel_copy_instr *end_pcopy =
+      get_parallel_copy_at_end_of_block(block);
+
+   if (end_pcopy && end_pcopy != start_pcopy)
+      aggressive_coalesce_parallel_copy(end_pcopy, state);
 
    return true;
 }
 
-static nir_register *
-get_register_for_ssa_def(nir_ssa_def *def, struct from_ssa_state *state)
+static bool
+rewrite_ssa_def(nir_ssa_def *def, void *void_state)
 {
+   struct from_ssa_state *state = void_state;
+   nir_register *reg;
+
    struct hash_entry *entry =
       _mesa_hash_table_search(state->merge_node_table, def);
    if (entry) {
+      /* In this case, we're part of a phi web.  Use the web's register. */
       merge_node *node = (merge_node *)entry->data;
 
       /* If it doesn't have a register yet, create one.  Note that all of
@@ -464,80 +481,52 @@ get_register_for_ssa_def(nir_ssa_def *def, struct from_ssa_state *state)
          node->set->reg->num_array_elems = 0;
       }
 
-      return node->set->reg;
-   }
-
-   entry = _mesa_hash_table_search(state->ssa_table, def);
-   if (entry) {
-      return (nir_register *)entry->data;
+      reg = node->set->reg;
    } else {
+      if (state->phi_webs_only)
+         return true;
+
       /* We leave load_const SSA values alone.  They act as immediates to
        * the backend.  If it got coalesced into a phi, that's ok.
        */
       if (def->parent_instr->type == nir_instr_type_load_const)
-         return NULL;
+         return true;
 
-      nir_register *reg = nir_local_reg_create(state->impl);
+      reg = nir_local_reg_create(state->impl);
       reg->name = def->name;
       reg->num_components = def->num_components;
       reg->num_array_elems = 0;
-
-      _mesa_hash_table_insert(state->ssa_table, def, reg);
-      return reg;
    }
-}
 
-static bool
-rewrite_ssa_src(nir_src *src, void *void_state)
-{
-   struct from_ssa_state *state = void_state;
+   nir_ssa_def_rewrite_uses(def, nir_src_for_reg(reg), state->mem_ctx);
+   assert(list_empty(&def->uses) && list_empty(&def->if_uses));
 
-   if (src->is_ssa) {
-      nir_register *reg = get_register_for_ssa_def(src->ssa, state);
-
-      if (reg == NULL) {
-         assert(src->ssa->parent_instr->type == nir_instr_type_load_const);
-         return true;
-      }
-
-      memset(src, 0, sizeof *src);
-      src->reg.reg = reg;
-
-      /* We don't need to remove it from the uses set because that is going
-       * away.  We just need to add it to the one for the register. */
-      _mesa_set_add(reg->uses, _mesa_hash_pointer(state->instr), state->instr);
+   if (def->parent_instr->type == nir_instr_type_ssa_undef) {
+      /* If it's an ssa_undef instruction, remove it since we know we just got
+       * rid of all its uses.
+       */
+      nir_instr *parent_instr = def->parent_instr;
+      nir_instr_remove(parent_instr);
+      ralloc_steal(state->dead_ctx, parent_instr);
+      return true;
    }
 
-   return true;
-}
+   assert(def->parent_instr->type != nir_instr_type_load_const);
 
-static bool
-rewrite_ssa_dest(nir_dest *dest, void *void_state)
-{
-   struct from_ssa_state *state = void_state;
-
-   if (dest->is_ssa) {
-      nir_register *reg = get_register_for_ssa_def(&dest->ssa, state);
-
-      if (reg == NULL) {
-         assert(dest->ssa.parent_instr->type == nir_instr_type_load_const);
-         return true;
-      }
-
-      _mesa_set_destroy(dest->ssa.uses, NULL);
-      _mesa_set_destroy(dest->ssa.if_uses, NULL);
-
-      memset(dest, 0, sizeof *dest);
-      dest->reg.reg = reg;
+   /* At this point we know a priori that this SSA def is part of a
+    * nir_dest.  We can use exec_node_data to get the dest pointer.
+    */
+   nir_dest *dest = exec_node_data(nir_dest, def, ssa);
 
-      _mesa_set_add(reg->defs, _mesa_hash_pointer(state->instr), state->instr);
-   }
+   *dest = nir_dest_for_reg(reg);
+   dest->reg.parent_instr = state->instr;
+   list_addtail(&dest->reg.def_link, &reg->defs);
 
    return true;
 }
 
 /* Resolves ssa definitions to registers.  While we're at it, we also
- * remove phi nodes and ssa_undef instructions
+ * remove phi nodes.
  */
 static bool
 resolve_registers_block(nir_block *block, void *void_state)
@@ -546,35 +535,15 @@ resolve_registers_block(nir_block *block, void *void_state)
 
    nir_foreach_instr_safe(block, instr) {
       state->instr = instr;
-      nir_foreach_src(instr, rewrite_ssa_src, state);
-      nir_foreach_dest(instr, rewrite_ssa_dest, state);
+      nir_foreach_ssa_def(instr, rewrite_ssa_def, state);
 
-      if (instr->type == nir_instr_type_ssa_undef ||
-          instr->type == nir_instr_type_phi) {
+      if (instr->type == nir_instr_type_phi) {
          nir_instr_remove(instr);
          ralloc_steal(state->dead_ctx, instr);
       }
    }
    state->instr = NULL;
 
-   nir_if *following_if = nir_block_following_if(block);
-   if (following_if && following_if->condition.is_ssa) {
-      nir_register *reg = get_register_for_ssa_def(following_if->condition.ssa,
-                                                   state);
-      if (reg) {
-         memset(&following_if->condition, 0, sizeof following_if->condition);
-         following_if->condition.reg.reg = reg;
-
-         _mesa_set_add(reg->if_uses, _mesa_hash_pointer(following_if),
-                       following_if);
-      } else {
-         /* FIXME: We really shouldn't hit this.  We should be doing
-          * constant control flow propagation.
-          */
-         assert(following_if->condition.ssa->parent_instr->type == nir_instr_type_load_const);
-      }
-   }
-
    return true;
 }
 
@@ -585,22 +554,16 @@ emit_copy(nir_parallel_copy_instr *pcopy, nir_src src, nir_src dest_src,
    assert(!dest_src.is_ssa &&
           dest_src.reg.indirect == NULL &&
           dest_src.reg.base_offset == 0);
-   nir_dest dest = {
-      .reg.reg = dest_src.reg.reg,
-      .reg.indirect = NULL,
-      .reg.base_offset = 0,
-      .is_ssa = false,
-   };
 
    if (src.is_ssa)
-      assert(src.ssa->num_components >= dest.reg.reg->num_components);
+      assert(src.ssa->num_components >= dest_src.reg.reg->num_components);
    else
-      assert(src.reg.reg->num_components >= dest.reg.reg->num_components);
+      assert(src.reg.reg->num_components >= dest_src.reg.reg->num_components);
 
    nir_alu_instr *mov = nir_alu_instr_create(mem_ctx, nir_op_imov);
-   mov->src[0].src = nir_src_copy(src, mem_ctx);
-   mov->dest.dest = nir_dest_copy(dest, mem_ctx);
-   mov->dest.write_mask = (1 << dest.reg.reg->num_components) - 1;
+   nir_src_copy(&mov->src[0].src, &src, mem_ctx);
+   mov->dest.dest = nir_dest_for_reg(dest_src.reg.reg);
+   mov->dest.write_mask = (1 << dest_src.reg.reg->num_components) - 1;
 
    nir_instr_insert_before(&pcopy->instr, &mov->instr);
 }
@@ -614,7 +577,7 @@ emit_copy(nir_parallel_copy_instr *pcopy, nir_src src, nir_src dest_src,
  *
  * The algorithm works by playing this little shell game with the values.
  * We start by recording where every source value is and which source value
- * each destination value should recieve.  We then grab any copy whose
+ * each destination value should receive.  We then grab any copy whose
  * destination is "empty", i.e. not used as a source, and do the following:
  *  - Find where its source value currently lives
  *  - Emit the move instruction
@@ -632,12 +595,11 @@ resolve_parallel_copy(nir_parallel_copy_instr *pcopy,
                       struct from_ssa_state *state)
 {
    unsigned num_copies = 0;
-   foreach_list_typed_safe(nir_parallel_copy_copy, copy, node, &pcopy->copies) {
+   nir_foreach_parallel_copy_entry(pcopy, entry) {
       /* Sources may be SSA */
-      if (!copy->src.is_ssa && copy->src.reg.reg == copy->dest.reg.reg)
+      if (!entry->src.is_ssa && entry->src.reg.reg == entry->dest.reg.reg)
          continue;
 
-      /* Set both indices equal to UINT_MAX to mark them as not indexed yet. */
       num_copies++;
    }
 
@@ -648,21 +610,16 @@ resolve_parallel_copy(nir_parallel_copy_instr *pcopy,
    }
 
    /* The register/source corresponding to the given index */
-   nir_src values[num_copies * 2];
-   memset(values, 0, sizeof values);
+   NIR_VLA_ZERO(nir_src, values, num_copies * 2);
 
-   /* The current location of a given piece of data */
-   int loc[num_copies * 2];
+   /* The current location of a given piece of data.  We will use -1 for "null" */
+   NIR_VLA_FILL(int, loc, num_copies * 2, -1);
 
-   /* The piece of data that the given piece of data is to be copied from */
-   int pred[num_copies * 2];
-
-   /* Initialize loc and pred.  We will use -1 for "null" */
-   memset(loc, -1, sizeof loc);
-   memset(pred, -1, sizeof pred);
+   /* The piece of data that the given piece of data is to be copied from.  We will use -1 for "null" */
+   NIR_VLA_FILL(int, pred, num_copies * 2, -1);
 
    /* The destinations we have yet to properly fill */
-   int to_do[num_copies * 2];
+   NIR_VLA(int, to_do, num_copies * 2);
    int to_do_idx = -1;
 
    /* Now we set everything up:
@@ -671,27 +628,22 @@ resolve_parallel_copy(nir_parallel_copy_instr *pcopy,
     *  - Predicessors are recorded from sources and destinations
     */
    int num_vals = 0;
-   foreach_list_typed(nir_parallel_copy_copy, copy, node, &pcopy->copies) {
+   nir_foreach_parallel_copy_entry(pcopy, entry) {
       /* Sources may be SSA */
-      if (!copy->src.is_ssa && copy->src.reg.reg == copy->dest.reg.reg)
+      if (!entry->src.is_ssa && entry->src.reg.reg == entry->dest.reg.reg)
          continue;
 
       int src_idx = -1;
       for (int i = 0; i < num_vals; ++i) {
-         if (nir_srcs_equal(values[i], copy->src))
+         if (nir_srcs_equal(values[i], entry->src))
             src_idx = i;
       }
       if (src_idx < 0) {
          src_idx = num_vals++;
-         values[src_idx] = copy->src;
+         values[src_idx] = entry->src;
       }
 
-      nir_src dest_src = {
-         .reg.reg = copy->dest.reg.reg,
-         .reg.indirect = NULL,
-         .reg.base_offset = 0,
-         .is_ssa = false,
-      };
+      nir_src dest_src = nir_src_for_reg(entry->dest.reg.reg);
 
       int dest_idx = -1;
       for (int i = 0; i < num_vals; ++i) {
@@ -717,7 +669,7 @@ resolve_parallel_copy(nir_parallel_copy_instr *pcopy,
    }
 
    /* Currently empty destinations we can go ahead and fill */
-   int ready[num_copies * 2];
+   NIR_VLA(int, ready, num_copies * 2);
    int ready_idx = -1;
 
    /* Mark the ones that are ready for copying.  We know an index is a
@@ -801,36 +753,32 @@ resolve_parallel_copies_block(nir_block *block, void *void_state)
       resolve_parallel_copy(pcopy, state);
    }
 
-   nir_instr *last_instr = nir_block_last_instr(block);
-   if (last_instr == NULL)
-      return true; /* Now empty, nothing to do. */
-
-   /* If the last instruction is a jump, the parallel copy will be before
-    * the jump.
+   /* It's possible that the above code already cleaned up the end parallel
+    * copy.  However, doing so removed it from the instructions list so we
+    * won't find it here.  Therefore, it's safe to go ahead and just look
+    * for one and clean it up if it exists.
     */
-   if (last_instr->type == nir_instr_type_jump)
-      last_instr = nir_instr_prev(last_instr);
-
-   if (last_instr && last_instr->type == nir_instr_type_parallel_copy) {
-      nir_parallel_copy_instr *pcopy = nir_instr_as_parallel_copy(last_instr);
-      if (pcopy->at_end)
-         resolve_parallel_copy(pcopy, state);
-   }
+   nir_parallel_copy_instr *end_pcopy =
+      get_parallel_copy_at_end_of_block(block);
+   if (end_pcopy)
+      resolve_parallel_copy(end_pcopy, state);
 
    return true;
 }
 
 static void
-nir_convert_from_ssa_impl(nir_function_impl *impl)
+nir_convert_from_ssa_impl(nir_function_impl *impl, bool phi_webs_only)
 {
    struct from_ssa_state state;
 
    state.mem_ctx = ralloc_parent(impl);
    state.dead_ctx = ralloc_context(NULL);
    state.impl = impl;
+   state.phi_webs_only = phi_webs_only;
    state.merge_node_table = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                                     _mesa_key_pointer_equal);
 
+   nir_foreach_block(impl, add_parallel_copy_to_end_of_block, &state);
    nir_foreach_block(impl, isolate_phi_nodes_block, &state);
 
    /* Mark metadata as dirty before we ask for liveness analysis */
@@ -841,10 +789,8 @@ nir_convert_from_ssa_impl(nir_function_impl *impl)
                               nir_metadata_dominance);
 
    nir_foreach_block(impl, coalesce_phi_nodes_block, &state);
-   nir_foreach_block(impl, agressive_coalesce_block, &state);
+   nir_foreach_block(impl, aggressive_coalesce_block, &state);
 
-   state.ssa_table = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
-                                             _mesa_key_pointer_equal);
    nir_foreach_block(impl, resolve_registers_block, &state);
 
    nir_foreach_block(impl, resolve_parallel_copies_block, &state);
@@ -853,16 +799,15 @@ nir_convert_from_ssa_impl(nir_function_impl *impl)
                                nir_metadata_dominance);
 
    /* Clean up dead instructions and the hash tables */
-   _mesa_hash_table_destroy(state.ssa_table, NULL);
    _mesa_hash_table_destroy(state.merge_node_table, NULL);
    ralloc_free(state.dead_ctx);
 }
 
 void
-nir_convert_from_ssa(nir_shader *shader)
+nir_convert_from_ssa(nir_shader *shader, bool phi_webs_only)
 {
    nir_foreach_overload(shader, overload) {
       if (overload->impl)
-         nir_convert_from_ssa_impl(overload->impl);
+         nir_convert_from_ssa_impl(overload->impl, phi_webs_only);
    }
 }