turnip: Move tu_bo functions to tu_drm.c
[mesa.git] / src / freedreno / ir3 / ir3_ra.c
index 39507184105a2a86a47c81ea9d9ffbb5bdc024c7..abf665288fd240ca25ce6bfab73b562fdef02222 100644
 #include "util/bitset.h"
 
 #include "ir3.h"
-#include "ir3_compiler.h"
+#include "ir3_shader.h"
+#include "ir3_ra.h"
+
+
+#ifdef DEBUG
+#define RA_DEBUG (ir3_shader_debug & IR3_DBG_RAMSGS)
+#else
+#define RA_DEBUG 0
+#endif
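+/* debug convenience macros; "d" prints a message and "di" prints a
+ * message followed by the instruction, when RA debug msgs are enabled:
+ */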
+#define d(fmt, ...) do { if (RA_DEBUG) { \
+       printf("RA: "fmt"\n", ##__VA_ARGS__); \
+} } while (0)
+
+#define di(instr, fmt, ...) do { if (RA_DEBUG) { \
+       printf("RA: "fmt": ", ##__VA_ARGS__); \
+       ir3_print_instr(instr); \
+} } while (0)
 
 /*
  * Register Assignment:
  * the result.
  */
 
-static const unsigned class_sizes[] = {
-       1, 2, 3, 4,
-       4 + 4, /* txd + 1d/2d */
-       4 + 6, /* txd + 3d */
-};
-#define class_count ARRAY_SIZE(class_sizes)
-
-static const unsigned half_class_sizes[] = {
-       1, 2, 3, 4,
-};
-#define half_class_count  ARRAY_SIZE(half_class_sizes)
-
-/* seems to just be used for compute shaders?  Seems like vec1 and vec3
- * are sufficient (for now?)
- */
-static const unsigned high_class_sizes[] = {
-       1, 3,
-};
-#define high_class_count ARRAY_SIZE(high_class_sizes)
-
-#define total_class_count (class_count + half_class_count + high_class_count)
-
-/* Below a0.x are normal regs.  RA doesn't need to assign a0.x/p0.x. */
-#define NUM_REGS             (4 * 48)  /* r0 to r47 */
-#define NUM_HIGH_REGS        (4 * 8)   /* r48 to r55 */
-#define FIRST_HIGH_REG       (4 * 48)
-/* Number of virtual regs in a given class: */
-#define CLASS_REGS(i)        (NUM_REGS - (class_sizes[i] - 1))
-#define HALF_CLASS_REGS(i)   (NUM_REGS - (half_class_sizes[i] - 1))
-#define HIGH_CLASS_REGS(i)   (NUM_HIGH_REGS - (high_class_sizes[i] - 1))
-
-#define HALF_OFFSET          (class_count)
-#define HIGH_OFFSET          (class_count + half_class_count)
-
-/* register-set, created one time, used for all shaders: */
-struct ir3_ra_reg_set {
-       struct ra_regs *regs;
-       unsigned int classes[class_count];
-       unsigned int half_classes[half_class_count];
-       unsigned int high_classes[high_class_count];
-       /* maps flat virtual register space to base gpr: */
-       uint16_t *ra_reg_to_gpr;
-       /* maps cls,gpr to flat virtual register space: */
-       uint16_t **gpr_to_ra_reg;
-};
-
-static void
-build_q_values(unsigned int **q_values, unsigned off,
-               const unsigned *sizes, unsigned count)
-{
-       for (unsigned i = 0; i < count; i++) {
-               q_values[i + off] = rzalloc_array(q_values, unsigned, total_class_count);
-
-               /* From register_allocate.c:
-                *
-                * q(B,C) (indexed by C, B is this register class) in
-                * Runeson/Nyström paper.  This is "how many registers of B could
-                * the worst choice register from C conflict with".
-                *
-                * If we just let the register allocation algorithm compute these
-                * values, is extremely expensive.  However, since all of our
-                * registers are laid out, we can very easily compute them
-                * ourselves.  View the register from C as fixed starting at GRF n
-                * somewhere in the middle, and the register from B as sliding back
-                * and forth.  Then the first register to conflict from B is the
-                * one starting at n - class_size[B] + 1 and the last register to
-                * conflict will start at n + class_size[B] - 1.  Therefore, the
-                * number of conflicts from B is class_size[B] + class_size[C] - 1.
-                *
-                *   +-+-+-+-+-+-+     +-+-+-+-+-+-+
-                * B | | | | | |n| --> | | | | | | |
-                *   +-+-+-+-+-+-+     +-+-+-+-+-+-+
-                *             +-+-+-+-+-+
-                * C           |n| | | | |
-                *             +-+-+-+-+-+
-                *
-                * (Idea copied from brw_fs_reg_allocate.cpp)
-                */
-               for (unsigned j = 0; j < count; j++)
-                       q_values[i + off][j + off] = sizes[i] + sizes[j] - 1;
-       }
-}
-
-/* One-time setup of RA register-set, which describes all the possible
- * "virtual" registers and their interferences.  Ie. double register
- * occupies (and conflicts with) two single registers, and so forth.
- * Since registers do not need to be aligned to their class size, they
- * can conflict with other registers in the same class too.  Ie:
- *
- *    Single (base) |  Double
- *    --------------+---------------
- *       R0         |  D0
- *       R1         |  D0 D1
- *       R2         |     D1 D2
- *       R3         |        D2
- *           .. and so on..
- *
- * (NOTE the disassembler uses notation like r0.x/y/z/w but those are
- * really just four scalar registers.  Don't let that confuse you.)
- */
-struct ir3_ra_reg_set *
-ir3_ra_alloc_reg_set(struct ir3_compiler *compiler)
-{
-       struct ir3_ra_reg_set *set = rzalloc(compiler, struct ir3_ra_reg_set);
-       unsigned ra_reg_count, reg, first_half_reg, first_high_reg, base;
-       unsigned int **q_values;
-
-       /* calculate # of regs across all classes: */
-       ra_reg_count = 0;
-       for (unsigned i = 0; i < class_count; i++)
-               ra_reg_count += CLASS_REGS(i);
-       for (unsigned i = 0; i < half_class_count; i++)
-               ra_reg_count += HALF_CLASS_REGS(i);
-       for (unsigned i = 0; i < high_class_count; i++)
-               ra_reg_count += HIGH_CLASS_REGS(i);
-
-       /* allocate and populate q_values: */
-       q_values = ralloc_array(set, unsigned *, total_class_count);
 
-       build_q_values(q_values, 0, class_sizes, class_count);
-       build_q_values(q_values, HALF_OFFSET, half_class_sizes, half_class_count);
-       build_q_values(q_values, HIGH_OFFSET, high_class_sizes, high_class_count);
+static struct ir3_instruction * name_to_instr(struct ir3_ra_ctx *ctx, unsigned name);
 
-       /* allocate the reg-set.. */
-       set->regs = ra_alloc_reg_set(set, ra_reg_count, true);
-       set->ra_reg_to_gpr = ralloc_array(set, uint16_t, ra_reg_count);
-       set->gpr_to_ra_reg = ralloc_array(set, uint16_t *, total_class_count);
-
-       /* .. and classes */
-       reg = 0;
-       for (unsigned i = 0; i < class_count; i++) {
-               set->classes[i] = ra_alloc_reg_class(set->regs);
-
-               set->gpr_to_ra_reg[i] = ralloc_array(set, uint16_t, CLASS_REGS(i));
-
-               for (unsigned j = 0; j < CLASS_REGS(i); j++) {
-                       ra_class_add_reg(set->regs, set->classes[i], reg);
-
-                       set->ra_reg_to_gpr[reg] = j;
-                       set->gpr_to_ra_reg[i][j] = reg;
-
-                       for (unsigned br = j; br < j + class_sizes[i]; br++)
-                               ra_add_transitive_reg_conflict(set->regs, br, reg);
-
-                       reg++;
-               }
-       }
-
-       first_half_reg = reg;
-       base = HALF_OFFSET;
-
-       for (unsigned i = 0; i < half_class_count; i++) {
-               set->half_classes[i] = ra_alloc_reg_class(set->regs);
-
-               set->gpr_to_ra_reg[base + i] =
-                               ralloc_array(set, uint16_t, HALF_CLASS_REGS(i));
-
-               for (unsigned j = 0; j < HALF_CLASS_REGS(i); j++) {
-                       ra_class_add_reg(set->regs, set->half_classes[i], reg);
-
-                       set->ra_reg_to_gpr[reg] = j;
-                       set->gpr_to_ra_reg[base + i][j] = reg;
-
-                       for (unsigned br = j; br < j + half_class_sizes[i]; br++)
-                               ra_add_transitive_reg_conflict(set->regs, br + first_half_reg, reg);
-
-                       reg++;
-               }
-       }
-
-       first_high_reg = reg;
-       base = HIGH_OFFSET;
-
-       for (unsigned i = 0; i < high_class_count; i++) {
-               set->high_classes[i] = ra_alloc_reg_class(set->regs);
-
-               set->gpr_to_ra_reg[base + i] =
-                               ralloc_array(set, uint16_t, HIGH_CLASS_REGS(i));
-
-               for (unsigned j = 0; j < HIGH_CLASS_REGS(i); j++) {
-                       ra_class_add_reg(set->regs, set->high_classes[i], reg);
-
-                       set->ra_reg_to_gpr[reg] = j;
-                       set->gpr_to_ra_reg[base + i][j] = reg;
-
-                       for (unsigned br = j; br < j + high_class_sizes[i]; br++)
-                               ra_add_transitive_reg_conflict(set->regs, br + first_high_reg, reg);
-
-                       reg++;
-               }
-       }
-
-       /* starting a6xx, half precision regs conflict w/ full precision regs: */
-       if (compiler->gpu_id >= 600) {
-               /* because of transitivity, we can get away with just setting up
-                * conflicts between the first class of full and half regs:
-                */
-               for (unsigned i = 0; i < half_class_count; i++) {
-                       /* NOTE there are fewer half class sizes, but they match the
-                        * first N full class sizes.. but assert in case that ever
-                        * accidentally changes:
-                        */
-                       debug_assert(class_sizes[i] == half_class_sizes[i]);
-                       for (unsigned j = 0; j < CLASS_REGS(i) / 2; j++) {
-                               unsigned freg  = set->gpr_to_ra_reg[i][j];
-                               unsigned hreg0 = set->gpr_to_ra_reg[i + HALF_OFFSET][(j * 2) + 0];
-                               unsigned hreg1 = set->gpr_to_ra_reg[i + HALF_OFFSET][(j * 2) + 1];
-
-                               ra_add_transitive_reg_pair_conflict(set->regs, freg, hreg0, hreg1);
-                       }
-               }
-
-               // TODO also need to update q_values, but for now:
-               ra_set_finalize(set->regs, NULL);
-       } else {
-               ra_set_finalize(set->regs, q_values);
-       }
-
-       ralloc_free(q_values);
-
-       return set;
-}
-
-/* additional block-data (per-block) */
-struct ir3_ra_block_data {
-       BITSET_WORD *def;        /* variables defined before used in block */
-       BITSET_WORD *use;        /* variables used before defined in block */
-       BITSET_WORD *livein;     /* which defs reach entry point of block */
-       BITSET_WORD *liveout;    /* which defs reach exit point of block */
-};
-
-/* additional instruction-data (per-instruction) */
-struct ir3_ra_instr_data {
-       /* cached instruction 'definer' info: */
-       struct ir3_instruction *defn;
-       int off, sz, cls;
-};
-
-/* register-assign context, per-shader */
-struct ir3_ra_ctx {
-       struct ir3_shader_variant *v;
-       struct ir3 *ir;
-
-       struct ir3_ra_reg_set *set;
-       struct ra_graph *g;
-
-       /* Are we in the scalar assignment pass?  In this pass, all larger-
-        * than-vec1 vales have already been assigned and pre-colored, so
-        * we only consider scalar values.
-        */
-       bool scalar_pass;
-
-       unsigned alloc_count;
-       /* one per class, plus one slot for arrays: */
-       unsigned class_alloc_count[total_class_count + 1];
-       unsigned class_base[total_class_count + 1];
-       unsigned instr_cnt;
-       unsigned *def, *use;     /* def/use table */
-       struct ir3_ra_instr_data *instrd;
-
-       /* Tracking for max half/full register assigned.  We don't need to
-        * track high registers.
-        *
-        * The feedback about registers used in first pass is used to choose
-        * a target register usage to round-robin between in the 2nd pass.
-        */
-       unsigned max_assigned;
-       unsigned max_half_assigned;
-};
+static bool name_is_array(struct ir3_ra_ctx *ctx, unsigned name);
+static struct ir3_array * name_to_array(struct ir3_ra_ctx *ctx, unsigned name);
 
 /* does it conflict? */
 static inline bool
@@ -363,49 +115,6 @@ intersects(unsigned a_start, unsigned a_end, unsigned b_start, unsigned b_end)
        return !((a_start >= b_end) || (b_start >= a_end));
 }
 
-static unsigned
-reg_size_for_array(struct ir3_array *arr)
-{
-       if (arr->half)
-               return DIV_ROUND_UP(arr->length, 2);
-
-       return arr->length;
-}
-
-static int
-size_to_class(unsigned sz, bool half, bool high)
-{
-       if (high) {
-               for (unsigned i = 0; i < high_class_count; i++)
-                       if (high_class_sizes[i] >= sz)
-                               return i + HIGH_OFFSET;
-       } else if (half) {
-               for (unsigned i = 0; i < half_class_count; i++)
-                       if (half_class_sizes[i] >= sz)
-                               return i + HALF_OFFSET;
-       } else {
-               for (unsigned i = 0; i < class_count; i++)
-                       if (class_sizes[i] >= sz)
-                               return i;
-       }
-       debug_assert(0);
-       return -1;
-}
-
-static bool
-writes_gpr(struct ir3_instruction *instr)
-{
-       if (dest_regs(instr) == 0)
-               return false;
-       /* is dest a normal temp register: */
-       struct ir3_register *reg = instr->regs[0];
-       debug_assert(!(reg->flags & (IR3_REG_CONST | IR3_REG_IMMED)));
-       if ((reg->num == regid(REG_A0, 0)) ||
-                       (reg->num == regid(REG_P0, 0)))
-               return false;
-       return true;
-}
-
 static bool
 instr_before(struct ir3_instruction *a, struct ir3_instruction *b)
 {
@@ -438,7 +147,6 @@ get_definer(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr,
                 * need to find the distance between where actual array starts
                 * and collect..  that probably doesn't happen currently.
                 */
-               struct ir3_register *src;
                int dsz, doff;
 
                /* note: don't use foreach_ssa_src as this gets called once
@@ -519,7 +227,7 @@ get_definer(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr,
                dd = get_definer(ctx, d->regs[1]->instr, &dsz, &doff);
 
                /* by definition, should come before: */
-               debug_assert(instr_before(dd, d));
+               ra_assert(ctx, instr_before(dd, d));
 
                *sz = MAX2(*sz, dsz);
 
@@ -529,7 +237,7 @@ get_definer(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr,
                d = dd;
        }
 
-       debug_assert(d->opc != OPC_META_SPLIT);
+       ra_assert(ctx, d->opc != OPC_META_SPLIT);
 
        id->defn = d;
        id->sz = *sz;
@@ -546,14 +254,14 @@ ra_block_find_definers(struct ir3_ra_ctx *ctx, struct ir3_block *block)
                if (instr->regs_count == 0)
                        continue;
                /* couple special cases: */
-               if (writes_addr(instr) || writes_pred(instr)) {
+               if (writes_addr0(instr) || writes_addr1(instr) || writes_pred(instr)) {
                        id->cls = -1;
                } else if (instr->regs[0]->flags & IR3_REG_ARRAY) {
                        id->cls = total_class_count;
                } else {
                        /* and the normal case: */
                        id->defn = get_definer(ctx, instr, &id->sz, &id->off);
-                       id->cls = size_to_class(id->sz, is_half(id->defn), is_high(id->defn));
+                       id->cls = ra_size_to_class(id->sz, is_half(id->defn), is_high(id->defn));
 
                        /* this is a bit of duct-tape.. if we have a scenario like:
                         *
@@ -640,13 +348,177 @@ ra_block_name_instructions(struct ir3_ra_ctx *ctx, struct ir3_block *block)
        }
 }
 
+/**
+ * Set a value for max register target.
+ *
+ * Currently this just rounds up to a multiple of full-vec4 (ie. the
+ * granularity that we configure the hw for.. there is no point in
+ * using r3.x if you aren't going to make r3.yzw available).  But
+ * in reality there seem to be multiple thresholds that affect the
+ * number of waves.. and we should round up the target to the next
+ * threshold when we round-robin registers, to give postsched more
+ * options.  When we understand that better, this is where we'd
+ * implement that.
+ */
+static void
+ra_set_register_target(struct ir3_ra_ctx *ctx, unsigned max_target)
+{
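+       /* note: the register target is counted in half-reg units, so a
+        * full vec4 occupies 2 * hvec4 = 8 of them:
+        */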
+       const unsigned hvec4 = 4;
+       const unsigned vec4 = 2 * hvec4;
+
+       ctx->max_target = align(max_target, vec4);
+
+       d("New max_target=%u", ctx->max_target);
+}
+
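+/* pick the lowest-numbered available register in [min, max]: */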
+static int
+pick_in_range(BITSET_WORD *regs, unsigned min, unsigned max)
+{
+       for (unsigned i = min; i <= max; i++) {
+               if (BITSET_TEST(regs, i)) {
+                       return i;
+               }
+       }
+       return -1;
+}
+
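+/* pick the highest-numbered available register in [min, max]: */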
+static int
+pick_in_range_rev(BITSET_WORD *regs, int min, int max)
+{
+       for (int i = max; i >= min; i--) {
+               if (BITSET_TEST(regs, i)) {
+                       return i;
+               }
+       }
+       return -1;
+}
+
+/* register selector for the a6xx+ merged register file: */
+static unsigned int
+ra_select_reg_merged(unsigned int n, BITSET_WORD *regs, void *data)
+{
+       struct ir3_ra_ctx *ctx = data;
+       unsigned int class = ra_get_node_class(ctx->g, n);
+       bool half, high;
+       int sz = ra_class_to_size(class, &half, &high);
+
+       assert(sz > 0);
+
+       /* dimensions within the register class: */
+       unsigned max_target, start;
+
+       /* the regs bitset will include *all* of the virtual regs, but we lay
+        * out the different classes consecutively in the virtual register
+        * space.  So we just need to think about the base offset of a given
+        * class within the virtual register space, and offset the register
+        * space we search within by that base offset.
+        */
+       unsigned base;
+
+       /* TODO I think eventually we want to round-robin in vector pass
+        * as well, but needs some more work to calculate # of live vals
+        * for this.  (Maybe with some work, we could just figure out
+        * the scalar target and use that, since that is what we care
+        * about in the end.. but that would mean setting up use-def/
+        * liveranges for scalar pass before doing vector pass.)
+        *
+        * For now, in the vector class, just move assignments for scalar
+        * vals higher to hopefully prevent them from limiting where vecN
+        * values can be placed.  Since the scalar values are re-assigned
+        * in the 2nd pass, we don't really care where they end up in the
+        * vector pass.
+        */
+       if (!ctx->scalar_pass) {
+               base = ctx->set->gpr_to_ra_reg[class][0];
+               if (high) {
+                       max_target = HIGH_CLASS_REGS(class - HIGH_OFFSET);
+               } else if (half) {
+                       max_target = HALF_CLASS_REGS(class - HALF_OFFSET);
+               } else {
+                       max_target = CLASS_REGS(class);
+               }
+
+               if ((sz == 1) && !high) {
+                       return pick_in_range_rev(regs, base, base + max_target);
+               } else {
+                       return pick_in_range(regs, base, base + max_target);
+               }
+       } else {
+               ra_assert(ctx, sz == 1);
+       }
+
+       /* NOTE: this is only used in scalar pass, so the register
+        * class will be one of the scalar classes (ie. idx==0):
+        */
+       base = ctx->set->gpr_to_ra_reg[class][0];
+       if (high) {
+               max_target = HIGH_CLASS_REGS(0);
+               start = 0;
+       } else if (half) {
+               max_target = ctx->max_target;
+               start = ctx->start_search_reg;
+       } else {
+               max_target = ctx->max_target / 2;
+               start = ctx->start_search_reg;
+       }
+
+       /* For cat4 instructions, if the src reg is already assigned, and
+        * avail to pick, use it.  Because this doesn't introduce unnecessary
+        * dependencies, and it potentially avoids needing (ss) syncs
+        * for write-after-read hazards:
+        */
+       struct ir3_instruction *instr = name_to_instr(ctx, n);
+       if (is_sfu(instr)) {
+               struct ir3_register *src = instr->regs[1];
+               int src_n;
+
+               if ((src->flags & IR3_REG_ARRAY) && !(src->flags & IR3_REG_RELATIV)) {
+                       struct ir3_array *arr = ir3_lookup_array(ctx->ir, src->array.id);
+                       src_n = arr->base + src->array.offset;
+               } else {
+                       src_n = scalar_name(ctx, src->instr, 0);
+               }
+
+               unsigned reg = ra_get_node_reg(ctx->g, src_n);
+
+               /* Check if the src register has been assigned yet: */
+               if (reg != NO_REG) {
+                       if (BITSET_TEST(regs, reg)) {
+                               return reg;
+                       }
+               }
+       }
+
+       int r = pick_in_range(regs, base + start, base + max_target);
+       if (r < 0) {
+               /* wrap-around: */
+               r = pick_in_range(regs, base, base + start);
+       }
+
+       if (r < 0) {
+               /* overflow, we need to increase max_target: */
+               ra_set_register_target(ctx, ctx->max_target + 1);
+               return ra_select_reg_merged(n, regs, data);
+       }
+
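+       /* update the round-robin start point; start_search_reg is in
+        * half-reg units, so full-precision assignments count double:
+        */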
+       if (class == ctx->set->half_classes[0]) {
+               int n = r - base;
+               ctx->start_search_reg = (n + 1) % ctx->max_target;
+       } else if (class == ctx->set->classes[0]) {
+               int n = (r - base) * 2;
+               ctx->start_search_reg = (n + 1) % ctx->max_target;
+       }
+
+       return r;
+}
+
 static void
 ra_init(struct ir3_ra_ctx *ctx)
 {
        unsigned n, base;
 
        ir3_clear_mark(ctx->ir);
-       n = ir3_count_instructions(ctx->ir);
+       n = ir3_count_instructions_ra(ctx->ir);
 
        ctx->instrd = rzalloc_array(NULL, struct ir3_ra_instr_data, n);
 
@@ -671,54 +543,85 @@ ra_init(struct ir3_ra_ctx *ctx)
        base = ctx->class_base[total_class_count];
        foreach_array (arr, &ctx->ir->array_list) {
                arr->base = base;
-               ctx->class_alloc_count[total_class_count] += reg_size_for_array(arr);
-               base += reg_size_for_array(arr);
+               ctx->class_alloc_count[total_class_count] += arr->length;
+               base += arr->length;
        }
        ctx->alloc_count += ctx->class_alloc_count[total_class_count];
 
+       /* Add vreg names for r0.xyz/hr0.xyz; these are pre-colored to the
+        * first three (half) regs and used to keep tex destinations with
+        * skipped leading writemask components from wrapping below (h)r0.x:
+        */
+       ctx->r0_xyz_nodes = ctx->alloc_count;
+       ctx->alloc_count += 3;
+       ctx->hr0_xyz_nodes = ctx->alloc_count;
+       ctx->alloc_count += 3;
+
+       /* Add vreg name for prefetch-exclusion range: */
+       ctx->prefetch_exclude_node = ctx->alloc_count++;
+
+       if (RA_DEBUG) {
+               d("INSTRUCTION VREG NAMES:");
+               foreach_block (block, &ctx->ir->block_list) {
+                       foreach_instr (instr, &block->instr_list) {
+                               if (!ctx->instrd[instr->ip].defn)
+                                       continue;
+                               if (!writes_gpr(instr))
+                                       continue;
+                               di(instr, "%04u", scalar_name(ctx, instr, 0));
+                       }
+               }
+               d("ARRAY VREG NAMES:");
+               foreach_array (arr, &ctx->ir->array_list) {
+                       d("%04u: arr%u", arr->base, arr->id);
+               }
+               d("EXTRA VREG NAMES:");
+               d("%04u: r0_xyz_nodes", ctx->r0_xyz_nodes);
+               d("%04u: hr0_xyz_nodes", ctx->hr0_xyz_nodes);
+               d("%04u: prefetch_exclude_node", ctx->prefetch_exclude_node);
+       }
+
        ctx->g = ra_alloc_interference_graph(ctx->set->regs, ctx->alloc_count);
        ralloc_steal(ctx->g, ctx->instrd);
        ctx->def = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
        ctx->use = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
+
+       /* TODO add selector callback for split (pre-a6xx) register file: */
+       if (ctx->v->mergedregs) {
+               ra_set_select_reg_callback(ctx->g, ra_select_reg_merged, ctx);
+
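+               /* the scalar pass needs to map names back to instructions
+                * (see name_to_instr()) for the SFU src-reuse and round-robin
+                * heuristics:
+                */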
+               if (ctx->scalar_pass) {
+                       ctx->name_to_instr = _mesa_hash_table_create(ctx->g,
+                                       _mesa_hash_int, _mesa_key_int_equal);
+               }
+       }
 }
 
-static unsigned
-__ra_name(struct ir3_ra_ctx *ctx, int cls, struct ir3_instruction *defn)
+/* Map the name back to instruction: */
+static struct ir3_instruction *
+name_to_instr(struct ir3_ra_ctx *ctx, unsigned name)
 {
-       unsigned name;
-       debug_assert(cls >= 0);
-       debug_assert(cls < total_class_count);  /* we shouldn't get arrays here.. */
-       name = ctx->class_base[cls] + defn->name;
-       debug_assert(name < ctx->alloc_count);
-       return name;
+       ra_assert(ctx, !name_is_array(ctx, name));
+       struct hash_entry *entry = _mesa_hash_table_search(ctx->name_to_instr, &name);
+       if (entry)
+               return entry->data;
+       ra_unreachable(ctx, "invalid instr name");
+       return NULL;
 }
 
-static int
-ra_name(struct ir3_ra_ctx *ctx, struct ir3_ra_instr_data *id)
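+/* array names are allocated after all of the instruction names: */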
+static bool
+name_is_array(struct ir3_ra_ctx *ctx, unsigned name)
 {
-       /* TODO handle name mapping for arrays */
-       return __ra_name(ctx, id->cls, id->defn);
+       return name >= ctx->class_base[total_class_count];
 }
 
-/* Get the scalar name of the n'th component of an instruction dst: */
-static int
-scalar_name(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr, unsigned n)
+static struct ir3_array *
+name_to_array(struct ir3_ra_ctx *ctx, unsigned name)
 {
-       if (ctx->scalar_pass) {
-               if (instr->opc == OPC_META_SPLIT) {
-                       debug_assert(n == 0);     /* split results in a scalar */
-                       struct ir3_instruction *src = instr->regs[1]->instr;
-                       return scalar_name(ctx, src, instr->split.off);
-               } else if (instr->opc == OPC_META_COLLECT) {
-                       debug_assert(n < (instr->regs_count + 1));
-                       struct ir3_instruction *src = instr->regs[n + 1]->instr;
-                       return scalar_name(ctx, src, 0);
-               }
-       } else {
-               debug_assert(n == 0);
+       ra_assert(ctx, name_is_array(ctx, name));
+       foreach_array (arr, &ctx->ir->array_list) {
+               if (name < (arr->base + arr->length))
+                       return arr;
        }
-
-       return ra_name(ctx, &ctx->instrd[instr->ip]) + n;
+       ra_unreachable(ctx, "invalid array name");
+       return NULL;
 }
 
 static void
@@ -731,7 +634,12 @@ static void
 __def(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
                struct ir3_instruction *instr)
 {
-       debug_assert(name < ctx->alloc_count);
+       ra_assert(ctx, name < ctx->alloc_count);
+
+       /* split/collect do not actually define any real value */
+       if ((instr->opc == OPC_META_SPLIT) || (instr->opc == OPC_META_COLLECT))
+               return;
+
        /* defined on first write: */
        if (!ctx->def[name])
                ctx->def[name] = instr->ip;
@@ -743,7 +651,7 @@ static void
 __use(struct ir3_ra_ctx *ctx, struct ir3_ra_block_data *bd, unsigned name,
                struct ir3_instruction *instr)
 {
-       debug_assert(name < ctx->alloc_count);
+       ra_assert(ctx, name < ctx->alloc_count);
        ctx->use[name] = MAX2(ctx->use[name], instr->ip);
        if (!BITSET_TEST(bd->def, name))
                BITSET_SET(bd->use, name);
@@ -776,145 +684,92 @@ ra_block_compute_live_ranges(struct ir3_ra_ctx *ctx, struct ir3_block *block)
        }
 
        foreach_instr (instr, &block->instr_list) {
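+               /* the foreach_def()/foreach_use() helpers (from the new ir3_ra.h)
+                * iterate the vreg names defined/used by an instruction:
+                */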
-               struct ir3_instruction *src;
-               struct ir3_register *reg;
-
-               if (writes_gpr(instr)) {
-                       struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
-                       struct ir3_register *dst = instr->regs[0];
-
-                       if (dst->flags & IR3_REG_ARRAY) {
-                               struct ir3_array *arr =
-                                       ir3_lookup_array(ctx->ir, dst->array.id);
-                               unsigned i;
+               foreach_def (name, ctx, instr) {
+                       if (name_is_array(ctx, name)) {
+                               struct ir3_array *arr = name_to_array(ctx, name);
 
                                arr->start_ip = MIN2(arr->start_ip, instr->ip);
                                arr->end_ip = MAX2(arr->end_ip, instr->ip);
 
-                               /* set the node class now.. in case we don't encounter
-                                * this array dst again.  From register_alloc algo's
-                                * perspective, these are all single/scalar regs:
-                                */
-                               for (i = 0; i < arr->length; i++) {
+                               for (unsigned i = 0; i < arr->length; i++) {
                                        unsigned name = arr->base + i;
                                        if(arr->half)
                                                ra_set_node_class(ctx->g, name, ctx->set->half_classes[0]);
                                        else
                                                ra_set_node_class(ctx->g, name, ctx->set->classes[0]);
                                }
-
-                               /* indirect write is treated like a write to all array
-                                * elements, since we don't know which one is actually
-                                * written:
-                                */
-                               if (dst->flags & IR3_REG_RELATIV) {
-                                       for (i = 0; i < arr->length; i++) {
-                                               unsigned name = arr->base + i;
-                                               def(name, instr);
-                                       }
+                       } else {
+                               struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
+                               if (is_high(instr)) {
+                                       ra_set_node_class(ctx->g, name,
+                                                       ctx->set->high_classes[id->cls - HIGH_OFFSET]);
+                               } else if (is_half(instr)) {
+                                       ra_set_node_class(ctx->g, name,
+                                                       ctx->set->half_classes[id->cls - HALF_OFFSET]);
                                } else {
-                                       unsigned name = arr->base + dst->array.offset;
-                                       def(name, instr);
-                               }
-                       } else if (id->defn == instr) {
-                               /* in scalar pass, we aren't considering virtual register
-                                * classes, ie. if an instruction writes a vec2, then it
-                                * defines two different scalar register names.
-                                */
-                               unsigned n = ctx->scalar_pass ? dest_regs(instr) : 1;
-                               for (unsigned i = 0; i < n; i++) {
-                                       unsigned name = scalar_name(ctx, instr, i);
-
-                                       /* tex instructions actually have a wrmask, and
-                                        * don't touch masked out components.  We can't do
-                                        * anything useful about that in the first pass,
-                                        * but in the scalar pass we can realize these
-                                        * registers are available:
-                                        */
-                                       if (ctx->scalar_pass && is_tex_or_prefetch(instr) &&
-                                                       !(instr->regs[0]->wrmask & (1 << i)))
-                                               continue;
-
-                                       def(name, instr);
-
-                                       if ((instr->opc == OPC_META_INPUT) && first_non_input)
-                                               use(name, first_non_input);
-
-                                       if (is_high(instr)) {
-                                               ra_set_node_class(ctx->g, name,
-                                                               ctx->set->high_classes[id->cls - HIGH_OFFSET]);
-                                       } else if (is_half(instr)) {
-                                               ra_set_node_class(ctx->g, name,
-                                                               ctx->set->half_classes[id->cls - HALF_OFFSET]);
-                                       } else {
-                                               ra_set_node_class(ctx->g, name,
-                                                               ctx->set->classes[id->cls]);
-                                       }
+                                       ra_set_node_class(ctx->g, name,
+                                                       ctx->set->classes[id->cls]);
                                }
                        }
+
+                       def(name, instr);
+
+                       if ((instr->opc == OPC_META_INPUT) && first_non_input)
+                               use(name, first_non_input);
+
+                       /* Texture instructions with writemasks can be allocated as smaller
+                        * vectors (or just scalars!), since the masked-out regs won't be
+                        * written.  But we need to make sure that the start of the vector
+                        * doesn't fall before the first register, or we'll wrap.
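+                        * E.g. a tex with wrmask=.zw skips two components, so its name is
+                        * made to interfere with the nodes pre-colored to (h)r0.x and
+                        * (h)r0.y, keeping the unwritten start of the vector from falling
+                        * below (h)r0.x.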
+                        */
+                       if (is_tex_or_prefetch(instr)) {
+                               int writemask_skipped_regs = ffs(instr->regs[0]->wrmask) - 1;
+                               int r0_xyz = is_half(instr) ?
+                                       ctx->hr0_xyz_nodes : ctx->r0_xyz_nodes;
+                               for (int i = 0; i < writemask_skipped_regs; i++)
+                                       ra_add_node_interference(ctx->g, name, r0_xyz + i);
+                       }
+
+                       /* Tex-prefetch instructions have fewer bits to encode the dst
+                        * register, so add interference with the node pre-colored above
+                        * that limit.
+                        */
+                       if (instr->opc == OPC_META_TEX_PREFETCH) {
+                               ra_add_node_interference(ctx->g, name,
+                                               ctx->prefetch_exclude_node);
+                       }
                }
 
-               foreach_src(reg, instr) {
-                       if (reg->flags & IR3_REG_ARRAY) {
-                               struct ir3_array *arr =
-                                       ir3_lookup_array(ctx->ir, reg->array.id);
+               foreach_use (name, ctx, instr) {
+                       if (name_is_array(ctx, name)) {
+                               struct ir3_array *arr = name_to_array(ctx, name);
+
                                arr->start_ip = MIN2(arr->start_ip, instr->ip);
                                arr->end_ip = MAX2(arr->end_ip, instr->ip);
 
-                               /* indirect read is treated like a read from all array
-                                * elements, since we don't know which one is actually
-                                * read:
+                               /* NOTE: arrays are not SSA so unconditionally
+                                * set use bit:
                                 */
-                               if (reg->flags & IR3_REG_RELATIV) {
-                                       unsigned i;
-                                       for (i = 0; i < arr->length; i++) {
-                                               unsigned name = arr->base + i;
-                                               use(name, instr);
-                                               BITSET_SET(bd->use, name);
-                                       }
-                               } else {
-                                       unsigned name = arr->base + reg->array.offset;
-                                       use(name, instr);
-                                       /* NOTE: arrays are not SSA so unconditionally
-                                        * set use bit:
-                                        */
-                                       BITSET_SET(bd->use, name);
-                                       debug_assert(reg->array.offset < arr->length);
-                               }
-                       } else if (ctx->scalar_pass) {
-                               struct ir3_instruction *src = reg->instr;
-                               /* skip things that aren't SSA: */
-                               unsigned n = src ? dest_regs(src) : 0;
-
-                               /* in scalar pass, we aren't considering virtual register
-                                * classes, ie. if an instruction writes a vec2, then it
-                                * defines two different scalar register names.
-                                *
-                                * We need to traverse up thru collect/split to find the
-                                * actual non-meta instruction names for each of the
-                                * components:
+                               BITSET_SET(bd->use, name);
+                       }
+
+                       use(name, instr);
+               }
+
+               foreach_name (name, ctx, instr) {
+                       /* split/collect instructions share names with the real (non-meta)
+                        * instructions, so skip them when populating the hashtable:
+                        */
+                       if (ctx->name_to_instr && !((instr->opc == OPC_META_SPLIT) ||
+                                       (instr->opc == OPC_META_COLLECT))) {
+                               /* this is slightly annoying: the hashtable stores key
+                                * pointers, so we can't just use an integer on the stack
                                 */
-                               for (unsigned i = 0; i < n; i++) {
-                                       /* Need to filter out a couple special cases, ie.
-                                        * writes to a0.x or p0.x:
-                                        */
-                                       if (!writes_gpr(src))
-                                               continue;
-
-                                       /* split takes a src w/ wrmask potentially greater
-                                        * than 0x1, but it really only cares about a single
-                                        * component.  This shows up in splits coming out of
-                                        * a tex instruction w/ wrmask=.z, for example.
-                                        */
-                                       if (ctx->scalar_pass && (instr->opc == OPC_META_SPLIT) &&
-                                                       !(i == instr->split.off))
-                                               continue;
-
-                                       use(scalar_name(ctx, src, i), instr);
-                               }
-                       } else if ((src = ssa(reg)) && writes_gpr(src)) {
-                               unsigned name = ra_name(ctx, &ctx->instrd[src->ip]);
-                               use(name, instr);
+                               unsigned *key = ralloc(ctx->name_to_instr, unsigned);
+                               *key = name;
+                               ra_assert(ctx, !_mesa_hash_table_search(ctx->name_to_instr, key));
+                               _mesa_hash_table_insert(ctx->name_to_instr, key, instr);
                        }
                }
        }
@@ -931,6 +786,9 @@ ra_compute_livein_liveout(struct ir3_ra_ctx *ctx)
 
                /* update livein: */
                for (unsigned i = 0; i < bitset_words; i++) {
+                       /* anything used but not def'd within a block is
+                        * by definition a live value coming into the block:
+                        */
                        BITSET_WORD new_livein =
                                (bd->use[i] | (bd->liveout[i] & ~bd->def[i]));
 
@@ -951,6 +809,9 @@ ra_compute_livein_liveout(struct ir3_ra_ctx *ctx)
                        succ_bd = succ->data;
 
                        for (unsigned i = 0; i < bitset_words; i++) {
+                               /* add anything that is livein in a successor block
+                                * to our liveout:
+                                */
                                BITSET_WORD new_liveout =
                                        (succ_bd->livein[i] & ~bd->liveout[i]);
 
@@ -969,7 +830,7 @@ static void
 print_bitset(const char *name, BITSET_WORD *bs, unsigned cnt)
 {
        bool first = true;
-       debug_printf("  %s:", name);
+       debug_printf("RA:  %s:", name);
        for (unsigned i = 0; i < cnt; i++) {
                if (BITSET_TEST(bs, i)) {
                        if (!first)
@@ -981,6 +842,187 @@ print_bitset(const char *name, BITSET_WORD *bs, unsigned cnt)
        debug_printf("\n");
 }
 
+/* size of one component of instruction result, ie. half vs full: */
+static unsigned
+live_size(struct ir3_instruction *instr)
+{
+       if (is_half(instr)) {
+               return 1;
+       } else if (is_high(instr)) {
+               /* doesn't count towards footprint */
+               return 0;
+       } else {
+               return 2;
+       }
+}
+
+static unsigned
+name_size(struct ir3_ra_ctx *ctx, unsigned name)
+{
+       if (name_is_array(ctx, name)) {
+               struct ir3_array *arr = name_to_array(ctx, name);
+               return arr->half ? 1 : 2;
+       } else {
+               struct ir3_instruction *instr = name_to_instr(ctx, name);
+               /* in the scalar pass, each name represents one scalar
+                * value, half or full precision
+                */
+               return live_size(instr);
+       }
+}
+
+static unsigned
+ra_calc_block_live_values(struct ir3_ra_ctx *ctx, struct ir3_block *block)
+{
+       struct ir3_ra_block_data *bd = block->data;
+       unsigned name;
+
+       ra_assert(ctx, ctx->name_to_instr);
+
+       /* TODO this gets a bit more complicated in non-scalar pass.. but
+        * possibly a lowball estimate is fine to start with if we do
+        * round-robin in non-scalar pass?  Maybe we just want to handle
+        * that in a different fxn?
+        */
+       ra_assert(ctx, ctx->scalar_pass);
+
+       BITSET_WORD *live =
+               rzalloc_array(bd, BITSET_WORD, BITSET_WORDS(ctx->alloc_count));
+
+       /* Add the live input values: */
+       unsigned livein = 0;
+       BITSET_FOREACH_SET (name, bd->livein, ctx->alloc_count) {
+               livein += name_size(ctx, name);
+               BITSET_SET(live, name);
+       }
+
+       d("---------------------");
+       d("block%u: LIVEIN: %u", block_id(block), livein);
+
+       unsigned max = livein;
+       int cur_live = max;
+
+       /* Now that we know the live inputs to the block, iterate the
+        * instructions adjusting the current # of live values as we
+        * see their last use:
+        */
+       foreach_instr (instr, &block->instr_list) {
+               if (RA_DEBUG)
+                       print_bitset("LIVE", live, ctx->alloc_count);
+               di(instr, "CALC");
+
+               unsigned new_live = 0;    /* newly live values */
+               unsigned new_dead = 0;    /* newly no-longer live values */
+               unsigned next_dead = 0;   /* newly dead following this instr */
+
+               foreach_def (name, ctx, instr) {
+                       /* NOTE: checking ctx->def filters out things like split/
+                        * collect which are just redefining existing live names
+                        * or array writes to already live array elements:
+                        */
+                       if (ctx->def[name] != instr->ip)
+                               continue;
+                       new_live += live_size(instr);
+                       d("NEW_LIVE: %u (new_live=%u, use=%u)", name, new_live, ctx->use[name]);
+                       BITSET_SET(live, name);
+                       /* There can be cases where this is *also* the last use
+                        * of a value, for example instructions that write multiple
+                        * values, only some of which are used.  These values are
+                        * dead *after* (rather than during) this instruction.
+                        */
+                       if (ctx->use[name] != instr->ip)
+                               continue;
+                       next_dead += live_size(instr);
+                       d("NEXT_DEAD: %u (next_dead=%u)", name, next_dead);
+                       BITSET_CLEAR(live, name);
+               }
+
+               /* To be more resilient against special cases where liverange
+                * is extended (like first_non_input), rather than using the
+                * foreach_use() iterator, we iterate the current live values
+                * instead:
+                */
+               BITSET_FOREACH_SET (name, live, ctx->alloc_count) {
+                       /* Is this the last use? */
+                       if (ctx->use[name] != instr->ip)
+                               continue;
+                       new_dead += name_size(ctx, name);
+                       d("NEW_DEAD: %u (new_dead=%u)", name, new_dead);
+                       BITSET_CLEAR(live, name);
+               }
+
+               cur_live += new_live;
+               cur_live -= new_dead;
+
+               ra_assert(ctx, cur_live >= 0);
+               d("CUR_LIVE: %u", cur_live);
+
+               max = MAX2(max, cur_live);
+
+               /* account for written values which are not used later,
+                * but only after updating max (since they are live for
+                * one cycle)
+                */
+               cur_live -= next_dead;
+               ra_assert(ctx, cur_live >= 0);
+
+               if (RA_DEBUG) {
+                       unsigned cnt = 0;
+                       BITSET_FOREACH_SET (name, live, ctx->alloc_count) {
+                               cnt += name_size(ctx, name);
+                       }
+                       ra_assert(ctx, cur_live == cnt);
+               }
+       }
+
+       d("block%u max=%u", block_id(block), max);
+
+       /* the remaining live values should match liveout (extra sanity testing): */
+       if (RA_DEBUG) {
+               unsigned new_dead = 0;
+               BITSET_FOREACH_SET (name, live, ctx->alloc_count) {
+                       /* Is this the last use? */
+                       if (ctx->use[name] != block->end_ip)
+                               continue;
+                       new_dead += name_size(ctx, name);
+                       d("NEW_DEAD: %u (new_dead=%u)", name, new_dead);
+                       BITSET_CLEAR(live, name);
+               }
+               unsigned liveout = 0;
+               BITSET_FOREACH_SET (name, bd->liveout, ctx->alloc_count) {
+                       liveout += name_size(ctx, name);
+                       BITSET_CLEAR(live, name);
+               }
+
+               if (cur_live != liveout) {
+                       print_bitset("LEAKED", live, ctx->alloc_count);
+                       /* TODO there are a few edge cases where live-range extension
+                        * tells us a value is livein, but it is neither used by the block
+                        * nor liveout for the block.  Possibly a bug in the liverange
+                        * extension.  But for now leave the assert disabled:
+                       ra_assert(ctx, cur_live == liveout);
+                        */
+               }
+       }
+
+       ralloc_free(live);
+
+       return max;
+}
+
+static unsigned
+ra_calc_max_live_values(struct ir3_ra_ctx *ctx)
+{
+       unsigned max = 0;
+
+       foreach_block (block, &ctx->ir->block_list) {
+               unsigned block_live = ra_calc_block_live_values(ctx, block);
+               max = MAX2(max, block_live);
+       }
+
+       return max;
+}
+
 static void
 ra_add_interference(struct ir3_ra_ctx *ctx)
 {
@@ -992,6 +1034,19 @@ ra_add_interference(struct ir3_ra_ctx *ctx)
                arr->end_ip = 0;
        }
 
+       /* set up the r0.xyz/hr0.xyz precolor regs: */
+       for (int i = 0; i < 3; i++) {
+               ra_set_node_reg(ctx->g, ctx->r0_xyz_nodes + i, i);
+               ra_set_node_reg(ctx->g, ctx->hr0_xyz_nodes + i,
+                               ctx->set->first_half_reg + i);
+       }
+
+       /* pre-color the node that conflicts with half/full regs higher than
+        * what can be encoded for tex-prefetch:
+        */
+       ra_set_node_reg(ctx->g, ctx->prefetch_exclude_node,
+                       ctx->set->prefetch_exclude_reg);
+
        /* compute live ranges (use/def) on a block level, also updating
         * block's def/use bitmasks (used below to calculate per-block
         * livein/liveout):
@@ -1003,34 +1058,21 @@ ra_add_interference(struct ir3_ra_ctx *ctx)
        /* update per-block livein/liveout: */
        while (ra_compute_livein_liveout(ctx)) {}
 
-       if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
-               debug_printf("AFTER LIVEIN/OUT:\n");
+       if (RA_DEBUG) {
+               d("AFTER LIVEIN/OUT:");
                foreach_block (block, &ir->block_list) {
                        struct ir3_ra_block_data *bd = block->data;
-                       debug_printf("block%u:\n", block_id(block));
+                       d("block%u:", block_id(block));
                        print_bitset("  def", bd->def, ctx->alloc_count);
                        print_bitset("  use", bd->use, ctx->alloc_count);
                        print_bitset("  l/i", bd->livein, ctx->alloc_count);
                        print_bitset("  l/o", bd->liveout, ctx->alloc_count);
                }
                foreach_array (arr, &ir->array_list) {
-                       debug_printf("array%u:\n", arr->id);
-                       debug_printf("  length:   %u\n", arr->length);
-                       debug_printf("  start_ip: %u\n", arr->start_ip);
-                       debug_printf("  end_ip:   %u\n", arr->end_ip);
-               }
-               debug_printf("INSTRUCTION VREG NAMES:\n");
-               foreach_block (block, &ctx->ir->block_list) {
-                       foreach_instr (instr, &block->instr_list) {
-                               if (!ctx->instrd[instr->ip].defn)
-                                       continue;
-                               debug_printf("%04u: ", scalar_name(ctx, instr, 0));
-                               ir3_print_instr(instr);
-                       }
-               }
-               debug_printf("ARRAY VREG NAMES:\n");
-               foreach_array (arr, &ctx->ir->array_list) {
-                       debug_printf("%04u: arr%u\n", arr->base, arr->id);
+                       d("array%u:", arr->id);
+                       d("   length:   %u", arr->length);
+                       d("   start_ip: %u", arr->start_ip);
+                       d("   end_ip:   %u", arr->end_ip);
                }
        }
 
@@ -1055,18 +1097,16 @@ ra_add_interference(struct ir3_ra_ctx *ctx)
                                if (BITSET_TEST(bd->livein, i + arr->base)) {
                                        arr->start_ip = MIN2(arr->start_ip, block->start_ip);
                                }
-                               if (BITSET_TEST(bd->livein, i + arr->base)) {
+                               if (BITSET_TEST(bd->liveout, i + arr->base)) {
                                        arr->end_ip = MAX2(arr->end_ip, block->end_ip);
                                }
                        }
                }
        }
 
-       /* need to fix things up to keep outputs live: */
-       struct ir3_instruction *out;
-       foreach_output(out, ir) {
-               unsigned name = ra_name(ctx, &ctx->instrd[out->ip]);
-               ctx->use[name] = ctx->instr_cnt;
+       if (ctx->name_to_instr) {
+               unsigned max = ra_calc_max_live_values(ctx);
+               ra_set_register_target(ctx, max);
        }
 
        for (unsigned i = 0; i < ctx->alloc_count; i++) {
@@ -1079,78 +1119,6 @@ ra_add_interference(struct ir3_ra_ctx *ctx)
        }
 }
 
-/* some instructions need fix-up if dst register is half precision: */
-static void fixup_half_instr_dst(struct ir3_instruction *instr)
-{
-       switch (opc_cat(instr->opc)) {
-       case 1: /* move instructions */
-               instr->cat1.dst_type = half_type(instr->cat1.dst_type);
-               break;
-       case 3:
-               switch (instr->opc) {
-               case OPC_MAD_F32:
-                       /* Available for that dest is half and srcs are full.
-                        * eg. mad.f32 hr0, r0.x, r0.y, r0.z
-                        */
-                       if (instr->regs[1]->flags & IR3_REG_HALF)
-                               instr->opc = OPC_MAD_F16;
-                       break;
-               case OPC_SEL_B32:
-                       instr->opc = OPC_SEL_B16;
-                       break;
-               case OPC_SEL_S32:
-                       instr->opc = OPC_SEL_S16;
-                       break;
-               case OPC_SEL_F32:
-                       instr->opc = OPC_SEL_F16;
-                       break;
-               case OPC_SAD_S32:
-                       instr->opc = OPC_SAD_S16;
-                       break;
-               /* instructions may already be fixed up: */
-               case OPC_MAD_F16:
-               case OPC_SEL_B16:
-               case OPC_SEL_S16:
-               case OPC_SEL_F16:
-               case OPC_SAD_S16:
-                       break;
-               default:
-                       assert(0);
-                       break;
-               }
-               break;
-       case 4:
-               switch (instr->opc) {
-               case OPC_RSQ:
-                       instr->opc = OPC_HRSQ;
-                       break;
-               case OPC_LOG2:
-                       instr->opc = OPC_HLOG2;
-                       break;
-               case OPC_EXP2:
-                       instr->opc = OPC_HEXP2;
-                       break;
-               default:
-                       break;
-               }
-               break;
-       case 5:
-               instr->cat5.type = half_type(instr->cat5.type);
-               break;
-       }
-}
-/* some instructions need fix-up if src register is half precision: */
-static void fixup_half_instr_src(struct ir3_instruction *instr)
-{
-       switch (instr->opc) {
-       case OPC_MOV:
-               instr->cat1.src_type = half_type(instr->cat1.src_type);
-               break;
-       default:
-               break;
-       }
-}
-
 /* NOTE: instr could be NULL for IR3_REG_ARRAY case, for the first
  * array access(es) which do not have any previous access to depend
  * on from scheduling point of view
@@ -1188,7 +1156,7 @@ reg_assign(struct ir3_ra_ctx *ctx, struct ir3_register *reg,
                 */
                if (ctx->scalar_pass && is_tex_or_prefetch(id->defn)) {
                        unsigned n = ffs(id->defn->regs[0]->wrmask);
-                       debug_assert(n > 0);
+                       ra_assert(ctx, n > 0);
                        first_component = n - 1;
                }
 
@@ -1196,9 +1164,9 @@ reg_assign(struct ir3_ra_ctx *ctx, struct ir3_register *reg,
                unsigned r = ra_get_node_reg(ctx->g, name);
                unsigned num = ctx->set->ra_reg_to_gpr[r] + id->off;
 
-               debug_assert(!(reg->flags & IR3_REG_RELATIV));
+               ra_assert(ctx, !(reg->flags & IR3_REG_RELATIV));
 
-               debug_assert(num >= first_component);
+               ra_assert(ctx, num >= first_component);
 
                if (is_high(id->defn))
                        num += FIRST_HIGH_REG;
@@ -1212,41 +1180,15 @@ reg_assign(struct ir3_ra_ctx *ctx, struct ir3_register *reg,
        }
 }
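
In the scalar pass each live component of a tex/prefetch result gets its own name, so the assigned base is offset by the first written component, which is recovered from the wrmask with ffs(). A small standalone illustration of that bit trick (not ir3 code):

#include <assert.h>
#include <strings.h>    /* ffs() */

int
main(void)
{
        /* wrmask 0b0110: only .y and .z of the result are written.
         * ffs() is 1-based (and returns 0 for an empty mask), hence
         * the assert and the n - 1: */
        unsigned wrmask = 0x6;
        unsigned n = ffs(wrmask);       /* == 2 */
        assert(n > 0);
        unsigned first_component = n - 1;
        assert(first_component == 1);   /* first live component is .y */
        return 0;
}
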
 
-static void
-account_assignment(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
-{
-       struct ir3_ra_instr_data *id;
-       struct ir3_register *dst = instr->regs[0];
-       unsigned max;
-
-       if (is_high(instr))
-               return;
-
-       if (dst->flags & IR3_REG_ARRAY) {
-               struct ir3_array *arr =
-                       ir3_lookup_array(ctx->ir, dst->array.id);
-               max = arr->reg + arr->length;
-       } else if ((id = &ctx->instrd[instr->ip]) && id->defn) {
-               unsigned name = scalar_name(ctx, id->defn, 0);
-               unsigned r = ra_get_node_reg(ctx->g, name);
-               max = ctx->set->ra_reg_to_gpr[r] + id->off;
-       } else {
-               return;
-       }
-
-       if (is_half(instr)) {
-               ctx->max_half_assigned = MAX2(ctx->max_half_assigned, max);
-       } else {
-               ctx->max_assigned = MAX2(ctx->max_assigned, max);
-       }
-}
-
 /* helper to determine which regs to assign in which pass: */
 static bool
 should_assign(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
 {
-       if ((instr->opc == OPC_META_SPLIT) ||
-                       (instr->opc == OPC_META_COLLECT))
+       if ((instr->opc == OPC_META_SPLIT) &&
+                       (util_bitcount(instr->regs[1]->wrmask) > 1))
+               return !ctx->scalar_pass;
+       if ((instr->opc == OPC_META_COLLECT) &&
+                       (util_bitcount(instr->regs[0]->wrmask) > 1))
                return !ctx->scalar_pass;
        return ctx->scalar_pass;
 }
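
With this change the vector pass (scalar_pass == false) only claims split/collect instructions whose vector side is genuinely wider than one component; a degenerate single-component split or collect is colored in the scalar pass like any other scalar value. A toy model of the dispatch, with a local popcount standing in for util_bitcount():

#include <stdbool.h>
#include <stdint.h>

enum opc { OPC_META_SPLIT, OPC_META_COLLECT, OPC_OTHER };

static unsigned
popcount(uint32_t m)            /* what util_bitcount() computes */
{
        unsigned n = 0;
        for (; m; m &= m - 1)   /* clear lowest set bit */
                n++;
        return n;
}

static bool
model_should_assign(bool scalar_pass, enum opc opc, uint32_t vec_wrmask)
{
        /* vec_wrmask is the wrmask of the vector side: regs[1] for a
         * split, regs[0] for a collect */
        if ((opc == OPC_META_SPLIT || opc == OPC_META_COLLECT) &&
                        popcount(vec_wrmask) > 1)
                return !scalar_pass;    /* wide value: vector pass */
        return scalar_pass;             /* everything else: scalar pass */
}
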
@@ -1255,18 +1197,14 @@ static void
 ra_block_alloc(struct ir3_ra_ctx *ctx, struct ir3_block *block)
 {
        foreach_instr (instr, &block->instr_list) {
-               struct ir3_register *reg;
 
                if (writes_gpr(instr)) {
-                       account_assignment(ctx, instr);
                        if (should_assign(ctx, instr)) {
                                reg_assign(ctx, instr->regs[0], instr);
-                               if (instr->regs[0]->flags & IR3_REG_HALF)
-                                       fixup_half_instr_dst(instr);
                        }
                }
 
-               foreach_src_n(reg, n, instr) {
+               foreach_src_n (reg, n, instr) {
                        struct ir3_instruction *src = reg->instr;
 
                        if (src && !should_assign(ctx, src) && !should_assign(ctx, instr))
@@ -1278,9 +1216,6 @@ ra_block_alloc(struct ir3_ra_ctx *ctx, struct ir3_block *block)
                        /* Note: reg->instr could be null for IR3_REG_ARRAY */
                        if (src || (reg->flags & IR3_REG_ARRAY))
                                reg_assign(ctx, instr->regs[n+1], src);
-
-                       if (instr->regs[n+1]->flags & IR3_REG_HALF)
-                               fixup_half_instr_src(instr);
                }
        }
 
@@ -1289,8 +1224,6 @@ ra_block_alloc(struct ir3_ra_ctx *ctx, struct ir3_block *block)
         * them in the first pass:
         */
        if (!ctx->scalar_pass) {
-               struct ir3_instruction *in, *out;
-
                foreach_input (in, ctx->ir) {
                        reg_assign(ctx, in->regs[0], in);
                }
@@ -1300,6 +1233,114 @@ ra_block_alloc(struct ir3_ra_ctx *ctx, struct ir3_block *block)
        }
 }
 
+static void
+assign_arr_base(struct ir3_ra_ctx *ctx, struct ir3_array *arr,
+               struct ir3_instruction **precolor, unsigned nprecolor)
+{
+       /* In the mergedregs case, we convert full precision arrays
+        * to their effective half-precision base, and find conflicts
+        * amongst all other arrays/inputs.
+        *
+        * In the splitregs case (halfreg file and fullreg file do
+        * not conflict), we ignore arrays and other pre-colors that
+        * are not the same precision.
+        */
+       bool mergedregs = ctx->v->mergedregs;
+       unsigned base = 0;
+
+       /* figure out what else we conflict with which has already
+        * been assigned:
+        */
+retry:
+       foreach_array (arr2, &ctx->ir->array_list) {
+               if (arr2 == arr)
+                       break;
+               ra_assert(ctx, arr2->start_ip <= arr2->end_ip);
+
+               unsigned base2 = arr2->reg;
+               unsigned len2  = arr2->length;
+               unsigned len   = arr->length;
+
+               if (mergedregs) {
+                       /* convert into half-reg space: */
+                       if (!arr2->half) {
+                               base2 *= 2;
+                               len2  *= 2;
+                       }
+                       if (!arr->half) {
+                               len   *= 2;
+                       }
+               } else if (arr2->half != arr->half) {
+                       /* for split-register-file mode, we only conflict with
+                        * other arrays of the same precision:
+                        */
+                       continue;
+               }
+
+               /* if it intersects with liverange AND register range.. */
+               if (intersects(arr->start_ip, arr->end_ip,
+                               arr2->start_ip, arr2->end_ip) &&
+                       intersects(base, base + len,
+                               base2, base2 + len2)) {
+                       base = MAX2(base, base2 + len2);
+                       goto retry;
+               }
+       }
+
+       /* also need to not conflict with any pre-assigned inputs: */
+       for (unsigned i = 0; i < nprecolor; i++) {
+               struct ir3_instruction *instr = precolor[i];
+
+               if (!instr || (instr->flags & IR3_INSTR_UNUSED))
+                       continue;
+
+               struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
+
+               /* only consider the first component: */
+               if (id->off > 0)
+                       continue;
+
+               unsigned name   = ra_name(ctx, id);
+               unsigned regid  = instr->regs[0]->num;
+               unsigned reglen = class_sizes[id->cls];
+               unsigned len    = arr->length;
+
+               if (mergedregs) {
+                       /* convert into half-reg space: */
+                       if (!is_half(instr)) {
+                               regid  *= 2;
+                               reglen *= 2;
+                       }
+                       if (!arr->half) {
+                               len   *= 2;
+                       }
+               } else if (is_half(instr) != arr->half) {
+                       /* for split-register-file mode, we only conflict with
+                       /* for split-register-file mode, we only conflict with
+                        * pre-colored inputs of the same precision:
+                       continue;
+               }
+
+               /* Check if array intersects with liverange AND register
+                * range of the input:
+                */
+               if (intersects(arr->start_ip, arr->end_ip,
+                                               ctx->def[name], ctx->use[name]) &&
+                               intersects(base, base + len,
+                                               regid, regid + reglen)) {
+                       base = MAX2(base, regid + reglen);
+                       goto retry;
+               }
+       }
+
+       /* convert back from half-reg space to fullreg space: */
+       if (mergedregs && !arr->half) {
+               base = DIV_ROUND_UP(base, 2);
+       }
+
+       arr->reg = base;
+}
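
On a6xx-style merged register files one full register overlays two half registers, so assign_arr_base() does its conflict arithmetic in half-reg units and only converts back at the end. A worked example with made-up sizes, using the same DIV_ROUND_UP rounding:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int
main(void)
{
        /* suppose a half array already occupies half-regs [0, 5), so the
         * first free half-reg is 5, and we are placing a full-precision
         * array of length 3: */
        unsigned base_half = 5;
        unsigned len_full  = 3;
        unsigned len_half  = len_full * 2;      /* 6 half-reg units */

        /* a full reg starts on an even half-reg boundary, so round up: */
        unsigned reg = DIV_ROUND_UP(base_half, 2);      /* -> r3 */
        printf("full array at r%u, covering half-regs %u..%u\n",
                        reg, reg * 2, reg * 2 + len_half - 1);  /* 6..11 */
        return 0;
}
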
+
 /* handle pre-colored registers.  This includes "arrays" (which could be of
  * length 1, used for phi webs lowered to registers in nir), as well as
  * special shader input values that need to be pinned to certain registers.
@@ -1307,7 +1348,6 @@ ra_block_alloc(struct ir3_ra_ctx *ctx, struct ir3_block *block)
 static void
 ra_precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction **precolor, unsigned nprecolor)
 {
-       unsigned num_precolor = 0;
        for (unsigned i = 0; i < nprecolor; i++) {
                if (precolor[i] && !(precolor[i]->flags & IR3_INSTR_UNUSED)) {
                        struct ir3_instruction *instr = precolor[i];
@@ -1317,14 +1357,7 @@ ra_precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction **precolor, unsigned
 
                        struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
 
-                       debug_assert(!(instr->regs[0]->flags & (IR3_REG_HALF | IR3_REG_HIGH)));
-
-                       /* only consider the first component: */
-                       if (id->off > 0)
-                               continue;
-
-                       if (ctx->scalar_pass && !should_assign(ctx, instr))
-                               continue;
+                       ra_assert(ctx, !(instr->regs[0]->flags & (IR3_REG_HALF | IR3_REG_HIGH)));
 
                        /* 'base' is in scalar (class 0) but we need to map that
                         * to the conflicting register of the appropriate class (ie.
@@ -1344,94 +1377,32 @@ ra_precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction **precolor, unsigned
                         *           .. and so on..
                         */
                        unsigned regid = instr->regs[0]->num;
+                       ra_assert(ctx, regid >= id->off);
+                       regid -= id->off;
+
                        unsigned reg = ctx->set->gpr_to_ra_reg[id->cls][regid];
                        unsigned name = ra_name(ctx, id);
                        ra_set_node_reg(ctx->g, name, reg);
-                       num_precolor = MAX2(regid, num_precolor);
                }
        }
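
The new regid adjustment pins a vector node by its first component: if the precolored instruction is component id->off of a vector that the hardware places at regid, the node (which always starts at component 0) must be pinned id->off registers lower. A tiny arithmetic check with hypothetical numbers:

#include <assert.h>

int
main(void)
{
        /* precolored instr is component 1 (id->off == 1) of a vec2 input
         * that the hardware expects at r0.y (regid == 1 in scalar units): */
        unsigned regid = 1, off = 1;
        assert(regid >= off);
        regid -= off;           /* the vec2 node is pinned at r0.x */
        assert(regid == 0);
        return 0;
}
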
 
-       /* pre-assign array elements:
-        *
-        * TODO this is going to need some work for half-precision.. possibly
-        * this is easier on a6xx, where we can just divide array size by two?
-        * But on a5xx and earlier it will need to track two bases.
+       /*
+        * Pre-assign array elements:
         */
        foreach_array (arr, &ctx->ir->array_list) {
-               unsigned base = 0;
 
                if (arr->end_ip == 0)
                        continue;
 
-               /* figure out what else we conflict with which has already
-                * been assigned:
-                */
-retry:
-               foreach_array (arr2, &ctx->ir->array_list) {
-                       if (arr2 == arr)
-                               break;
-                       if (arr2->end_ip == 0)
-                               continue;
-                       /* if it intersects with liverange AND register range.. */
-                       if (intersects(arr->start_ip, arr->end_ip,
-                                       arr2->start_ip, arr2->end_ip) &&
-                               intersects(base, base + reg_size_for_array(arr),
-                                       arr2->reg, arr2->reg + reg_size_for_array(arr2))) {
-                               base = MAX2(base, arr2->reg + reg_size_for_array(arr2));
-                               goto retry;
-                       }
-               }
-
-               /* also need to not conflict with any pre-assigned inputs: */
-               for (unsigned i = 0; i < nprecolor; i++) {
-                       struct ir3_instruction *instr = precolor[i];
-
-                       if (!instr || (instr->flags & IR3_INSTR_UNUSED))
-                               continue;
-
-                       struct ir3_ra_instr_data *id = &ctx->instrd[instr->ip];
-
-                       /* only consider the first component: */
-                       if (id->off > 0)
-                               continue;
-
-                       unsigned name = ra_name(ctx, id);
-                       unsigned regid = instr->regs[0]->num;
-
-                       /* Check if array intersects with liverange AND register
-                        * range of the input:
-                        */
-                       if (intersects(arr->start_ip, arr->end_ip,
-                                                       ctx->def[name], ctx->use[name]) &&
-                                       intersects(base, base + reg_size_for_array(arr),
-                                                       regid, regid + class_sizes[id->cls])) {
-                               base = MAX2(base, regid + class_sizes[id->cls]);
-                               goto retry;
-                       }
-               }
-
-               arr->reg = base;
+               if (!ctx->scalar_pass)
+                       assign_arr_base(ctx, arr, precolor, nprecolor);
 
                for (unsigned i = 0; i < arr->length; i++) {
-                       unsigned name, reg;
-
-                       if (arr->half) {
-                               /* Doesn't need to do this on older generations than a6xx,
-                                * since there's no conflict between full regs and half regs
-                                * on them.
-                                *
-                                * TODO Presumably "base" could start from 0 respectively
-                                * for half regs of arrays on older generations.
-                                */
-                               unsigned base_half = base * 2 + i;
-                               reg = ctx->set->gpr_to_ra_reg[0+HALF_OFFSET][base_half];
-                               base = base_half / 2 + 1;
-                       } else {
-                               reg = ctx->set->gpr_to_ra_reg[0][base++];
-                       }
+                       unsigned cls = arr->half ? HALF_OFFSET : 0;
 
-                       name = arr->base + i;
-                       ra_set_node_reg(ctx->g, name, reg);
+                       ra_set_node_reg(ctx->g,
+                                       arr->base + i,   /* vreg name */
+                                       ctx->set->gpr_to_ra_reg[cls][arr->reg + i]);
                }
        }
 
@@ -1477,18 +1448,19 @@ precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
 static void
 ra_precolor_assigned(struct ir3_ra_ctx *ctx)
 {
-       debug_assert(ctx->scalar_pass);
+       ra_assert(ctx, ctx->scalar_pass);
 
        foreach_block (block, &ctx->ir->block_list) {
                foreach_instr (instr, &block->instr_list) {
 
-                       if ((instr->opc != OPC_META_SPLIT) &&
-                                       (instr->opc != OPC_META_COLLECT))
+                       if (!writes_gpr(instr))
+                               continue;
+
+                       if (should_assign(ctx, instr))
                                continue;
 
                        precolor(ctx, instr);
 
-                       struct ir3_register *src;
                        foreach_src (src, instr) {
                                if (!src->instr)
                                        continue;
@@ -1526,7 +1498,6 @@ ra_sanity_check(struct ir3 *ir)
                                debug_assert(dst->num == (src->num + instr->split.off));
                        } else if (instr->opc == OPC_META_COLLECT) {
                                struct ir3_register *dst = instr->regs[0];
-                               struct ir3_register *src;
 
                                foreach_src_n (src, n, instr) {
                                        debug_assert(dst->num == (src->num - n));
@@ -1543,20 +1514,25 @@ ir3_ra_pass(struct ir3_shader_variant *v, struct ir3_instruction **precolor,
        struct ir3_ra_ctx ctx = {
                        .v = v,
                        .ir = v->ir,
-                       .set = v->ir->compiler->set,
+                       .set = v->mergedregs ?
+                               v->ir->compiler->mergedregs_set : v->ir->compiler->set,
                        .scalar_pass = scalar_pass,
        };
        int ret;
 
+       ret = setjmp(ctx.jmp_env);
+       if (ret)
+               goto fail;
+
        ra_init(&ctx);
        ra_add_interference(&ctx);
        ra_precolor(&ctx, precolor, nprecolor);
        if (scalar_pass)
                ra_precolor_assigned(&ctx);
        ret = ra_alloc(&ctx);
-       ra_destroy(&ctx);
 
-       printf("#### max_assigned=%u, max_half_assigned=%u\n", ctx.max_assigned, ctx.max_half_assigned);
+fail:
+       ra_destroy(&ctx);
 
        return ret;
 }
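
RA failures now unwind through setjmp()/longjmp() instead of aborting: ra_assert() (presumably defined next to jmp_env in ir3_ra.h) jumps back to the setjmp() above, so ra_destroy() still runs and the caller sees an error return rather than a dead process. A minimal sketch of the pattern, with an assumed ra_assert() definition:

#include <setjmp.h>
#include <stdio.h>

struct ctx { jmp_buf jmp_env; };

/* assumed shape of ra_assert(); the real macro lives in ir3_ra.h: */
#define ra_assert(ctx, cond) do {                                       \
                if (!(cond)) {                                          \
                        fprintf(stderr, "RA: assert failed: %s\n", #cond); \
                        longjmp((ctx)->jmp_env, 1);                     \
                }                                                       \
        } while (0)

static int
run_pass(struct ctx *ctx, unsigned nregs)
{
        int ret = setjmp(ctx->jmp_env);
        if (ret)
                goto fail;              /* some ra_assert() fired below */

        ra_assert(ctx, nregs > 0);      /* ok for nregs == 4, fails for 0 */
        ret = 0;                        /* ...the actual allocation work... */
fail:
        /* cleanup runs on both the success and the failure path */
        return ret;
}
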
@@ -1572,20 +1548,14 @@ ir3_ra(struct ir3_shader_variant *v, struct ir3_instruction **precolor,
        if (ret)
                return ret;
 
-       if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
-               printf("AFTER RA (1st pass):\n");
-               ir3_print(v->ir);
-       }
+       ir3_debug_print(v->ir, "AFTER: ir3_ra (1st pass)");
 
        /* Second pass, assign the scalar registers: */
        ret = ir3_ra_pass(v, precolor, nprecolor, true);
        if (ret)
                return ret;
 
-       if (ir3_shader_debug & IR3_DBG_OPTMSGS) {
-               printf("AFTER RA (2nd pass):\n");
-               ir3_print(v->ir);
-       }
+       ir3_debug_print(v->ir, "AFTER: ir3_ra (2nd pass)");
 
 #ifdef DEBUG
 #  define SANITY_CHECK DEBUG