freedreno/ir3: add 'high' register class
author    Rob Clark <robdclark@gmail.com>
Wed, 12 Apr 2017 13:45:16 +0000 (09:45 -0400)
committer Rob Clark <robdclark@gmail.com>
Mon, 17 Apr 2017 18:00:05 +0000 (14:00 -0400)
For compute shaders, we need to be able to allocate some "high"
registers (r48.x to r55.w).  (Possibly these are global to all threads
in a warp?)  Add a new register class to handle this.

Signed-off-by: Rob Clark <robdclark@gmail.com>
src/gallium/drivers/freedreno/ir3/ir3.c
src/gallium/drivers/freedreno/ir3/ir3.h
src/gallium/drivers/freedreno/ir3/ir3_ra.c

diff --git a/src/gallium/drivers/freedreno/ir3/ir3.c b/src/gallium/drivers/freedreno/ir3/ir3.c
index c5a030282d8f832ef172c7ede24eb22b358cfdfd..ff2c342c357459de440d141e248f6f3c28d61f61 100644
@@ -106,7 +106,7 @@ static uint32_t reg(struct ir3_register *reg, struct ir3_info *info,
                        info->max_const = MAX2(info->max_const, max);
                } else if (val.num == 63) {
                        /* ignore writes to dummy register r63.x */
-               } else if ((max != REG_A0) && (max != REG_P0)) {
+               } else if (max < 48) {
                        if (reg->flags & IR3_REG_HALF) {
                                info->max_half_reg = MAX2(info->max_half_reg, max);
                        } else {
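
The new check relies on how the registers are numbered: indices 0..47 are the ordinary GPR file, the high registers added by this patch are r48..r55, and a0/p0 plus the dummy r63 also encode above r47, which is why the explicit REG_A0/REG_P0 checks can be folded into a single "max < 48".  A minimal standalone sketch of that assumption (illustrative names, not ir3 API):

    #include <stdbool.h>

    /* true if register index 'num' (ie. the register number without the
     * component, as 'max' above) should count toward the shader's normal
     * full-register footprint (info->max_reg):
     */
    static bool counts_toward_max_reg(unsigned num)
    {
            /* r48..r55 are high regs; a0/p0 and the dummy r63 also sit
             * above r47, so everything >= 48 is excluded:
             */
            return num < 48;
    }
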
diff --git a/src/gallium/drivers/freedreno/ir3/ir3.h b/src/gallium/drivers/freedreno/ir3/ir3.h
index 480b27ce5dae37183fafaecb532a2923a4187f46..bbe903d9d15e2be29c173f9d3c1f01e449c29e0e 100644
@@ -58,8 +58,14 @@ struct ir3_register {
                IR3_REG_CONST  = 0x001,
                IR3_REG_IMMED  = 0x002,
                IR3_REG_HALF   = 0x004,
-               IR3_REG_RELATIV= 0x008,
-               IR3_REG_R      = 0x010,
+               /* high registers are used for some things in compute shaders,
+                * for example.  They seem to hold values that are global to
+                * all threads in a wave, ie. possibly shared/broadcast across
+                * the whole wave?
+                */
+               IR3_REG_HIGH   = 0x008,
+               IR3_REG_RELATIV= 0x010,
+               IR3_REG_R      = 0x020,
                /* Most instructions, it seems, can do float abs/neg but not
                 * integer.  The CP pass needs to know what is intended (int or
                 * float) in order to do the right thing.  For this reason the
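
Since IR3_REG_HIGH takes the 0x008 slot, every flag after it shifts up by one bit (the renumbering continues in the next hunk).  A standalone sketch (not ir3 code) of how the new bit is meant to be consumed, mirroring the high-before-half dispatch the RA code uses further down:

    #include <stdint.h>

    enum reg_file { FILE_FULL, FILE_HALF, FILE_HIGH };

    /* pick a register file from the flag bits above: */
    static enum reg_file reg_file(uint32_t flags)
    {
            if (flags & 0x008)      /* IR3_REG_HIGH */
                    return FILE_HIGH;
            if (flags & 0x004)      /* IR3_REG_HALF */
                    return FILE_HALF;
            return FILE_FULL;
    }
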
@@ -68,23 +74,23 @@ struct ir3_register {
                 * bitwise not, so split that out into a new flag to make it
                 * more clear.
                 */
-               IR3_REG_FNEG   = 0x020,
-               IR3_REG_FABS   = 0x040,
-               IR3_REG_SNEG   = 0x080,
-               IR3_REG_SABS   = 0x100,
-               IR3_REG_BNOT   = 0x200,
-               IR3_REG_EVEN   = 0x400,
-               IR3_REG_POS_INF= 0x800,
+               IR3_REG_FNEG   = 0x040,
+               IR3_REG_FABS   = 0x080,
+               IR3_REG_SNEG   = 0x100,
+               IR3_REG_SABS   = 0x200,
+               IR3_REG_BNOT   = 0x400,
+               IR3_REG_EVEN   = 0x800,
+               IR3_REG_POS_INF= 0x1000,
                /* (ei) flag, end-input?  Set on last bary, presumably to signal
                 * that the shader needs no more input:
                 */
-               IR3_REG_EI     = 0x1000,
+               IR3_REG_EI     = 0x2000,
                /* meta-flags, for intermediate stages of IR, ie.
                 * before register assignment is done:
                 */
-               IR3_REG_SSA    = 0x2000,   /* 'instr' is ptr to assigning instr */
-               IR3_REG_ARRAY  = 0x4000,
-               IR3_REG_PHI_SRC= 0x8000,   /* phi src, regs[0]->instr points to phi */
+               IR3_REG_SSA    = 0x4000,   /* 'instr' is ptr to assigning instr */
+               IR3_REG_ARRAY  = 0x8000,
+               IR3_REG_PHI_SRC= 0x10000,  /* phi src, regs[0]->instr points to phi */
 
        } flags;
        union {
diff --git a/src/gallium/drivers/freedreno/ir3/ir3_ra.c b/src/gallium/drivers/freedreno/ir3/ir3_ra.c
index f70c779525b17a54c8d3f4795e8858e016166025..26c1508fbd2d177349ec8b86d9f9d8fb9fc6d8eb 100644
@@ -95,25 +95,78 @@ static const unsigned half_class_sizes[] = {
        1, 2, 3, 4,
 };
 #define half_class_count  ARRAY_SIZE(half_class_sizes)
-#define total_class_count (class_count + half_class_count)
+
+/* high regs seem to just be used for compute shaders?  vec1 and vec3
+ * look like they are sufficient (for now?)
+ */
+static const unsigned high_class_sizes[] = {
+       1, 3,
+};
+#define high_class_count ARRAY_SIZE(high_class_sizes)
+
+#define total_class_count (class_count + half_class_count + high_class_count)
 
 /* Below a0.x are normal regs.  RA doesn't need to assign a0.x/p0.x. */
-#define NUM_REGS             (4 * 48)
+#define NUM_REGS             (4 * 48)  /* r0 to r47 */
+#define NUM_HIGH_REGS        (4 * 8)   /* r48 to r55 */
+#define FIRST_HIGH_REG       (4 * 48)
 /* Number of virtual regs in a given class: */
 #define CLASS_REGS(i)        (NUM_REGS - (class_sizes[i] - 1))
 #define HALF_CLASS_REGS(i)   (NUM_REGS - (half_class_sizes[i] - 1))
+#define HIGH_CLASS_REGS(i)   (NUM_HIGH_REGS - (high_class_sizes[i] - 1))
+
+#define HALF_OFFSET          (class_count)
+#define HIGH_OFFSET          (class_count + half_class_count)
 
 /* register-set, created one time, used for all shaders: */
 struct ir3_ra_reg_set {
        struct ra_regs *regs;
        unsigned int classes[class_count];
        unsigned int half_classes[half_class_count];
+       unsigned int high_classes[high_class_count];
        /* maps flat virtual register space to base gpr: */
        uint16_t *ra_reg_to_gpr;
        /* maps cls,gpr to flat virtual register space: */
        uint16_t **gpr_to_ra_reg;
 };
 
+static void
+build_q_values(unsigned int **q_values, unsigned off,
+               const unsigned *sizes, unsigned count)
+{
+       for (unsigned i = 0; i < count; i++) {
+               q_values[i + off] = rzalloc_array(q_values, unsigned, total_class_count);
+
+               /* From register_allocate.c:
+                *
+                * q(B,C) (indexed by C, B is this register class) in
+                * Runeson/Nyström paper.  This is "how many registers of B could
+                * the worst choice register from C conflict with".
+                *
+                * If we just let the register allocation algorithm compute these
+                * values, it is extremely expensive.  However, since all of our
+                * registers are laid out, we can very easily compute them
+                * ourselves.  View the register from C as fixed starting at GRF n
+                * somewhere in the middle, and the register from B as sliding back
+                * and forth.  Then the first register to conflict from B is the
+                * one starting at n - class_size[B] + 1 and the last register to
+                * conflict will start at n + class_size[B] - 1.  Therefore, the
+                * number of conflicts from B is class_size[B] + class_size[C] - 1.
+                *
+                *   +-+-+-+-+-+-+     +-+-+-+-+-+-+
+                * B | | | | | |n| --> | | | | | | |
+                *   +-+-+-+-+-+-+     +-+-+-+-+-+-+
+                *             +-+-+-+-+-+
+                * C           |n| | | | |
+                *             +-+-+-+-+-+
+                *
+                * (Idea copied from brw_fs_reg_allocate.cpp)
+                */
+               for (unsigned j = 0; j < count; j++)
+                       q_values[i + off][j + off] = sizes[i] + sizes[j] - 1;
+       }
+}
+
 /* One-time setup of RA register-set, which describes all the possible
  * "virtual" registers and their interferences.  Ie. double register
  * occupies (and conflicts with) two single registers, and so forth.
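
build_q_values() only fills the same-group entries of the q(B,C) table; the cross-group entries are left at zero by rzalloc_array(), presumably because full, half, and high registers are disjoint files that never conflict with each other.  A standalone sketch (not ir3 code) that prints the entries for the half and high groups, whose sizes appear above (the full-register group is filled by the same rule):

    #include <stdio.h>

    int main(void)
    {
            static const unsigned sizes[] = { 1, 2, 3, 4,   /* half classes */
                                              1, 3 };       /* high classes */
            static const unsigned group[] = { 0, 0, 0, 0, 1, 1 };

            for (unsigned b = 0; b < 6; b++) {
                    for (unsigned c = 0; c < 6; c++) {
                            /* same group: sizes[b] + sizes[c] - 1, else 0 */
                            unsigned q = (group[b] == group[c]) ?
                                            sizes[b] + sizes[c] - 1 : 0;
                            printf("%2u ", q);
                    }
                    printf("\n");
            }
            return 0;
    }

For example, the half vec4 class against the half vec3 class gives 4 + 3 - 1 = 6 conflicts, while every half-vs-high entry is 0.
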
@@ -135,7 +188,7 @@ struct ir3_ra_reg_set *
 ir3_ra_alloc_reg_set(void *memctx)
 {
        struct ir3_ra_reg_set *set = rzalloc(memctx, struct ir3_ra_reg_set);
-       unsigned ra_reg_count, reg, first_half_reg;
+       unsigned ra_reg_count, reg, first_half_reg, first_high_reg, base;
        unsigned int **q_values;
 
        /* calculate # of regs across all classes: */
@@ -144,50 +197,15 @@ ir3_ra_alloc_reg_set(void *memctx)
                ra_reg_count += CLASS_REGS(i);
        for (unsigned i = 0; i < half_class_count; i++)
                ra_reg_count += HALF_CLASS_REGS(i);
+       for (unsigned i = 0; i < high_class_count; i++)
+               ra_reg_count += HIGH_CLASS_REGS(i);
 
        /* allocate and populate q_values: */
        q_values = ralloc_array(set, unsigned *, total_class_count);
-       for (unsigned i = 0; i < class_count; i++) {
-               q_values[i] = rzalloc_array(q_values, unsigned, total_class_count);
 
-               /* From register_allocate.c:
-                *
-                * q(B,C) (indexed by C, B is this register class) in
-                * Runeson/Nyström paper.  This is "how many registers of B could
-                * the worst choice register from C conflict with".
-                *
-                * If we just let the register allocation algorithm compute these
-                * values, is extremely expensive.  However, since all of our
-                * registers are laid out, we can very easily compute them
-                * ourselves.  View the register from C as fixed starting at GRF n
-                * somewhere in the middle, and the register from B as sliding back
-                * and forth.  Then the first register to conflict from B is the
-                * one starting at n - class_size[B] + 1 and the last register to
-                * conflict will start at n + class_size[B] - 1.  Therefore, the
-                * number of conflicts from B is class_size[B] + class_size[C] - 1.
-                *
-                *   +-+-+-+-+-+-+     +-+-+-+-+-+-+
-                * B | | | | | |n| --> | | | | | | |
-                *   +-+-+-+-+-+-+     +-+-+-+-+-+-+
-                *             +-+-+-+-+-+
-                * C           |n| | | | |
-                *             +-+-+-+-+-+
-                *
-                * (Idea copied from brw_fs_reg_allocate.cpp)
-                */
-               for (unsigned j = 0; j < class_count; j++)
-                       q_values[i][j] = class_sizes[i] + class_sizes[j] - 1;
-       }
-
-       for (unsigned i = class_count; i < total_class_count; i++) {
-               q_values[i] = ralloc_array(q_values, unsigned, total_class_count);
-
-               /* see comment above: */
-               for (unsigned j = class_count; j < total_class_count; j++) {
-                       q_values[i][j] = half_class_sizes[i - class_count] +
-                                       half_class_sizes[j - class_count] - 1;
-               }
-       }
+       build_q_values(q_values, 0, class_sizes, class_count);
+       build_q_values(q_values, HALF_OFFSET, half_class_sizes, half_class_count);
+       build_q_values(q_values, HIGH_OFFSET, high_class_sizes, high_class_count);
 
        /* allocate the reg-set.. */
        set->regs = ra_alloc_reg_set(set, ra_reg_count, true);
@@ -215,18 +233,19 @@ ir3_ra_alloc_reg_set(void *memctx)
        }
 
        first_half_reg = reg;
+       base = HALF_OFFSET;
 
        for (unsigned i = 0; i < half_class_count; i++) {
                set->half_classes[i] = ra_alloc_reg_class(set->regs);
 
-               set->gpr_to_ra_reg[class_count + i] =
-                               ralloc_array(set, uint16_t, CLASS_REGS(i));
+               set->gpr_to_ra_reg[base + i] =
+                               ralloc_array(set, uint16_t, HALF_CLASS_REGS(i));
 
                for (unsigned j = 0; j < HALF_CLASS_REGS(i); j++) {
                        ra_class_add_reg(set->regs, set->half_classes[i], reg);
 
                        set->ra_reg_to_gpr[reg] = j;
-                       set->gpr_to_ra_reg[class_count + i][j] = reg;
+                       set->gpr_to_ra_reg[base + i][j] = reg;
 
                        for (unsigned br = j; br < j + half_class_sizes[i]; br++)
                                ra_add_transitive_reg_conflict(set->regs, br + first_half_reg, reg);
@@ -235,6 +254,29 @@ ir3_ra_alloc_reg_set(void *memctx)
                }
        }
 
+       first_high_reg = reg;
+       base = HIGH_OFFSET;
+
+       for (unsigned i = 0; i < high_class_count; i++) {
+               set->high_classes[i] = ra_alloc_reg_class(set->regs);
+
+               set->gpr_to_ra_reg[base + i] =
+                               ralloc_array(set, uint16_t, HIGH_CLASS_REGS(i));
+
+               for (unsigned j = 0; j < HIGH_CLASS_REGS(i); j++) {
+                       ra_class_add_reg(set->regs, set->high_classes[i], reg);
+
+                       set->ra_reg_to_gpr[reg] = j;
+                       set->gpr_to_ra_reg[base + i][j] = reg;
+
+                       for (unsigned br = j; br < j + high_class_sizes[i]; br++)
+                               ra_add_transitive_reg_conflict(set->regs, br + first_high_reg, reg);
+
+                       reg++;
+               }
+       }
+
+
        ra_set_finalize(set->regs, q_values);
 
        ralloc_free(q_values);
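
The high classes are appended to the same flat virtual register space, after all full and half class registers (first_high_reg), and gpr_to_ra_reg[HIGH_OFFSET + i][j] maps (class, base gpr) back into it.  A short worked example of how many ra regs the new classes add, using the macros above:

    NUM_HIGH_REGS      = 4 * 8        = 32   (r48.x .. r55.w)
    HIGH_CLASS_REGS(0) = 32 - (1 - 1) = 32   (vec1 class)
    HIGH_CLASS_REGS(1) = 32 - (3 - 1) = 30   (vec3 class)

so the high classes contribute 62 additional ra regs to ra_reg_count, each one conflicting with the 1 or 3 base registers it covers.
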
@@ -287,13 +329,23 @@ is_half(struct ir3_instruction *instr)
        return !!(instr->regs[0]->flags & IR3_REG_HALF);
 }
 
+static bool
+is_high(struct ir3_instruction *instr)
+{
+       return !!(instr->regs[0]->flags & IR3_REG_HIGH);
+}
+
 static int
-size_to_class(unsigned sz, bool half)
+size_to_class(unsigned sz, bool half, bool high)
 {
-       if (half) {
+       if (high) {
+               for (unsigned i = 0; i < high_class_count; i++)
+                       if (high_class_sizes[i] >= sz)
+                               return i + HIGH_OFFSET;
+       } else if (half) {
                for (unsigned i = 0; i < half_class_count; i++)
                        if (half_class_sizes[i] >= sz)
-                               return i + class_count;
+                               return i + HALF_OFFSET;
        } else {
                for (unsigned i = 0; i < class_count; i++)
                        if (class_sizes[i] >= sz)
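
size_to_class() returns a flat class index: the full classes come first, then the half classes starting at HALF_OFFSET, then the high classes at HIGH_OFFSET.  Since the high class sizes are {1, 3}, a 2-component high value takes the vec3 class.  A hypothetical sanity check (it would have to live in ir3_ra.c next to size_to_class(), since these helpers are static):

    #include <assert.h>
    #include <stdbool.h>

    static void check_size_to_class(void)
    {
            assert(size_to_class(3, true,  false) == HALF_OFFSET + 2); /* half vec3 */
            assert(size_to_class(2, false, true)  == HIGH_OFFSET + 1); /* high vec2 -> vec3 class */
            assert(size_to_class(3, false, true)  == HIGH_OFFSET + 1); /* high vec3 */
    }
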
@@ -497,7 +549,7 @@ ra_block_find_definers(struct ir3_ra_ctx *ctx, struct ir3_block *block)
                        id->defn = instr;
                } else {
                        id->defn = get_definer(ctx, instr, &id->sz, &id->off);
-                       id->cls = size_to_class(id->sz, is_half(id->defn));
+                       id->cls = size_to_class(id->sz, is_half(id->defn), is_high(id->defn));
                }
        }
 }
@@ -710,9 +762,12 @@ ra_block_compute_live_ranges(struct ir3_ra_ctx *ctx, struct ir3_block *block)
 
                                def(name, id->defn);
 
-                               if (is_half(id->defn)) {
+                               if (is_high(id->defn)) {
                                        ra_set_node_class(ctx->g, name,
-                                                       ctx->set->half_classes[id->cls - class_count]);
+                                                       ctx->set->high_classes[id->cls - HIGH_OFFSET]);
+                               } else if (is_half(id->defn)) {
+                                       ra_set_node_class(ctx->g, name,
+                                                       ctx->set->half_classes[id->cls - HALF_OFFSET]);
                                } else {
                                        ra_set_node_class(ctx->g, name,
                                                        ctx->set->classes[id->cls]);
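
The class index in id->cls already encodes which group the value belongs to, so choosing the ra class is just a question of which offset to subtract.  The same three-way choice as a hypothetical helper (not part of the patch; it would sit in ir3_ra.c next to the code above):

    static unsigned int node_class(struct ir3_ra_reg_set *set,
                    struct ir3_instruction *defn, int cls)
    {
            if (is_high(defn))
                    return set->high_classes[cls - HIGH_OFFSET];
            else if (is_half(defn))
                    return set->half_classes[cls - HALF_OFFSET];
            return set->classes[cls];
    }
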
@@ -981,6 +1036,9 @@ reg_assign(struct ir3_ra_ctx *ctx, struct ir3_register *reg,
 
                debug_assert(!(reg->flags & IR3_REG_RELATIV));
 
+               if (is_high(id->defn))
+                       num += FIRST_HIGH_REG;
+
                reg->num = num;
                reg->flags &= ~(IR3_REG_SSA | IR3_REG_PHI_SRC);
 
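
Inside RA the high classes are numbered from zero, so the final hardware register number is recovered by adding FIRST_HIGH_REG (4 * 48 = 192).  Since ir3 packs the component into the low two bits of the register number, a few worked examples:

    num  0  ->   0 + 192 = 192 = (48 << 2) | 0  ->  r48.x
    num 13  ->  13 + 192 = 205 = (51 << 2) | 1  ->  r51.y
    num 31  ->  31 + 192 = 223 = (55 << 2) | 3  ->  r55.w
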
@@ -1029,7 +1087,7 @@ ra_alloc(struct ir3_ra_ctx *ctx)
                unsigned i = 0, j;
                if (ctx->frag_face && (i < ir->ninputs) && ir->inputs[i]) {
                        struct ir3_instruction *instr = ir->inputs[i];
-                       int cls = size_to_class(1, true);
+                       int cls = size_to_class(1, true, false);
                        unsigned name = __ra_name(ctx, cls, instr);
                        unsigned reg = ctx->set->gpr_to_ra_reg[cls][0];