*/
#define WORK_STRIDE 10
+#define SHADOW_R27 17
/* Prepacked masks/swizzles for virtual register types */
static unsigned reg_type_to_mask[WORK_STRIDE] = {
int phys = virt / WORK_STRIDE;
int type = virt % WORK_STRIDE;
+ /* Remap the shadow index to the physical register it stands for */
+
+ if (phys == SHADOW_R27)
+ phys = 27;
+
struct phys_reg r = {
.reg = phys,
.mask = reg_type_to_mask[type],
};
/* Report that we actually use this register, and return it */
- ctx->work_registers = MAX2(ctx->work_registers, phys);
+
+ /* Only the sixteen work registers count against the budget */
+ if (phys < 16)
+ ctx->work_registers = MAX2(ctx->work_registers, phys);
+
return r;
}
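
As a worked example of the decode above (not part of the patch): with
WORK_STRIDE = 10, each physical register owns ten consecutive virtual indices,
one per mask/swizzle type, and the shadow index redirects to r27. A minimal
standalone sketch, reusing only the two #defines from this file:

    #include <assert.h>
    #include <stdio.h>

    #define WORK_STRIDE 10
    #define SHADOW_R27  17

    int main(void)
    {
            /* The type-2 slice of the shadow register */
            unsigned virt = (SHADOW_R27 * WORK_STRIDE) + 2;

            int phys = virt / WORK_STRIDE;  /* 17, the shadow */
            int type = virt % WORK_STRIDE;  /* 2 */

            /* Apply the shadow mapping, as index_to_reg does */
            if (phys == SHADOW_R27)
                    phys = 27;

            assert(phys == 27 && type == 2);
            printf("virt %u decodes to r%d (type %d)\n", virt, phys, type);
            return 0;
    }
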
classes[4*c + 2] = work_vec3;
classes[4*c + 3] = work_vec4;
- /* Special register classes have two registers in them */
- unsigned count = (c == REG_CLASS_WORK) ? work_count : 2;
+ /* Special register classes have their own register counts */
+ unsigned count =
+ (c == REG_CLASS_WORK) ? work_count :
+ (c == REG_CLASS_LDST27) ? 1 : 2;
+
+ /* We arbitrarily pick r17 (otherwise unused by RA) as the shadow for r27 */
+ unsigned first_reg =
+ (c == REG_CLASS_LDST) ? 26 :
+ (c == REG_CLASS_LDST27) ? SHADOW_R27 :
+ (c == REG_CLASS_TEX) ? 28 : 0;
/* Add the full set of work registers */
- for (unsigned i = 0; i < count; ++i) {
+ for (unsigned i = first_reg; i < (first_reg + count); ++i) {
int base = WORK_STRIDE * i;
/* Build a full set of subdivisions */
}
}
+
+ /* All of the r27 registers in LDST conflict with all of the registers
+ * in LDST27 (the pseudo/shadow class) */
+
+ for (unsigned a = 0; a < WORK_STRIDE; ++a) {
+ unsigned reg_a = (WORK_STRIDE * 27) + a;
+
+ for (unsigned b = 0; b < WORK_STRIDE; ++b) {
+ unsigned reg_b = (WORK_STRIDE * SHADOW_R27) + b;
+
+ ra_add_reg_conflict(regs, reg_a, reg_b);
+ ra_add_reg_conflict(regs, reg_b, reg_a);
+ }
+ }
+
/* We're done setting up */
ra_set_finalize(regs, NULL);
return regs;
}
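
For reference, the first_reg/count logic gives each class a disjoint slice of
the flat ra register space. A hypothetical standalone printout; the class
ordering and work_count = 16 are assumptions for illustration:

    #include <stdio.h>

    #define WORK_STRIDE 10
    #define SHADOW_R27  17

    enum { REG_CLASS_WORK, REG_CLASS_LDST, REG_CLASS_LDST27, REG_CLASS_TEX };

    int main(void)
    {
            const char *names[] = { "work", "ldst", "ldst27", "tex" };
            unsigned work_count = 16; /* assumed; the real value is per-shader */

            for (unsigned c = REG_CLASS_WORK; c <= REG_CLASS_TEX; ++c) {
                    unsigned count =
                            (c == REG_CLASS_WORK) ? work_count :
                            (c == REG_CLASS_LDST27) ? 1 : 2;

                    unsigned first_reg =
                            (c == REG_CLASS_LDST) ? 26 :
                            (c == REG_CLASS_LDST27) ? SHADOW_R27 :
                            (c == REG_CLASS_TEX) ? 28 : 0;

                    printf("%-6s: r%u..r%u (ra indices %u..%u)\n",
                           names[c], first_reg, first_reg + count - 1,
                           WORK_STRIDE * first_reg,
                           WORK_STRIDE * (first_reg + count) - 1);
            }

            return 0;
    }
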
-/* This routine gets a precomputed register set off the screen if it's able, or otherwise it computes one on the fly */
+/* This routine gets a precomputed register set off the screen if it's able, or
+ * otherwise it computes one on the fly */
static struct ra_regs *
get_register_set(struct midgard_screen *screen, unsigned work_count, unsigned **classes)
return created;
}
+/* Assign a (special) class, ensuring that it is compatible with whatever class
+ * was already set */
+
+static void
+set_class(unsigned *classes, int node, unsigned class)
+{
+ /* Check that we're even a node */
+ if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
+ return;
+
+ /* Classes are packed as (class << 2) | size, so the first 4 entries
+ * are work, the next 4 are load/store.. */
+ unsigned current_class = classes[node] >> 2;
+
+ /* Nothing to do */
+ if (class == current_class)
+ return;
+
+ /* An existing LDST27 class already satisfies a plain LDST request,
+ * since the shadow maps back to r27 */
+ if ((current_class == REG_CLASS_LDST27) && (class == REG_CLASS_LDST))
+ return;
+
+ /* Otherwise we're changing class, which is only legal from the default
+ * work class or as an upgrade from LDST to LDST27 */
+
+ bool compat = current_class == REG_CLASS_WORK;
+ compat |= (current_class == REG_CLASS_LDST) && (class == REG_CLASS_LDST27);
+
+ assert(compat);
+
+ classes[node] &= 0x3;
+ classes[node] |= (class << 2);
+}
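
set_class relies on the packing (class << 2) | size noted above. A small
standalone sketch of the encode/decode round trip; the enum values are
assumed for illustration:

    #include <assert.h>

    enum { REG_CLASS_WORK, REG_CLASS_LDST, REG_CLASS_LDST27, REG_CLASS_TEX };

    int main(void)
    {
            unsigned packed = 2;  /* vec3, work class by default */

            /* Assign a special class, preserving the size bits */
            packed = (packed & 0x3) | (REG_CLASS_LDST27 << 2);

            assert((packed >> 2) == REG_CLASS_LDST27);  /* semantic class */
            assert((packed & 0x3) == 2);                /* size preserved */
            return 0;
    }
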
+
+static void
+force_vec4(unsigned *classes, int node)
+{
+ if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
+ return;
+
+ /* Force vec4 = 3 */
+ classes[node] |= 0x3;
+}
+
+/* Special register classes impose special constraints on who can read their
+ * values, so check that */
+
+static bool
+check_read_class(unsigned *classes, unsigned tag, int node)
+{
+ /* Non-nodes are implicitly ok */
+ if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
+ return true;
+
+ unsigned current_class = classes[node] >> 2;
+
+ switch (current_class) {
+ case REG_CLASS_LDST:
+ case REG_CLASS_LDST27:
+ return (tag == TAG_LOAD_STORE_4);
+ default:
+ return (tag != TAG_LOAD_STORE_4);
+ }
+}
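
A quick sanity check of the read rules, restating check_read_class with
stand-in TAG_* and REG_CLASS_* values (the real ones live in the compiler
headers):

    #include <assert.h>
    #include <stdbool.h>

    enum { TAG_ALU_4, TAG_LOAD_STORE_4, TAG_TEXTURE_4 };
    enum { REG_CLASS_WORK, REG_CLASS_LDST, REG_CLASS_LDST27, REG_CLASS_TEX };

    static bool
    can_read(unsigned class, unsigned tag)
    {
            switch (class) {
            case REG_CLASS_LDST:
            case REG_CLASS_LDST27:
                    return tag == TAG_LOAD_STORE_4;
            default:
                    return tag != TAG_LOAD_STORE_4;
            }
    }

    int main(void)
    {
            assert( can_read(REG_CLASS_LDST27, TAG_LOAD_STORE_4));
            assert(!can_read(REG_CLASS_LDST,   TAG_ALU_4));
            assert(!can_read(REG_CLASS_WORK,   TAG_LOAD_STORE_4));
            assert( can_read(REG_CLASS_WORK,   TAG_TEXTURE_4));
            return 0;
    }
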
+
+/* Prepass before RA to ensure special class restrictions are met. The idea is
+ * to keep a bitfield per type of instruction, marking which indices each type
+ * reads. Later, we'll add moves as appropriate and rewrite to specialize by
+ * type. */
+
+static void
+mark_node_class(unsigned *bitfield, int node)
+{
+ if ((node >= 0) && (node < SSA_FIXED_MINIMUM))
+ BITSET_SET(bitfield, node);
+}
+
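+/* Find the last instruction writing a given index, so copies inserted after
+ * it see the fully-formed value */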
+static midgard_instruction *
+mir_find_last_write(compiler_context *ctx, unsigned i)
+{
+ midgard_instruction *last_write = NULL;
+
+ mir_foreach_instr_global(ctx, ins) {
+ if (ins->compact_branch) continue;
+
+ if (ins->ssa_args.dest == i)
+ last_write = ins;
+ }
+
+ return last_write;
+}
+
+void
+mir_lower_special_reads(compiler_context *ctx)
+{
+ size_t sz = BITSET_WORDS(ctx->temp_count) * sizeof(BITSET_WORD);
+
+ /* Bitfields for the various types of registers we could have */
+
+ unsigned *alur = calloc(sz, 1);
+ unsigned *ldst = calloc(sz, 1);
+ unsigned *texr = calloc(sz, 1);
+ unsigned *texw = calloc(sz, 1);
+
+ /* Pass #1 is analysis, a linear scan to fill out the bitfields */
+
+ mir_foreach_instr_global(ctx, ins) {
+ if (ins->compact_branch) continue;
+
+ switch (ins->type) {
+ case TAG_ALU_4:
+ mark_node_class(alur, ins->ssa_args.src0);
+ mark_node_class(alur, ins->ssa_args.src1);
+ break;
+ case TAG_LOAD_STORE_4:
+ mark_node_class(ldst, ins->ssa_args.src0);
+ mark_node_class(ldst, ins->ssa_args.src1);
+ break;
+ case TAG_TEXTURE_4:
+ mark_node_class(texr, ins->ssa_args.src0);
+ mark_node_class(texr, ins->ssa_args.src1);
+ mark_node_class(texw, ins->ssa_args.dest);
+ break;
+ }
+ }
+
+ /* Pass #2 is lowering now that we've analyzed all the classes.
+ * Conceptually, if an index is only marked for a single type of use,
+ * there is nothing to lower. If it is marked for different uses, we
+ * split up based on the number of types of uses. To do so, we divide
+ * into N distinct classes of use (where N>1 by definition), emit N-1
+ * moves from the index to copies of the index, and finally rewrite N-1
+ * of the types of uses to use the corresponding move */
+
+ unsigned spill_idx = ctx->temp_count;
+
+ for (unsigned i = 0; i < ctx->temp_count; ++i) {
+ bool is_alur = BITSET_TEST(alur, i);
+ bool is_ldst = BITSET_TEST(ldst, i);
+ bool is_texr = BITSET_TEST(texr, i);
+ bool is_texw = BITSET_TEST(texw, i);
+
+ /* Analyse to check how many distinct uses there are. ALU ops
+ * (alur) can read the results of the texture pipeline (texw)
+ * but not ldst or texr. Load/store ops (ldst) cannot read
+ * anything but load/store inputs. Texture pipeline cannot read
+ * anything but texture inputs. TODO: Simplify. */
+
+ bool collision =
+ (is_alur && (is_ldst || is_texr)) ||
+ (is_ldst && (is_alur || is_texr || is_texw)) ||
+ (is_texr && (is_alur || is_ldst)) ||
+ (is_texw && (is_ldst));
+
+ if (!collision)
+ continue;
+
+ /* Use the index as-is as the work copy. Emit copies for
+ * special uses */
+
+ if (is_ldst) {
+ unsigned idx = spill_idx++;
+ midgard_instruction m = v_mov(i, blank_alu_src, idx);
+ midgard_instruction *use = mir_next_op(mir_find_last_write(ctx, i));
+ assert(use);
+ mir_insert_instruction_before(use, m);
+
+ /* Rewrite to use */
+ mir_rewrite_index_src_tag(ctx, i, idx, TAG_LOAD_STORE_4);
+ }
+ }
+
+ free(alur);
+ free(ldst);
+ free(texr);
+ free(texw);
+}
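
To make the lowering concrete: suppose an index is read by both an ALU op and
a load/store op. The collision test fires, one mov is emitted after the last
write, and the load/store reads are rewritten to the copy. A standalone
sketch of just the collision decision:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
            /* An index read by both ALU and load/store */
            bool is_alur = true, is_ldst = true;
            bool is_texr = false, is_texw = false;

            bool collision =
                    (is_alur && (is_ldst || is_texr)) ||
                    (is_ldst && (is_alur || is_texr || is_texw)) ||
                    (is_texr && (is_alur || is_ldst)) ||
                    (is_texw && is_ldst);

            if (collision)
                    printf("emit mov, rewrite TAG_LOAD_STORE_4 reads to the copy\n");

            return 0;
    }
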
+
/* This routine performs the actual register allocation. It should be succeeded
* by install_registers */
/* Let's actually do register allocation */
int nodes = ctx->temp_count;
struct ra_graph *g = ra_alloc_interference_graph(regs, nodes);
-
- /* Determine minimum size needed to hold values, to indirectly
- * determine class */
+
+ /* Register class (as known to the Mesa register allocator) is actually
+ * the product of both semantic class (work, load/store, texture..) and
+ * size (vec2/vec3..). First, we'll go through and determine the
+ * minimum size needed to hold values */
unsigned *found_class = calloc(sizeof(unsigned), ctx->temp_count);
- mir_foreach_block(ctx, block) {
- mir_foreach_instr_in_block(block, ins) {
- if (ins->compact_branch) continue;
- if (ins->ssa_args.dest < 0) continue;
- if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue;
+ mir_foreach_instr_global(ctx, ins) {
+ if (ins->compact_branch) continue;
+ if (ins->ssa_args.dest < 0) continue;
+ if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue;
+
+ /* 0 for x, 1 for xy, 2 for xyz, 3 for xyzw */
+ int class = util_logbase2(ins->mask);
+
+ /* Use the largest class if there's ambiguity, this
+ * handles partial writes */
+
+ int dest = ins->ssa_args.dest;
+ found_class[dest] = MAX2(found_class[dest], class);
+ }
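
util_logbase2 maps a writemask to the smallest size class covering it, and
partial writes land in the largest needed class via the MAX2 above. A sketch
using a stand-in for Mesa's helper (assumes a GCC-style __builtin_clz):

    #include <assert.h>

    /* Stand-in for Mesa's util_logbase2 */
    static unsigned
    logbase2(unsigned v)
    {
            return 31 - __builtin_clz(v);
    }

    int main(void)
    {
            assert(logbase2(0x1) == 0);  /* x    -> vec1 */
            assert(logbase2(0x3) == 1);  /* xy   -> vec2 */
            assert(logbase2(0x7) == 2);  /* xyz  -> vec3 */
            assert(logbase2(0xF) == 3);  /* xyzw -> vec4 */
            assert(logbase2(0xB) == 3);  /* partial .xyw still needs vec4 */
            return 0;
    }
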
+
+ /* Next, we'll determine semantic class. We default to zero (work).
+ * But, if we're used with a special operation, that will force us to a
+ * particular class. Each node must be assigned to exactly one class; a
+ * prepass before RA should have lowered what-would-have-been
+ * multiclass nodes into a series of moves to break it up into multiple
+ * nodes (TODO) */
+
+ mir_foreach_instr_global(ctx, ins) {
+ if (ins->compact_branch) continue;
- int class = util_logbase2(ins->mask) + 1;
+ /* Check if this operation imposes any classes */
- /* Use the largest class if there's ambiguity, this
- * handles partial writes */
+ if (ins->type == TAG_LOAD_STORE_4) {
+ bool force_r27 = OP_IS_R27_ONLY(ins->load_store.op);
+ unsigned class = force_r27 ? REG_CLASS_LDST27 : REG_CLASS_LDST;
- int dest = ins->ssa_args.dest;
- found_class[dest] = MAX2(found_class[dest], class);
+ set_class(found_class, ins->ssa_args.src0, class);
+ set_class(found_class, ins->ssa_args.src1, class);
+
+ if (force_r27) {
+ force_vec4(found_class, ins->ssa_args.dest);
+ force_vec4(found_class, ins->ssa_args.src0);
+ force_vec4(found_class, ins->ssa_args.src1);
+ }
}
}
+ /* Check that the semantics of the class are respected */
+ mir_foreach_instr_global(ctx, ins) {
+ if (ins->compact_branch) continue;
+
+ /* Non-load-store cannot read load/store */
+ assert(check_read_class(found_class, ins->type, ins->ssa_args.src0));
+ assert(check_read_class(found_class, ins->type, ins->ssa_args.src1));
+ }
+
for (unsigned i = 0; i < ctx->temp_count; ++i) {
unsigned class = found_class[i];
- if (!class) continue;
- ra_set_node_class(g, i, classes[class - 1]);
+ ra_set_node_class(g, i, classes[class]);
}
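
Note the packed value doubles as a flat index into the classes[] array built
earlier (classes[4*c + s]), which is why the old class - 1 adjustment
disappears. A sketch, with the class values assumed as before:

    #include <assert.h>

    enum { REG_CLASS_WORK, REG_CLASS_LDST, REG_CLASS_LDST27, REG_CLASS_TEX };

    int main(void)
    {
            unsigned semantic = REG_CLASS_LDST27;
            unsigned size = 3; /* vec4, as forced by force_vec4 */

            unsigned packed = (semantic << 2) | size;

            /* Same flat index as classes[4 * c + s] */
            assert(packed == 4 * semantic + size);
            return 0;
    }
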
/* Determine liveness */
mir_foreach_instr_in_block(block, ins) {
if (ins->compact_branch) continue;
- /* Dest is < 0 for st_vary instructions, which break
- * the usual SSA conventions. Liveness analysis doesn't
- * make sense on these instructions, so skip them to
- * avoid memory corruption */
-
- if (ins->ssa_args.dest < 0) continue;
-
if (ins->ssa_args.dest < SSA_FIXED_MINIMUM) {
/* If this destination is not yet live, it is
* now since we just wrote it */
int dest = ins->ssa_args.dest;
- if (live_start[dest] == -1)
+ if (dest >= 0 && live_start[dest] == -1)
live_start[dest] = d;
}
}
case TAG_LOAD_STORE_4: {
- if (OP_IS_STORE_R26(ins->load_store.op)) {
- /* TODO: use ssa_args for st_vary */
- ins->load_store.reg = 0;
+ bool fixed = args.src0 >= SSA_FIXED_MINIMUM;
+
+ if (OP_IS_STORE_R26(ins->load_store.op) && fixed) {
+ ins->load_store.reg = SSA_REG_FROM_FIXED(args.src0);
+ } else if (OP_IS_STORE_VARY(ins->load_store.op)) {
+ struct phys_reg src = index_to_reg(ctx, g, args.src0);
+ assert(src.reg == 26 || src.reg == 27);
+
+ ins->load_store.reg = src.reg - 26;
+
+ /* TODO: swizzle/mask */
} else {
/* Which physical register we read off depends on
* whether we are loading or storing -- think about the