#include "compiler.h"
#include "midgard_ops.h"
-#include "util/register_allocate.h"
#include "util/u_math.h"
#include "util/u_memory.h"
-
-/* For work registers, we can subdivide in various ways. So we create
- * classes for the various sizes and conflict accordingly, keeping in
- * mind that physical registers are divided along 128-bit boundaries.
- * The important part is that 128-bit boundaries are not crossed.
- *
- * For each 128-bit register, we can subdivide to 32-bits 10 ways
- *
- * vec4: xyzw
- * vec3: xyz, yzw
- * vec2: xy, yz, zw,
- * vec1: x, y, z, w
- *
- * For each 64-bit register, we can subdivide similarly to 16-bit
- * (TODO: half-float RA, not that we support fp16 yet)
- */
-
-#define WORK_STRIDE 10
-
-/* We have overlapping register classes for special registers, handled via
- * shadows */
-
-#define SHADOW_R28 18
-#define SHADOW_R29 19
-
-/* Prepacked masks/swizzles for virtual register types */
-static unsigned reg_type_to_mask[WORK_STRIDE] = {
- 0xF, /* xyzw */
- 0x7, 0x7 << 1, /* xyz */
- 0x3, 0x3 << 1, 0x3 << 2, /* xy */
- 0x1, 0x1 << 1, 0x1 << 2, 0x1 << 3 /* x */
-};
-
-static unsigned reg_type_to_swizzle[WORK_STRIDE] = {
- SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
-
- SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
- SWIZZLE(COMPONENT_Y, COMPONENT_Z, COMPONENT_W, COMPONENT_W),
-
- SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
- SWIZZLE(COMPONENT_Y, COMPONENT_Z, COMPONENT_Z, COMPONENT_W),
- SWIZZLE(COMPONENT_Z, COMPONENT_W, COMPONENT_Z, COMPONENT_W),
-
- SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
- SWIZZLE(COMPONENT_Y, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
- SWIZZLE(COMPONENT_Z, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
- SWIZZLE(COMPONENT_W, COMPONENT_Y, COMPONENT_Z, COMPONENT_W),
-};
+#include "midgard_quirks.h"
struct phys_reg {
+ /* Physical register: 0-31 */
unsigned reg;
- unsigned mask;
- unsigned swizzle;
-};
-/* Given the mask/swizzle of both the register and the original source,
- * compose to find the actual mask/swizzle to give the hardware */
+ /* Byte offset into the physical register: 0-15 */
+ unsigned offset;
-static unsigned
-compose_writemask(unsigned mask, struct phys_reg reg)
-{
- /* Note: the reg mask is guaranteed to be contiguous. So we shift
- * into the X place, compose via a simple AND, and shift back */
+ /* Number of bytes in a component of this register */
+ unsigned size;
+};
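+
+/* Illustrative example (not in the original source): a 32-bit vec2
+ * allocated to the top half of r2 (r2.zw) would be described as
+ * { .reg = 2, .offset = 8, .size = 4 }, since the z component begins
+ * 8 bytes into the 128-bit register. */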
- unsigned shift = __builtin_ctz(reg.mask);
- return ((reg.mask >> shift) & mask) << shift;
-}
+/* Shift the swizzle up by reg_offset (selecting higher source components)
+ * and horizontally by dst_offset (sliding it to higher lanes). Both offsets
+ * are in bytes. */
-static unsigned
-compose_swizzle(unsigned swizzle, unsigned mask,
- struct phys_reg reg, struct phys_reg dst)
+static void
+offset_swizzle(unsigned *swizzle, unsigned reg_offset, unsigned srcsize, unsigned dst_offset)
{
- unsigned out = pan_compose_swizzle(swizzle, reg.swizzle);
+ unsigned out[MIR_VEC_COMPONENTS];
- /* Based on the register mask, we need to adjust over. E.g if we're
- * writing to yz, a base swizzle of xy__ becomes _xy_. Save the
- * original first component (x). But to prevent duplicate shifting
- * (only applies to ALU -- mask param is set to xyzw out on L/S to
- * prevent changes), we have to account for the shift inherent to the
- * original writemask */
+ signed reg_comp = reg_offset / srcsize;
+ signed dst_comp = dst_offset / srcsize;
- unsigned rep = out & 0x3;
- unsigned shift = __builtin_ctz(dst.mask) - __builtin_ctz(mask);
- unsigned shifted = out << (2*shift);
+ unsigned max_component = (16 / srcsize) - 1;
- /* ..but we fill in the gaps so it appears to replicate */
+ assert(reg_comp * srcsize == reg_offset);
+ assert(dst_comp * srcsize == dst_offset);
- for (unsigned s = 0; s < shift; ++s)
- shifted |= rep << (2*s);
+ for (signed c = 0; c < MIR_VEC_COMPONENTS; ++c) {
+ signed comp = MAX2(c - dst_comp, 0);
+ out[c] = MIN2(swizzle[comp] + reg_comp, max_component);
+ }
- return shifted;
+ memcpy(swizzle, out, sizeof(out));
}
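+
+/* Worked example (illustrative, first four lanes shown): with srcsize = 4
+ * (32-bit components), an xyxx swizzle { 0, 1, 0, 0 } on a source placed at
+ * reg_offset = 8 becomes { 2, 3, 2, 2 }, i.e. zwzz: each component shifts
+ * up by reg_comp = 2 and is clamped to max_component = 3. A nonzero
+ * dst_offset instead slides the swizzle to higher lanes, so a write landing
+ * in .zw reads from the swizzle's .xy slots. */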
/* Helper to return the default phys_reg for a given register */
static struct phys_reg
-default_phys_reg(int reg)
+default_phys_reg(int reg, midgard_reg_mode size)
{
struct phys_reg r = {
.reg = reg,
- .mask = 0xF, /* xyzw */
- .swizzle = 0xE4 /* xyzw */
+ .offset = 0,
+ .size = mir_bytes_for_mode(size)
};
return r;
}

/* Determine which physical register a virtual
 * register corresponds to */
static struct phys_reg
-index_to_reg(compiler_context *ctx, struct ra_graph *g, int reg)
+index_to_reg(compiler_context *ctx, struct lcra_state *l, unsigned reg, midgard_reg_mode size)
{
/* Check for special cases */
- if (reg >= SSA_FIXED_MINIMUM)
- return default_phys_reg(SSA_REG_FROM_FIXED(reg));
- else if ((reg < 0) || !g)
- return default_phys_reg(REGISTER_UNUSED);
-
- /* Special cases aside, we pick the underlying register */
- int virt = ra_get_node_reg(g, reg);
-
- /* Divide out the register and classification */
- int phys = virt / WORK_STRIDE;
- int type = virt % WORK_STRIDE;
-
- /* Apply shadow registers */
-
- if (phys >= SHADOW_R28 && phys <= SHADOW_R29)
- phys += 28 - SHADOW_R28;
+ if (reg == ~0)
+ return default_phys_reg(REGISTER_UNUSED, size);
+ else if (reg >= SSA_FIXED_MINIMUM)
+ return default_phys_reg(SSA_REG_FROM_FIXED(reg), size);
+ else if (!l)
+ return default_phys_reg(REGISTER_UNUSED, size);
struct phys_reg r = {
- .reg = phys,
- .mask = reg_type_to_mask[type],
- .swizzle = reg_type_to_swizzle[type]
+ .reg = l->solutions[reg] / 16,
+ .offset = l->solutions[reg] & 0xF,
+ .size = mir_bytes_for_mode(size)
};
/* Report that we actually use this register, and return it */
- if (phys < 16)
- ctx->work_registers = MAX2(ctx->work_registers, phys);
+ if (r.reg < 16)
+ ctx->work_registers = MAX2(ctx->work_registers, r.reg);
return r;
}
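+
+/* For example (illustrative): an LCRA solution of byte 40 decodes to
+ * { .reg = 2, .offset = 8 }, i.e. a 32-bit value starting at r2.z. */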
-/* This routine creates a register set. Should be called infrequently since
- * it's slow and can be cached. For legibility, variables are named in terms of
- * work registers, although it is also used to create the register set for
- * special register allocation */
-
-static void
-add_shadow_conflicts (struct ra_regs *regs, unsigned base, unsigned shadow)
-{
- for (unsigned a = 0; a < WORK_STRIDE; ++a) {
- unsigned reg_a = (WORK_STRIDE * base) + a;
-
- for (unsigned b = 0; b < WORK_STRIDE; ++b) {
- unsigned reg_b = (WORK_STRIDE * shadow) + b;
-
- ra_add_reg_conflict(regs, reg_a, reg_b);
- ra_add_reg_conflict(regs, reg_b, reg_a);
- }
- }
-}
-
-static struct ra_regs *
-create_register_set(unsigned work_count, unsigned *classes)
-{
- int virtual_count = 32 * WORK_STRIDE;
-
- /* First, initialize the RA */
- struct ra_regs *regs = ra_alloc_reg_set(NULL, virtual_count, true);
-
- for (unsigned c = 0; c < NR_REG_CLASSES; ++c) {
- int work_vec4 = ra_alloc_reg_class(regs);
- int work_vec3 = ra_alloc_reg_class(regs);
- int work_vec2 = ra_alloc_reg_class(regs);
- int work_vec1 = ra_alloc_reg_class(regs);
-
- classes[4*c + 0] = work_vec1;
- classes[4*c + 1] = work_vec2;
- classes[4*c + 2] = work_vec3;
- classes[4*c + 3] = work_vec4;
-
- /* Special register classes have other register counts */
- unsigned count =
- (c == REG_CLASS_WORK) ? work_count : 2;
-
- unsigned first_reg =
- (c == REG_CLASS_LDST) ? 26 :
- (c == REG_CLASS_TEXR) ? 28 :
- (c == REG_CLASS_TEXW) ? SHADOW_R28 :
- 0;
-
- /* Add the full set of work registers */
- for (unsigned i = first_reg; i < (first_reg + count); ++i) {
- int base = WORK_STRIDE * i;
-
- /* Build a full set of subdivisions */
- ra_class_add_reg(regs, work_vec4, base);
- ra_class_add_reg(regs, work_vec3, base + 1);
- ra_class_add_reg(regs, work_vec3, base + 2);
- ra_class_add_reg(regs, work_vec2, base + 3);
- ra_class_add_reg(regs, work_vec2, base + 4);
- ra_class_add_reg(regs, work_vec2, base + 5);
- ra_class_add_reg(regs, work_vec1, base + 6);
- ra_class_add_reg(regs, work_vec1, base + 7);
- ra_class_add_reg(regs, work_vec1, base + 8);
- ra_class_add_reg(regs, work_vec1, base + 9);
-
- for (unsigned a = 0; a < 10; ++a) {
- unsigned mask1 = reg_type_to_mask[a];
-
- for (unsigned b = 0; b < 10; ++b) {
- unsigned mask2 = reg_type_to_mask[b];
-
- if (mask1 & mask2)
- ra_add_reg_conflict(regs,
- base + a, base + b);
- }
- }
- }
- }
-
-
- /* We have duplicate classes */
- add_shadow_conflicts(regs, 28, SHADOW_R28);
- add_shadow_conflicts(regs, 29, SHADOW_R29);
-
- /* We're done setting up */
- ra_set_finalize(regs, NULL);
-
- return regs;
-}
-
-/* This routine gets a precomputed register set off the screen if it's able, or
- * otherwise it computes one on the fly */
-
-static struct ra_regs *
-get_register_set(struct midgard_screen *screen, unsigned work_count, unsigned **classes)
-{
- /* Bounds check */
- assert(work_count >= 8);
- assert(work_count <= 16);
-
- /* Compute index */
- unsigned index = work_count - 8;
-
- /* Find the reg set */
- struct ra_regs *cached = screen->regs[index];
-
- if (cached) {
- assert(screen->reg_classes[index]);
- *classes = screen->reg_classes[index];
- return cached;
- }
-
- /* Otherwise, create one */
- struct ra_regs *created = create_register_set(work_count, screen->reg_classes[index]);
-
- /* Cache it and use it */
- screen->regs[index] = created;
-
- *classes = screen->reg_classes[index];
- return created;
-}
-
-/* Assign a (special) class, ensuring that it is compatible with whatever class
- * was already set */
-
static void
set_class(unsigned *classes, unsigned node, unsigned class)
{
- /* Check that we're even a node */
- if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
- return;
-
- /* First 4 are work, next 4 are load/store.. */
- unsigned current_class = classes[node] >> 2;
-
- /* Nothing to do */
- if (class == current_class)
- return;
-
- /* If we're changing, we haven't assigned a special class */
- assert(current_class == REG_CLASS_WORK);
-
- classes[node] &= 0x3;
- classes[node] |= (class << 2);
-}
-
-static void
-force_vec4(unsigned *classes, unsigned node)
-{
- if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
- return;
-
- /* Force vec4 = 3 */
- classes[node] |= 0x3;
+ if (node < SSA_FIXED_MINIMUM && class != classes[node]) {
+ assert(classes[node] == REG_CLASS_WORK);
+ classes[node] = class;
+ }
}
/* Special register classes impose special constraints on who can read their
 * values, so check that */

static bool
check_read_class(unsigned *classes, unsigned tag, unsigned node)
{
/* Non-nodes are implicitly ok */
- if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
+ if (node >= SSA_FIXED_MINIMUM)
return true;
- unsigned current_class = classes[node] >> 2;
-
- switch (current_class) {
+ switch (classes[node]) {
case REG_CLASS_LDST:
return (tag == TAG_LOAD_STORE_4);
case REG_CLASS_TEXR:
case REG_CLASS_TEXW:
return (tag != TAG_LOAD_STORE_4);
case REG_CLASS_WORK:
- return (tag == TAG_ALU_4);
+ return IS_ALU(tag);
default:
unreachable("Invalid class");
}
}

static bool
check_write_class(unsigned *classes, unsigned tag, unsigned node)
{
/* Non-nodes are implicitly ok */
- if ((node < 0) || (node >= SSA_FIXED_MINIMUM))
+ if (node >= SSA_FIXED_MINIMUM)
return true;
- unsigned current_class = classes[node] >> 2;
-
- switch (current_class) {
+ switch (classes[node]) {
case REG_CLASS_TEXR:
return true;
case REG_CLASS_TEXW:
return (tag == TAG_TEXTURE_4);
case REG_CLASS_LDST:
case REG_CLASS_WORK:
- return (tag == TAG_ALU_4) || (tag == TAG_LOAD_STORE_4);
+ return IS_ALU(tag) || (tag == TAG_LOAD_STORE_4);
default:
unreachable("Invalid class");
}
}

static void
mark_node_class (unsigned *bitfield, unsigned node)
{
- if ((node >= 0) && (node < SSA_FIXED_MINIMUM))
+ if (node < SSA_FIXED_MINIMUM)
BITSET_SET(bitfield, node);
}

void
mir_lower_special_reads(compiler_context *ctx)
{
size_t sz = BITSET_WORDS(ctx->temp_count) * sizeof(BITSET_WORD);
- /* Bitfields for the various types of registers we could have */
+ /* Bitfields for the various types of registers we could have. aluw can
+ * be written by either ALU or load/store */
unsigned *alur = calloc(sz, 1);
unsigned *aluw = calloc(sz, 1);
+ unsigned *brar = calloc(sz, 1);
unsigned *ldst = calloc(sz, 1);
unsigned *texr = calloc(sz, 1);
unsigned *texw = calloc(sz, 1);
mir_foreach_instr_global(ctx, ins) {
switch (ins->type) {
case TAG_ALU_4:
- mark_node_class(aluw, ins->ssa_args.dest);
- mark_node_class(alur, ins->ssa_args.src[0]);
- mark_node_class(alur, ins->ssa_args.src[1]);
+ mark_node_class(aluw, ins->dest);
+ mark_node_class(alur, ins->src[0]);
+ mark_node_class(alur, ins->src[1]);
+ mark_node_class(alur, ins->src[2]);
+
+ if (ins->compact_branch && ins->writeout)
+ mark_node_class(brar, ins->src[0]);
+
break;
case TAG_LOAD_STORE_4:
- mark_node_class(ldst, ins->ssa_args.src[0]);
- mark_node_class(ldst, ins->ssa_args.src[1]);
- mark_node_class(ldst, ins->ssa_args.src[2]);
+ mark_node_class(aluw, ins->dest);
+ mark_node_class(ldst, ins->src[0]);
+ mark_node_class(ldst, ins->src[1]);
+ mark_node_class(ldst, ins->src[2]);
break;
case TAG_TEXTURE_4:
- mark_node_class(texr, ins->ssa_args.src[0]);
- mark_node_class(texr, ins->ssa_args.src[1]);
- mark_node_class(texr, ins->ssa_args.src[2]);
- mark_node_class(texw, ins->ssa_args.dest);
+ mark_node_class(texr, ins->src[0]);
+ mark_node_class(texr, ins->src[1]);
+ mark_node_class(texr, ins->src[2]);
+ mark_node_class(texw, ins->dest);
break;
}
}
for (unsigned i = 0; i < ctx->temp_count; ++i) {
bool is_alur = BITSET_TEST(alur, i);
bool is_aluw = BITSET_TEST(aluw, i);
+ bool is_brar = BITSET_TEST(brar, i);
bool is_ldst = BITSET_TEST(ldst, i);
bool is_texr = BITSET_TEST(texr, i);
bool is_texw = BITSET_TEST(texw, i);

bool collision =
(is_alur && (is_ldst || is_texr)) ||
(is_ldst && (is_alur || is_texr || is_texw)) ||
(is_texr && (is_alur || is_ldst || is_texw)) ||
- (is_texw && (is_aluw || is_ldst || is_texr));
+ (is_texw && (is_aluw || is_ldst || is_texr)) ||
+ (is_brar && is_texw);
if (!collision)
continue;
/* Use the index as-is as the work copy. Emit copies for
* special uses */
- unsigned classes[] = { TAG_LOAD_STORE_4, TAG_TEXTURE_4, TAG_TEXTURE_4 };
- bool collisions[] = { is_ldst, is_texr, is_texw && is_aluw };
+ unsigned classes[] = { TAG_LOAD_STORE_4, TAG_TEXTURE_4, TAG_TEXTURE_4, TAG_ALU_4 };
+ bool collisions[] = { is_ldst, is_texr, is_texw && is_aluw, is_brar };
for (unsigned j = 0; j < ARRAY_SIZE(collisions); ++j) {
if (!collisions[j]) continue;
unsigned idx = spill_idx++;
midgard_instruction m = hazard_write ?
- v_mov(idx, blank_alu_src, i) :
- v_mov(i, blank_alu_src, idx);
+ v_mov(idx, i) : v_mov(i, idx);
/* Insert move before each read/write, depending on the
* hazard we're trying to account for */

mir_foreach_instr_global_safe(ctx, pre_use) {
if (pre_use->type != classes[j])
continue;
if (hazard_write) {
- if (pre_use->ssa_args.dest != i)
+ if (pre_use->dest != i)
continue;
} else {
if (!mir_has_arg(pre_use, i))
continue;
}

if (hazard_write) {
midgard_instruction *use = mir_next_op(pre_use);
assert(use);
- mir_insert_instruction_before(use, m);
+ mir_insert_instruction_before(ctx, use, m);
mir_rewrite_index_dst_single(pre_use, i, idx);
} else {
idx = spill_idx++;
- m = v_mov(i, blank_alu_src, idx);
- m.mask = mir_mask_of_read_components(pre_use, i);
- mir_insert_instruction_before(pre_use, m);
+ m = v_mov(i, idx);
+ m.mask = mir_from_bytemask(mir_bytemask_of_read_components(pre_use, i), midgard_reg_mode_32);
+ mir_insert_instruction_before(ctx, pre_use, m);
mir_rewrite_index_src_single(pre_use, i, idx);
}
}
}
}

free(alur);
free(aluw);
+ free(brar);
free(ldst);
free(texr);
free(texw);
}
-/* Routines for liveness analysis */
-
-static void
-liveness_gen(uint8_t *live, unsigned node, unsigned max, unsigned mask)
-{
- if ((node < 0) || (node >= max))
- return;
-
- live[node] |= mask;
-}
+/* We register allocate after scheduling, so we need to ensure instructions
+ * executing in parallel within a segment of a bundle don't clobber each
+ * other's registers. This is mostly a non-issue thanks to scheduling, but
+ * there are edge cases. In particular, once a node is written within a
+ * segment, it interferes with anything read later in that segment. */
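+
+/* For instance (hypothetical nodes): if one instruction in a segment writes
+ * bytes 0-3 of node A and a later instruction in the segment reads bytes
+ * 0-7 of node B, we add an interference between (A, mask 0x000F) and
+ * (B, mask 0x00FF) so LCRA cannot assign the two nodes overlapping bytes. */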
static void
-liveness_kill(uint8_t *live, unsigned node, unsigned max, unsigned mask)
+mir_compute_segment_interference(
+ compiler_context *ctx,
+ struct lcra_state *l,
+ midgard_bundle *bun,
+ unsigned pivot,
+ unsigned i)
{
- if ((node < 0) || (node >= max))
- return;
-
- live[node] &= ~mask;
-}
-
-/* Updates live_in for a single instruction */
+ for (unsigned j = pivot; j < i; ++j) {
+ mir_foreach_src(bun->instructions[j], s) {
+ if (bun->instructions[j]->src[s] >= ctx->temp_count)
+ continue;
-static void
-liveness_ins_update(uint8_t *live, midgard_instruction *ins, unsigned max)
-{
- /* live_in[s] = GEN[s] + (live_out[s] - KILL[s]) */
+ for (unsigned q = pivot; q < i; ++q) {
+ if (bun->instructions[q]->dest >= ctx->temp_count)
+ continue;
- liveness_kill(live, ins->ssa_args.dest, max, ins->mask);
+ /* See dEQP-GLES2.functional.shaders.return.output_write_in_func_dynamic_fragment */
- mir_foreach_src(ins, src) {
- unsigned node = ins->ssa_args.src[src];
- unsigned mask = mir_mask_of_read_components(ins, node);
+ if (q >= j) {
+ if (!(bun->instructions[j]->unit == UNIT_SMUL && bun->instructions[q]->unit == UNIT_VLUT))
+ continue;
+ }
- liveness_gen(live, node, max, mask);
+ unsigned mask = mir_bytemask(bun->instructions[q]);
+ unsigned rmask = mir_bytemask_of_read_components(bun->instructions[j], bun->instructions[j]->src[s]);
+ lcra_add_node_interference(l, bun->instructions[q]->dest, mask, bun->instructions[j]->src[s], rmask);
+ }
+ }
}
}
-/* live_out[s] = sum { p in succ[s] } ( live_in[p] ) */
-
static void
-liveness_block_live_out(compiler_context *ctx, midgard_block *blk)
-{
- mir_foreach_successor(blk, succ) {
- for (unsigned i = 0; i < ctx->temp_count; ++i)
- blk->live_out[i] |= succ->live_in[i];
- }
-}
-
-/* Liveness analysis is a backwards-may dataflow analysis pass. Within a block,
- * we compute live_out from live_in. The intrablock pass is linear-time. It
- * returns whether progress was made. */
-
-static bool
-liveness_block_update(compiler_context *ctx, midgard_block *blk)
+mir_compute_bundle_interference(
+ compiler_context *ctx,
+ struct lcra_state *l,
+ midgard_bundle *bun)
{
- bool progress = false;
-
- liveness_block_live_out(ctx, blk);
-
- uint8_t *live = mem_dup(blk->live_out, ctx->temp_count);
-
- mir_foreach_instr_in_block_rev(blk, ins)
- liveness_ins_update(live, ins, ctx->temp_count);
+ if (!IS_ALU(bun->tag))
+ return;
- /* To figure out progress, diff live_in */
+ bool old = bun->instructions[0]->unit >= UNIT_VADD;
+ unsigned pivot = 0;
- for (unsigned i = 0; (i < ctx->temp_count) && !progress; ++i)
- progress |= (blk->live_in[i] != live[i]);
+ for (unsigned i = 1; i < bun->instruction_count; ++i) {
+ bool new = bun->instructions[i]->unit >= UNIT_VADD;
- free(blk->live_in);
- blk->live_in = live;
+ if (old != new) {
+ mir_compute_segment_interference(ctx, l, bun, 0, i);
+ pivot = i;
+ break;
+ }
+ }
- return progress;
+ mir_compute_segment_interference(ctx, l, bun, pivot, bun->instruction_count);
}
-/* Globally, liveness analysis uses a fixed-point algorithm based on a
- * worklist. We initialize a work list with the exit block. We iterate the work
- * list to compute live_in from live_out for each block on the work list,
- * adding the predecessors of the block to the work list if we made progress.
- */
-
static void
-mir_compute_liveness(
+mir_compute_interference(
compiler_context *ctx,
- struct ra_graph *g)
+ struct lcra_state *l)
{
- /* List of midgard_block */
- struct set *work_list;
+ /* First, we need liveness information to be computed per block */
+ mir_compute_liveness(ctx);
- work_list = _mesa_set_create(ctx,
- _mesa_hash_pointer,
- _mesa_key_pointer_equal);
+ /* We need to force r1.w live throughout a blend shader */
- /* Allocate */
-
- mir_foreach_block(ctx, block) {
- block->live_in = calloc(ctx->temp_count, 1);
- block->live_out = calloc(ctx->temp_count, 1);
- }
+ if (ctx->is_blend) {
+ unsigned r1w = ~0;
- /* Initialize the work list with the exit block */
- struct set_entry *cur;
-
- midgard_block *exit = mir_exit_block(ctx);
- cur = _mesa_set_add(work_list, exit);
-
- /* Iterate the work list */
-
- do {
- /* Pop off a block */
- midgard_block *blk = (struct midgard_block *) cur->key;
- _mesa_set_remove(work_list, cur);
-
- /* Update its liveness information */
- bool progress = liveness_block_update(ctx, blk);
+ mir_foreach_block(ctx, _block) {
+ midgard_block *block = (midgard_block *) _block;
+ mir_foreach_instr_in_block_rev(block, ins) {
+ if (ins->writeout)
+ r1w = ins->src[2];
+ }
- /* If we made progress, we need to process the predecessors */
+ if (r1w != ~0)
+ break;
+ }
- if (progress || (blk == exit)) {
- mir_foreach_predecessor(blk, pred)
- _mesa_set_add(work_list, pred);
+ mir_foreach_instr_global(ctx, ins) {
+ if (ins->dest < ctx->temp_count)
+ lcra_add_node_interference(l, ins->dest, mir_bytemask(ins), r1w, 0xF);
}
- } while((cur = _mesa_set_next_entry(work_list, NULL)) != NULL);
+ }
/* Now that every block has live_in/live_out computed, we can determine
* interference by walking each block linearly. Take live_out at the
* end of each block and walk the block backwards. */
- mir_foreach_block(ctx, blk) {
- uint8_t *live = calloc(ctx->temp_count, 1);
-
- mir_foreach_successor(blk, succ) {
- for (unsigned i = 0; i < ctx->temp_count; ++i)
- live[i] |= succ->live_in[i];
- }
+ mir_foreach_block(ctx, _blk) {
+ midgard_block *blk = (midgard_block *) _blk;
+ uint16_t *live = mem_dup(_blk->live_out, ctx->temp_count * sizeof(uint16_t));
mir_foreach_instr_in_block_rev(blk, ins) {
/* Mark all registers live after the instruction as
* interfering with the destination */
- unsigned dest = ins->ssa_args.dest;
+ unsigned dest = ins->dest;
- if (dest >= 0 && dest < ctx->temp_count) {
+ if (dest < ctx->temp_count) {
for (unsigned i = 0; i < ctx->temp_count; ++i)
- if (live[i])
- ra_add_node_interference(g, dest, i);
+ if (live[i]) {
+ unsigned mask = mir_bytemask(ins);
+ lcra_add_node_interference(l, dest, mask, i, live[i]);
+ }
}
/* Update live_in */
- liveness_ins_update(live, ins, ctx->temp_count);
+ mir_liveness_ins_update(live, ins, ctx->temp_count);
}
- }
- mir_foreach_block(ctx, blk) {
- free(blk->live_in);
- free(blk->live_out);
+ mir_foreach_bundle_in_block(blk, bun)
+ mir_compute_bundle_interference(ctx, l, bun);
+
+ free(live);
}
}
/* This routine performs the actual register allocation. It should be succeeded
* by install_registers */
-struct ra_graph *
+static struct lcra_state *
allocate_registers(compiler_context *ctx, bool *spilled)
{
/* The number of vec4 work registers available depends on when the
* uniforms start, so compute that first */
int work_count = 16 - MAX2((ctx->uniform_cutoff - 8), 0);
- unsigned *classes = NULL;
- struct ra_regs *regs = get_register_set(ctx->screen, work_count, &classes);
-
- assert(regs != NULL);
- assert(classes != NULL);
/* No register allocation to do with no SSA */
if (!ctx->temp_count)
return NULL;
- /* Let's actually do register allocation */
- int nodes = ctx->temp_count;
- struct ra_graph *g = ra_alloc_interference_graph(regs, nodes);
-
- /* Register class (as known to the Mesa register allocator) is actually
- * the product of both semantic class (work, load/store, texture..) and
- * size (vec2/vec3..). First, we'll go through and determine the
- * minimum size needed to hold values */
+ struct lcra_state *l = lcra_alloc_equations(ctx->temp_count, 1, 8, 16, 5);
+
+ /* Starts of classes, in bytes */
+ l->class_start[REG_CLASS_WORK] = 16 * 0;
+ l->class_start[REG_CLASS_LDST] = 16 * 26;
+ l->class_start[REG_CLASS_TEXR] = 16 * 28;
+ l->class_start[REG_CLASS_TEXW] = 16 * 28;
+
+ l->class_size[REG_CLASS_WORK] = 16 * work_count;
+ l->class_size[REG_CLASS_LDST] = 16 * 2;
+ l->class_size[REG_CLASS_TEXR] = 16 * 2;
+ l->class_size[REG_CLASS_TEXW] = 16 * 2;
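+
+/* (Both tables are byte addresses into the flat register file: register rN
+ * begins at byte 16 * N, which is what index_to_reg decodes.) */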
+
+ lcra_set_disjoint_class(l, REG_CLASS_TEXR, REG_CLASS_TEXW);
+
+ /* To save space on T*20, we don't have real texture registers.
+ * Instead, tex inputs reuse the load/store pipeline registers, and
+ * tex outputs use work r0/r1. We still use the TEXR/TEXW classes, which
+ * keeps interferences and sizes handled correctly. */
+
+ if (ctx->quirks & MIDGARD_INTERPIPE_REG_ALIASING) {
+ l->class_start[REG_CLASS_TEXR] = l->class_start[REG_CLASS_LDST];
+ l->class_start[REG_CLASS_TEXW] = l->class_start[REG_CLASS_WORK];
+ }
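+
+/* The net layout with the quirk: TEXR occupies r26-r27 alongside the
+ * load/store registers, while TEXW lands in work registers r0-r1 (each
+ * class is 16 * 2 bytes, i.e. two registers). */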
unsigned *found_class = calloc(sizeof(unsigned), ctx->temp_count);
+ unsigned *min_alignment = calloc(sizeof(unsigned), ctx->temp_count);
mir_foreach_instr_global(ctx, ins) {
- if (ins->ssa_args.dest < 0) continue;
- if (ins->ssa_args.dest >= SSA_FIXED_MINIMUM) continue;
+ /* Swizzles of 32-bit sources on 64-bit instructions need to be
+ * aligned to either bottom (xy) or top (zw). More general
+ * swizzle lowering should happen prior to scheduling (TODO),
+ * but once we reach RA we shouldn't disrupt this further. Align
+ * sources of 64-bit instructions. */
+
+ if (ins->type == TAG_ALU_4 && ins->alu.reg_mode == midgard_reg_mode_64) {
+ mir_foreach_src(ins, v) {
+ unsigned s = ins->src[v];
+
+ if (s < ctx->temp_count)
+ min_alignment[s] = 3;
+ }
+ }
+
+ if (ins->type == TAG_LOAD_STORE_4 && OP_HAS_ADDRESS(ins->load_store.op)) {
+ mir_foreach_src(ins, v) {
+ unsigned s = ins->src[v];
+ unsigned size = mir_srcsize(ins, v);
+
+ if (s < ctx->temp_count)
+ min_alignment[s] = (size == midgard_reg_mode_64) ? 3 : 2;
+ }
+ }
+
+ if (ins->dest >= SSA_FIXED_MINIMUM) continue;
/* 0 for x, 1 for xy, 2 for xyz, 3 for xyzw */
int class = util_logbase2(ins->mask);
/* Use the largest class if there's ambiguity, this
* handles partial writes */
- int dest = ins->ssa_args.dest;
+ int dest = ins->dest;
found_class[dest] = MAX2(found_class[dest], class);
+
+ /* XXX: Ensure swizzles align the right way with more LCRA constraints? */
+ if (ins->type == TAG_ALU_4 && ins->alu.reg_mode != midgard_reg_mode_32)
+ min_alignment[dest] = 3; /* (1 << 3) = 8 */
+
+ if (ins->type == TAG_LOAD_STORE_4 && ins->load_64)
+ min_alignment[dest] = 3;
+
+ /* We don't have a swizzle for the conditional and we don't
+ * want to muck with the conditional itself, so just force
+ * alignment for now */
+
+ if (ins->type == TAG_ALU_4 && OP_IS_CSEL_V(ins->alu.op))
+ min_alignment[dest] = 4; /* (1 << 4) = 16 bytes = vec4 */
+
+ }
+
+ for (unsigned i = 0; i < ctx->temp_count; ++i) {
+ lcra_set_alignment(l, i, min_alignment[i] ? min_alignment[i] : 2);
+ lcra_restrict_range(l, i, (found_class[i] + 1) * 4);
}
+
+ free(found_class);
+ free(min_alignment);
/* Next, we'll determine semantic class. We default to zero (work).
* But, if we're used with a special operation, that will force us to a
* particular class */

mir_foreach_instr_global(ctx, ins) {
/* Check if this operation imposes any classes */
if (ins->type == TAG_LOAD_STORE_4) {
- bool force_vec4_only = OP_IS_VEC4_ONLY(ins->load_store.op);
-
- set_class(found_class, ins->ssa_args.src[0], REG_CLASS_LDST);
- set_class(found_class, ins->ssa_args.src[1], REG_CLASS_LDST);
- set_class(found_class, ins->ssa_args.src[2], REG_CLASS_LDST);
-
- if (force_vec4_only) {
- force_vec4(found_class, ins->ssa_args.dest);
- force_vec4(found_class, ins->ssa_args.src[0]);
- force_vec4(found_class, ins->ssa_args.src[1]);
- force_vec4(found_class, ins->ssa_args.src[2]);
+ set_class(l->class, ins->src[0], REG_CLASS_LDST);
+ set_class(l->class, ins->src[1], REG_CLASS_LDST);
+ set_class(l->class, ins->src[2], REG_CLASS_LDST);
+
+ if (OP_IS_VEC4_ONLY(ins->load_store.op)) {
+ lcra_restrict_range(l, ins->dest, 16);
+ lcra_restrict_range(l, ins->src[0], 16);
+ lcra_restrict_range(l, ins->src[1], 16);
+ lcra_restrict_range(l, ins->src[2], 16);
}
} else if (ins->type == TAG_TEXTURE_4) {
- set_class(found_class, ins->ssa_args.dest, REG_CLASS_TEXW);
- set_class(found_class, ins->ssa_args.src[0], REG_CLASS_TEXR);
- set_class(found_class, ins->ssa_args.src[1], REG_CLASS_TEXR);
- set_class(found_class, ins->ssa_args.src[2], REG_CLASS_TEXR);
+ set_class(l->class, ins->dest, REG_CLASS_TEXW);
+ set_class(l->class, ins->src[0], REG_CLASS_TEXR);
+ set_class(l->class, ins->src[1], REG_CLASS_TEXR);
+ set_class(l->class, ins->src[2], REG_CLASS_TEXR);
+ set_class(l->class, ins->src[3], REG_CLASS_TEXR);
}
}
/* Check that the semantics of the class are respected */
mir_foreach_instr_global(ctx, ins) {
- assert(check_write_class(found_class, ins->type, ins->ssa_args.dest));
- assert(check_read_class(found_class, ins->type, ins->ssa_args.src[0]));
- assert(check_read_class(found_class, ins->type, ins->ssa_args.src[1]));
- assert(check_read_class(found_class, ins->type, ins->ssa_args.src[2]));
+ assert(check_write_class(l->class, ins->type, ins->dest));
+ assert(check_read_class(l->class, ins->type, ins->src[0]));
+ assert(check_read_class(l->class, ins->type, ins->src[1]));
+ assert(check_read_class(l->class, ins->type, ins->src[2]));
}
- for (unsigned i = 0; i < ctx->temp_count; ++i) {
- unsigned class = found_class[i];
- ra_set_node_class(g, i, classes[class]);
- }
+ /* Mark writeout to r0 (depth to r1.x, stencil to r1.y), render target to r1.z, unknown to r1.w */
+ mir_foreach_instr_global(ctx, ins) {
+ if (!(ins->compact_branch && ins->writeout)) continue;
+
+ if (ins->src[0] < ctx->temp_count) {
+ if (ins->writeout_depth)
+ l->solutions[ins->src[0]] = (16 * 1) + COMPONENT_X * 4;
+ else if (ins->writeout_stencil)
+ l->solutions[ins->src[0]] = (16 * 1) + COMPONENT_Y * 4;
+ else
+ l->solutions[ins->src[0]] = 0;
+ }
- mir_compute_liveness(ctx, g);
+ if (ins->src[1] < ctx->temp_count)
+ l->solutions[ins->src[1]] = (16 * 1) + COMPONENT_Z * 4;
- if (!ra_allocate(g)) {
- *spilled = true;
- } else {
- *spilled = false;
+ if (ins->src[2] < ctx->temp_count)
+ l->solutions[ins->src[2]] = (16 * 1) + COMPONENT_W * 4;
}
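+
+/* (Pinning works by pre-seeding l->solutions with byte addresses, e.g.
+ * (16 * 1) + COMPONENT_Z * 4 = byte 24, which index_to_reg decodes as r1
+ * at offset 8, i.e. r1.z.) */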
+
+ mir_compute_interference(ctx, l);
- /* Whether we were successful or not, report the graph so we can
- * compute spill nodes */
-
- return g;
+ *spilled = !lcra_solve(l);
+ return l;
}
+
/* Once registers have been decided via register allocation
* (allocate_registers), we need to rewrite the MIR to use registers instead of
* indices */
static void
install_registers_instr(
compiler_context *ctx,
- struct ra_graph *g,
+ struct lcra_state *l,
midgard_instruction *ins)
{
- ssa_args args = ins->ssa_args;
-
switch (ins->type) {
- case TAG_ALU_4: {
- struct phys_reg src1 = index_to_reg(ctx, g, args.src[0]);
- struct phys_reg src2 = index_to_reg(ctx, g, args.src[1]);
- struct phys_reg dest = index_to_reg(ctx, g, args.dest);
+ case TAG_ALU_4:
+ case TAG_ALU_8:
+ case TAG_ALU_12:
+ case TAG_ALU_16: {
+ if (ins->compact_branch)
+ return;
+
+ struct phys_reg src1 = index_to_reg(ctx, l, ins->src[0], mir_srcsize(ins, 0));
+ struct phys_reg src2 = index_to_reg(ctx, l, ins->src[1], mir_srcsize(ins, 1));
+ struct phys_reg dest = index_to_reg(ctx, l, ins->dest, mir_typesize(ins));
- unsigned uncomposed_mask = ins->mask;
- ins->mask = compose_writemask(uncomposed_mask, dest);
+ mir_set_bytemask(ins, mir_bytemask(ins) << dest.offset);
- /* Adjust the dest mask if necessary. Mostly this is a no-op
- * but it matters for dot products */
- dest.mask = effective_writemask(&ins->alu, ins->mask);
+ unsigned dest_offset =
+ GET_CHANNEL_COUNT(alu_opcode_props[ins->alu.op].props) ? 0 :
+ dest.offset;
- midgard_vector_alu_src mod1 =
- vector_alu_from_unsigned(ins->alu.src1);
- mod1.swizzle = compose_swizzle(mod1.swizzle, uncomposed_mask, src1, dest);
- ins->alu.src1 = vector_alu_srco_unsigned(mod1);
+ offset_swizzle(ins->swizzle[0], src1.offset, src1.size, dest_offset);
ins->registers.src1_reg = src1.reg;
- ins->registers.src2_imm = args.inline_constant;
+ ins->registers.src2_imm = ins->has_inline_constant;
- if (args.inline_constant) {
+ if (ins->has_inline_constant) {
/* Encode inline 16-bit constant. See disassembler for
* where the algorithm is from */
} else {
midgard_vector_alu_src mod2 =
vector_alu_from_unsigned(ins->alu.src2);
- mod2.swizzle = compose_swizzle(
- mod2.swizzle, uncomposed_mask, src2, dest);
+ offset_swizzle(ins->swizzle[1], src2.offset, src2.size, dest_offset);
ins->alu.src2 = vector_alu_srco_unsigned(mod2);
ins->registers.src2_reg = src2.reg;
}

break;
}

case TAG_LOAD_STORE_4: {
bool encodes_src = OP_IS_STORE(ins->load_store.op);
if (encodes_src) {
- struct phys_reg src = index_to_reg(ctx, g, args.src[0]);
+ struct phys_reg src = index_to_reg(ctx, l, ins->src[0], mir_srcsize(ins, 0));
assert(src.reg == 26 || src.reg == 27);
ins->load_store.reg = src.reg - 26;
-
- unsigned shift = __builtin_ctz(src.mask);
- unsigned adjusted_mask = src.mask >> shift;
- assert(((adjusted_mask + 1) & adjusted_mask) == 0);
-
- unsigned new_swizzle = 0;
- for (unsigned q = 0; q < 4; ++q) {
- unsigned c = (ins->load_store.swizzle >> (2*q)) & 3;
- new_swizzle |= (c + shift) << (2*q);
- }
-
- ins->load_store.swizzle = compose_swizzle(
- new_swizzle, src.mask,
- default_phys_reg(0), src);
+ offset_swizzle(ins->swizzle[0], src.offset, src.size, 0);
} else {
- unsigned r = encodes_src ?
- args.src[0] : args.dest;
+ struct phys_reg dst = index_to_reg(ctx, l, ins->dest, mir_typesize(ins));
- struct phys_reg src = index_to_reg(ctx, g, r);
-
- ins->load_store.reg = src.reg;
-
- ins->load_store.swizzle = compose_swizzle(
- ins->load_store.swizzle, 0xF,
- default_phys_reg(0), src);
-
- ins->mask = compose_writemask(
- ins->mask, src);
+ ins->load_store.reg = dst.reg;
+ offset_swizzle(ins->swizzle[0], 0, 4, dst.offset);
+ mir_set_bytemask(ins, mir_bytemask(ins) << dst.offset);
}
/* We also follow up by actual arguments */
- int src2 =
- encodes_src ? args.src[1] : args.src[0];
-
- int src3 =
- encodes_src ? args.src[2] : args.src[1];
+ unsigned src2 = ins->src[1];
+ unsigned src3 = ins->src[2];
+ midgard_reg_mode m32 = midgard_reg_mode_32;
- if (src2 >= 0) {
- struct phys_reg src = index_to_reg(ctx, g, src2);
- unsigned component = __builtin_ctz(src.mask);
+ if (src2 != ~0) {
+ struct phys_reg src = index_to_reg(ctx, l, src2, m32);
+ unsigned component = src.offset / src.size;
+ assert(component * src.size == src.offset);
ins->load_store.arg_1 |= midgard_ldst_reg(src.reg, component);
}
- if (src3 >= 0) {
- struct phys_reg src = index_to_reg(ctx, g, src3);
- unsigned component = __builtin_ctz(src.mask);
+ if (src3 != ~0) {
+ struct phys_reg src = index_to_reg(ctx, l, src3, m32);
+ unsigned component = src.offset / src.size;
+ assert(component * src.size == src.offset);
ins->load_store.arg_2 |= midgard_ldst_reg(src.reg, component);
}

break;
}
case TAG_TEXTURE_4: {
- /* Grab RA results */
- struct phys_reg dest = index_to_reg(ctx, g, args.dest);
- struct phys_reg coord = index_to_reg(ctx, g, args.src[0]);
- struct phys_reg lod = index_to_reg(ctx, g, args.src[1]);
+ if (ins->texture.op == TEXTURE_OP_BARRIER)
+ break;
- assert(dest.reg == 28 || dest.reg == 29);
- assert(coord.reg == 28 || coord.reg == 29);
+ /* Grab RA results */
+ struct phys_reg dest = index_to_reg(ctx, l, ins->dest, mir_typesize(ins));
+ struct phys_reg coord = index_to_reg(ctx, l, ins->src[1], mir_srcsize(ins, 1));
+ struct phys_reg lod = index_to_reg(ctx, l, ins->src[2], mir_srcsize(ins, 2));
+ struct phys_reg offset = index_to_reg(ctx, l, ins->src[3], mir_srcsize(ins, 3));
/* First, install the texture coordinate */
ins->texture.in_reg_full = 1;
ins->texture.in_reg_upper = 0;
- ins->texture.in_reg_select = coord.reg - 28;
- ins->texture.in_reg_swizzle =
- compose_swizzle(ins->texture.in_reg_swizzle, 0xF, coord, dest);
+ ins->texture.in_reg_select = coord.reg & 1;
+ offset_swizzle(ins->swizzle[1], coord.offset, coord.size, 0);
/* Next, install the destination */
ins->texture.out_full = 1;
ins->texture.out_upper = 0;
- ins->texture.out_reg_select = dest.reg - 28;
- ins->texture.swizzle =
- compose_swizzle(ins->texture.swizzle, dest.mask, dest, dest);
- ins->mask =
- compose_writemask(ins->mask, dest);
+ ins->texture.out_reg_select = dest.reg & 1;
+ offset_swizzle(ins->swizzle[0], 0, 4, dest.offset);
+ mir_set_bytemask(ins, mir_bytemask(ins) << dest.offset);
/* If there is a register LOD/bias, use it */
- if (args.src[1] > -1) {
+ if (ins->src[2] != ~0) {
+ assert(!(lod.offset & 3));
midgard_tex_register_select sel = {
- .select = lod.reg,
+ .select = lod.reg & 1,
.full = 1,
- .component = lod.swizzle & 3,
+ .component = lod.offset / 4
};
uint8_t packed;
memcpy(&packed, &sel, sizeof(packed));
ins->texture.bias = packed;
}
+ /* If there is an offset register, install it */
+ if (ins->src[3] != ~0) {
+ unsigned x = offset.offset / 4;
+ unsigned y = x + 1;
+ unsigned z = x + 2;
+
+ /* Check range, TODO: half-registers */
+ assert(z < 4);
+
+ ins->texture.offset =
+ (1) | /* full */
+ (offset.reg & 1) << 1 | /* select */
+ (0 << 2) | /* upper */
+ (x << 3) | /* swizzle */
+ (y << 5) | /* swizzle */
+ (z << 7); /* swizzle */
+ }
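+
+/* (E.g. an offset vector allocated at byte 4 of its register yields
+ * x/y/z = 1/2/3, i.e. a .yzw swizzle.) */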
+
break;
}
}
}
-void
-install_registers(compiler_context *ctx, struct ra_graph *g)
+static void
+install_registers(compiler_context *ctx, struct lcra_state *l)
+{
+ mir_foreach_instr_global(ctx, ins)
+ install_registers_instr(ctx, l, ins);
+}
+
+/* If register allocation fails, find the best spill node */
+
+static signed
+mir_choose_spill_node(
+ compiler_context *ctx,
+ struct lcra_state *l)
+{
+ /* We can't spill a previously spilled value or an unspill */
+
+ mir_foreach_instr_global(ctx, ins) {
+ if (ins->no_spill & (1 << l->spill_class)) {
+ lcra_set_node_spill_cost(l, ins->dest, -1);
+
+ if (l->spill_class != REG_CLASS_WORK) {
+ mir_foreach_src(ins, s)
+ lcra_set_node_spill_cost(l, ins->src[s], -1);
+ }
+ }
+ }
+
+ return lcra_get_best_spill_node(l);
+}
+
+/* Once we've chosen a spill node, spill it */
+
+static void
+mir_spill_register(
+ compiler_context *ctx,
+ unsigned spill_node,
+ unsigned spill_class,
+ unsigned *spill_count)
{
- mir_foreach_block(ctx, block) {
+ unsigned spill_index = ctx->temp_count;
+
+ /* We have a spill node, so check the class. Work registers
+ * legitimately spill to TLS, but special registers just spill to work
+ * registers */
+
+ bool is_special = spill_class != REG_CLASS_WORK;
+ bool is_special_w = spill_class == REG_CLASS_TEXW;
+
+ /* Allocate TLS slot (maybe) */
+ unsigned spill_slot = !is_special ? (*spill_count)++ : 0;
+
+ /* For TLS, replace all stores to the spilled node. For
+ * special reads, just keep as-is; the class will be demoted
+ * implicitly. For special writes, spill to a work register */
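+
+/* (Concretely: the work-register path below emits v_load_store_scratch()
+ * stores after each write and, later, loads before each use, while the
+ * special-write path emits v_mov() copies through work registers.) */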
+
+ if (!is_special || is_special_w) {
+ if (is_special_w)
+ spill_slot = spill_index++;
+
+ mir_foreach_block(ctx, _block) {
+ midgard_block *block = (midgard_block *) _block;
+ mir_foreach_instr_in_block_safe(block, ins) {
+ if (ins->dest != spill_node) continue;
+
+ midgard_instruction st;
+
+ if (is_special_w) {
+ st = v_mov(spill_node, spill_slot);
+ st.no_spill |= (1 << spill_class);
+ } else {
+ ins->dest = spill_index++;
+ ins->no_spill |= (1 << spill_class);
+ st = v_load_store_scratch(ins->dest, spill_slot, true, ins->mask);
+ }
+
+ /* Hint: don't rewrite this node */
+ st.hint = true;
+
+ mir_insert_instruction_after_scheduled(ctx, block, ins, st);
+
+ if (!is_special)
+ ctx->spills++;
+ }
+ }
+ }
+
+ /* For special reads, figure out how many bytes we need */
+ unsigned read_bytemask = 0;
+
+ mir_foreach_instr_global_safe(ctx, ins) {
+ read_bytemask |= mir_bytemask_of_read_components(ins, spill_node);
+ }
+
+ /* Insert a load from TLS before the first consecutive
+ * use of the node, rewriting to use spilled indices to
+ * break up the live range. Or, for special, insert a
+ * move. Ironically the latter *increases* register
+ * pressure, but the two uses of the spilling mechanism
+ * are somewhat orthogonal. (special spilling is to use
+ * work registers to back special registers; TLS
+ * spilling is to use memory to back work registers) */
+
+ mir_foreach_block(ctx, _block) {
+ midgard_block *block = (midgard_block *) _block;
mir_foreach_instr_in_block(block, ins) {
- install_registers_instr(ctx, g, ins);
+ /* We can't rewrite the moves used to spill in the
+ * first place. These moves are hinted. */
+ if (ins->hint) continue;
+
+ /* If we don't use the spilled value, nothing to do */
+ if (!mir_has_arg(ins, spill_node)) continue;
+
+ unsigned index = 0;
+
+ if (!is_special_w) {
+ index = ++spill_index;
+
+ midgard_instruction *before = ins;
+ midgard_instruction st;
+
+ if (is_special) {
+ /* Move */
+ st = v_mov(spill_node, index);
+ st.no_spill |= (1 << spill_class);
+ } else {
+ /* TLS load */
+ st = v_load_store_scratch(index, spill_slot, false, 0xF);
+ }
+
+ /* Mask the load based on the component count
+ * actually needed to prevent RA loops */
+
+ st.mask = mir_from_bytemask(read_bytemask, midgard_reg_mode_32);
+
+ mir_insert_instruction_before_scheduled(ctx, block, before, st);
+ } else {
+ /* Special writes already have their move spilled in */
+ index = spill_slot;
+ }
+
+ /* Rewrite to use */
+ mir_rewrite_index_src_single(ins, spill_node, index);
+
+ if (!is_special)
+ ctx->fills++;
}
}
+ /* Reset hints */
+
+ mir_foreach_instr_global(ctx, ins) {
+ ins->hint = false;
+ }
+}
+
+/* Run register allocation in a loop, spilling until we succeed */
+
+void
+mir_ra(compiler_context *ctx)
+{
+ struct lcra_state *l = NULL;
+ bool spilled = false;
+ int iter_count = 1000; /* max iterations */
+
+ /* Number of 128-bit slots in memory we've spilled into */
+ unsigned spill_count = 0;
+
+ mir_create_pipeline_registers(ctx);
+
+ do {
+ if (spilled) {
+ signed spill_node = mir_choose_spill_node(ctx, l);
+
+ if (spill_node == -1) {
+ fprintf(stderr, "ERROR: Failed to choose spill node\n");
+ return;
+ }
+
+ mir_spill_register(ctx, spill_node, l->spill_class, &spill_count);
+ }
+
+ mir_squeeze_index(ctx);
+ mir_invalidate_liveness(ctx);
+
+ if (l) {
+ lcra_free(l);
+ l = NULL;
+ }
+
+ l = allocate_registers(ctx, &spilled);
+ } while(spilled && ((iter_count--) > 0));
+
+ if (iter_count <= 0) {
+ fprintf(stderr, "panfrost: Gave up allocating registers, rendering will be incomplete\n");
+ assert(0);
+ }
+
+ /* Report spilling information. spill_count is in 128-bit slots (vec4 x
+ * fp32), but tls_size is in bytes, so multiply by 16 */
+
+ ctx->tls_size = spill_count * 16;
+
+ install_registers(ctx, l);
+
+ lcra_free(l);
}