#include "midgard_ops.h"
#include "util/u_math.h"
#include "util/u_memory.h"
-#include "lcra.h"
+#include "midgard_quirks.h"
struct phys_reg {
/* Physical register: 0-31 */
/* First, we need liveness information to be computed per block */
mir_compute_liveness(ctx);
+ /* We need to force r1.w live throughout a blend shader */
+
+ if (ctx->is_blend) {
+ unsigned r1w = ~0;
+
+ mir_foreach_block(ctx, _block) {
+ midgard_block *block = (midgard_block *) _block;
+ mir_foreach_instr_in_block_rev(block, ins) {
+ if (ins->writeout)
+ r1w = ins->src[2];
+ }
+
+ if (r1w != ~0)
+ break;
+ }
+
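+ /* Mark every written node as interfering with the r1.w source found
+  * above, so the allocator can never place anything on top of it */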
+ mir_foreach_instr_global(ctx, ins) {
+ if (ins->dest < ctx->temp_count)
+ lcra_add_node_interference(l, ins->dest, mir_bytemask(ins), r1w, 0xF);
+ }
+ }
+
/* Now that every block has live_in/live_out computed, we can determine
* interference by walking each block linearly. Take live_out at the
* end of each block and walk the block backwards. */
- mir_foreach_block(ctx, blk) {
- uint16_t *live = mem_dup(blk->live_out, ctx->temp_count * sizeof(uint16_t));
+ mir_foreach_block(ctx, _blk) {
+ midgard_block *blk = (midgard_block *) _blk;
+ uint16_t *live = mem_dup(_blk->live_out, ctx->temp_count * sizeof(uint16_t));
mir_foreach_instr_in_block_rev(blk, ins) {
/* Mark all registers live after the instruction as
/* This routine performs the actual register allocation. It should be succeeded
* by install_registers */
-struct lcra_state *
+static struct lcra_state *
allocate_registers(compiler_context *ctx, bool *spilled)
{
/* The number of vec4 work registers available depends on when the
lcra_set_disjoint_class(l, REG_CLASS_TEXR, REG_CLASS_TEXW);
+ /* To save space on T*20, we don't have real texture registers.
+ * Instead, tex inputs reuse the load/store pipeline registers, and
+ * tex outputs use work r0/r1. Note we still use TEXR/TEXW classes,
+ * noting that this handles interferences and sizes correctly. */
+
+ if (ctx->quirks & MIDGARD_INTERPIPE_REG_ALIASING) {
+ l->class_start[REG_CLASS_TEXR] = l->class_start[REG_CLASS_LDST];
+ l->class_start[REG_CLASS_TEXW] = l->class_start[REG_CLASS_WORK];
+ }
+
unsigned *found_class = calloc(sizeof(unsigned), ctx->temp_count);
+ unsigned *min_alignment = calloc(sizeof(unsigned), ctx->temp_count);
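+ /* min_alignment[i] is log2 of the byte alignment required for node i;
+  * zero means "use the default of 2 (4 bytes)" applied at the end */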
mir_foreach_instr_global(ctx, ins) {
+ /* Swizzles of 32-bit sources on 64-bit instructions need to be
+ * aligned to either bottom (xy) or top (zw). More general
+ * swizzle lowering should happen prior to scheduling (TODO),
+ * but once we get RA we shouldn't disrupt this further. Align
+ * sources of 64-bit instructions. */
+
+ if (ins->type == TAG_ALU_4 && ins->alu.reg_mode == midgard_reg_mode_64) {
+ mir_foreach_src(ins, v) {
+ unsigned s = ins->src[v];
+
+ if (s < ctx->temp_count)
+ min_alignment[s] = 3;
+ }
+ }
+
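+ /* Sources feeding an address need at least their natural alignment:
+  * 8 bytes when 64-bit, 4 bytes otherwise */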
+ if (ins->type == TAG_LOAD_STORE_4 && OP_HAS_ADDRESS(ins->load_store.op)) {
+ mir_foreach_src(ins, v) {
+ unsigned s = ins->src[v];
+ unsigned size = mir_srcsize(ins, v);
+
+ if (s < ctx->temp_count)
+ min_alignment[s] = (size == midgard_reg_mode_64) ? 3 : 2;
+ }
+ }
+
if (ins->dest >= SSA_FIXED_MINIMUM) continue;
/* 0 for x, 1 for xy, 2 for xyz, 3 for xyzw */
int dest = ins->dest;
found_class[dest] = MAX2(found_class[dest], class);
- lcra_set_alignment(l, dest, 2); /* (1 << 2) = 4 */
-
/* XXX: Ensure swizzles align the right way with more LCRA constraints? */
if (ins->type == TAG_ALU_4 && ins->alu.reg_mode != midgard_reg_mode_32)
- lcra_set_alignment(l, dest, 3); /* (1 << 3) = 8 */
+ min_alignment[dest] = 3; /* (1 << 3) = 8 */
+
+ if (ins->type == TAG_LOAD_STORE_4 && ins->load_64)
+ min_alignment[dest] = 3;
+
+ /* We don't have a swizzle for the conditional and we don't
+ * want to muck with the conditional itself, so just force
+ * alignment for now */
+
+ if (ins->type == TAG_ALU_4 && OP_IS_CSEL_V(ins->alu.op))
+ min_alignment[dest] = 4; /* 1 << 4 = 16 bytes = vec4 */
+
}
- for (unsigned i = 0; i < ctx->temp_count; ++i)
+ for (unsigned i = 0; i < ctx->temp_count; ++i) {
+ lcra_set_alignment(l, i, min_alignment[i] ? min_alignment[i] : 2);
lcra_restrict_range(l, i, (found_class[i] + 1) * 4);
+ }
free(found_class);
+ free(min_alignment);
/* Next, we'll determine semantic class. We default to zero (work).
* But, if we're used with a special operation, that will force us to a
set_class(l->class, ins->src[1], REG_CLASS_LDST);
set_class(l->class, ins->src[2], REG_CLASS_LDST);
- if (OP_IS_VEC4_ONLY(ins->load_store.op))
+ if (OP_IS_VEC4_ONLY(ins->load_store.op)) {
lcra_restrict_range(l, ins->dest, 16);
+ lcra_restrict_range(l, ins->src[0], 16);
+ lcra_restrict_range(l, ins->src[1], 16);
+ lcra_restrict_range(l, ins->src[2], 16);
+ }
} else if (ins->type == TAG_TEXTURE_4) {
set_class(l->class, ins->dest, REG_CLASS_TEXW);
set_class(l->class, ins->src[0], REG_CLASS_TEXR);
set_class(l->class, ins->src[1], REG_CLASS_TEXR);
set_class(l->class, ins->src[2], REG_CLASS_TEXR);
+ set_class(l->class, ins->src[3], REG_CLASS_TEXR);
}
}
assert(check_read_class(l->class, ins->type, ins->src[2]));
}
- /* Mark writeout to r0 */
+ /* Mark writeout to r0 (depth to r1.x, stencil to r1.y), render target to r1.z, unknown to r1.w */
mir_foreach_instr_global(ctx, ins) {
- if (ins->compact_branch && ins->writeout && ins->src[0] < ctx->temp_count)
- l->solutions[ins->src[0]] = 0;
+ if (!(ins->compact_branch && ins->writeout)) continue;
+
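+ /* Solutions are byte offsets: 16 bytes per register plus 4 bytes per
+  * 32-bit component, so (16 * 1) + COMPONENT_Z * 4 pins a node to r1.z */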
+ if (ins->src[0] < ctx->temp_count) {
+ if (ins->writeout_depth)
+ l->solutions[ins->src[0]] = (16 * 1) + COMPONENT_X * 4;
+ else if (ins->writeout_stencil)
+ l->solutions[ins->src[0]] = (16 * 1) + COMPONENT_Y * 4;
+ else
+ l->solutions[ins->src[0]] = 0;
+ }
+
+ if (ins->src[1] < ctx->temp_count)
+ l->solutions[ins->src[1]] = (16 * 1) + COMPONENT_Z * 4;
+
+ if (ins->src[2] < ctx->temp_count)
+ l->solutions[ins->src[2]] = (16 * 1) + COMPONENT_W * 4;
}
mir_compute_interference(ctx, l);
return l;
}
+
/* Once registers have been decided via register allocation
* (allocate_registers), we need to rewrite the MIR to use registers instead of
* indices */
unsigned src2 = ins->src[1];
unsigned src3 = ins->src[2];
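+ /* The arg_1/arg_2 fields appear to select a full 32-bit component, so
+  * look both sources up as 32-bit regardless of their declared size */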
+ midgard_reg_mode m32 = midgard_reg_mode_32;
if (src2 != ~0) {
- struct phys_reg src = index_to_reg(ctx, l, src2, mir_srcsize(ins, 1));
+ struct phys_reg src = index_to_reg(ctx, l, src2, m32);
unsigned component = src.offset / src.size;
assert(component * src.size == src.offset);
ins->load_store.arg_1 |= midgard_ldst_reg(src.reg, component);
}
if (src3 != ~0) {
- struct phys_reg src = index_to_reg(ctx, l, src3, mir_srcsize(ins, 2));
+ struct phys_reg src = index_to_reg(ctx, l, src3, m32);
unsigned component = src.offset / src.size;
assert(component * src.size == src.offset);
ins->load_store.arg_2 |= midgard_ldst_reg(src.reg, component);
}
case TAG_TEXTURE_4: {
+ if (ins->texture.op == TEXTURE_OP_BARRIER)
+ break;
+
/* Grab RA results */
struct phys_reg dest = index_to_reg(ctx, l, ins->dest, mir_typesize(ins));
struct phys_reg coord = index_to_reg(ctx, l, ins->src[1], mir_srcsize(ins, 1));
struct phys_reg lod = index_to_reg(ctx, l, ins->src[2], mir_srcsize(ins, 2));
-
- assert(dest.reg == 28 || dest.reg == 29);
- assert(coord.reg == 28 || coord.reg == 29);
+ struct phys_reg offset = index_to_reg(ctx, l, ins->src[3], mir_srcsize(ins, 3));
/* First, install the texture coordinate */
ins->texture.in_reg_full = 1;
ins->texture.in_reg_upper = 0;
- ins->texture.in_reg_select = coord.reg - 28;
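+ /* Texture registers may alias the ldst or work files on T*20 (see
+  * allocate_registers), so take the select bit from the low bit of the
+  * assigned register rather than assuming r28/r29 */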
+ ins->texture.in_reg_select = coord.reg & 1;
offset_swizzle(ins->swizzle[1], coord.offset, coord.size, 0);
/* Next, install the destination */
ins->texture.out_full = 1;
ins->texture.out_upper = 0;
- ins->texture.out_reg_select = dest.reg - 28;
+ ins->texture.out_reg_select = dest.reg & 1;
offset_swizzle(ins->swizzle[0], 0, 4, dest.offset);
mir_set_bytemask(ins, mir_bytemask(ins) << dest.offset);
if (ins->src[2] != ~0) {
assert(!(lod.offset & 3));
midgard_tex_register_select sel = {
- .select = lod.reg,
+ .select = lod.reg & 1,
.full = 1,
.component = lod.offset / 4
};
ins->texture.bias = packed;
}
+ /* If there is an offset register, install it */
+ if (ins->src[3] != ~0) {
+ unsigned x = offset.offset / 4;
+ unsigned y = x + 1;
+ unsigned z = x + 2;
+
+ /* Check range, TODO: half-registers */
+ assert(z < 4);
+
+ ins->texture.offset =
+ (1) | /* full */
+ (offset.reg & 1) << 1 | /* select */
+ (0 << 2) | /* upper */
+ (x << 3) | /* swizzle */
+ (y << 5) | /* swizzle */
+ (z << 7); /* swizzle */
+ }
+
break;
}
}
}
-void
+static void
install_registers(compiler_context *ctx, struct lcra_state *l)
{
mir_foreach_instr_global(ctx, ins)
install_registers_instr(ctx, l, ins);
}
+
+/* If register allocation fails, find the best spill node */
+
+static signed
+mir_choose_spill_node(
+ compiler_context *ctx,
+ struct lcra_state *l)
+{
+ /* We can't spill a previously spilled value or an unspill */
+
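+ /* A negative cost removes the node from the spill chooser's
+  * consideration entirely */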
+ mir_foreach_instr_global(ctx, ins) {
+ if (ins->no_spill & (1 << l->spill_class)) {
+ lcra_set_node_spill_cost(l, ins->dest, -1);
+
+ if (l->spill_class != REG_CLASS_WORK) {
+ mir_foreach_src(ins, s)
+ lcra_set_node_spill_cost(l, ins->src[s], -1);
+ }
+ }
+ }
+
+ return lcra_get_best_spill_node(l);
+}
+
+/* Once we've chosen a spill node, spill it */
+
+static void
+mir_spill_register(
+ compiler_context *ctx,
+ unsigned spill_node,
+ unsigned spill_class,
+ unsigned *spill_count)
+{
+ unsigned spill_index = ctx->temp_count;
+
+ /* We have a spill node, so check the class. Work registers
+ * legitimately spill to TLS, but special registers just spill to work
+ * registers */
+
+ bool is_special = spill_class != REG_CLASS_WORK;
+ bool is_special_w = spill_class == REG_CLASS_TEXW;
+
+ /* Allocate TLS slot (maybe) */
+ unsigned spill_slot = !is_special ? (*spill_count)++ : 0;
+
+ /* For TLS, replace all stores to the spilled node. For
+ * special reads, just keep as-is; the class will be demoted
+ * implicitly. For special writes, spill to a work register */
+
+ if (!is_special || is_special_w) {
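+ /* For a special write the "slot" is a fresh temporary destined for a
+  * work register, not a TLS slot */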
+ if (is_special_w)
+ spill_slot = spill_index++;
+
+ mir_foreach_block(ctx, _block) {
+ midgard_block *block = (midgard_block *) _block;
+ mir_foreach_instr_in_block_safe(block, ins) {
+ if (ins->dest != spill_node) continue;
+
+ midgard_instruction st;
+
+ if (is_special_w) {
+ st = v_mov(spill_node, spill_slot);
+ st.no_spill |= (1 << spill_class);
+ } else {
+ ins->dest = spill_index++;
+ ins->no_spill |= (1 << spill_class);
+ st = v_load_store_scratch(ins->dest, spill_slot, true, ins->mask);
+ }
+
+ /* Hint: don't rewrite this node */
+ st.hint = true;
+
+ mir_insert_instruction_after_scheduled(ctx, block, ins, st);
+
+ if (!is_special)
+ ctx->spills++;
+ }
+ }
+ }
+
+ /* For special reads, figure out how many bytes we need */
+ unsigned read_bytemask = 0;
+
+ mir_foreach_instr_global_safe(ctx, ins) {
+ read_bytemask |= mir_bytemask_of_read_components(ins, spill_node);
+ }
+
+ /* Insert a load from TLS before the first consecutive
+ * use of the node, rewriting to use spilled indices to
+ * break up the live range. Or, for special, insert a
+ * move. Ironically the latter *increases* register
+ * pressure, but the two uses of the spilling mechanism
+ * are somewhat orthogonal. (special spilling is to use
+ * work registers to back special registers; TLS
+ * spilling is to use memory to back work registers) */
+
+ mir_foreach_block(ctx, _block) {
+ midgard_block *block = (midgard_block *) _block;
+ mir_foreach_instr_in_block(block, ins) {
+ /* We can't rewrite the moves used to spill in the
+ * first place. These moves are hinted. */
+ if (ins->hint) continue;
+
+ /* If we don't use the spilled value, nothing to do */
+ if (!mir_has_arg(ins, spill_node)) continue;
+
+ unsigned index = 0;
+
+ if (!is_special_w) {
+ index = ++spill_index;
+
+ midgard_instruction *before = ins;
+ midgard_instruction st;
+
+ if (is_special) {
+ /* Move */
+ st = v_mov(spill_node, index);
+ st.no_spill |= (1 << spill_class);
+ } else {
+ /* TLS load */
+ st = v_load_store_scratch(index, spill_slot, false, 0xF);
+ }
+
+ /* Mask the load based on the component count
+ * actually needed to prevent RA loops */
+
+ st.mask = mir_from_bytemask(read_bytemask, midgard_reg_mode_32);
+
+ mir_insert_instruction_before_scheduled(ctx, block, before, st);
+ } else {
+ /* Special writes already have their move spilled in */
+ index = spill_slot;
+ }
+
+ /* Rewrite to use */
+ mir_rewrite_index_src_single(ins, spill_node, index);
+
+ if (!is_special)
+ ctx->fills++;
+ }
+ }
+
+ /* Reset hints */
+
+ mir_foreach_instr_global(ctx, ins) {
+ ins->hint = false;
+ }
+}
+
+/* Run register allocation in a loop, spilling until we succeed */
+
+void
+mir_ra(compiler_context *ctx)
+{
+ struct lcra_state *l = NULL;
+ bool spilled = false;
+ int iter_count = 1000; /* max iterations */
+
+ /* Number of 128-bit slots in memory we've spilled into */
+ unsigned spill_count = 0;
+
+ mir_create_pipeline_registers(ctx);
+
+ do {
+ if (spilled) {
+ signed spill_node = mir_choose_spill_node(ctx, l);
+
+ if (spill_node == -1) {
+ fprintf(stderr, "ERROR: Failed to choose spill node\n");
+ return;
+ }
+
+ mir_spill_register(ctx, spill_node, l->spill_class, &spill_count);
+ }
+
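+ /* Compact the index space and drop stale liveness before the next
+  * allocation attempt */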
+ mir_squeeze_index(ctx);
+ mir_invalidate_liveness(ctx);
+
+ if (l) {
+ lcra_free(l);
+ l = NULL;
+ }
+
+ l = allocate_registers(ctx, &spilled);
+ } while (spilled && ((iter_count--) > 0));
+
+ if (iter_count <= 0) {
+ fprintf(stderr, "panfrost: Gave up allocating registers, rendering will be incomplete\n");
+ assert(0);
+ }
+
+ /* Report spilling information. spill_count is in 128-bit slots (vec4 x
+ * fp32), but tls_size is in bytes, so multiply by 16 */
+
+ ctx->tls_size = spill_count * 16;
+
+ install_registers(ctx, l);
+
+ lcra_free(l);
+}