if (instr->regs_count == 0)
continue;
/* couple special cases: */
- if (writes_addr(instr) || writes_pred(instr)) {
+ if (writes_addr0(instr) || writes_addr1(instr) || writes_pred(instr)) {
id->cls = -1;
} else if (instr->regs[0]->flags & IR3_REG_ARRAY) {
id->cls = total_class_count;
+/* Find the first free register in the inclusive range [min, max]:
+ * returns the index of the first set bit in 'regs' within that
+ * range, or -1 if nothing in the range is available.
+ *
+ * NOTE(review): this hunk widens the scan from [min, max) to
+ * [min, max] — callers appear to pass 'max' as an inclusive upper
+ * bound (see base + max_target uses); confirm no caller still
+ * assumes the old exclusive semantics.
+ */
static int
pick_in_range(BITSET_WORD *regs, unsigned min, unsigned max)
{
- for (unsigned i = min; i < max; i++) {
+ for (unsigned i = min; i <= max; i++) {
+ if (BITSET_TEST(regs, i)) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+static int
+pick_in_range_rev(BITSET_WORD *regs, int min, int max)
+{
+ for (int i = max; i >= min; i--) {
if (BITSET_TEST(regs, i)) {
return i;
}
{
struct ir3_ra_ctx *ctx = data;
unsigned int class = ra_get_node_class(ctx->g, n);
+ bool half, high;
+ int sz = ra_class_to_size(class, &half, &high);
+
+ assert (sz > 0);
/* dimensions within the register class: */
unsigned max_target, start;
*/
unsigned base;
+ /* TODO I think eventually we want to round-robin in vector pass
+ * as well, but needs some more work to calculate # of live vals
+ * for this. (Maybe with some work, we could just figure out
+ * the scalar target and use that, since that is what we care
+ * about in the end.. but that would mean setting up use-def/
+ * liveranges for scalar pass before doing vector pass.)
+ *
+ * For now, in the vector class, just move assignments for scalar
+ * vals higher to hopefully prevent them from limiting where vecN
+ * values can be placed. Since the scalar values are re-assigned
+ * in the 2nd pass, we don't really care where they end up in the
+ * vector pass.
+ */
+ if (!ctx->scalar_pass) {
+ base = ctx->set->gpr_to_ra_reg[class][0];
+ if (high) {
+ max_target = HIGH_CLASS_REGS(class - HIGH_OFFSET);
+ } else if (half) {
+ max_target = HALF_CLASS_REGS(class - HALF_OFFSET);
+ } else {
+ max_target = CLASS_REGS(class);
+ }
+
+ if ((sz == 1) && !high) {
+ return pick_in_range_rev(regs, base, base + max_target);
+ } else {
+ return pick_in_range(regs, base, base + max_target);
+ }
+ } else {
+ assert(sz == 1);
+ }
+
/* NOTE: this is only used in scalar pass, so the register
* class will be one of the scalar classes (ie. idx==0):
*/
- if (class == ctx->set->high_classes[0]) {
+ base = ctx->set->gpr_to_ra_reg[class][0];
+ if (high) {
max_target = HIGH_CLASS_REGS(0);
start = 0;
- base = ctx->set->gpr_to_ra_reg[HIGH_OFFSET][0];
- } else if (class == ctx->set->half_classes[0]) {
+ } else if (half) {
max_target = ctx->max_target;
start = ctx->start_search_reg;
- base = ctx->set->gpr_to_ra_reg[HALF_OFFSET][0];
- } else if (class == ctx->set->classes[0]) {
+ } else {
max_target = ctx->max_target / 2;
start = ctx->start_search_reg;
- base = ctx->set->gpr_to_ra_reg[0][0];
- } else {
- unreachable("unexpected register class!");
}
/* For cat4 instructions, if the src reg is already assigned, and
* for write after read hazards:
*/
struct ir3_instruction *instr = name_to_instr(ctx, n);
- if (is_sfu(instr) && instr->regs[1]->instr) {
- struct ir3_instruction *src = instr->regs[1]->instr;
- unsigned src_n = scalar_name(ctx, src, 0);
+ if (is_sfu(instr)) {
+ struct ir3_register *src = instr->regs[1];
+ int src_n;
+
+ if ((src->flags & IR3_REG_ARRAY) && !(src->flags & IR3_REG_RELATIV)) {
+ struct ir3_array *arr = ir3_lookup_array(ctx->ir, src->array.id);
+ src_n = arr->base + src->array.offset;
+ } else {
+ src_n = scalar_name(ctx, src->instr, 0);
+ }
unsigned reg = ra_get_node_reg(ctx->g, src_n);
return reg;
}
}
+ } else if (is_tex_or_prefetch(instr)) {
+ /* we could have a tex fetch w/ wrmask .z, for example.. these
+ * cannot land in r0.x since that would underflow when we
+ * subtract the offset. Ie. if we pick r0.z, and subtract
+ * the offset, the register encoded for dst will be r0.x
+ */
+ unsigned n = ffs(instr->regs[0]->wrmask);
+ debug_assert(n > 0);
+ unsigned offset = n - 1;
+ if (!half)
+ offset *= 2;
+ base += offset;
+ max_target -= offset;
}
int r = pick_in_range(regs, base + start, base + max_target);
unsigned n, base;
ir3_clear_mark(ctx->ir);
- n = ir3_count_instructions(ctx->ir);
+ n = ir3_count_instructions_ra(ctx->ir);
ctx->instrd = rzalloc_array(NULL, struct ir3_ra_instr_data, n);
ctx->use = rzalloc_array(ctx->g, unsigned, ctx->alloc_count);
/* TODO add selector callback for split (pre-a6xx) register file: */
- if (ctx->scalar_pass && (ctx->ir->compiler->gpu_id >= 600)) {
+ if (ctx->ir->compiler->gpu_id >= 600) {
ra_set_select_reg_callback(ctx->g, ra_select_reg_merged, ctx);
- ctx->name_to_instr = _mesa_hash_table_create(ctx->g,
- _mesa_hash_int, _mesa_key_int_equal);
+ if (ctx->scalar_pass) {
+ ctx->name_to_instr = _mesa_hash_table_create(ctx->g,
+ _mesa_hash_int, _mesa_key_int_equal);
+ }
}
}
/* the remaining live should match liveout (for extra sanity testing): */
if (RA_DEBUG) {
+ unsigned new_dead = 0;
+ BITSET_FOREACH_SET (name, live, ctx->alloc_count) {
+ /* Is this the last use? */
+ if (ctx->use[name] != block->end_ip)
+ continue;
+ new_dead += name_size(ctx, name);
+ d("NEW_DEAD: %u (new_dead=%u)", name, new_dead);
+ BITSET_CLEAR(live, name);
+ }
unsigned liveout = 0;
BITSET_FOREACH_SET (name, bd->liveout, ctx->alloc_count) {
liveout += name_size(ctx, name);
case 1: /* move instructions */
instr->cat1.dst_type = half_type(instr->cat1.dst_type);
break;
- case 3:
- switch (instr->opc) {
- case OPC_MAD_F32:
- /* Available for that dest is half and srcs are full.
- * eg. mad.f32 hr0, r0.x, r0.y, r0.z
- */
- if (instr->regs[1]->flags & IR3_REG_HALF)
- instr->opc = OPC_MAD_F16;
- break;
- case OPC_SEL_B32:
- instr->opc = OPC_SEL_B16;
- break;
- case OPC_SEL_S32:
- instr->opc = OPC_SEL_S16;
- break;
- case OPC_SEL_F32:
- instr->opc = OPC_SEL_F16;
- break;
- case OPC_SAD_S32:
- instr->opc = OPC_SAD_S16;
- break;
- /* instructions may already be fixed up: */
- case OPC_MAD_F16:
- case OPC_SEL_B16:
- case OPC_SEL_S16:
- case OPC_SEL_F16:
- case OPC_SAD_S16:
- break;
- default:
- assert(0);
- break;
- }
- break;
case 4:
switch (instr->opc) {
case OPC_RSQ:
case OPC_MOV:
instr->cat1.src_type = half_type(instr->cat1.src_type);
break;
+ case OPC_MAD_F32:
+ instr->opc = OPC_MAD_F16;
+ break;
+ case OPC_SEL_B32:
+ instr->opc = OPC_SEL_B16;
+ break;
+ case OPC_SEL_S32:
+ instr->opc = OPC_SEL_S16;
+ break;
+ case OPC_SEL_F32:
+ instr->opc = OPC_SEL_F16;
+ break;
+ case OPC_SAD_S32:
+ instr->opc = OPC_SAD_S16;
+ break;
default:
break;
}
+/* Decide whether 'instr' should receive its register assignment in
+ * the current RA pass.  Multi-component split/collect meta
+ * instructions (wrmask covering more than one component) are handled
+ * in the vector pass (!scalar_pass); everything else — including
+ * split/collect that touch only a single component — is handled in
+ * the scalar pass.
+ *
+ * NOTE(review): the split case inspects regs[1] (the source wrmask)
+ * while the collect case inspects regs[0] (the dest wrmask) —
+ * presumably intentional given split consumes a vector and collect
+ * produces one; verify against the meta-instr conventions.
+ */
static bool
should_assign(struct ir3_ra_ctx *ctx, struct ir3_instruction *instr)
{
- if ((instr->opc == OPC_META_SPLIT) ||
- (instr->opc == OPC_META_COLLECT))
+ if ((instr->opc == OPC_META_SPLIT) &&
+ (util_bitcount(instr->regs[1]->wrmask) > 1))
+ return !ctx->scalar_pass;
+ if ((instr->opc == OPC_META_COLLECT) &&
+ (util_bitcount(instr->regs[0]->wrmask) > 1))
return !ctx->scalar_pass;
return ctx->scalar_pass;
}
static void
ra_precolor(struct ir3_ra_ctx *ctx, struct ir3_instruction **precolor, unsigned nprecolor)
{
- unsigned num_precolor = 0;
for (unsigned i = 0; i < nprecolor; i++) {
if (precolor[i] && !(precolor[i]->flags & IR3_INSTR_UNUSED)) {
struct ir3_instruction *instr = precolor[i];
unsigned reg = ctx->set->gpr_to_ra_reg[id->cls][regid];
unsigned name = ra_name(ctx, id);
ra_set_node_reg(ctx->g, name, reg);
- num_precolor = MAX2(regid, num_precolor);
}
}
foreach_block (block, &ctx->ir->block_list) {
foreach_instr (instr, &block->instr_list) {
- if ((instr->opc != OPC_META_SPLIT) &&
- (instr->opc != OPC_META_COLLECT))
+ if (!writes_gpr(instr))
+ continue;
+
+ if (should_assign(ctx, instr))
continue;
precolor(ctx, instr);