The register allocator prefers low-index registers from vc4_regs[] in the
configuration we're using, which is good because it means we prioritize
allocating the accumulators (which are faster). On the other hand, it was
causing raddr conflicts because everything beyond r0-r2 ended up in
regfile A until you got massive register pressure. By interleaving the
two regfiles, we end up with more instruction pairing thanks to
non-conflicting raddrs and QPU_WSes.
total instructions in shared programs: 55957 -> 52719 (-5.79%)
instructions in affected programs: 46855 -> 43617 (-6.91%)
{ QPU_MUX_R3, 0},
{ QPU_MUX_R4, 0},
QPU_R(A, 0),
{ QPU_MUX_R3, 0},
{ QPU_MUX_R4, 0},
QPU_R(A, 0),
- QPU_R(A, 1),
- QPU_R(A, 2),
- QPU_R(A, 3),
- QPU_R(A, 4),
- QPU_R(A, 5),
- QPU_R(A, 6),
- QPU_R(A, 7),
- QPU_R(A, 8),
- QPU_R(A, 9),
- QPU_R(A, 10),
- QPU_R(A, 11),
- QPU_R(A, 12),
- QPU_R(A, 13),
- QPU_R(A, 14),
- QPU_R(A, 15),
- QPU_R(A, 16),
- QPU_R(A, 17),
- QPU_R(A, 18),
- QPU_R(A, 19),
- QPU_R(A, 20),
- QPU_R(A, 21),
- QPU_R(A, 22),
- QPU_R(A, 23),
- QPU_R(A, 24),
- QPU_R(A, 25),
- QPU_R(A, 26),
- QPU_R(A, 27),
- QPU_R(A, 28),
- QPU_R(A, 29),
- QPU_R(A, 30),
- QPU_R(A, 31),
QPU_R(B, 31),
};
#define ACC_INDEX 0
QPU_R(B, 31),
};
#define ACC_INDEX 0
-#define A_INDEX (ACC_INDEX + 5)
-#define B_INDEX (A_INDEX + 32)
+#define AB_INDEX (ACC_INDEX + 5)
static void
vc4_alloc_reg_set(struct vc4_context *vc4)
{
static void
vc4_alloc_reg_set(struct vc4_context *vc4)
{
- assert(vc4_regs[A_INDEX].addr == 0);
- assert(vc4_regs[B_INDEX].addr == 0);
- STATIC_ASSERT(ARRAY_SIZE(vc4_regs) == B_INDEX + 32);
+ assert(vc4_regs[AB_INDEX].addr == 0);
+ assert(vc4_regs[AB_INDEX + 1].addr == 0);
+ STATIC_ASSERT(ARRAY_SIZE(vc4_regs) == AB_INDEX + 64);
}
vc4->reg_class_a = ra_alloc_reg_class(vc4->regs);
}
vc4->reg_class_a = ra_alloc_reg_class(vc4->regs);
- for (uint32_t i = A_INDEX; i < A_INDEX + 32; i++)
+ for (uint32_t i = AB_INDEX; i < AB_INDEX + 64; i += 2)
ra_class_add_reg(vc4->regs, vc4->reg_class_a, i);
ra_set_finalize(vc4->regs, NULL);
ra_class_add_reg(vc4->regs, vc4->reg_class_a, i);
ra_set_finalize(vc4->regs, NULL);
case QOP_FRAG_Z:
def[inst->dst.index] = 0;
ra_set_node_reg(g, inst->dst.index,
case QOP_FRAG_Z:
def[inst->dst.index] = 0;
ra_set_node_reg(g, inst->dst.index,
- B_INDEX + QPU_R_FRAG_PAYLOAD_ZW);
+ AB_INDEX + QPU_R_FRAG_PAYLOAD_ZW * 2 + 1);
break;
case QOP_FRAG_W:
def[inst->dst.index] = 0;
ra_set_node_reg(g, inst->dst.index,
break;
case QOP_FRAG_W:
def[inst->dst.index] = 0;
ra_set_node_reg(g, inst->dst.index,
- A_INDEX + QPU_R_FRAG_PAYLOAD_ZW);
+ AB_INDEX + QPU_R_FRAG_PAYLOAD_ZW * 2);
break;
case QOP_TEX_RESULT:
break;
case QOP_TEX_RESULT: