QPU_R(B, 31),
};
#define ACC_INDEX 0
-#define AB_INDEX (ACC_INDEX + 5)
+#define ACC_COUNT 5
+#define AB_INDEX (ACC_INDEX + ACC_COUNT)
+#define AB_COUNT 64
static void
vc4_alloc_reg_set(struct vc4_context *vc4)
vc4->regs = ra_alloc_reg_set(vc4, ARRAY_SIZE(vc4_regs), true);
- vc4->reg_class_any = ra_alloc_reg_class(vc4->regs);
- vc4->reg_class_a_or_b_or_acc = ra_alloc_reg_class(vc4->regs);
- vc4->reg_class_r4_or_a = ra_alloc_reg_class(vc4->regs);
- vc4->reg_class_a = ra_alloc_reg_class(vc4->regs);
+ /* The physical regfiles split us into two classes, with [0] being the
+ * whole space and [1] being the bottom half (for threaded fragment
+ * shaders).
+ */
+ for (int i = 0; i < 2; i++) {
+ vc4->reg_class_any[i] = ra_alloc_reg_class(vc4->regs);
+ vc4->reg_class_a_or_b[i] = ra_alloc_reg_class(vc4->regs);
+ vc4->reg_class_a_or_b_or_acc[i] = ra_alloc_reg_class(vc4->regs);
+ vc4->reg_class_r4_or_a[i] = ra_alloc_reg_class(vc4->regs);
+ vc4->reg_class_a[i] = ra_alloc_reg_class(vc4->regs);
+ }
vc4->reg_class_r0_r3 = ra_alloc_reg_class(vc4->regs);
- for (uint32_t i = 0; i < ARRAY_SIZE(vc4_regs); i++) {
- /* Reserve ra31/rb31 for spilling fixup_raddr_conflict() in
+
+ /* r0-r3: accumulators, usable by both the full-space [0] and
+ * threaded bottom-half [1] classes.
+ */
+ for (uint32_t i = ACC_INDEX; i < ACC_INDEX + 4; i++) {
+ ra_class_add_reg(vc4->regs, vc4->reg_class_r0_r3, i);
+ ra_class_add_reg(vc4->regs, vc4->reg_class_a_or_b_or_acc[0], i);
+ ra_class_add_reg(vc4->regs, vc4->reg_class_a_or_b_or_acc[1], i);
+ }
+
+ /* R4 gets a special class because it can't be written as a general
+ * purpose register. (it's TMU_NOSWAP as a write address).
+ */
+ for (int i = 0; i < 2; i++) {
+ ra_class_add_reg(vc4->regs, vc4->reg_class_r4_or_a[i],
+ ACC_INDEX + 4);
+ ra_class_add_reg(vc4->regs, vc4->reg_class_any[i],
+ ACC_INDEX + 4);
+ }
+
+ /* A/B: vc4_regs[] interleaves the two files, with even offsets from
+ * AB_INDEX being file A.
+ */
+ for (uint32_t i = AB_INDEX; i < AB_INDEX + AB_COUNT; i++) {
+ /* Reserve ra14/rb14 for spilling fixup_raddr_conflict() in
 * vc4_qpu_emit.c
 */
- if (vc4_regs[i].addr == 31)
+ if (vc4_regs[i].addr == 14)
 continue;
- /* R4 can't be written as a general purpose
- * register. (it's TMU_NOSWAP as a write address).
- */
- if (vc4_regs[i].mux == QPU_MUX_R4) {
- ra_class_add_reg(vc4->regs, vc4->reg_class_r4_or_a, i);
- ra_class_add_reg(vc4->regs, vc4->reg_class_any, i);
- continue;
+ ra_class_add_reg(vc4->regs, vc4->reg_class_any[0], i);
+ ra_class_add_reg(vc4->regs, vc4->reg_class_a_or_b[0], i);
+ ra_class_add_reg(vc4->regs, vc4->reg_class_a_or_b_or_acc[0], i);
+
+ /* Only the bottom 16 registers of each file are available
+ * to the threaded [1] classes.
+ */
+ if (vc4_regs[i].addr < 16) {
+ ra_class_add_reg(vc4->regs, vc4->reg_class_any[1], i);
+ ra_class_add_reg(vc4->regs, vc4->reg_class_a_or_b[1], i);
+ ra_class_add_reg(vc4->regs, vc4->reg_class_a_or_b_or_acc[1], i);
}
- if (vc4_regs[i].mux <= QPU_MUX_R3)
- ra_class_add_reg(vc4->regs, vc4->reg_class_r0_r3, i);
- ra_class_add_reg(vc4->regs, vc4->reg_class_any, i);
- ra_class_add_reg(vc4->regs, vc4->reg_class_a_or_b_or_acc, i);
- }
+ /* A only: even offsets from AB_INDEX are file A. */
+ if (((i - AB_INDEX) & 1) == 0) {
+ ra_class_add_reg(vc4->regs, vc4->reg_class_a[0], i);
+ ra_class_add_reg(vc4->regs, vc4->reg_class_r4_or_a[0], i);
- for (uint32_t i = AB_INDEX; i < AB_INDEX + 64; i += 2) {
- ra_class_add_reg(vc4->regs, vc4->reg_class_a, i);
- ra_class_add_reg(vc4->regs, vc4->reg_class_r4_or_a, i);
+ if (vc4_regs[i].addr < 16) {
+ ra_class_add_reg(vc4->regs,
+ vc4->reg_class_a[1], i);
+ ra_class_add_reg(vc4->regs,
+ vc4->reg_class_r4_or_a[1], i);
+ }
+ }
}
ra_set_finalize(vc4->regs, NULL);
#define CLASS_BIT_R4 (1 << 2)
#define CLASS_BIT_R0_R3 (1 << 4)
+/* Round-robin state for vc4_ra_select_callback(): the next accumulator
+ * offset and the next A/B regfile offset to try handing out.
+ */
+struct vc4_ra_select_callback_data {
+ uint32_t next_acc;
+ uint32_t next_ab;
+};
+
+/* Register-selection hook installed via ra_set_select_reg_callback().
+ * Picks one register from the candidate bitset "regs", preferring r4,
+ * then the accumulators, then the A/B regfiles.
+ */
+static unsigned int
+vc4_ra_select_callback(struct ra_graph *g, BITSET_WORD *regs, void *data)
+{
+ struct vc4_ra_select_callback_data *vc4_ra = data;
+
+ /* If r4 is available, always choose it -- few other things can go
+ * there, and choosing anything else means inserting a mov.
+ */
+ if (BITSET_TEST(regs, ACC_INDEX + 4))
+ return ACC_INDEX + 4;
+
+ /* Choose an accumulator if possible (no delay between write and
+ * read), but round-robin through them to give post-RA instruction
+ * selection more options.
+ */
+ for (int i = 0; i < ACC_COUNT; i++) {
+ int acc_off = (vc4_ra->next_acc + i) % ACC_COUNT;
+ int acc = ACC_INDEX + acc_off;
+
+ if (BITSET_TEST(regs, acc)) {
+ vc4_ra->next_acc = acc_off + 1;
+ return acc;
+ }
+ }
+
+ /* Fall back to the A/B regfiles, also round-robin. */
+ for (int i = 0; i < AB_COUNT; i++) {
+ int ab_off = (vc4_ra->next_ab + i) % AB_COUNT;
+ int ab = AB_INDEX + ab_off;
+
+ if (BITSET_TEST(regs, ab)) {
+ vc4_ra->next_ab = ab_off + 1;
+ return ab;
+ }
+ }
+
+ unreachable("RA must pass us at least one possible reg.");
+}
+
/**
* Returns a mapping from QFILE_TEMP indices to struct qpu_regs.
*
uint8_t class_bits[c->num_temps];
struct qpu_reg *temp_registers = calloc(c->num_temps,
sizeof(*temp_registers));
+ struct vc4_ra_select_callback_data callback_data = {
+ .next_acc = 0,
+ .next_ab = 0,
+ };
/* If things aren't ever written (undefined values), just read from
* r0.
/* Compute the live ranges so we can figure out interference. */
qir_calculate_live_intervals(c);
+ ra_set_select_reg_callback(g, vc4_ra_select_callback, &callback_data);
+
for (uint32_t i = 0; i < c->num_temps; i++) {
map[i].temp = i;
map[i].priority = c->temp_end[i] - c->temp_start[i];
if (c->temp_start[i] < ip && c->temp_end[i] > ip)
class_bits[i] &= ~CLASS_BIT_R4;
}
+
+ /* If we're doing a conditional write of something
+ * writing R4 (math, tex results), then make sure that
+ * we store in a temp so that we actually
+ * conditionally move the result.
+ */
+ if (inst->cond != QPU_COND_ALWAYS)
+ class_bits[inst->dst.index] &= ~CLASS_BIT_R4;
} else {
/* R4 can't be written as a general purpose
* register. (it's TMU_NOSWAP as a write address).
class_bits[inst->src[0].index] &= CLASS_BIT_R0_R3;
break;
+ case QOP_THRSW:
+ /* All accumulators are invalidated across a thread
+ * switch.
+ */
+ for (int i = 0; i < c->num_temps; i++) {
+ if (c->temp_start[i] < ip && c->temp_end[i] > ip)
+ class_bits[i] &= ~(CLASS_BIT_R0_R3 |
+ CLASS_BIT_R4);
+ }
+ break;
+
default:
break;
}
* can only be done from regfile A, while float unpacks can be
* either A or R4.
*/
- for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
+ for (int i = 0; i < qir_get_nsrc(inst); i++) {
if (inst->src[i].file == QFILE_TEMP &&
inst->src[i].pack) {
if (qir_is_float_input(inst)) {
switch (class_bits[i]) {
case CLASS_BIT_A | CLASS_BIT_B | CLASS_BIT_R4 | CLASS_BIT_R0_R3:
- ra_set_node_class(g, node, vc4->reg_class_any);
+ ra_set_node_class(g, node,
+ vc4->reg_class_any[c->fs_threaded]);
+ break;
+ case CLASS_BIT_A | CLASS_BIT_B:
+ ra_set_node_class(g, node,
+ vc4->reg_class_a_or_b[c->fs_threaded]);
break;
case CLASS_BIT_A | CLASS_BIT_B | CLASS_BIT_R0_R3:
- ra_set_node_class(g, node, vc4->reg_class_a_or_b_or_acc);
+ ra_set_node_class(g, node,
+ vc4->reg_class_a_or_b_or_acc[c->fs_threaded]);
break;
case CLASS_BIT_A | CLASS_BIT_R4:
- ra_set_node_class(g, node, vc4->reg_class_r4_or_a);
+ ra_set_node_class(g, node,
+ vc4->reg_class_r4_or_a[c->fs_threaded]);
break;
case CLASS_BIT_A:
- ra_set_node_class(g, node, vc4->reg_class_a);
+ ra_set_node_class(g, node,
+ vc4->reg_class_a[c->fs_threaded]);
break;
case CLASS_BIT_R0_R3:
ra_set_node_class(g, node, vc4->reg_class_r0_r3);
break;
+
default:
+ /* DDX/DDY used across a thread switch might get us
+ * here.
+ */
+ if (c->fs_threaded) {
+ c->failed = true;
+ free(temp_registers);
+ return NULL;
+ }
+
fprintf(stderr, "temp %d: bad class bits: 0x%x\n",
i, class_bits[i]);
abort();
bool ok = ra_allocate(g);
if (!ok) {
- fprintf(stderr, "Failed to register allocate:\n");
- qir_dump(c);
+ if (!c->fs_threaded) {
+ fprintf(stderr, "Failed to register allocate:\n");
+ qir_dump(c);
+ }
+
c->failed = true;
+ free(temp_registers);
return NULL;
}