/* Number of accumulator entries (r0-r3 plus r4) in the vc4_regs[] table;
 * they occupy vc4_regs[ACC_INDEX .. ACC_INDEX + ACC_COUNT - 1].
 * (ACC_INDEX itself is defined elsewhere in this file.)
 */
#define ACC_COUNT 5
/* First physical A/B register entry in vc4_regs[], immediately after the
 * accumulators.
 */
#define AB_INDEX (ACC_INDEX + ACC_COUNT)
/* Number of A/B entries.  The allocation loop below treats even offsets
 * from AB_INDEX as regfile A and odd offsets as regfile B, i.e. the table
 * interleaves the two files -- presumably ra0-31/rb0-31; confirm against
 * the vc4_regs[] definition.
 */
#define AB_COUNT 64
+
+static void
+vc4_alloc_reg_set(struct vc4_context *vc4)
+{
+ assert(vc4_regs[AB_INDEX].addr == 0);
+ assert(vc4_regs[AB_INDEX + 1].addr == 0);
+ STATIC_ASSERT(ARRAY_SIZE(vc4_regs) == AB_INDEX + 64);
+
+ if (vc4->regs)
+ return;
+
+ vc4->regs = ra_alloc_reg_set(vc4, ARRAY_SIZE(vc4_regs), true);
+
+ /* The physical regfiles split us into two classes, with [0] being the
+ * whole space and [1] being the bottom half (for threaded fragment
+ * shaders).
+ */
+ for (int i = 0; i < 2; i++) {
+ vc4->reg_class_any[i] = ra_alloc_reg_class(vc4->regs);
+ vc4->reg_class_a_or_b[i] = ra_alloc_reg_class(vc4->regs);
+ vc4->reg_class_a_or_b_or_acc[i] = ra_alloc_reg_class(vc4->regs);
+ vc4->reg_class_r4_or_a[i] = ra_alloc_reg_class(vc4->regs);
+ vc4->reg_class_a[i] = ra_alloc_reg_class(vc4->regs);
+ }
+ vc4->reg_class_r0_r3 = ra_alloc_reg_class(vc4->regs);
+
+ /* r0-r3 */
+ for (uint32_t i = ACC_INDEX; i < ACC_INDEX + 4; i++) {
+ ra_class_add_reg(vc4->regs, vc4->reg_class_r0_r3, i);
+ ra_class_add_reg(vc4->regs, vc4->reg_class_a_or_b_or_acc[0], i);
+ ra_class_add_reg(vc4->regs, vc4->reg_class_a_or_b_or_acc[1], i);
+ }
+
+ /* R4 gets a special class because it can't be written as a general
+ * purpose register. (it's TMU_NOSWAP as a write address).
+ */
+ for (int i = 0; i < 2; i++) {
+ ra_class_add_reg(vc4->regs, vc4->reg_class_r4_or_a[i],
+ ACC_INDEX + 4);
+ ra_class_add_reg(vc4->regs, vc4->reg_class_any[i],
+ ACC_INDEX + 4);
+ }
+
+ /* A/B */
+ for (uint32_t i = AB_INDEX; i < AB_INDEX + 64; i ++) {
+ /* Reserve ra14/rb14 for spilling fixup_raddr_conflict() in
+ * vc4_qpu_emit.c
+ */
+ if (vc4_regs[i].addr == 14)
+ continue;
+
+ ra_class_add_reg(vc4->regs, vc4->reg_class_any[0], i);
+ ra_class_add_reg(vc4->regs, vc4->reg_class_a_or_b[0], i);
+ ra_class_add_reg(vc4->regs, vc4->reg_class_a_or_b_or_acc[0], i);
+
+ if (vc4_regs[i].addr < 16) {
+ ra_class_add_reg(vc4->regs, vc4->reg_class_any[1], i);
+ ra_class_add_reg(vc4->regs, vc4->reg_class_a_or_b[1], i);
+ ra_class_add_reg(vc4->regs, vc4->reg_class_a_or_b_or_acc[1], i);
+ }
+
+
+ /* A only */
+ if (((i - AB_INDEX) & 1) == 0) {
+ ra_class_add_reg(vc4->regs, vc4->reg_class_a[0], i);
+ ra_class_add_reg(vc4->regs, vc4->reg_class_r4_or_a[0], i);
+
+ if (vc4_regs[i].addr < 16) {
+ ra_class_add_reg(vc4->regs,
+ vc4->reg_class_a[1], i);
+ ra_class_add_reg(vc4->regs,
+ vc4->reg_class_r4_or_a[1], i);
+ }
+ }
+ }
+
+ ra_set_finalize(vc4->regs, NULL);
+}
+
/* Mapping from a register-allocation graph node back to the QIR temp it
 * represents, with a priority used to order the nodes.
 */
struct node_to_temp_map {
        uint32_t temp;
        uint32_t priority;
};

/* qsort() comparator ordering node_to_temp_map entries by ascending
 * priority.
 *
 * Returns the sign of the comparison rather than the raw difference:
 * the priorities are uint32_t, so "a->priority - b->priority" would be
 * computed in unsigned arithmetic and then truncated to int, yielding
 * the wrong sign whenever the true difference exceeds INT_MAX.
 */
static int
node_to_temp_priority(const void *in_a, const void *in_b)
{
        const struct node_to_temp_map *a = in_a;
        const struct node_to_temp_map *b = in_b;

        return (a->priority > b->priority) - (a->priority < b->priority);
}
+
/* Bit flags naming which register files a temp is allowed to live in;
 * used to select among the reg_class_* sets built above.
 */
#define CLASS_BIT_A (1 << 0)
#define CLASS_BIT_B (1 << 1)
#define CLASS_BIT_R4 (1 << 2)
/* NOTE(review): (1 << 3) is skipped -- nothing in this chunk uses it;
 * confirm whether another file claims that bit before reusing it.
 */
#define CLASS_BIT_R0_R3 (1 << 4)

/* Round-robin state for vc4_ra_select_callback(): the next accumulator
 * offset (0..ACC_COUNT-1) and next A/B offset (0..AB_COUNT-1) to try
 * first on the following selection.
 */
struct vc4_ra_select_callback_data {
        uint32_t next_acc;
        uint32_t next_ab;
};
+
+static unsigned int
+vc4_ra_select_callback(struct ra_graph *g, BITSET_WORD *regs, void *data)
+{
+ struct vc4_ra_select_callback_data *vc4_ra = data;
+
+ /* If r4 is available, always choose it -- few other things can go
+ * there, and choosing anything else means inserting a mov.
+ */
+ if (BITSET_TEST(regs, ACC_INDEX + 4))
+ return ACC_INDEX + 4;
+
+ /* Choose an accumulator if possible (no delay between write and
+ * read), but round-robin through them to give post-RA instruction
+ * selection more options.
+ */
+ for (int i = 0; i < ACC_COUNT; i++) {
+ int acc_off = (vc4_ra->next_acc + i) % ACC_COUNT;
+ int acc = ACC_INDEX + acc_off;
+
+ if (BITSET_TEST(regs, acc)) {
+ vc4_ra->next_acc = acc_off + 1;
+ return acc;
+ }
+ }
+
+ for (int i = 0; i < AB_COUNT; i++) {
+ int ab_off = (vc4_ra->next_ab + i) % AB_COUNT;
+ int ab = AB_INDEX + ab_off;
+
+ if (BITSET_TEST(regs, ab)) {
+ vc4_ra->next_ab = ab_off + 1;
+ return ab;
+ }
+ }
+
+ unreachable("RA must pass us at least one possible reg.");
+}