vc4: Don't forget to store stencil along with depth when storing either.
diff --git a/src/gallium/drivers/vc4/vc4_qpu_emit.c b/src/gallium/drivers/vc4/vc4_qpu_emit.c
index 330876734d1b6bbee068553d489e5b7a332f4b07..bbfefebbbe4f8086181a5b892997a292b8c7e5b7 100644
--- a/src/gallium/drivers/vc4/vc4_qpu_emit.c
+++ b/src/gallium/drivers/vc4/vc4_qpu_emit.c
@@ -21,7 +21,6 @@
  * IN THE SOFTWARE.
  */
 
-#include <stdio.h>
 #include <inttypes.h>
 
 #include "vc4_context.h"
 #include "vc4_qpu.h"
 
 static void
-vc4_dump_program(struct qcompile *c)
+vc4_dump_program(struct vc4_compile *c)
 {
         fprintf(stderr, "%s:\n", qir_get_stage_name(c->stage));
 
-        for (int i = 0; i < c->num_qpu_insts; i++) {
+        for (int i = 0; i < c->qpu_inst_count; i++) {
                 fprintf(stderr, "0x%016"PRIx64" ", c->qpu_insts[i]);
                 vc4_qpu_disasm(&c->qpu_insts[i], 1);
                 fprintf(stderr, "\n");
         }
 }
 
-void
-vc4_generate_code(struct qcompile *c)
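+/* Generated instructions are first queued on a list so that fixups
+ * (condition codes, signals, pack/unpack bits) can be applied to the most
+ * recently emitted instruction, and are only later serialized into the final
+ * array with the hardware scheduling rules applied.
+ */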
+struct queued_qpu_inst {
+        struct simple_node link;
+        uint64_t inst;
+};
+
+static void
+queue(struct vc4_compile *c, uint64_t inst)
 {
-        uint64_t *insts = malloc(sizeof(uint64_t) * 1024); /* XXX: sizing */
-        uint32_t ni = 0;
-        struct qpu_reg allocate_to_qpu_reg[4 + 32 + 32];
-        bool reg_in_use[ARRAY_SIZE(allocate_to_qpu_reg)];
-        int *reg_allocated = calloc(c->num_temps, sizeof(*reg_allocated));
-        int *reg_uses_remaining =
-                calloc(c->num_temps, sizeof(*reg_uses_remaining));
-
-        for (int i = 0; i < ARRAY_SIZE(reg_in_use); i++)
-                reg_in_use[i] = false;
-        for (int i = 0; i < c->num_temps; i++)
-                reg_allocated[i] = -1;
-        for (int i = 0; i < 4; i++)
-                allocate_to_qpu_reg[i] = qpu_rn(i);
-        for (int i = 0; i < 32; i++)
-                allocate_to_qpu_reg[i + 4] = qpu_ra(i);
-        for (int i = 0; i < 32; i++)
-                allocate_to_qpu_reg[i + 4 + 32] = qpu_rb(i);
+        struct queued_qpu_inst *q = calloc(1, sizeof(*q));
+        q->inst = inst;
+        insert_at_tail(&c->qpu_inst_list, &q->link);
+}
 
-        struct simple_node *node;
-        foreach(node, &c->instructions) {
-                struct qinst *qinst = (struct qinst *)node;
+static uint64_t *
+last_inst(struct vc4_compile *c)
+{
+        struct queued_qpu_inst *q =
+                (struct queued_qpu_inst *)last_elem(&c->qpu_inst_list);
+        return &q->inst;
+}
 
-                if (qinst->dst.file == QFILE_TEMP)
-                        reg_uses_remaining[qinst->dst.index]++;
-                for (int i = 0; i < qir_get_op_nsrc(qinst->op); i++) {
-                        if (qinst->src[i].file == QFILE_TEMP)
-                                reg_uses_remaining[qinst->src[i].index]++;
+static void
+set_last_cond_add(struct vc4_compile *c, uint32_t cond)
+{
+        *last_inst(c) = qpu_set_cond_add(*last_inst(c), cond);
+}
+
+/**
+ * Some special registers can be read from either file, which lets us resolve
+ * raddr conflicts without extra MOVs.
+ */
+static bool
+swap_file(struct qpu_reg *src)
+{
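+        /* These registers live at the same read address in both file A and
+         * file B, so flipping the mux is all that's needed.
+         */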
+        switch (src->addr) {
+        case QPU_R_UNIF:
+        case QPU_R_VARY:
+                if (src->mux == QPU_MUX_A)
+                        src->mux = QPU_MUX_B;
+                else
+                        src->mux = QPU_MUX_A;
+                return true;
+
+        default:
+                return false;
+        }
+}
+
+/**
+ * Two operands of an instruction may be register-allocated to the same
+ * physical register file, even though an instruction has only one source
+ * address field per file.
+ *
+ * When that happens, one of them has to be moved to a temporary that the
+ * instruction can read instead.
+ */
+static void
+fixup_raddr_conflict(struct vc4_compile *c,
+                     struct qpu_reg *src0, struct qpu_reg *src1)
+{
+        if ((src0->mux != QPU_MUX_A && src0->mux != QPU_MUX_B) ||
+            src0->mux != src1->mux ||
+            src0->addr == src1->addr) {
+                return;
+        }
+
+        if (swap_file(src0) || swap_file(src1))
+                return;
+
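+        /* Neither operand is one of the dual-file special registers, so copy
+         * src1 through the r3 accumulator instead.
+         */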
+        queue(c, qpu_a_MOV(qpu_r3(), *src1));
+        *src1 = qpu_r3();
+}
+
+static void
+serialize_one_inst(struct vc4_compile *c, uint64_t inst)
+{
+        if (c->qpu_inst_count >= c->qpu_inst_size) {
+                c->qpu_inst_size = MAX2(16, c->qpu_inst_size * 2);
+                c->qpu_insts = realloc(c->qpu_insts,
+                                       c->qpu_inst_size * sizeof(uint64_t));
+        }
+        c->qpu_insts[c->qpu_inst_count++] = inst;
+}
+
+static void
+serialize_insts(struct vc4_compile *c)
+{
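+        /* Index of the most recent SFU write, started far enough in the past
+         * that the r4-latency check below passes trivially at the top of the
+         * program.
+         */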
+        int last_sfu_write = -10;
+        bool scoreboard_wait_emitted = false;
+
+        while (!is_empty_list(&c->qpu_inst_list)) {
+                struct queued_qpu_inst *q =
+                        (struct queued_qpu_inst *)first_elem(&c->qpu_inst_list);
+                uint32_t last_waddr_a = QPU_W_NOP, last_waddr_b = QPU_W_NOP;
+                uint32_t raddr_a = QPU_GET_FIELD(q->inst, QPU_RADDR_A);
+                uint32_t raddr_b = QPU_GET_FIELD(q->inst, QPU_RADDR_B);
+
+                if (c->qpu_inst_count > 0) {
+                        uint64_t last_inst = c->qpu_insts[c->qpu_inst_count -
+                                                          1];
+                        uint32_t last_waddr_add = QPU_GET_FIELD(last_inst,
+                                                                QPU_WADDR_ADD);
+                        uint32_t last_waddr_mul = QPU_GET_FIELD(last_inst,
+                                                                QPU_WADDR_MUL);
+
+                        if (last_inst & QPU_WS) {
+                                last_waddr_a = last_waddr_mul;
+                                last_waddr_b = last_waddr_add;
+                        } else {
+                                last_waddr_a = last_waddr_add;
+                                last_waddr_b = last_waddr_mul;
+                        }
+                }
+
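+                /* Gather the four ALU input mux selects so we can compare
+                 * them against the previous instruction's writes.
+                 */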
+                uint32_t src_muxes[] = {
+                        QPU_GET_FIELD(q->inst, QPU_ADD_A),
+                        QPU_GET_FIELD(q->inst, QPU_ADD_B),
+                        QPU_GET_FIELD(q->inst, QPU_MUL_A),
+                        QPU_GET_FIELD(q->inst, QPU_MUL_B),
+                };
+
+                /* "An instruction must not read from a location in physical
+                 *  regfile A or B that was written to by the previous
+                 *  instruction."
+                 */
+                bool needs_raddr_vs_waddr_nop = false;
+                bool reads_r4 = false;
+                for (int i = 0; i < ARRAY_SIZE(src_muxes); i++) {
+                        if ((raddr_a < 32 &&
+                             src_muxes[i] == QPU_MUX_A &&
+                             last_waddr_a == raddr_a) ||
+                            (raddr_b < 32 &&
+                             src_muxes[i] == QPU_MUX_B &&
+                             last_waddr_b == raddr_b)) {
+                                needs_raddr_vs_waddr_nop = true;
+                        }
+                        if (src_muxes[i] == QPU_MUX_R4)
+                                reads_r4 = true;
+                }
+
+                if (needs_raddr_vs_waddr_nop) {
+                        serialize_one_inst(c, qpu_NOP());
+                }
+
+                /* "After an SFU lookup instruction, accumulator r4 must not
+                 *  be read in the following two instructions. Any other
+                 *  instruction that results in r4 being written (that is, TMU
+                 *  read, TLB read, SFU lookup) cannot occur in the two
+                 *  instructions following an SFU lookup."
+                 */
+                if (reads_r4) {
+                        while (c->qpu_inst_count - last_sfu_write < 3) {
+                                serialize_one_inst(c, qpu_NOP());
+                        }
+                }
+
+                uint32_t waddr_a = QPU_GET_FIELD(q->inst, QPU_WADDR_ADD);
+                uint32_t waddr_m = QPU_GET_FIELD(q->inst, QPU_WADDR_MUL);
+                if ((waddr_a >= QPU_W_SFU_RECIP && waddr_a <= QPU_W_SFU_LOG) ||
+                    (waddr_m >= QPU_W_SFU_RECIP && waddr_m <= QPU_W_SFU_LOG)) {
+                        last_sfu_write = c->qpu_inst_count;
                 }
+
+                /* "A scoreboard wait must not occur in the first two
+                 *  instructions of a fragment shader. This is either the
+                 *  explicit Wait for Scoreboard signal or an implicit wait
+                 *  with the first tile-buffer read or write instruction."
+                 */
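+                /* Delay the wait until at least the third instruction, and
+                 * attach it to an instruction whose signal field is still
+                 * free.
+                 */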
+                if (!scoreboard_wait_emitted &&
+                    (waddr_a == QPU_W_TLB_Z || waddr_m == QPU_W_TLB_Z ||
+                     waddr_a == QPU_W_TLB_COLOR_MS ||
+                     waddr_m == QPU_W_TLB_COLOR_MS ||
+                     waddr_a == QPU_W_TLB_COLOR_ALL ||
+                     waddr_m == QPU_W_TLB_COLOR_ALL ||
+                     QPU_GET_FIELD(q->inst, QPU_SIG) == QPU_SIG_COLOR_LOAD)) {
+                        while (c->qpu_inst_count < 3 ||
+                               QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
+                                             QPU_SIG) != QPU_SIG_NONE) {
+                                serialize_one_inst(c, qpu_NOP());
+                        }
+                        c->qpu_insts[c->qpu_inst_count - 1] =
+                                qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
+                                            QPU_SIG_WAIT_FOR_SCOREBOARD);
+                        scoreboard_wait_emitted = true;
+                }
+
+                serialize_one_inst(c, q->inst);
+
+                remove_from_list(&q->link);
+                free(q);
         }
+}
+
+void
+vc4_generate_code(struct vc4_context *vc4, struct vc4_compile *c)
+{
+        struct qpu_reg *temp_registers = vc4_register_allocate(vc4, c);
+        bool discard = false;
+
+        make_empty_list(&c->qpu_inst_list);
 
         switch (c->stage) {
         case QSTAGE_VERT:
         case QSTAGE_COORD:
-                insts[ni++] = qpu_load_imm_ui(qpu_vrsetup(), 0x00401a00);
-                insts[ni++] = qpu_load_imm_ui(qpu_vwsetup(), 0x00001a00);
+                queue(c, qpu_load_imm_ui(qpu_vrsetup(),
+                                         (0x00001a00 +
+                                          0x00100000 * c->num_inputs)));
+                queue(c, qpu_load_imm_ui(qpu_vwsetup(), 0x00001a00));
                 break;
         case QSTAGE_FRAG:
                 break;
         }
 
+        struct simple_node *node;
         foreach(node, &c->instructions) {
                 struct qinst *qinst = (struct qinst *)node;
 
@@ -106,8 +276,21 @@ vc4_generate_code(struct qcompile *c)
                         A(FMINABS),
                         A(FMAXABS),
                         A(FTOI),
+                        A(ITOF),
+                        A(ADD),
+                        A(SUB),
+                        A(SHL),
+                        A(SHR),
+                        A(ASR),
+                        A(MIN),
+                        A(MAX),
+                        A(AND),
+                        A(OR),
+                        A(XOR),
+                        A(NOT),
 
                         M(FMUL),
+                        M(MUL24),
                 };
 
                 struct qpu_reg src[4];
@@ -118,11 +301,7 @@ vc4_generate_code(struct qcompile *c)
                                 src[i] = qpu_rn(0);
                                 break;
                         case QFILE_TEMP:
-                                assert(reg_allocated[index] != -1);
-                                src[i] = allocate_to_qpu_reg[reg_allocated[index]];
-                                reg_uses_remaining[index]--;
-                                if (reg_uses_remaining[index] == 0)
-                                        reg_in_use[reg_allocated[index]] = false;
+                                src[i] = temp_registers[index];
                                 break;
                         case QFILE_UNIF:
                                 src[i] = qpu_unif();
@@ -138,36 +317,9 @@ vc4_generate_code(struct qcompile *c)
                 case QFILE_NULL:
                         dst = qpu_ra(QPU_W_NOP);
                         break;
-
                 case QFILE_TEMP:
-                        if (reg_allocated[qinst->dst.index] == -1) {
-                                int alloc;
-                                for (alloc = 0;
-                                     alloc < ARRAY_SIZE(reg_in_use);
-                                     alloc++) {
-                                        /* The pack flags require an A-file register. */
-                                        if (qinst->op == QOP_PACK_SCALED &&
-                                            allocate_to_qpu_reg[alloc].mux != QPU_MUX_A) {
-                                                continue;
-                                        }
-
-                                        if (!reg_in_use[alloc])
-                                                break;
-                                }
-                                assert(alloc != ARRAY_SIZE(reg_in_use) && "need better reg alloc");
-                                reg_in_use[alloc] = true;
-                                reg_allocated[qinst->dst.index] = alloc;
-                        }
-
-                        dst = allocate_to_qpu_reg[reg_allocated[qinst->dst.index]];
-
-                        reg_uses_remaining[qinst->dst.index]--;
-                        if (reg_uses_remaining[qinst->dst.index] == 0) {
-                                reg_in_use[reg_allocated[qinst->dst.index]] =
-                                        false;
-                        }
+                        dst = temp_registers[qinst->dst.index];
                         break;
-
                 case QFILE_VARY:
                 case QFILE_UNIF:
                         assert(!"not reached");
@@ -179,21 +331,48 @@ vc4_generate_code(struct qcompile *c)
                         /* Skip emitting the MOV if it's a no-op. */
                         if (dst.mux == QPU_MUX_A || dst.mux == QPU_MUX_B ||
                             dst.mux != src[0].mux || dst.addr != src[0].addr) {
-                                insts[ni++] = qpu_inst(qpu_a_MOV(dst, src[0]),
-                                                       qpu_m_NOP());
+                                queue(c, qpu_a_MOV(dst, src[0]));
                         }
                         break;
 
+                case QOP_SF:
+                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_NOP), src[0]));
+                        *last_inst(c) |= QPU_SF;
+                        break;
+
+                case QOP_SEL_X_0_ZS:
+                case QOP_SEL_X_0_ZC:
+                case QOP_SEL_X_0_NS:
+                case QOP_SEL_X_0_NC:
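+                        /* MOV the source under the requested condition, then
+                         * write a zero (x ^ x) under the opposite one.  The
+                         * QOP_SEL_* values come in set/clear pairs, so XORing
+                         * the offset with 1 selects the complementary
+                         * condition code.
+                         */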
+                        queue(c, qpu_a_MOV(dst, src[0]));
+                        set_last_cond_add(c, qinst->op - QOP_SEL_X_0_ZS +
+                                          QPU_COND_ZS);
+
+                        queue(c, qpu_a_XOR(dst, qpu_r0(), qpu_r0()));
+                        set_last_cond_add(c, ((qinst->op - QOP_SEL_X_0_ZS) ^
+                                              1) + QPU_COND_ZS);
+                        break;
+
+                case QOP_SEL_X_Y_ZS:
+                case QOP_SEL_X_Y_ZC:
+                case QOP_SEL_X_Y_NS:
+                case QOP_SEL_X_Y_NC:
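+                        /* Same pairing trick as above: src[0] under the
+                         * condition, src[1] under its complement.
+                         */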
+                        queue(c, qpu_a_MOV(dst, src[0]));
+                        set_last_cond_add(c, qinst->op - QOP_SEL_X_Y_ZS +
+                                          QPU_COND_ZS);
+
+                        queue(c, qpu_a_MOV(dst, src[1]));
+                        set_last_cond_add(c, ((qinst->op - QOP_SEL_X_Y_ZS) ^
+                                              1) + QPU_COND_ZS);
+
+                        break;
+
                 case QOP_VPM_WRITE:
-                        insts[ni++] = qpu_inst(qpu_a_MOV(qpu_ra(QPU_W_VPM),
-                                                         src[0]),
-                                               qpu_m_NOP());
+                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_VPM), src[0]));
                         break;
 
                 case QOP_VPM_READ:
-                        insts[ni++] = qpu_inst(qpu_a_MOV(dst,
-                                                         qpu_ra(QPU_R_VPM)),
-                                               qpu_m_NOP());
+                        queue(c, qpu_a_MOV(dst, qpu_ra(QPU_R_VPM)));
                         break;
 
                 case QOP_RCP:
@@ -202,67 +381,166 @@ vc4_generate_code(struct qcompile *c)
                 case QOP_LOG2:
                         switch (qinst->op) {
                         case QOP_RCP:
-                                insts[ni++] = qpu_inst(qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIP),
-                                                                 src[0]),
-                                                       qpu_m_NOP());
+                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIP),
+                                                   src[0]));
                                 break;
                         case QOP_RSQ:
-                                insts[ni++] = qpu_inst(qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIPSQRT),
-                                                                 src[0]),
-                                                       qpu_m_NOP());
+                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIPSQRT),
+                                                   src[0]));
                                 break;
                         case QOP_EXP2:
-                                insts[ni++] = qpu_inst(qpu_a_MOV(qpu_rb(QPU_W_SFU_EXP),
-                                                                 src[0]),
-                                                       qpu_m_NOP());
+                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_EXP),
+                                                   src[0]));
                                 break;
                         case QOP_LOG2:
-                                insts[ni++] = qpu_inst(qpu_a_MOV(qpu_rb(QPU_W_SFU_LOG),
-                                                                 src[0]),
-                                                       qpu_m_NOP());
+                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_LOG),
+                                                   src[0]));
                                 break;
                         default:
                                 abort();
                         }
 
-                        insts[ni++] = qpu_inst(qpu_a_NOP(), qpu_m_NOP());
-                        insts[ni++] = qpu_inst(qpu_a_NOP(), qpu_m_NOP());
-
-                        insts[ni++] = qpu_inst(qpu_a_MOV(dst, qpu_r4()),
-                                               qpu_m_NOP());
+                        queue(c, qpu_a_MOV(dst, qpu_r4()));
 
                         break;
 
                 case QOP_PACK_COLORS:
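+                        /* Pack each of the four float sources into one byte
+                         * of r3 using the MUL unit's 8-bit pack modes, then
+                         * move the packed word to the destination.
+                         */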
                         for (int i = 0; i < 4; i++) {
-                                insts[ni++] = qpu_inst(qpu_a_NOP(),
-                                                       qpu_m_MOV(qpu_r5(), src[i]));
-                                insts[ni - 1] |= QPU_PM;
-                                insts[ni - 1] |= QPU_SET_FIELD(QPU_PACK_MUL_8A + i,
+                                queue(c, qpu_m_MOV(qpu_r3(), src[i]));
+                                *last_inst(c) |= QPU_PM;
+                                *last_inst(c) |= QPU_SET_FIELD(QPU_PACK_MUL_8A + i,
                                                                QPU_PACK);
                         }
 
-                        insts[ni++] = qpu_inst(qpu_a_MOV(dst, qpu_r5()),
-                                               qpu_m_NOP());
+                        queue(c, qpu_a_MOV(dst, qpu_r3()));
+
+                        break;
+
+                case QOP_FRAG_X:
+                        queue(c, qpu_a_ITOF(dst,
+                                            qpu_ra(QPU_R_XY_PIXEL_COORD)));
+                        break;
+
+                case QOP_FRAG_Y:
+                        queue(c, qpu_a_ITOF(dst,
+                                            qpu_rb(QPU_R_XY_PIXEL_COORD)));
+                        break;
+
+                case QOP_FRAG_Z:
+                case QOP_FRAG_W:
+                        /* QOP_FRAG_Z/W don't emit instructions, just allocate
+                         * the register to the Z/W payload.
+                         */
+                        break;
+
+                case QOP_TLB_DISCARD_SETUP:
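+                        /* Set the condition flags from the discard value so
+                         * the TLB Z and color writes below can be predicated
+                         * on them.
+                         */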
+                        discard = true;
+                        queue(c, qpu_a_MOV(src[0], src[0]));
+                        *last_inst(c) |= QPU_SF;
+                        break;
+
+                case QOP_TLB_STENCIL_SETUP:
+                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_TLB_STENCIL_SETUP), src[0]));
+                        break;
+
+                case QOP_TLB_Z_WRITE:
+                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_TLB_Z), src[0]));
+                        if (discard) {
+                                set_last_cond_add(c, QPU_COND_ZS);
+                        }
+                        break;
+
+                case QOP_TLB_COLOR_READ:
+                        queue(c, qpu_NOP());
+                        *last_inst(c) = qpu_set_sig(*last_inst(c),
+                                                    QPU_SIG_COLOR_LOAD);
+
                         break;
 
                 case QOP_TLB_COLOR_WRITE:
-                        insts[ni++] = qpu_inst(qpu_a_MOV(qpu_tlbc(),
-                                                         src[0]),
-                                               qpu_m_NOP());
+                        queue(c, qpu_a_MOV(qpu_tlbc(), src[0]));
+                        if (discard) {
+                                set_last_cond_add(c, QPU_COND_ZS);
+                        }
                         break;
 
-                case QOP_PACK_SCALED:
-                        insts[ni++] = qpu_inst(qpu_a_MOV(dst, src[0]),
-                                               qpu_m_NOP());
-                        insts[ni - 1] |= QPU_SET_FIELD(QPU_PACK_A_16A, QPU_PACK);
+                case QOP_VARY_ADD_C:
+                        queue(c, qpu_a_FADD(dst, src[0], qpu_r5()));
+                        break;
+
+                case QOP_PACK_SCALED: {
+                        uint64_t a = (qpu_a_MOV(dst, src[0]) |
+                                      QPU_SET_FIELD(QPU_PACK_A_16A,
+                                                    QPU_PACK));
+                        uint64_t b = (qpu_a_MOV(dst, src[1]) |
+                                      QPU_SET_FIELD(QPU_PACK_A_16B,
+                                                    QPU_PACK));
+
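+                        /* If dst aliases src[1], emit the 16B pack first so
+                         * src[1] is read before the 16A pack overwrites it.
+                         */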
+                        if (dst.mux == src[1].mux && dst.addr == src[1].addr) {
+                                queue(c, b);
+                                queue(c, a);
+                        } else {
+                                queue(c, a);
+                                queue(c, b);
+                        }
+                        break;
+                }
+
+                case QOP_TEX_S:
+                case QOP_TEX_T:
+                case QOP_TEX_R:
+                case QOP_TEX_B:
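+                        /* The TMU0 S/T/R/B write addresses are consecutive,
+                         * matching the QOP_TEX_S..QOP_TEX_B ordering.
+                         */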
+                        queue(c, qpu_a_MOV(qpu_rb(QPU_W_TMU0_S +
+                                                  (qinst->op - QOP_TEX_S)),
+                                           src[0]));
+                        break;
+
+                case QOP_TEX_RESULT:
+                        queue(c, qpu_NOP());
+                        *last_inst(c) = qpu_set_sig(*last_inst(c),
+                                                    QPU_SIG_LOAD_TMU0);
+
+                        break;
 
-                        insts[ni++] = qpu_inst(qpu_a_MOV(dst, src[1]),
-                                               qpu_m_NOP());
-                        insts[ni - 1] |= QPU_SET_FIELD(QPU_PACK_A_16B, QPU_PACK);
+                case QOP_R4_UNPACK_A:
+                case QOP_R4_UNPACK_B:
+                case QOP_R4_UNPACK_C:
+                case QOP_R4_UNPACK_D:
+                        assert(src[0].mux == QPU_MUX_R4);
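+                        /* With the PM bit set, the unpack field selects which
+                         * byte of r4 this MOV unpacks.
+                         */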
+                        queue(c, qpu_a_MOV(dst, src[0]));
+                        *last_inst(c) |= QPU_PM;
+                        *last_inst(c) |= QPU_SET_FIELD(QPU_UNPACK_8A +
+                                                       (qinst->op -
+                                                        QOP_R4_UNPACK_A),
+                                                       QPU_UNPACK);
 
                         break;
 
+                case QOP_UNPACK_8A:
+                case QOP_UNPACK_8B:
+                case QOP_UNPACK_8C:
+                case QOP_UNPACK_8D: {
+                        assert(src[0].mux == QPU_MUX_A);
+
+                        /* Since we're setting the pack bits, a destination in
+                         * regfile A would get re-packed, so bounce the result
+                         * through r3 instead.
+                         */
+                        struct qpu_reg orig_dst = dst;
+                        if (orig_dst.mux == QPU_MUX_A)
+                                dst = qpu_rn(3);
+
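+                        /* FMAX of the source with itself serves as the move;
+                         * with PM clear, the unpack field applies to the
+                         * regfile A read of the source.
+                         */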
+                        queue(c, qpu_a_FMAX(dst, src[0], src[0]));
+                        *last_inst(c) |= QPU_SET_FIELD(QPU_UNPACK_8A +
+                                                       (qinst->op -
+                                                        QOP_UNPACK_8A),
+                                                       QPU_UNPACK);
+
+                        if (orig_dst.mux == QPU_MUX_A) {
+                                queue(c, qpu_a_MOV(orig_dst, dst));
+                        }
+                }
+                        break;
+
                 default:
                         assert(qinst->op < ARRAY_SIZE(translate));
                         assert(translate[qinst->op].op != 0); /* NOPs */
@@ -274,55 +552,60 @@ vc4_generate_code(struct qcompile *c)
                         if (qir_get_op_nsrc(qinst->op) == 1)
                                 src[1] = src[0];
 
-                        if ((src[0].mux == QPU_MUX_A || src[0].mux == QPU_MUX_B) &&
-                            (src[1].mux == QPU_MUX_A || src[1].mux == QPU_MUX_B) &&
-                            src[0].addr != src[1].addr) {
-                                insts[ni++] = qpu_inst(qpu_a_MOV(qpu_r5(), src[1]),
-                                                       qpu_m_NOP());
-                                src[1] = qpu_r5();
-                        }
+                        fixup_raddr_conflict(c, &src[0], &src[1]);
 
                         if (translate[qinst->op].is_mul) {
-                                insts[ni++] = qpu_inst(qpu_a_NOP(),
-                                                       qpu_m_alu2(translate[qinst->op].op,
-                                                                  dst, src[0], src[1]));
+                                queue(c, qpu_m_alu2(translate[qinst->op].op,
+                                                    dst,
+                                                    src[0], src[1]));
                         } else {
-                                insts[ni++] = qpu_inst(qpu_a_alu2(translate[qinst->op].op,
-                                                                  dst, src[0], src[1]),
-                                                       qpu_m_NOP());
+                                queue(c, qpu_a_alu2(translate[qinst->op].op,
+                                                    dst,
+                                                    src[0], src[1]));
                         }
                         break;
                 }
-
-                if ((dst.mux == QPU_MUX_A || dst.mux == QPU_MUX_B) &&
-                    dst.addr < 32)
-                        insts[ni++] = qpu_inst(qpu_a_NOP(), qpu_m_NOP());
         }
 
+        serialize_insts(c);
+
         /* thread end can't have VPM write */
-        if (QPU_GET_FIELD(insts[ni - 1], QPU_WADDR_ADD) == QPU_W_VPM ||
-            QPU_GET_FIELD(insts[ni - 1], QPU_WADDR_MUL) == QPU_W_VPM)
-                insts[ni++] = qpu_inst(qpu_a_NOP(), qpu_m_NOP());
+        if (QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
+                          QPU_WADDR_ADD) == QPU_W_VPM ||
+            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
+                          QPU_WADDR_MUL) == QPU_W_VPM) {
+                serialize_one_inst(c, qpu_NOP());
+        }
 
-        insts[ni - 1] = qpu_set_sig(insts[ni - 1], QPU_SIG_PROG_END);
-        insts[ni++] = qpu_inst(qpu_a_NOP(), qpu_m_NOP());
-        insts[ni++] = qpu_inst(qpu_a_NOP(), qpu_m_NOP());
+        /* thread end can't have uniform read */
+        if (QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
+                          QPU_RADDR_A) == QPU_R_UNIF ||
+            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
+                          QPU_RADDR_B) == QPU_R_UNIF) {
+                serialize_one_inst(c, qpu_NOP());
+        }
+
+        c->qpu_insts[c->qpu_inst_count - 1] =
+                qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
+                            QPU_SIG_PROG_END);
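+        /* Fill the two delay slots after the program end signal. */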
+        serialize_one_inst(c, qpu_NOP());
+        serialize_one_inst(c, qpu_NOP());
 
         switch (c->stage) {
         case QSTAGE_VERT:
         case QSTAGE_COORD:
                 break;
         case QSTAGE_FRAG:
-                insts[2] = qpu_set_sig(insts[2], QPU_SIG_WAIT_FOR_SCOREBOARD);
-                insts[ni - 1] = qpu_set_sig(insts[ni - 1],
-                                            QPU_SIG_SCOREBOARD_UNLOCK);
+                c->qpu_insts[c->qpu_inst_count - 1] =
+                        qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
+                                    QPU_SIG_SCOREBOARD_UNLOCK);
                 break;
         }
 
-        c->qpu_insts = insts;
-        c->num_qpu_insts = ni;
+        if (vc4_debug & VC4_DEBUG_QPU)
+                vc4_dump_program(c);
 
-        vc4_dump_program(c);
-        vc4_qpu_validate(insts, ni);
-}
+        vc4_qpu_validate(c->qpu_insts, c->qpu_inst_count);
 
+        free(temp_registers);
+}