*last_inst(c) = qpu_set_cond_add(*last_inst(c), cond);
}
+/**
+ * Some special registers can be read from either file, which lets us resolve
+ * raddr conflicts without extra MOVs.
+ *
+ * Returns true if @src was flipped to the other register file, false if the
+ * register is not one of the file-agnostic special registers (in which case
+ * @src is left unmodified).
+ */
+static bool
+swap_file(struct qpu_reg *src)
+{
+        switch (src->addr) {
+        case QPU_R_UNIF:
+        case QPU_R_VARY:
+                /* Uniform and varying reads are available from either file
+                 * (see the comment above), so toggling the mux between A and
+                 * B reads the same value and costs nothing.
+                 */
+                if (src->mux == QPU_MUX_A)
+                        src->mux = QPU_MUX_B;
+                else
+                        src->mux = QPU_MUX_A;
+                return true;
+
+        default:
+                return false;
+        }
+}
+
/**
 * This is used to resolve the fact that we might register-allocate two
 * different operands of an instruction to the same physical register file
 */
static void
fixup_raddr_conflict(struct vc4_compile *c,
-                     struct qpu_reg src0, struct qpu_reg *src1)
+                     struct qpu_reg *src0, struct qpu_reg *src1)
{
-        if ((src0.mux == QPU_MUX_A || src0.mux == QPU_MUX_B) &&
-            (src1->mux == QPU_MUX_A || src1->mux == QPU_MUX_B) &&
-            src0.addr != src1->addr) {
-                queue(c, qpu_a_MOV(qpu_r3(), *src1));
-                *src1 = qpu_r3();
+        /* A raddr conflict only exists when both operands read the same
+         * physical register file (A or B) at different addresses; anything
+         * else (accumulators, matching files, same address) needs no fixup.
+         */
+        if ((src0->mux != QPU_MUX_A && src0->mux != QPU_MUX_B) ||
+            src0->mux != src1->mux ||
+            src0->addr == src1->addr) {
+                return;
        }
+
+        /* First try pointing one operand at the other file, which is free
+         * for registers readable from either file (see swap_file()).
+         */
+        if (swap_file(src0) || swap_file(src1))
+                return;
+
+        /* Otherwise, fall back to copying src1 through accumulator r3. */
+        queue(c, qpu_a_MOV(qpu_r3(), *src1));
+        *src1 = qpu_r3();
}
static void
}
void
-vc4_generate_code(struct vc4_compile *c)
+vc4_generate_code(struct vc4_context *vc4, struct vc4_compile *c)
{
- struct qpu_reg *temp_registers = vc4_register_allocate(c);
+ struct qpu_reg *temp_registers = vc4_register_allocate(vc4, c);
bool discard = false;
make_empty_list(&c->qpu_inst_list);
qpu_rb(QPU_R_XY_PIXEL_COORD)));
break;
- case QOP_FRAG_Z:
- /* QOP_FRAG_Z doesn't emit instructions, just
- * allocates the register to the Z payload.
- */
+ case QOP_FRAG_REV_FLAG:
+ queue(c, qpu_a_ITOF(dst,
+ qpu_rb(QPU_R_MS_REV_FLAGS)));
break;
- case QOP_FRAG_RCP_W:
- queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIP),
- qpu_ra(QPU_R_FRAG_PAYLOAD_ZW)));
-
- queue(c, qpu_a_MOV(dst, qpu_r4()));
+ case QOP_FRAG_Z:
+ case QOP_FRAG_W:
+ /* QOP_FRAG_Z/W don't emit instructions, just allocate
+ * the register to the Z/W payload.
+ */
break;
case QOP_TLB_DISCARD_SETUP:
assert(src[0].mux == QPU_MUX_R4);
queue(c, qpu_a_MOV(dst, src[0]));
*last_inst(c) |= QPU_PM;
- *last_inst(c) |= QPU_SET_FIELD(QPU_UNPACK_R4_8A +
+ *last_inst(c) |= QPU_SET_FIELD(QPU_UNPACK_8A +
(qinst->op -
QOP_R4_UNPACK_A),
QPU_UNPACK);
break;
+                case QOP_UNPACK_8A:
+                case QOP_UNPACK_8B:
+                case QOP_UNPACK_8C:
+                case QOP_UNPACK_8D: {
+                        /* NOTE(review): the unpack field presumably applies
+                         * only to A-file reads, hence this assert — confirm
+                         * against the QPU architecture docs.
+                         */
+                        assert(src[0].mux == QPU_MUX_A);
+
+                        /* And, since we're setting the pack bits, if the
+                         * destination is in A it would get re-packed.
+                         */
+                        struct qpu_reg orig_dst = dst;
+                        if (orig_dst.mux == QPU_MUX_A)
+                                dst = qpu_rn(3);
+
+                        /* FMAX of a register with itself appears to serve as
+                         * an identity move carrying the unpacked value —
+                         * verify NaN behavior is acceptable here.
+                         */
+                        queue(c, qpu_a_FMAX(dst, src[0], src[0]));
+                        *last_inst(c) |= QPU_SET_FIELD(QPU_UNPACK_8A +
+                                                       (qinst->op -
+                                                        QOP_UNPACK_8A),
+                                                       QPU_UNPACK);
+
+                        /* Copy back from the r3 detour if we redirected the
+                         * destination above.
+                         */
+                        if (orig_dst.mux == QPU_MUX_A) {
+                                queue(c, qpu_a_MOV(orig_dst, dst));
+                        }
+                }
+                        break;
+
default:
assert(qinst->op < ARRAY_SIZE(translate));
assert(translate[qinst->op].op != 0); /* NOPs */
if (qir_get_op_nsrc(qinst->op) == 1)
src[1] = src[0];
- fixup_raddr_conflict(c, src[0], &src[1]);
+ fixup_raddr_conflict(c, &src[0], &src[1]);
if (translate[qinst->op].is_mul) {
queue(c, qpu_m_alu2(translate[qinst->op].op,