+static void
+mir_lower_inverts(midgard_instruction *ins)
+{
+ bool inv[3] = {
+ ins->src_invert[0],
+ ins->src_invert[1],
+ ins->src_invert[2]
+ };
+
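+ /* Fold source inverts into the dedicated inverted integer opcodes
+ * (inor, iandnot, inand, iornot, inxor) so the inversion does not
+ * need a separate instruction. */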
+ switch (ins->op) {
+ case midgard_alu_op_iand:
+ /* a & ~b = iandnot(a, b) */
+ /* ~a & ~b = ~(a | b) = inor(a, b) */
+
+ if (inv[0] && inv[1])
+ ins->op = midgard_alu_op_inor;
+ else if (inv[1])
+ ins->op = midgard_alu_op_iandnot;
+
+ break;
+ case midgard_alu_op_ior:
+ /* a | ~b = iornot(a, b) */
+ /* ~a | ~b = ~(a & b) = inand(a, b) */
+
+ if (inv[0] && inv[1])
+ ins->op = midgard_alu_op_inand;
+ else if (inv[1])
+ ins->op = midgard_alu_op_iornot;
+
+ break;
+
+ case midgard_alu_op_ixor:
+ /* ~a ^ b = a ^ ~b = ~(a ^ b) = inxor(a, b) */
+ /* ~a ^ ~b = a ^ b */
+
+ if (inv[0] ^ inv[1])
+ ins->op = midgard_alu_op_inxor;
+
+ break;
+
+ default:
+ break;
+ }
+}
+
+/* Opcodes with the ROUNDS property use round-to-nearest-even (rte = 0) as
+ * their base encoding, so the desired round mode can simply be added to the
+ * opcode. */
+
+static void
+mir_lower_roundmode(midgard_instruction *ins)
+{
+ if (alu_opcode_props[ins->op].props & MIDGARD_ROUNDS) {
+ assert(ins->roundmode <= 0x3);
+ ins->op += ins->roundmode;
+ }
+}
+
+static midgard_load_store_word
+load_store_from_instr(midgard_instruction *ins)
+{
+ midgard_load_store_word ldst = ins->load_store;
+ ldst.op = ins->op;
+
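+ /* Stores take their data from src[0] and keep only the low bit as the
+ * encoded register select; loads write through dest. */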
+ if (OP_IS_STORE(ldst.op)) {
+ ldst.reg = SSA_REG_FROM_FIXED(ins->src[0]) & 1;
+ } else {
+ ldst.reg = SSA_REG_FROM_FIXED(ins->dest);
+ }
+
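+ /* Fold the optional extra sources into the argument fields as packed
+ * register/component selects. */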
+ if (ins->src[1] != ~0) {
+ unsigned src = SSA_REG_FROM_FIXED(ins->src[1]);
+ ldst.arg_1 |= midgard_ldst_reg(src, ins->swizzle[1][0]);
+ }
+
+ if (ins->src[2] != ~0) {
+ unsigned src = SSA_REG_FROM_FIXED(ins->src[2]);
+ ldst.arg_2 |= midgard_ldst_reg(src, ins->swizzle[2][0]);
+ }
+
+ return ldst;
+}
+
+static midgard_texture_word
+texture_word_from_instr(midgard_instruction *ins)
+{
+ midgard_texture_word tex = ins->texture;
+ tex.op = ins->op;
+
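+ /* Source and destination registers are encoded as one-bit selects;
+ * absent operands fall back to REGISTER_UNUSED. */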
+ unsigned src1 = ins->src[1] == ~0 ? REGISTER_UNUSED : SSA_REG_FROM_FIXED(ins->src[1]);
+ tex.in_reg_select = src1 & 1;
+
+ unsigned dest = ins->dest == ~0 ? REGISTER_UNUSED : SSA_REG_FROM_FIXED(ins->dest);
+ tex.out_reg_select = dest & 1;
+
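+ /* A register LOD/bias argument (src[2]) is packed into the bias field
+ * as a register select. */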
+ if (ins->src[2] != ~0) {
+ midgard_tex_register_select sel = {
+ .select = SSA_REG_FROM_FIXED(ins->src[2]) & 1,
+ .full = 1,
+ .component = ins->swizzle[2][0]
+ };
+ uint8_t packed;
+ memcpy(&packed, &sel, sizeof(packed));
+ tex.bias = packed;
+ }
+
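+ /* Register texel offsets (src[3]) are packed as a full register select
+ * plus a contiguous xyz swizzle starting at the first component. */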
+ if (ins->src[3] != ~0) {
+ unsigned x = ins->swizzle[3][0];
+ unsigned y = x + 1;
+ unsigned z = x + 2;
+
+ /* Check range, TODO: half-registers */
+ assert(z < 4);
+
+ unsigned offset_reg = SSA_REG_FROM_FIXED(ins->src[3]);
+ tex.offset =
+ (1) | /* full */
+ (offset_reg & 1) << 1 | /* select */
+ (0 << 2) | /* upper */
+ (x << 3) | /* swizzle */
+ (y << 5) | /* swizzle */
+ (z << 7); /* swizzle */
+ }
+
+ return tex;
+}
+
+static midgard_vector_alu
+vector_alu_from_instr(midgard_instruction *ins)
+{
+ midgard_vector_alu alu = ins->alu;
+ alu.op = ins->op;
+ alu.outmod = ins->outmod;
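+ /* The register mode is derived from the widest operand size used by
+ * the instruction. */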
+ alu.reg_mode = reg_mode_for_bitsize(max_bitsize_for_alu(ins));
+
+ if (ins->has_inline_constant) {
+ /* Encode the inline 16-bit constant; see the disassembler for the
+ * origin of this bit layout */
+
+ int lower_11 = ins->inline_constant & ((1 << 12) - 1);
+ uint16_t imm = ((lower_11 >> 8) & 0x7) |
+ ((lower_11 & 0xFF) << 3);
+
+ alu.src2 = imm << 2;
+ }
+
+ return alu;
+}
+