+/* Emit store for a fragment shader, which is encoded via a fancy branch. TODO:
+ * Handle MRT here */
+
+/* Emits the MIR sequence that writes a fragment shader output for render
+ * target `rt`: a move of `src` into the fixed writeout register r0, an
+ * optional r1.z setup encoding the render-target index (MRT only), and the
+ * special "writeout" branch that triggers the tilebuffer store. */
+static void
+emit_fragment_store(compiler_context *ctx, unsigned src, unsigned rt)
+{
+ /* First, move in whatever we're outputting */
+ midgard_instruction move = v_mov(src, blank_alu_src, SSA_FIXED_REGISTER(0));
+ if (rt != 0) {
+ /* Force a tight schedule. TODO: Make the scheduler MRT aware */
+ move.unit = UNIT_VMUL;
+ move.precede_break = true;
+ /* Writes to a fixed register look dead to the optimizer; pin the
+ * move so DCE cannot drop it out of the MRT bundle. */
+ move.dont_eliminate = true;
+ }
+
+ emit_mir_instruction(ctx, move);
+
+ /* If we're doing MRT, we need to specify the render target */
+
+ /* Default to dest = -1 (no instruction / no SSA def) so the branch's
+ * second source below is a harmless no-dependency in the rt == 0 path. */
+ midgard_instruction rt_move = {
+ .ssa_args = {
+ .dest = -1
+ }
+ };
+
+ if (rt != 0) {
+ /* We'll write to r1.z */
+ rt_move = v_mov(-1, blank_alu_src, SSA_FIXED_REGISTER(1));
+ rt_move.mask = 1 << COMPONENT_Z;
+ /* SADD pairs with the VMUL move above so both land in one bundle
+ * ahead of the writeout branch — NOTE(review): assumed from the
+ * "tight schedule" comment; confirm against the scheduler. */
+ rt_move.unit = UNIT_SADD;
+
+ /* r1.z = (rt * 0x100) */
+ /* 0x100 is presumably the hardware's RT-index field stride in r1.z;
+ * TODO confirm against the Midgard writeout encoding docs. */
+ rt_move.ssa_args.inline_constant = true;
+ rt_move.inline_constant = (rt * 0x100);
+
+ /* r1 */
+ /* Register allocation must know r1 is now live as a work register. */
+ ctx->work_registers = MAX2(ctx->work_registers, 1);
+
+ /* Do the write */
+ emit_mir_instruction(ctx, rt_move);
+ }
+
+ /* Next, generate the branch. For R render targets in the writeout, the
+ * i'th render target jumps to pseudo-offset [2(R-1) + i] */
+
+ unsigned offset = (2 * (ctx->nir->num_outputs - 1)) + rt;
+
+ struct midgard_instruction ins =
+ v_alu_br_compact_cond(midgard_jmp_writeout_op_writeout, TAG_ALU_4, offset, midgard_condition_always);
+
+ /* Add dependencies */
+ /* Make the branch read both defs so the scheduler keeps the r0 move
+ * (and, for MRT, the r1.z setup) ordered before the writeout. */
+ ins.ssa_args.src[0] = move.ssa_args.dest;
+ ins.ssa_args.src[1] = rt_move.ssa_args.dest;
+
+ /* Emit the branch */
+ emit_mir_instruction(ctx, ins);
+}
+