pan/mdg: Mask spills from texture write
[mesa.git] src/panfrost/midgard/midgard_ra.c
index 84f83aa443c5a51e4ebf86c7576d1eeee89ffbc8..b06eb97ab8ec77f96e3126fc9bb0416a8fb9ca1a 100644
@@ -450,7 +450,7 @@ allocate_registers(compiler_context *ctx, bool *spilled)
                         }
                 }
 
-                if (ins->type == TAG_LOAD_STORE_4 && OP_HAS_ADDRESS(ins->load_store.op)) {
+                if (ins->type == TAG_LOAD_STORE_4 && OP_HAS_ADDRESS(ins->op)) {
                         mir_foreach_src(ins, v) {
                                 unsigned s = ins->src[v];
                                 unsigned size = nir_alu_type_get_type_size(ins->src_types[v]);
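
Throughout this diff, per-unit opcode accessors (ins->load_store.op above, ins->alu.op and ins->texture.op further down) are replaced by a single ins->op. A minimal sketch of the shape of that change follows; the struct and field names are made up, not the real midgard_instruction layout.

/* Illustrative only: made-up structs, not the actual MIR definitions. */
struct fake_ins_old {
        unsigned type;                   /* TAG_ALU_4, TAG_LOAD_STORE_4, ... */
        union {
                struct { unsigned op; } alu;
                struct { unsigned op; } load_store;
                struct { unsigned op; } texture;
        } u;                             /* opcode picked per execution unit */
};

struct fake_ins_new {
        unsigned type;
        unsigned op;                     /* one opcode field for every unit */
};

int main(void)
{
        struct fake_ins_old o = { 0 };
        struct fake_ins_new n = { 0 };
        o.u.alu.op = 5;                  /* old: field depends on the unit */
        n.op = 5;                        /* new: same field regardless of unit */
        return (int)(o.u.alu.op - n.op);
}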
@@ -492,7 +492,7 @@ allocate_registers(compiler_context *ctx, bool *spilled)
                  * want to muck with the conditional itself, so just force
                  * alignment for now */
 
-                if (ins->type == TAG_ALU_4 && OP_IS_CSEL_V(ins->alu.op)) {
+                if (ins->type == TAG_ALU_4 && OP_IS_CSEL_V(ins->op)) {
                         min_alignment[dest] = 4; /* 1 << 4 = 16 bytes = vec4 */
 
                         /* LCRA assumes bound >= alignment */
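
The alignment set above is a log2 byte alignment, so 4 reads as 1 << 4 = 16 bytes, i.e. one full vec4 of 32-bit channels. A tiny worked check (illustrative only, nothing here is from the source):

#include <assert.h>

static unsigned
align_bytes(unsigned log2_align)
{
        return 1u << log2_align;
}

int main(void)
{
        assert(align_bytes(4) == 16);    /* vec4 of 32-bit channels */
        assert(align_bytes(3) == 8);     /* vec2 of 32-bit or vec4 of 16-bit */
        return 0;
}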
@@ -530,7 +530,7 @@ allocate_registers(compiler_context *ctx, bool *spilled)
                         set_class(l->class, ins->src[1], REG_CLASS_LDST);
                         set_class(l->class, ins->src[2], REG_CLASS_LDST);
 
-                        if (OP_IS_VEC4_ONLY(ins->load_store.op)) {
+                        if (OP_IS_VEC4_ONLY(ins->op)) {
                                 lcra_restrict_range(l, ins->dest, 16);
                                 lcra_restrict_range(l, ins->src[0], 16);
                                 lcra_restrict_range(l, ins->src[1], 16);
@@ -630,6 +630,15 @@ allocate_registers(compiler_context *ctx, bool *spilled)
                 l->solutions[ctx->blend_input] = 0;
         }
 
+        /* Same for the dual-source blend input/output, except here we use r2,
+         * which is also set in the fragment shader. */
+
+        if (ctx->blend_src1 != ~0) {
+                assert(ctx->blend_src1 < ctx->temp_count);
+                l->solutions[ctx->blend_src1] = (16 * 2);
+                ctx->work_registers = MAX2(ctx->work_registers, 2);
+        }
+
         mir_compute_interference(ctx, l);
 
         *spilled = !lcra_solve(l);
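
A note on the (16 * 2) above: the LCRA solution appears to be a byte offset into the work-register file at 16 bytes per register, so pinning the dual-source blend colour to r2 means writing offset 32, just as the regular blend input earlier in this function is pinned to r0 at offset 0. A hypothetical helper for the conversion:

#include <assert.h>

/* Illustrative helper, not from the actual source. */
static unsigned
work_reg_to_byte_offset(unsigned reg)
{
        return reg * 16;                 /* 16 bytes per 128-bit work register */
}

int main(void)
{
        assert(work_reg_to_byte_offset(2) == 32);   /* the (16 * 2) stored above */
        assert(work_reg_to_byte_offset(0) == 0);    /* the blend_input case */
        return 0;
}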
@@ -672,33 +681,18 @@ install_registers_instr(
                 mir_set_bytemask(ins, mir_bytemask(ins) << dest.offset);
 
                 unsigned dest_offset =
-                        GET_CHANNEL_COUNT(alu_opcode_props[ins->alu.op].props) ? 0 :
+                        GET_CHANNEL_COUNT(alu_opcode_props[ins->op].props) ? 0 :
                         dest.offset;
 
                 offset_swizzle(ins->swizzle[0], src1.offset, src1.shift, dest.shift, dest_offset);
-
-                ins->registers.src1_reg = src1.reg;
-
-                ins->registers.src2_imm = ins->has_inline_constant;
-
-                if (ins->has_inline_constant) {
-                        /* Encode inline 16-bit constant. See disassembler for
-                         * where the algorithm is from */
-
-                        ins->registers.src2_reg = ins->inline_constant >> 11;
-
-                        int lower_11 = ins->inline_constant & ((1 << 12) - 1);
-                        uint16_t imm = ((lower_11 >> 8) & 0x7) |
-                                       ((lower_11 & 0xFF) << 3);
-
-                        ins->alu.src2 = imm << 2;
-                } else {
+                if (!ins->has_inline_constant)
                         offset_swizzle(ins->swizzle[1], src2.offset, src2.shift, dest.shift, dest_offset);
-
-                        ins->registers.src2_reg = src2.reg;
-                }
-
-                ins->registers.out_reg = dest.reg;
+                if (ins->src[0] != ~0)
+                        ins->src[0] = SSA_FIXED_REGISTER(src1.reg);
+                if (ins->src[1] != ~0)
+                        ins->src[1] = SSA_FIXED_REGISTER(src2.reg);
+                if (ins->dest != ~0)
+                        ins->dest = SSA_FIXED_REGISTER(dest.reg);
                 break;
         }
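
After this hunk, register allocation no longer fills the hardware fields directly (ins->registers, the inline-constant bits of ins->alu.src2); it only rewrites the MIR source/destination indices to fixed registers, presumably leaving bit-level packing to a later emission pass. Below is a sketch of the fixed-register tagging idea under an assumed encoding; the real SSA_FIXED_REGISTER macro lives in the Midgard compiler headers and may well differ.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define FAKE_FIXED_BIT (1u << 31)        /* assumed encoding, not the real one */

static uint32_t fake_fixed_register(uint32_t reg) { return FAKE_FIXED_BIT | reg; }
static bool     fake_is_fixed(uint32_t index)     { return (index & FAKE_FIXED_BIT) != 0; }
static uint32_t fake_reg_from_fixed(uint32_t idx) { return idx & ~FAKE_FIXED_BIT; }

int main(void)
{
        uint32_t src = 42;                        /* an ordinary SSA-style index */
        assert(!fake_is_fixed(src));
        src = fake_fixed_register(7);             /* RA pins it to r7 */
        assert(fake_is_fixed(src) && fake_reg_from_fixed(src) == 7);
        return 0;
}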
 
@@ -707,18 +701,18 @@ install_registers_instr(
                  * whether we are loading or storing -- think about the
                  * logical dataflow */
 
-                bool encodes_src = OP_IS_STORE(ins->load_store.op);
+                bool encodes_src = OP_IS_STORE(ins->op);
 
                 if (encodes_src) {
                         struct phys_reg src = index_to_reg(ctx, l, ins->src[0], src_shift[0]);
                         assert(src.reg == 26 || src.reg == 27);
 
-                        ins->load_store.reg = src.reg - 26;
+                        ins->src[0] = SSA_FIXED_REGISTER(src.reg);
                         offset_swizzle(ins->swizzle[0], src.offset, src.shift, 0, 0);
                } else {
                         struct phys_reg dst = index_to_reg(ctx, l, ins->dest, dest_shift);
 
-                        ins->load_store.reg = dst.reg;
+                        ins->dest = SSA_FIXED_REGISTER(dst.reg);
                         offset_swizzle(ins->swizzle[0], 0, 2, 2, dst.offset);
                         mir_set_bytemask(ins, mir_bytemask(ins) << dst.offset);
                 }
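
The branch above follows the comment's point about logical dataflow: a store consumes the value being written as a source (pinned by the assert to the store pipeline registers r26/r27), while a load produces its value as a destination, so only the matching operand is rewritten. A stripped-down sketch with made-up names and the same assumed tag bit as before:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define FAKE_FIXED_BIT (1u << 31)        /* assumed tagging, as in the earlier sketch */

static void
install_ldst(bool is_store, uint32_t *src0, uint32_t *dest, uint32_t reg)
{
        if (is_store)
                *src0 = FAKE_FIXED_BIT | reg;    /* value read by the store */
        else
                *dest = FAKE_FIXED_BIT | reg;    /* value written by the load */
}

int main(void)
{
        uint32_t src0 = 3, dest = 4;
        install_ldst(true, &src0, &dest, 26);    /* store: only the source moves */
        assert(src0 == (FAKE_FIXED_BIT | 26) && dest == 4);
        return 0;
}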
@@ -732,21 +726,23 @@ install_registers_instr(
                         struct phys_reg src = index_to_reg(ctx, l, src2, 2);
                         unsigned component = src.offset >> src.shift;
                         assert(component << src.shift == src.offset);
-                        ins->load_store.arg_1 |= midgard_ldst_reg(src.reg, component);
+                        ins->src[1] = SSA_FIXED_REGISTER(src.reg);
+                        ins->swizzle[1][0] = component;
                 }
 
                 if (src3 != ~0) {
                         struct phys_reg src = index_to_reg(ctx, l, src3, 2);
                         unsigned component = src.offset >> src.shift;
                         assert(component << src.shift == src.offset);
-                        ins->load_store.arg_2 |= midgard_ldst_reg(src.reg, component);
+                        ins->src[2] = SSA_FIXED_REGISTER(src.reg);
+                        ins->swizzle[2][0] = component;
                 }
  
                 break;
         }
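
The component selection in the case above is plain arithmetic: phys_reg.offset is a byte offset within the 16-byte register and shift is log2 of the channel size, so component = offset / (1 << shift), with the assert guaranteeing the offset is channel-aligned. A worked check (illustrative only):

#include <assert.h>

int main(void)
{
        unsigned offset = 8, shift = 2;          /* byte 8 of a 32-bit vector */
        unsigned component = offset >> shift;    /* == 2, i.e. the .z channel */
        assert((component << shift) == offset);  /* must be channel-aligned */
        assert(component == 2);
        return 0;
}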
 
         case TAG_TEXTURE_4: {
-                if (ins->texture.op == TEXTURE_OP_BARRIER)
+                if (ins->op == TEXTURE_OP_BARRIER)
                         break;
 
                 /* Grab RA results */
@@ -756,11 +752,13 @@ install_registers_instr(
                 struct phys_reg offset = index_to_reg(ctx, l, ins->src[3], src_shift[3]);
 
                 /* First, install the texture coordinate */
-                ins->texture.in_reg_select = coord.reg & 1;
+                if (ins->src[1] != ~0)
+                        ins->src[1] = SSA_FIXED_REGISTER(coord.reg);
                 offset_swizzle(ins->swizzle[1], coord.offset, coord.shift, dest.shift, 0);
 
                 /* Next, install the destination */
-                ins->texture.out_reg_select = dest.reg & 1;
+                if (ins->dest != ~0)
+                        ins->dest = SSA_FIXED_REGISTER(dest.reg);
                 offset_swizzle(ins->swizzle[0], 0, 2, dest.shift,
                                 dest_shift == 1 ? dest.offset % 8 :
                                 dest.offset);
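
For both replaced lines above, the old encoding kept only the low bit of the register number (reg & 1), relying on the texture unit's implicit two-register window; the rewrite records the full register as a fixed MIR index and leaves the select bit to packing. A tiny illustration of what the old bit preserved (the register numbers are examples, not a claim about the ISA):

#include <assert.h>

static unsigned old_select_bit(unsigned reg)
{
        return reg & 1;          /* only the parity of the register survived */
}

int main(void)
{
        /* Two adjacent registers collapse onto select bits 0 and 1; the base
         * of the pair was implicit in the old encoding. */
        assert(old_select_bit(28) == 0);
        assert(old_select_bit(29) == 1);
        return 0;
}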
@@ -769,33 +767,14 @@ install_registers_instr(
                 /* If there is a register LOD/bias, use it */
                 if (ins->src[2] != ~0) {
                         assert(!(lod.offset & 3));
-                        midgard_tex_register_select sel = {
-                                .select = lod.reg & 1,
-                                .full = 1,
-                                .component = lod.offset / 4
-                        };
-
-                        uint8_t packed;
-                        memcpy(&packed, &sel, sizeof(packed));
-                        ins->texture.bias = packed;
+                        ins->src[2] = SSA_FIXED_REGISTER(lod.reg);
+                        ins->swizzle[2][0] = lod.offset / 4;
                 }
 
                 /* If there is an offset register, install it */
                 if (ins->src[3] != ~0) {
-                        unsigned x = offset.offset / 4;
-                        unsigned y = x + 1;
-                        unsigned z = x + 2;
-
-                        /* Check range, TODO: half-registers */
-                        assert(z < 4);
-
-                        ins->texture.offset =
-                                (1)                   | /* full */
-                                (offset.reg & 1) << 1 | /* select */
-                                (0 << 2)              | /* upper */
-                                (x << 3)              | /* swizzle */
-                                (y << 5)              | /* swizzle */
-                                (z << 7);               /* swizzle */
+                        ins->src[3] = SSA_FIXED_REGISTER(offset.reg);
+                        ins->swizzle[3][0] = offset.offset / 4;
                 }
 
                 break;
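
The LOD/bias and offset sources above recover the scalar's location the same way: a byte offset divided by 4 gives the 32-bit component, which now simply becomes the first swizzle entry of the source instead of being packed into a bitfield. A worked check (illustrative only):

#include <assert.h>

int main(void)
{
        unsigned byte_offset = 12;               /* scalar lives in .w */
        assert(byte_offset / 4 == 3);            /* swizzle[..][0] = 3 */
        assert((byte_offset & 3) == 0);          /* matches the alignment assert */
        return 0;
}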
@@ -876,9 +855,17 @@ mir_spill_register(
 
                         midgard_instruction st;
 
+                        /* Note: it's important to match the mask of the spill
+                         * with the mask of the instruction whose destination
+                         * we're spilling, or otherwise we'll read invalid
+                         * components and can fail RA in a subsequent iteration
+                         */
+
                         if (is_special_w) {
                                 st = v_mov(spill_node, spill_slot);
                                 st.no_spill |= (1 << spill_class);
+                                st.mask = ins->mask;
+                                st.dest_type = st.src_types[0] = ins->dest_type;
                         } else {
                                 ins->dest = spill_index++;
                                 ins->no_spill |= (1 << spill_class);
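
This last hunk is the change the commit title refers to: the spill move inherits the mask and destination type of the instruction whose destination is being spilled. A stripped-down sketch of why, with made-up types rather than the real MIR API: a texture write with a partial mask defines only some components, and a full-mask spill mov would read the undefined lanes, extending liveness for data that never existed and, as the comment notes, potentially failing RA on a later iteration.

#include <assert.h>

struct fake_mov {
        unsigned src, dest;
        unsigned mask;                           /* one bit per component */
        unsigned dest_type;                      /* e.g. a 32-bit float tag */
};

/* Build the spill move so it only copies what the spilled writer produced. */
static struct fake_mov
make_spill_mov(unsigned spill_node, unsigned spill_slot,
               unsigned writer_mask, unsigned writer_type)
{
        struct fake_mov st = { .src = spill_node, .dest = spill_slot };
        st.mask = writer_mask;                   /* match the writer, not 0xF */
        st.dest_type = writer_type;
        return st;
}

int main(void)
{
        /* A texture write touching only .xy (mask 0x3) spills as a .xy mov. */
        struct fake_mov st = make_spill_mov(10, 11, 0x3, 0);
        assert(st.mask == 0x3);
        return 0;
}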