pan/mdg: Ensure we don't DCE into impossible masks
[mesa.git] / src / panfrost / midgard / midgard_emit.c
index d5aba7f8612010405e7d1273f7980f012ad77728..cf283dd3ed5b1650f1f8f756ec0be125ca0b3cf1 100644 (file)
 #include "compiler.h"
 #include "midgard_ops.h"
 
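+/* Pick the integer source modifier. Full-size sources use the normal
+ * (sign-extending) mode; half-size sources are either shifted or widened by
+ * sign/zero extension depending on their type. */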
+static midgard_int_mod
+mir_get_imod(bool shift, nir_alu_type T, bool half, bool scalar)
+{
+        if (!half) {
+                assert(!shift);
+                /* Sign-extension, really... */
+                return scalar ? 0 : midgard_int_normal;
+        }
+
+        if (shift)
+                return midgard_int_shift;
+
+        if (nir_alu_type_get_base_type(T) == nir_type_int)
+                return midgard_int_sign_extend;
+        else
+                return midgard_int_zero_extend;
+}
+
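+/* Pack the modifier bits for a single source: integer ops encode an
+ * extend/shift mode, float ops encode abs/neg. */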
+static unsigned
+mir_pack_mod(midgard_instruction *ins, unsigned i, bool scalar)
+{
+        bool integer = midgard_is_integer_op(ins->alu.op);
+        unsigned base_size = (8 << ins->alu.reg_mode);
+        unsigned sz = nir_alu_type_get_type_size(ins->src_types[i]);
+        bool half = (sz == (base_size >> 1));
+
+        return integer ?
+                mir_get_imod(ins->src_shift[i], ins->src_types[i], half, scalar) :
+                ((ins->src_abs[i] << 0) |
+                 ((ins->src_neg[i] << 1)));
+}
+
 /* Midgard IR only knows vector ALU types, but we sometimes need to actually
  * use scalar ALU instructions, for functional or performance reasons. To do
  * this, we just demote vector ALU payloads to scalar. */
@@ -41,40 +73,13 @@ component_from_mask(unsigned mask)
 }
 
 static unsigned
-vector_to_scalar_source(unsigned u, bool is_int, bool is_full,
-                unsigned component)
+mir_pack_scalar_source(unsigned mod, bool is_full, unsigned component)
 {
-        midgard_vector_alu_src v;
-        memcpy(&v, &u, sizeof(v));
-
-        /* TODO: Integers */
-
-        midgard_scalar_alu_src s = { 0 };
-
-        if (is_full) {
-                /* For a 32-bit op, just check the source half flag */
-                s.full = !v.half;
-        } else if (!v.half) {
-                /* For a 16-bit op that's not subdivided, never full */
-                s.full = false;
-        } else {
-                /* We can't do 8-bit scalar, abort! */
-                assert(0);
-        }
-
-        /* Component indexing takes size into account */
-
-        if (s.full)
-                s.component = component << 1;
-        else
-                s.component = component;
-
-        if (is_int) {
-                /* TODO */
-        } else {
-                s.abs = v.mod & MIDGARD_FLOAT_MOD_ABS;
-                s.negate = v.mod & MIDGARD_FLOAT_MOD_NEG;
-        }
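+        /* Component indexing is in 16-bit halves, so full (32-bit) components
+         * are indexed in steps of two */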
+        midgard_scalar_alu_src s = {
+                .mod = mod,
+                .full = is_full,
+                .component = component << (is_full ? 1 : 0)
+        };
 
         unsigned o;
         memcpy(&o, &s, sizeof(s));
@@ -85,17 +90,22 @@ vector_to_scalar_source(unsigned u, bool is_int, bool is_full,
 static midgard_scalar_alu
 vector_to_scalar_alu(midgard_vector_alu v, midgard_instruction *ins)
 {
-        bool is_int = midgard_is_integer_op(v.op);
-        bool is_full = v.reg_mode == midgard_reg_mode_32;
-        bool is_inline_constant = ins->has_inline_constant;
+        bool is_full = nir_alu_type_get_type_size(ins->dest_type) == 32;
 
+        bool half_0 = nir_alu_type_get_type_size(ins->src_types[0]) == 16;
+        bool half_1 = nir_alu_type_get_type_size(ins->src_types[1]) == 16;
         unsigned comp = component_from_mask(ins->mask);
 
+        unsigned packed_src[2] = {
+                mir_pack_scalar_source(mir_pack_mod(ins, 0, true), !half_0, ins->swizzle[0][comp]),
+                mir_pack_scalar_source(mir_pack_mod(ins, 1, true), !half_1, ins->swizzle[1][comp])
+        };
+
         /* The output component is from the mask */
         midgard_scalar_alu s = {
                 .op = v.op,
-                .src1 = vector_to_scalar_source(v.src1, is_int, is_full, ins->swizzle[0][comp]),
-                .src2 = !is_inline_constant ? vector_to_scalar_source(v.src2, is_int, is_full, ins->swizzle[1][comp]) : 0,
+                .src1 = packed_src[0],
+                .src2 = packed_src[1],
                 .unknown = 0,
                 .outmod = v.outmod,
                 .output_full = is_full,
@@ -161,85 +171,149 @@ mir_pack_mask_alu(midgard_instruction *ins)
          * override to the lower or upper half, shifting the effective mask in
          * the latter, so AAAA.... becomes AAAA */
 
-        unsigned upper_shift = mir_upper_override(ins);
+        unsigned inst_size = 8 << ins->alu.reg_mode;
+        signed upper_shift = mir_upper_override(ins, inst_size);
 
-        if (upper_shift) {
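+        /* A negative shift means no destination override; zero selects the
+         * lower half and a positive shift selects the upper half */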
+        if (upper_shift >= 0) {
                 effective >>= upper_shift;
-                ins->alu.dest_override = midgard_dest_override_upper;
+                ins->alu.dest_override = upper_shift ?
+                        midgard_dest_override_upper :
+                        midgard_dest_override_lower;
+        } else {
+                ins->alu.dest_override = midgard_dest_override_none;
         }
 
         if (ins->alu.reg_mode == midgard_reg_mode_32)
-                ins->alu.mask = expand_writemask(effective, 4);
-        else if (ins->alu.reg_mode == midgard_reg_mode_64)
                 ins->alu.mask = expand_writemask(effective, 2);
+        else if (ins->alu.reg_mode == midgard_reg_mode_64)
+                ins->alu.mask = expand_writemask(effective, 1);
         else
                 ins->alu.mask = effective;
 }
 
-static void
-mir_pack_swizzle_alu(midgard_instruction *ins)
+static unsigned
+mir_pack_swizzle(unsigned mask, unsigned *swizzle,
+                nir_alu_type T, midgard_reg_mode reg_mode,
+                bool op_channeled, bool *rep_low, bool *rep_high)
 {
-        midgard_vector_alu_src src[] = {
-                vector_alu_from_unsigned(ins->alu.src1),
-                vector_alu_from_unsigned(ins->alu.src2)
-        };
+        unsigned packed = 0;
+        unsigned sz = nir_alu_type_get_type_size(T);
 
-        for (unsigned i = 0; i < 2; ++i) {
-                unsigned packed = 0;
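+        /* 64-bit ops swizzle in 64-bit lanes; 32-bit sources must take all of
+         * their enabled components from the same half of the source */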
+        if (reg_mode == midgard_reg_mode_64) {
+                assert(sz == 64 || sz == 32);
+                unsigned components = (sz == 32) ? 4 : 2;
 
-                if (ins->alu.reg_mode == midgard_reg_mode_64) {
-                        midgard_reg_mode mode = mir_srcsize(ins, i);
-                        unsigned components = 16 / mir_bytes_for_mode(mode);
+                packed = mir_pack_swizzle_64(swizzle, components);
 
-                        packed = mir_pack_swizzle_64(ins->swizzle[i], components);
+                if (sz == 32) {
+                        bool lo = swizzle[0] >= COMPONENT_Z;
+                        bool hi = swizzle[1] >= COMPONENT_Z;
 
-                        if (mode == midgard_reg_mode_32) {
-                                src[i].rep_low |= (ins->swizzle[i][0] >= COMPONENT_Z);
-                                src[i].rep_high |= (ins->swizzle[i][1] >= COMPONENT_Z);
-                        } else if (mode < midgard_reg_mode_32) {
-                                unreachable("Cannot encode 8/16 swizzle in 64-bit");
-                        }
-                } else {
-                        /* For 32-bit, swizzle packing is stupid-simple. For 16-bit,
-                         * the strategy is to check whether the nibble we're on is
-                         * upper or lower. We need all components to be on the same
-                         * "side"; that much is enforced by the ISA and should have
-                         * been lowered. TODO: 8-bit packing. TODO: vec8 */
+                        if (mask & 0x1) {
+                                /* We can't mix halves... */
+                                if (mask & 2)
+                                        assert(lo == hi);
 
-                        unsigned first = ins->mask ? ffs(ins->mask) - 1 : 0;
-                        bool upper = ins->swizzle[i][first] > 3;
+                                *rep_low = lo;
+                        } else {
+                                *rep_low = hi;
+                        }
+                } else if (sz < 32) {
+                        unreachable("Cannot encode 8/16 swizzle in 64-bit");
+                }
+        } else {
+                /* For 32-bit, swizzle packing is stupid-simple. For 16-bit,
+                 * the strategy is to check whether the nibble we're on is
+                 * upper or lower. We need all components to be on the same
+                 * "side"; that much is enforced by the ISA and should have
+                 * been lowered. TODO: 8-bit packing. TODO: vec8 */
 
-                        if (upper && ins->mask)
-                                assert(mir_srcsize(ins, i) <= midgard_reg_mode_16);
+                unsigned first = mask ? ffs(mask) - 1 : 0;
+                bool upper = swizzle[first] > 3;
 
-                        for (unsigned c = 0; c < 4; ++c) {
-                                unsigned v = ins->swizzle[i][c];
+                if (upper && mask)
+                        assert(sz <= 16);
 
-                                bool t_upper = v > 3;
+                bool dest_up = !op_channeled && (first >= 4);
 
-                                /* Ensure we're doing something sane */
+                for (unsigned c = (dest_up ? 4 : 0); c < (dest_up ? 8 : 4); ++c) {
+                        unsigned v = swizzle[c];
 
-                                if (ins->mask & (1 << c)) {
-                                        assert(t_upper == upper);
-                                        assert(v <= 7);
-                                }
+                        bool t_upper = v > 3;
 
-                                /* Use the non upper part */
-                                v &= 0x3;
+                        /* Ensure we're doing something sane */
 
-                                packed |= v << (2 * c);
+                        if (mask & (1 << c)) {
+                                assert(t_upper == upper);
+                                assert(v <= 7);
                         }
 
-                        src[i].rep_high = upper;
+                        /* Use the non upper part */
+                        v &= 0x3;
+
+                        packed |= v << (2 * (c % 4));
                 }
 
-                src[i].swizzle = packed;
+                /* Replicate for now... should really pick a side for
+                 * dot products */
+
+                if (reg_mode == midgard_reg_mode_16 && sz == 16) {
+                        *rep_low = !upper;
+                        *rep_high = upper;
+                } else if (reg_mode == midgard_reg_mode_16 && sz == 8) {
+                        *rep_low = upper;
+                        *rep_high = upper;
+                } else if (reg_mode == midgard_reg_mode_32) {
+                        *rep_low = upper;
+                } else {
+                        unreachable("Unhandled reg mode");
+                }
         }
 
-        ins->alu.src1 = vector_alu_srco_unsigned(src[0]);
+        return packed;
+}
+
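+/* Pack both vector ALU sources: swizzle, replication flags and modifiers.
+ * Inline constants and unused sources are skipped. */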
+static void
+mir_pack_vector_srcs(midgard_instruction *ins)
+{
+        bool channeled = GET_CHANNEL_COUNT(alu_opcode_props[ins->alu.op].props);
+
+        midgard_reg_mode mode = ins->alu.reg_mode;
+        unsigned base_size = (8 << mode);
+
+        for (unsigned i = 0; i < 2; ++i) {
+                if (ins->has_inline_constant && (i == 1))
+                        continue;
+
+                if (ins->src[i] == ~0)
+                        continue;
 
-        if (!ins->has_inline_constant)
-                ins->alu.src2 = vector_alu_srco_unsigned(src[1]);
+                bool rep_lo = false, rep_hi = false;
+                unsigned sz = nir_alu_type_get_type_size(ins->src_types[i]);
+                bool half = (sz == (base_size >> 1));
+
+                assert((sz == base_size) || half);
+
+                unsigned swizzle = mir_pack_swizzle(ins->mask, ins->swizzle[i],
+                                ins->src_types[i], ins->alu.reg_mode,
+                                channeled, &rep_lo, &rep_hi);
+
+                midgard_vector_alu_src pack = {
+                        .mod = mir_pack_mod(ins, i, false),
+                        .rep_low = rep_lo,
+                        .rep_high = rep_hi,
+                        .half = half,
+                        .swizzle = swizzle
+                };
+                unsigned p = vector_alu_srco_unsigned(pack);
+
+                if (i == 0)
+                        ins->alu.src1 = p;
+                else
+                        ins->alu.src2 = p;
+        }
 }
 
 static void
@@ -289,13 +363,13 @@ mir_pack_swizzle_tex(midgard_instruction *ins)
 static void
 mir_pack_ldst_mask(midgard_instruction *ins)
 {
-        midgard_reg_mode mode = mir_typesize(ins);
+        unsigned sz = nir_alu_type_get_type_size(ins->dest_type);
         unsigned packed = ins->mask;
 
-        if (mode == midgard_reg_mode_64) {
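+        /* The native load/store mask is in 32-bit units, so 64-bit channels
+         * expand to a pair of bits and 16-bit channels collapse in pairs */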
+        if (sz == 64) {
                 packed = ((ins->mask & 0x2) ? (0x8 | 0x4) : 0) |
                          ((ins->mask & 0x1) ? (0x2 | 0x1) : 0);
-        } else if (mode == midgard_reg_mode_16) {
+        } else if (sz == 16) {
                 packed = 0;
 
                 for (unsigned i = 0; i < 4; ++i) {
@@ -306,11 +380,58 @@ mir_pack_ldst_mask(midgard_instruction *ins)
 
                         packed |= (u << i);
                 }
+        } else {
+                assert(sz == 32);
         }
 
         ins->load_store.mask = packed;
 }
 
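+/* Fold source inversions into the opcode where the ISA has a fused form, e.g.
+ * iand with an inverted second source becomes iandnot. */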
+static void
+mir_lower_inverts(midgard_instruction *ins)
+{
+        bool inv[3] = {
+                ins->src_invert[0],
+                ins->src_invert[1],
+                ins->src_invert[2]
+        };
+
+        switch (ins->alu.op) {
+        case midgard_alu_op_iand:
+                /* a & ~b = iandnot(a, b) */
+                /* ~a & ~b = ~(a | b) = inor(a, b) */
+
+                if (inv[0] && inv[1])
+                        ins->alu.op = midgard_alu_op_inor;
+                else if (inv[1])
+                        ins->alu.op = midgard_alu_op_iandnot;
+
+                break;
+        case midgard_alu_op_ior:
+                /*  a | ~b = iornot(a, b) */
+                /* ~a | ~b = ~(a & b) = inand(a, b) */
+
+                if (inv[0] && inv[1])
+                        ins->alu.op = midgard_alu_op_inand;
+                else if (inv[1])
+                        ins->alu.op = midgard_alu_op_iornot;
+
+                break;
+
+        case midgard_alu_op_ixor:
+                /* ~a ^ b = a ^ ~b = ~(a ^ b) = inxor(a, b) */
+                /* ~a ^ ~b = a ^ b */
+
+                if (inv[0] ^ inv[1])
+                        ins->alu.op = midgard_alu_op_inxor;
+
+                break;
+
+        default:
+                break;
+        }
+}
+
 static void
 emit_alu_bundle(compiler_context *ctx,
                 midgard_bundle *bundle,
@@ -344,9 +465,12 @@ emit_alu_bundle(compiler_context *ctx,
                 /* In case we demote to a scalar */
                 midgard_scalar_alu scalarized;
 
+                if (!ins->compact_branch)
+                        mir_lower_inverts(ins);
+
                 if (ins->unit & UNITS_ANY_VECTOR) {
                         mir_pack_mask_alu(ins);
-                        mir_pack_swizzle_alu(ins);
+                        mir_pack_vector_srcs(ins);
                         size = sizeof(midgard_vector_alu);
                         source = &ins->alu;
                 } else if (ins->unit == ALU_ENAB_BR_COMPACT) {
@@ -361,20 +485,16 @@ emit_alu_bundle(compiler_context *ctx,
                         source = &scalarized;
                 }
 
-                memcpy(util_dynarray_grow_bytes(emission, 1, size), source, size);
+                memcpy(util_dynarray_grow_bytes(emission, size, 1), source, size);
         }
 
         /* Emit padding (all zero) */
-        memset(util_dynarray_grow_bytes(emission, 1, bundle->padding), 0, bundle->padding);
+        memset(util_dynarray_grow_bytes(emission, bundle->padding, 1), 0, bundle->padding);
 
         /* Tack on constants */
 
-        if (bundle->has_embedded_constants) {
-                util_dynarray_append(emission, float, bundle->constants[0]);
-                util_dynarray_append(emission, float, bundle->constants[1]);
-                util_dynarray_append(emission, float, bundle->constants[2]);
-                util_dynarray_append(emission, float, bundle->constants[3]);
-        }
+        if (bundle->has_embedded_constants)
+                util_dynarray_append(emission, midgard_constants, bundle->constants);
 }
 
 /* Shift applied to the immediate used as an offset. Probably this is papering
@@ -390,6 +510,21 @@ mir_ldst_imm_shift(midgard_load_store_op op)
                 return 1;
 }
 
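+/* Map a NIR base type to the sampler type encoded in the texture word */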
+static enum mali_sampler_type
+midgard_sampler_type(nir_alu_type t)
+{
+        switch (nir_alu_type_get_base_type(t)) {
+        case nir_type_float:
+                return MALI_SAMPLER_FLOAT;
+        case nir_type_int:
+                return MALI_SAMPLER_SIGNED;
+        case nir_type_uint:
+                return MALI_SAMPLER_UNSIGNED;
+        default:
+                unreachable("Unknown sampler type");
+        }
+}
+
 /* After everything is scheduled, emit whole bundles at a time */
 
 void
@@ -405,6 +540,10 @@ emit_binary_bundle(compiler_context *ctx,
         case TAG_ALU_8:
         case TAG_ALU_12:
         case TAG_ALU_16:
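+        /* Tags offset by 4 are the writeout variants of the same ALU bundles */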
+        case TAG_ALU_4 + 4:
+        case TAG_ALU_8 + 4:
+        case TAG_ALU_12 + 4:
+        case TAG_ALU_16 + 4:
                 emit_alu_bundle(ctx, bundle, emission, lookahead);
                 break;
 
@@ -421,7 +560,7 @@ emit_binary_bundle(compiler_context *ctx,
                         mir_pack_swizzle_ldst(bundle->instructions[i]);
 
                         /* Apply a constant offset */
-                        unsigned offset = bundle->instructions[i]->constants[0];
+                        unsigned offset = bundle->instructions[i]->constants.u32[0];
 
                         if (offset) {
                                 unsigned shift = mir_ldst_imm_shift(bundle->instructions[i]->load_store.op);
@@ -450,7 +589,8 @@ emit_binary_bundle(compiler_context *ctx,
         }
 
         case TAG_TEXTURE_4:
-        case TAG_TEXTURE_4_VTX: {
+        case TAG_TEXTURE_4_VTX:
+        case TAG_TEXTURE_4_BARRIER: {
                 /* Texture instructions are easy, since there is no pipelining
                  * nor VLIW to worry about. We may need to set .cont/.last
                  * flags. */
@@ -459,22 +599,36 @@ emit_binary_bundle(compiler_context *ctx,
 
                 ins->texture.type = bundle->tag;
                 ins->texture.next_type = next_tag;
-                ins->texture.mask = ins->mask;
+
+                /* Nothing else to pack for barriers */
+                if (ins->texture.op == TEXTURE_OP_BARRIER) {
+                        ins->texture.cont = ins->texture.last = 1;
+                        util_dynarray_append(emission, midgard_texture_word, ins->texture);
+                        return;
+                }
+
+                signed override = mir_upper_override(ins, 32);
+
+                ins->texture.mask = override > 0 ?
+                        ins->mask >> override :
+                        ins->mask;
+
                 mir_pack_swizzle_tex(ins);
 
-                ctx->texture_op_count--;
+                unsigned osz = nir_alu_type_get_type_size(ins->dest_type);
+                unsigned isz = nir_alu_type_get_type_size(ins->src_types[1]);
 
-                if (mir_op_computes_derivatives(ctx->stage, ins->texture.op)) {
-                        bool continues = ctx->texture_op_count > 0;
+                assert(osz == 32 || osz == 16);
+                assert(isz == 32 || isz == 16);
 
-                        /* Control flow complicates helper invocation
-                         * lifespans, so for now just keep helper threads
-                         * around indefinitely with loops. TODO: Proper
-                         * analysis */
-                        continues |= ctx->loop_count > 0;
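+                /* Select full (32-bit) or half (16-bit) widths for the output
+                 * and input registers, marking upper-half destination writes */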
+                ins->texture.out_full = (osz == 32);
+                ins->texture.out_upper = override > 0;
+                ins->texture.in_reg_full = (isz == 32);
+                ins->texture.sampler_type = midgard_sampler_type(ins->dest_type);
 
-                        ins->texture.cont = continues;
-                        ins->texture.last = !continues;
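+                /* The cont/last flags control helper invocation lifetimes;
+                 * derive them from the per-instruction helper flags */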
+                if (mir_op_computes_derivatives(ctx->stage, ins->texture.op)) {
+                        ins->texture.cont = !ins->helper_terminate;
+                        ins->texture.last = ins->helper_terminate || ins->helper_execute;
                 } else {
                         ins->texture.cont = ins->texture.last = 1;
                 }