pan/mdg: Schedule based on liveness
[mesa.git] / src/panfrost/midgard/mir.c
index 2e0960148eb75ba552b9c4fe818e0f49e900c118..343c4bdb1d76a191fe7865603b6c8cf5e40e2fb0 100644
@@ -71,6 +71,10 @@ mir_rewrite_index_dst(compiler_context *ctx, unsigned old, unsigned new)
         mir_foreach_instr_global(ctx, ins) {
                 mir_rewrite_index_dst_single(ins, old, new);
         }
+
+        /* Implicitly written before the shader */
+        if (ctx->blend_input == old)
+                ctx->blend_input = new;
 }
 
 void
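
The hunk above extends destination renaming to cover the blend input, which no
instruction writes explicitly. A minimal standalone sketch of the pattern (toy
types, not part of the patch): every side table that stores value indices has
to be visited alongside the instruction stream, otherwise the implicitly
written value keeps referring to the stale name.

/* Toy sketch only: rename a value everywhere it may be referenced. */
struct toy_ctx {
        unsigned blend_input; /* written by fixed-function hardware before the shader runs */
};

static void
toy_rename(struct toy_ctx *ctx, unsigned *dests, unsigned count,
           unsigned old, unsigned new)
{
        for (unsigned i = 0; i < count; ++i) {
                if (dests[i] == old)
                        dests[i] = new;
        }

        /* The implicit write lives outside the instruction list */
        if (ctx->blend_input == old)
                ctx->blend_input = new;
}
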
@@ -145,6 +149,25 @@ mir_nontrivial_outmod(midgard_instruction *ins)
                 return mod != midgard_outmod_none;
 }
 
+/* 128 / sz = exp2(log2(128 / sz))
+ *          = exp2(log2(128) - log2(sz))
+ *          = exp2(7 - log2(sz))
+ *          = 1 << (7 - log2(sz))
+ */
+
+static unsigned
+mir_components_for_bits(unsigned bits)
+{
+        return 1 << (7 - util_logbase2(bits));
+}
+
+unsigned
+mir_components_for_type(nir_alu_type T)
+{
+        unsigned sz = nir_alu_type_get_type_size(T);
+        return mir_components_for_bits(sz);
+}
+
 uint16_t
 mir_from_bytemask(uint16_t bytemask, unsigned bits)
 {
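
For reference, a quick worked check of the new helpers against the derivation
in the comment (illustrative, unit-test style asserts; not part of the patch).
A Midgard vector is 128 bits wide, so the component count is simply 128
divided by the type width:

assert(mir_components_for_bits(8)  == 16); /* 1 << (7 - 3) */
assert(mir_components_for_bits(16) == 8);  /* 1 << (7 - 4) */
assert(mir_components_for_bits(32) == 4);  /* 1 << (7 - 5) */
assert(mir_components_for_bits(64) == 2);  /* 1 << (7 - 6) */
assert(mir_components_for_type(nir_type_float32) == 4);
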
@@ -171,7 +194,7 @@ mir_round_bytemask_up(uint16_t mask, unsigned bits)
 {
         unsigned bytes = bits / 8;
         unsigned maxmask = mask_of(bytes);
-        unsigned channels = 16 / bytes;
+        unsigned channels = mir_components_for_bits(bits);
 
         for (unsigned c = 0; c < channels; ++c) {
                 unsigned submask = maxmask << (c * bytes);
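
Worked examples for the rounding, assuming the loop body cut off by this hunk
ORs in the full per-component submask whenever any of its bytes is set
(illustrative only):

assert(mir_round_bytemask_up(0x0010, 32) == 0x00F0); /* low byte of .y at 32-bit -> whole .y */
assert(mir_round_bytemask_up(0x0001, 16) == 0x0003); /* low byte of .x at 16-bit -> whole .x */
assert(mir_round_bytemask_up(0x000F, 32) == 0x000F); /* already component-aligned, unchanged */
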
@@ -202,23 +225,21 @@ mir_set_bytemask(midgard_instruction *ins, uint16_t bytemask)
-/* Checks if we should use an upper destination override, rather than the lower
- * one in the IR. Returns zero if no, returns the bytes to shift otherwise */
+/* Checks if we should use an upper destination override, rather than the lower
+ * one in the IR. Returns -1 if there is nothing to override, the shift otherwise */
 
-unsigned
-mir_upper_override(midgard_instruction *ins)
+signed
+mir_upper_override(midgard_instruction *ins, unsigned inst_size)
 {
-        /* If there is no override, there is no upper override, tautology */
-        if (ins->alu.dest_override == midgard_dest_override_none)
-                return 0;
+        unsigned type_size = nir_alu_type_get_type_size(ins->dest_type);
 
-        /* Make sure we didn't already lower somehow */
-        assert(ins->alu.dest_override == midgard_dest_override_lower);
+        /* If the sizes are the same, there's nothing to override */
+        if (type_size == inst_size)
+                return -1;
 
         /* There are 16 bytes per vector, so there are (16/bytes)
          * components per vector. So the magic half is half of
          * (16/bytes), which simplifies to 8/bytes = 8 / (bits / 8) = 64 / bits
          * */
 
-        unsigned type_size = nir_alu_type_get_type_size(ins->dest_type);
-        unsigned threshold = 64 / type_size;
+        unsigned threshold = mir_components_for_bits(type_size) >> 1;
 
         /* How many components did we shift over? */
         unsigned zeroes = __builtin_ctz(ins->mask);
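
A worked instance of the threshold logic (illustrative numbers, not from the
patch): a 16-bit destination written by a 32-bit instruction packs eight
components into the 128-bit vector, so the upper half begins at component 4.

unsigned threshold = mir_components_for_bits(16) >> 1; /* 8 components / 2 == 4 */
unsigned zeroes    = __builtin_ctz(0x00F0);            /* mask hits components 4..7 */
bool use_upper     = (zeroes >= threshold);            /* true: the write sits in the upper half */
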
@@ -251,20 +272,6 @@ mir_bytemask_of_read_components_single(unsigned *swizzle, unsigned inmask, unsig
 uint16_t
 mir_bytemask_of_read_components_index(midgard_instruction *ins, unsigned i)
 {
-        if (ins->compact_branch && ins->writeout && (i == 0)) {
-                /* Non-ZS writeout uses all components */
-                if (!ins->writeout_depth && !ins->writeout_stencil)
-                        return 0xFFFF;
-
-                /* For ZS-writeout, if both Z and S are written we need two
-                 * components, otherwise we only need one.
-                 */
-                if (ins->writeout_depth && ins->writeout_stencil)
-                        return 0xFF;
-                else
-                        return 0xF;
-        }
-
         /* Conditional branches read one 32-bit component = 4 bytes (TODO: multi branch??) */
         if (ins->compact_branch && ins->branch.conditional && (i == 0))
                 return 0xF;
@@ -272,7 +279,7 @@ mir_bytemask_of_read_components_index(midgard_instruction *ins, unsigned i)
         /* ALU ops act componentwise so we need to pay attention to
          * their mask. Texture/ldst does not so we don't clamp source
          * readmasks based on the writemask */
-        unsigned qmask = (ins->type == TAG_ALU_4) ? ins->mask : ~0;
+        unsigned qmask = ~0;
 
         /* Handle dot products and things */
         if (ins->type == TAG_ALU_4 && !ins->compact_branch) {
@@ -282,6 +289,8 @@ mir_bytemask_of_read_components_index(midgard_instruction *ins, unsigned i)
 
                 if (channel_override)
                         qmask = mask_of(channel_override);
+                else
+                        qmask = ins->mask;
         }
 
         return mir_bytemask_of_read_components_single(ins->swizzle[i], qmask,
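
Restating the selection above as a small helper may make the policy easier to
scan (illustrative only; it mirrors the code rather than extending it, and
returns a component mask instead of a bytemask):

static unsigned
read_component_mask(const midgard_instruction *ins, unsigned channel_override)
{
        if (ins->compact_branch && ins->branch.conditional)
                return 0x1;                       /* one 32-bit condition component */

        if (ins->type != TAG_ALU_4 || ins->compact_branch)
                return ~0;                        /* texture/ldst: never clamp on the writemask */

        if (channel_override)
                return mask_of(channel_override); /* fdotN and friends read a fixed count */

        return ins->mask;                         /* ordinary ALU: follow the writemask */
}
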
@@ -428,6 +437,10 @@ mir_flip(midgard_instruction *ins)
         ins->src_neg[0] = ins->src_neg[1];
         ins->src_neg[1] = temp;
 
+        temp = ins->src_invert[0];
+        ins->src_invert[0] = ins->src_invert[1];
+        ins->src_invert[1] = temp;
+
         unsigned temp_swizzle[16];
         memcpy(temp_swizzle, ins->swizzle[0], sizeof(ins->swizzle[0]));
         memcpy(ins->swizzle[0], ins->swizzle[1], sizeof(ins->swizzle[0]));
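
A hedged usage sketch for the new swap (hypothetical canonicalization, not
taken from the patch): when only the first source of a commutative bitwise op
carries an invert, flipping the operands keeps the inverted source in a
predictable slot, and the swap above guarantees the invert flag travels with
its operand just like the negate flag and the swizzle do.

if (ins->src_invert[0] && !ins->src_invert[1])
        mir_flip(ins);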