pan/midgard: Add units for more instructions
[mesa.git] / src / panfrost / midgard / midgard_schedule.c
index 97e06d743faddbad5d7b66b6049fb88c6d2bcdee..62135010d9892d476be5ac1de4cf6c3ea709195c 100644 (file)
@@ -24,6 +24,7 @@
 #include "compiler.h"
 #include "midgard_ops.h"
 #include "util/u_memory.h"
+#include "util/register_allocate.h"
 
 /* Create a mask of accessed components from a swizzle to figure out vector
  * dependencies */
@@ -117,6 +118,78 @@ midgard_has_hazard(
 
 }
 
+/* Fragment writeout (of r0) is allowed when:
+ *
+ *  - All components of r0 are written in the bundle
+ *  - No components of r0 are written in VLUT
+ *  - Non-pipelined dependencies of r0 are not written in the bundle
+ *
+ * This function checks if these requirements are satisfied given the content
+ * of a scheduled bundle.
+ */
+
+static bool
+can_writeout_fragment(compiler_context *ctx, midgard_instruction **bundle, unsigned count, unsigned node_count)
+{
+        /* First scan for which components of r0 are written out. Initially
+         * none are written */
+
+        uint8_t r0_written_mask = 0x0;
+
+        /* Simultaneously we scan for the set of dependencies */
+        BITSET_WORD *dependencies = calloc(BITSET_WORDS(node_count), sizeof(BITSET_WORD));
+
+        for (unsigned i = 0; i < count; ++i) {
+                midgard_instruction *ins = bundle[i];
+
+                if (ins->ssa_args.dest != SSA_FIXED_REGISTER(0))
+                        continue;
+
+                /* Record written out mask */
+                r0_written_mask |= ins->mask;
+
+                /* Record dependencies, but only if they won't become pipeline
+                 * registers. We know they can't be live after this, because
+                 * writeout happens at the very end of the shader. So check if
+                 * they were written before us. */
+
+                unsigned src0 = ins->ssa_args.src0;
+                unsigned src1 = ins->ssa_args.src1;
+
+                if (!mir_is_written_before(ctx, bundle[0], src0))
+                        src0 = -1;
+
+                if (!mir_is_written_before(ctx, bundle[0], src1))
+                        src1 = -1;
+
+                if ((src0 > 0) && (src0 < node_count))
+                        BITSET_SET(dependencies, src0);
+
+                if ((src1 > 0) && (src1 < node_count))
+                        BITSET_SET(dependencies, src1);
+
+                /* Requirement 2 */
+                if (ins->unit == UNIT_VLUT) {
+                        free(dependencies);
+                        return false;
+                }
+        }
+
+        /* Requirement 1 */
+        if ((r0_written_mask & 0xF) != 0xF) {
+                free(dependencies);
+                return false;
+        }
+
+        /* Requirement 3 */
+
+        for (unsigned i = 0; i < count; ++i) {
+                unsigned dest = bundle[i]->ssa_args.dest;
+
+                if (dest < node_count && BITSET_TEST(dependencies, dest)) {
+                        free(dependencies);
+                        return false;
+                }
+        }
+
+        free(dependencies);
+
+        /* Otherwise, we're good to go */
+        return true;
+}
+
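
As a rough illustration of the three requirements, here is a minimal standalone sketch of the same check. The toy_ins and toy_can_writeout names are hypothetical, a plain uint32_t (capped at 32 nodes) stands in for the BITSET machinery, and no real MIR details are used:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for a scheduled MIR instruction */
    struct toy_ins {
            int dest;       /* node written, or -1 */
            int src[2];     /* source nodes, or -1 */
            uint8_t mask;   /* xyzw write mask */
            bool is_vlut;   /* scheduled to the VLUT unit? */
            bool writes_r0; /* does dest alias r0? */
    };

    bool
    toy_can_writeout(const struct toy_ins *bundle, unsigned count)
    {
            uint8_t r0_mask = 0;
            uint32_t deps = 0;

            for (unsigned i = 0; i < count; ++i) {
                    if (!bundle[i].writes_r0)
                            continue;

                    /* Requirement 2: no r0 write may come from VLUT */
                    if (bundle[i].is_vlut)
                            return false;

                    r0_mask |= bundle[i].mask;

                    for (unsigned s = 0; s < 2; ++s)
                            if (bundle[i].src[s] >= 0 && bundle[i].src[s] < 32)
                                    deps |= 1u << bundle[i].src[s];
            }

            /* Requirement 1: every component of r0 is written in the bundle */
            if ((r0_mask & 0xF) != 0xF)
                    return false;

            /* Requirement 3: no dependency of r0 is produced in this bundle */
            for (unsigned i = 0; i < count; ++i)
                    if (bundle[i].dest >= 0 && bundle[i].dest < 32 &&
                        (deps & (1u << bundle[i].dest)))
                            return false;

            return true;
    }

    int
    main(void)
    {
            /* All of r0.xyzw written from node 7, produced in an earlier
             * bundle: writeout is allowed */
            struct toy_ins ok[] = {
                    { .dest = 0, .src = { 7, -1 }, .mask = 0xF, .writes_r0 = true },
            };

            /* Same write, but node 7 is also produced inside the bundle:
             * requirement 3 fails */
            struct toy_ins bad[] = {
                    { .dest = 7, .src = { 2, -1 }, .mask = 0xF },
                    { .dest = 0, .src = { 7, -1 }, .mask = 0xF, .writes_r0 = true },
            };

            printf("%d %d\n", toy_can_writeout(ok, 1), toy_can_writeout(bad, 2)); /* 1 0 */
            return 0;
    }
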
 /* Schedules, but does not emit, a single basic block. After scheduling, the
  * final tag and size of the block are known, which are necessary for branching
  * */
@@ -127,6 +200,8 @@ schedule_bundle(compiler_context *ctx, midgard_block *block, midgard_instruction
         int instructions_emitted = 0, packed_idx = 0;
         midgard_bundle bundle = { 0 };
 
+        midgard_instruction *scheduled[5] = { NULL };
+
         uint8_t tag = ins->type;
 
         /* Default to the instruction's tag */
@@ -252,12 +327,16 @@ schedule_bundle(compiler_context *ctx, midgard_block *block, midgard_instruction
                                                 else
                                                         break;
                                         } else {
-                                                if ((units & UNIT_SADD) && !(control & UNIT_SADD) && !midgard_has_hazard(segment, segment_size, ains))
+                                                if ((units & UNIT_VMUL) && (last_unit < UNIT_VMUL))
+                                                        unit = UNIT_VMUL;
+                                                else if ((units & UNIT_SADD) && !(control & UNIT_SADD) && !midgard_has_hazard(segment, segment_size, ains))
                                                         unit = UNIT_SADD;
-                                                else if (units & UNIT_SMUL)
-                                                        unit = ((units & UNIT_VMUL) && !(control & UNIT_VMUL)) ? UNIT_VMUL : UNIT_SMUL;
-                                                else if ((units & UNIT_VADD) && !(control & UNIT_VADD))
+                                                else if (units & UNIT_VADD)
                                                         unit = UNIT_VADD;
+                                                else if (units & UNIT_SMUL)
+                                                        unit = UNIT_SMUL;
+                                                else if (units & UNIT_VLUT)
+                                                        unit = UNIT_VLUT;
                                                 else
                                                         break;
                                         }
@@ -385,15 +464,10 @@ schedule_bundle(compiler_context *ctx, midgard_block *block, midgard_instruction
                                 /* All of r0 has to be written out along with
                                  * the branch writeout */
 
-                                if (ains->writeout) {
-                                        /* The rules for when "bare" writeout
-                                         * is safe are when all components are
-                                         * r0 are written out in the final
-                                         * bundle, earlier than VLUT, where any
-                                         * register dependencies of r0 are from
-                                         * an earlier bundle. We can't verify
-                                         * this before RA, so we don't try. */
-
+                                if (ains->writeout && !can_writeout_fragment(ctx, scheduled, index, ctx->temp_count)) {
+                                        /* We only work on full moves at
+                                         * the beginning of the bundle. We
+                                         * could probably do better */
                                         if (index != 0)
                                                 break;
 
@@ -421,6 +495,7 @@ schedule_bundle(compiler_context *ctx, midgard_block *block, midgard_instruction
                         }
 
                         /* Defer marking until after writing to allow for break */
+                        scheduled[index] = ains;
                         control |= ains->unit;
                         last_unit = ains->unit;
                         ++instructions_emitted;
@@ -575,14 +650,104 @@ midgard_pair_load_store(compiler_context *ctx, midgard_block *block)
         }
 }
 
+/* When we're 'squeezing down' the values in the IR, we maintain a hash
+ * as such */
+
+static unsigned
+find_or_allocate_temp(compiler_context *ctx, unsigned hash)
+{
+        if ((hash < 0) || (hash >= SSA_FIXED_MINIMUM))
+                return hash;
+
+        unsigned temp = (uintptr_t) _mesa_hash_table_u64_search(
+                                ctx->hash_to_temp, hash + 1);
+
+        if (temp)
+                return temp - 1;
+
+        /* If no temp is found, allocate one */
+        temp = ctx->temp_count++;
+        ctx->max_hash = MAX2(ctx->max_hash, hash);
+
+        _mesa_hash_table_u64_insert(ctx->hash_to_temp,
+                                    hash + 1, (void *) ((uintptr_t) temp + 1));
+
+        return temp;
+}
+
+/* Reassigns numbering to get rid of gaps in the indices */
+
+static void
+mir_squeeze_index(compiler_context *ctx)
+{
+        /* Reset */
+        ctx->temp_count = 0;
+        /* TODO don't leak old hash_to_temp */
+        ctx->hash_to_temp = _mesa_hash_table_u64_create(NULL);
+
+        mir_foreach_instr_global(ctx, ins) {
+                if (ins->compact_branch) continue;
+
+                ins->ssa_args.dest = find_or_allocate_temp(ctx, ins->ssa_args.dest);
+                ins->ssa_args.src0 = find_or_allocate_temp(ctx, ins->ssa_args.src0);
+
+                if (!ins->ssa_args.inline_constant)
+                        ins->ssa_args.src1 = find_or_allocate_temp(ctx, ins->ssa_args.src1);
+
+        }
+}
+
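
The +1 offsets in find_or_allocate_temp exist because the u64 hash table returns 0/NULL for a missing key, so temp + 1 is stored to keep index 0 usable. A small self-contained sketch of the same dense renumbering, with a hypothetical fixed-size array (toy_table, TOY_MAX_HASH) standing in for _mesa_hash_table_u64:

    #include <stdio.h>

    #define TOY_MAX_HASH 64

    /* Entries hold temp + 1 so that 0 can mean "not yet allocated" */
    static unsigned toy_table[TOY_MAX_HASH];
    static unsigned toy_temp_count;

    unsigned
    toy_find_or_allocate_temp(unsigned hash)
    {
            if (toy_table[hash])
                    return toy_table[hash] - 1;

            unsigned temp = toy_temp_count++;
            toy_table[hash] = temp + 1;
            return temp;
    }

    int
    main(void)
    {
            /* Sparse indices with gaps and a repeat... */
            unsigned sparse[] = { 3, 17, 3, 42 };

            /* ...come out densely numbered: prints "0 1 0 2" */
            for (unsigned i = 0; i < 4; ++i)
                    printf("%u ", toy_find_or_allocate_temp(sparse[i]));
            printf("\n");

            return 0;
    }
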
+static midgard_instruction
+v_load_store_scratch(
+                unsigned srcdest,
+                unsigned index,
+                bool is_store,
+                unsigned mask)
+{
+        /* We index by 32-bit vec4s */
+        unsigned byte = (index * 4 * 4);
+
+        midgard_instruction ins = {
+                .type = TAG_LOAD_STORE_4,
+                .mask = mask,
+                .ssa_args = {
+                        .dest = -1,
+                        .src0 = -1,
+                        .src1 = -1
+                },
+                .load_store = {
+                        .op = is_store ? midgard_op_st_int4 : midgard_op_ld_int4,
+                        .swizzle = SWIZZLE_XYZW,
+
+                        /* For register spilling - to thread local storage */
+                        .unknown = 0x1EEA,
+
+                        /* Splattered across, TODO combine logically */
+                        .varying_parameters = (byte & 0x1FF) << 1,
+                        .address = (byte >> 9)
+                }
+        };
+
+        if (is_store) {
+                /* r0 = r26, r1 = r27 */
+                assert(srcdest == SSA_FIXED_REGISTER(26) || srcdest == SSA_FIXED_REGISTER(27));
+                ins.ssa_args.src0 = (srcdest == SSA_FIXED_REGISTER(27)) ? SSA_FIXED_REGISTER(1) : SSA_FIXED_REGISTER(0);
+        } else {
+                ins.ssa_args.dest = srcdest;
+        }
 
+        return ins;
+}
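
Each spill slot is one 16-byte vec4, and the resulting byte offset is split between the load/store word's varying_parameters and address fields exactly as in the function above. A quick standalone check of that split (the slot numbers here are arbitrary examples):

    #include <stdio.h>

    int
    main(void)
    {
            unsigned slots[] = { 0, 3, 31, 40 };

            for (unsigned i = 0; i < 4; ++i) {
                    unsigned byte = slots[i] * 4 * 4;

                    /* Low 9 bits of the byte offset, pre-shifted by one */
                    unsigned varying_parameters = (byte & 0x1FF) << 1;

                    /* Remaining high bits */
                    unsigned address = byte >> 9;

                    printf("slot %2u -> byte %3u, varying_parameters 0x%03x, address %u\n",
                           slots[i], byte, varying_parameters, address);
            }

            return 0;
    }
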
 
 void
 schedule_program(compiler_context *ctx)
 {
         struct ra_graph *g = NULL;
         bool spilled = false;
-        int iter_count = 10; /* max iterations */
+        int iter_count = 1000; /* max iterations */
+
+        /* Number of 128-bit slots in memory we've spilled into */
+        unsigned spill_count = 0;
 
         midgard_promote_uniforms(ctx, 8);
 
@@ -590,24 +755,180 @@ schedule_program(compiler_context *ctx)
                 midgard_pair_load_store(ctx, block);
         }
 
+        /* Must be lowered right before RA */
+        mir_squeeze_index(ctx);
+        mir_lower_special_reads(ctx);
+
+        /* Lowering can introduce some dead moves */
+
+        mir_foreach_block(ctx, block) {
+                midgard_opt_dead_move_eliminate(ctx, block);
+        }
+
         do {
-                /* We would like to run RA after scheduling, but spilling can
-                 * complicate this */
+                /* If we spill, find the best spill node and spill it */
+
+                unsigned spill_index = ctx->temp_count;
+                if (g && spilled) {
+                        /* All nodes are equal in spill cost, but we can't
+                         * spill nodes written to from an unspill */
+
+                        for (unsigned i = 0; i < ctx->temp_count; ++i) {
+                                ra_set_node_spill_cost(g, i, 1.0);
+                        }
+
+                        mir_foreach_instr_global(ctx, ins) {
+                                if (ins->type != TAG_LOAD_STORE_4) continue;
+                                if (ins->load_store.op != midgard_op_ld_int4) continue;
+                                if (ins->load_store.unknown != 0x1EEA) continue;
+                                ra_set_node_spill_cost(g, ins->ssa_args.dest, -1.0);
+                        }
+
+                        int spill_node = ra_get_best_spill_node(g);
+
+                        if (spill_node < 0) {
+                                mir_print_shader(ctx);
+                                assert(0);
+                        }
+
+                        /* Check the class. Work registers legitimately spill
+                         * to TLS, but special registers just spill to work
+                         * registers */
+                        unsigned class = ra_get_node_class(g, spill_node);
+                        bool is_special = (class >> 2) != REG_CLASS_WORK;
+                        bool is_special_w = (class >> 2) == REG_CLASS_TEXW;
+
+                        /* Allocate TLS slot (maybe) */
+                        unsigned spill_slot = !is_special ? spill_count++ : 0;
+                        midgard_instruction *spill_move = NULL;
+
+                        /* For TLS, replace all stores to the spilled node. For
+                         * special reads, just keep as-is; the class will be demoted
+                         * implicitly. For special writes, spill to a work register */
+
+                        if (!is_special || is_special_w) {
+                                mir_foreach_instr_global_safe(ctx, ins) {
+                                        if (ins->compact_branch) continue;
+                                        if (ins->ssa_args.dest != spill_node) continue;
+
+                                        midgard_instruction st;
+
+                                        if (is_special_w) {
+                                                spill_slot = spill_index++;
+                                                st = v_mov(spill_node, blank_alu_src, spill_slot);
+                                        } else {
+                                                ins->ssa_args.dest = SSA_FIXED_REGISTER(26);
+                                                st = v_load_store_scratch(ins->ssa_args.dest, spill_slot, true, ins->mask);
+                                        }
+
+                                        spill_move = mir_insert_instruction_before(mir_next_op(ins), st);
+
+                                        if (!is_special)
+                                                ctx->spills++;
+                                }
+                        }
 
-                mir_foreach_block(ctx, block) {
-                        schedule_block(ctx, block);
+                        /* Insert a load from TLS before the first consecutive
+                         * use of the node, rewriting to use spilled indices to
+                         * break up the live range. Or, for special, insert a
+                         * move. Ironically the latter *increases* register
+                         * pressure, but the two uses of the spilling mechanism
+                         * are somewhat orthogonal. (special spilling is to use
+                         * work registers to back special registers; TLS
+                         * spilling is to use memory to back work registers) */
+
+                        mir_foreach_block(ctx, block) {
+
+                        bool consecutive_skip = false;
+                        unsigned consecutive_index = 0;
+
+                        mir_foreach_instr_in_block(block, ins) {
+                                if (ins->compact_branch) continue;
+
+                                /* We can't rewrite the move used to spill in the first place */
+                                if (ins == spill_move) continue;
+
+                                if (!mir_has_arg(ins, spill_node)) {
+                                        consecutive_skip = false;
+                                        continue;
+                                }
+
+                                if (consecutive_skip) {
+                                        /* Rewrite */
+                                        mir_rewrite_index_src_single(ins, spill_node, consecutive_index);
+                                        continue;
+                                }
+
+                                if (!is_special_w) {
+                                        consecutive_index = ++spill_index;
+
+                                        midgard_instruction *before = ins;
+
+                                        /* For a csel, go back one more not to break up the bundle */
+                                        if (ins->type == TAG_ALU_4 && OP_IS_CSEL(ins->alu.op))
+                                                before = mir_prev_op(before);
+
+                                        midgard_instruction st;
+
+                                        if (is_special) {
+                                                /* Move */
+                                                st = v_mov(spill_node, blank_alu_src, consecutive_index);
+                                        } else {
+                                                /* TLS load */
+                                                st = v_load_store_scratch(consecutive_index, spill_slot, false, 0xF);
+                                        }
+
+                                        mir_insert_instruction_before(before, st);
+                                        // consecutive_skip = true;
+                                } else {
+                                        /* Special writes already have their move spilled in */
+                                        consecutive_index = spill_slot;
+                                }
+
+                                /* Rewrite the use to point at the new index */
+                                mir_rewrite_index_src_single(ins, spill_node, consecutive_index);
+
+                                if (!is_special)
+                                        ctx->fills++;
+                        }
+                        }
                 }
 
-                /* Pipeline registers creation is a prepass before RA */
-                mir_create_pipeline_registers(ctx);
+                mir_squeeze_index(ctx);
 
                 g = allocate_registers(ctx, &spilled);
         } while(spilled && ((iter_count--) > 0));
 
+        /* We can simplify a bit after RA */
+
+        mir_foreach_block(ctx, block) {
+                midgard_opt_post_move_eliminate(ctx, block, g);
+        }
+
+        /* After RA finishes, we schedule all at once */
+
+        mir_foreach_block(ctx, block) {
+                schedule_block(ctx, block);
+        }
+
+        /* Finally, we create pipeline registers as a peephole pass after
+         * scheduling. This isn't totally optimal, since there are cases where
+         * the usage of pipeline registers can eliminate spills, but it does
+         * save some power */
+
+        mir_create_pipeline_registers(ctx);
+
         if (iter_count <= 0) {
                 fprintf(stderr, "panfrost: Gave up allocating registers, rendering will be incomplete\n");
                 assert(0);
         }
 
+        /* Report spilling information. spill_count is in 128-bit slots (vec4 x
+         * fp32), but tls_size is in bytes, so multiply by 16 */
+
+        ctx->tls_size = spill_count * 16;
+
         install_registers(ctx, g);
 }