        return ins;
}
-void
-schedule_program(compiler_context *ctx)
+/* If register allocation fails, find the best spill node and spill it to
+ * resolve the failure. This spill node could be a work register (spilling
+ * to thread local storage), but it could also simply be a special register
+ * that needs to spill to become a work register. */
+
+static void mir_spill_register(
+                compiler_context *ctx,
+                struct ra_graph *g,
+                unsigned *spill_count)
{
-        struct ra_graph *g = NULL;
-        bool spilled = false;
-        int iter_count = 1000; /* max iterations */
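+        /* Fresh spill temporaries get indices past the existing temps */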
+        unsigned spill_index = ctx->temp_count;
-        /* Number of 128-bit slots in memory we've spilled into */
-        unsigned spill_count = 0;
+        /* Our first step is to calculate spill cost to figure out the best
+         * spill node. All nodes are equal in spill cost, but we can't spill
+         * nodes written to from an unspill */
-        midgard_promote_uniforms(ctx, 8);
-
-        mir_foreach_block(ctx, block) {
-                midgard_pair_load_store(ctx, block);
+        for (unsigned i = 0; i < ctx->temp_count; ++i) {
+                ra_set_node_spill_cost(g, i, 1.0);
        }
-        /* Must be lowered right before RA */
-        mir_squeeze_index(ctx);
-        mir_lower_special_reads(ctx);
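+        /* These ld_int4 argument values identify an unspill; give its
+         * destination a negative cost so it is never picked */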
+        mir_foreach_instr_global(ctx, ins) {
+                if (ins->type != TAG_LOAD_STORE_4) continue;
+                if (ins->load_store.op != midgard_op_ld_int4) continue;
+                if (ins->load_store.arg_1 != 0xEA) continue;
+                if (ins->load_store.arg_2 != 0x1E) continue;
+                ra_set_node_spill_cost(g, ins->ssa_args.dest, -1.0);
+        }
-        /* Lowering can introduce some dead moves */
+        int spill_node = ra_get_best_spill_node(g);
-        mir_foreach_block(ctx, block) {
-                midgard_opt_dead_move_eliminate(ctx, block);
+        if (spill_node < 0) {
+                mir_print_shader(ctx);
+                assert(0);
        }
-        do {
-                /* If we spill, find the best spill node and spill it */
+        /* We have a spill node, so check the class. Work registers
+         * legitimately spill to TLS, but special registers just spill to work
+         * registers */
-                unsigned spill_index = ctx->temp_count;
-                if (g && spilled) {
-                        /* All nodes are equal in spill cost, but we can't
-                         * spill nodes written to from an unspill */
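+        /* The upper bits of the RA class select the REG_CLASS_* group; the
+         * low two bits appear to encode the size variant, hence the shift */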
+        unsigned class = ra_get_node_class(g, spill_node);
+        bool is_special = (class >> 2) != REG_CLASS_WORK;
+        bool is_special_w = (class >> 2) == REG_CLASS_TEXW;
-                        for (unsigned i = 0; i < ctx->temp_count; ++i) {
-                                ra_set_node_spill_cost(g, i, 1.0);
-                        }
+        /* Allocate TLS slot (maybe) */
+        unsigned spill_slot = !is_special ? (*spill_count)++ : 0;
+        midgard_instruction *spill_move = NULL;
-                        mir_foreach_instr_global(ctx, ins) {
-                                if (ins->type != TAG_LOAD_STORE_4) continue;
-                                if (ins->load_store.op != midgard_op_ld_int4) continue;
-                                if (ins->load_store.arg_1 != 0xEA) continue;
-                                if (ins->load_store.arg_2 != 0x1E) continue;
-                                ra_set_node_spill_cost(g, ins->ssa_args.dest, -1.0);
-                        }
+        /* For TLS, replace all stores to the spilled node. For
+         * special reads, just keep as-is; the class will be demoted
+         * implicitly. For special writes, spill to a work register */
+
+        if (!is_special || is_special_w) {
+                mir_foreach_instr_global_safe(ctx, ins) {
+                        if (ins->ssa_args.dest != spill_node) continue;
-                        int spill_node = ra_get_best_spill_node(g);
+                        midgard_instruction st;
-                        if (spill_node < 0) {
-                                mir_print_shader(ctx);
-                                assert(0);
+                        if (is_special_w) {
+                                spill_slot = spill_index++;
+                                st = v_mov(spill_node, blank_alu_src, spill_slot);
+                        } else {
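+                                /* Redirect the write to fixed r26, then
+                                 * store it out to the scratch slot */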
+                                ins->ssa_args.dest = SSA_FIXED_REGISTER(26);
+                                st = v_load_store_scratch(ins->ssa_args.dest, spill_slot, true, ins->mask);
                        }
-                        /* Check the class. Work registers legitimately spill
-                         * to TLS, but special registers just spill to work
-                         * registers */
-                        unsigned class = ra_get_node_class(g, spill_node);
-                        bool is_special = (class >> 2) != REG_CLASS_WORK;
-                        bool is_special_w = (class >> 2) == REG_CLASS_TEXW;
+                        spill_move = mir_insert_instruction_before(mir_next_op(ins), st);
-                        /* Allocate TLS slot (maybe) */
-                        unsigned spill_slot = !is_special ? spill_count++ : 0;
-                        midgard_instruction *spill_move = NULL;
+                        if (!is_special)
+                                ctx->spills++;
+                }
+        }
-                        /* For TLS, replace all stores to the spilled node. For
-                         * special reads, just keep as-is; the class will be demoted
-                         * implicitly. For special writes, spill to a work register */
+        /* Insert a load from TLS before the first consecutive
+         * use of the node, rewriting to use spilled indices to
+         * break up the live range. Or, for special, insert a
+         * move. Ironically the latter *increases* register
+         * pressure, but the two uses of the spilling mechanism
+         * are somewhat orthogonal. (special spilling is to use
+         * work registers to back special registers; TLS
+         * spilling is to use memory to back work registers) */
-                        if (!is_special || is_special_w) {
-                                mir_foreach_instr_global_safe(ctx, ins) {
-                                        if (ins->ssa_args.dest != spill_node) continue;
+        mir_foreach_block(ctx, block) {
+                bool consecutive_skip = false;
+                unsigned consecutive_index = 0;
-                                        midgard_instruction st;
+                mir_foreach_instr_in_block(block, ins) {
+                        /* We can't rewrite the move used to spill in the first place */
+                        if (ins == spill_move) continue;
-                                        if (is_special_w) {
-                                                spill_slot = spill_index++;
-                                                st = v_mov(spill_node, blank_alu_src, spill_slot);
-                                        } else {
-                                                ins->ssa_args.dest = SSA_FIXED_REGISTER(26);
-                                                st = v_load_store_scratch(ins->ssa_args.dest, spill_slot, true, ins->mask);
-                                        }
+                        if (!mir_has_arg(ins, spill_node)) {
+                                consecutive_skip = false;
+                                continue;
+                        }
+
+                        if (consecutive_skip) {
+                                /* Rewrite */
+                                mir_rewrite_index_src_single(ins, spill_node, consecutive_index);
+                                continue;
+                        }
+
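+                        /* First use in a run: materialize the value under a
+                         * fresh index to break up the live range */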
+                        if (!is_special_w) {
+                                consecutive_index = ++spill_index;
+
+                                midgard_instruction *before = ins;
-                                        spill_move = mir_insert_instruction_before(mir_next_op(ins), st);
+                                /* For a csel, go back one more so we don't break up the bundle */
+                                if (ins->type == TAG_ALU_4 && OP_IS_CSEL(ins->alu.op))
+                                        before = mir_prev_op(before);
-                                        if (!is_special)
-                                                ctx->spills++;
+                                midgard_instruction st;
+
+                                if (is_special) {
+                                        /* Move */
+                                        st = v_mov(spill_node, blank_alu_src, consecutive_index);
+                                } else {
+                                        /* TLS load */
+                                        st = v_load_store_scratch(consecutive_index, spill_slot, false, 0xF);
                                }
+
+                                mir_insert_instruction_before(before, st);
+                                // consecutive_skip = true;
+                        } else {
+                                /* Special writes already have their move spilled in */
+                                consecutive_index = spill_slot;
                        }
-                        /* Insert a load from TLS before the first consecutive
-                         * use of the node, rewriting to use spilled indices to
-                         * break up the live range. Or, for special, insert a
-                         * move. Ironically the latter *increases* register
-                         * pressure, but the two uses of the spilling mechanism
-                         * are somewhat orthogonal. (special spilling is to use
-                         * work registers to back special registers; TLS
-                         * spilling is to use memory to back work registers) */
-
-                        mir_foreach_block(ctx, block) {
-
-                                bool consecutive_skip = false;
-                                unsigned consecutive_index = 0;
-
-                                mir_foreach_instr_in_block(block, ins) {
-                                        /* We can't rewrite the move used to spill in the first place */
-                                        if (ins == spill_move) continue;
-
-                                        if (!mir_has_arg(ins, spill_node)) {
-                                                consecutive_skip = false;
-                                                continue;
-                                        }
-                                        if (consecutive_skip) {
-                                                /* Rewrite */
-                                                mir_rewrite_index_src_single(ins, spill_node, consecutive_index);
-                                                continue;
-                                        }
+                        /* Rewrite to use */
+                        mir_rewrite_index_src_single(ins, spill_node, consecutive_index);
-                                        if (!is_special_w) {
-                                                consecutive_index = ++spill_index;
+                        if (!is_special)
+                                ctx->fills++;
+                }
+        }
+}
-                                                midgard_instruction *before = ins;
+void
+schedule_program(compiler_context *ctx)
+{
+        struct ra_graph *g = NULL;
+        bool spilled = false;
+        int iter_count = 1000; /* max iterations */
-                                        /* For a csel, go back one more not to break up the bundle */
-                                        if (ins->type == TAG_ALU_4 && OP_IS_CSEL(ins->alu.op))
-                                                before = mir_prev_op(before);
+        /* Number of 128-bit slots in memory we've spilled into */
+        unsigned spill_count = 0;
-                                        midgard_instruction st;
+        midgard_promote_uniforms(ctx, 8);
-                                        if (is_special) {
-                                                /* Move */
-                                                st = v_mov(spill_node, blank_alu_src, consecutive_index);
-                                        } else {
-                                                /* TLS load */
-                                                st = v_load_store_scratch(consecutive_index, spill_slot, false, 0xF);
-                                        }
+        mir_foreach_block(ctx, block) {
+                midgard_pair_load_store(ctx, block);
+        }
-                                        mir_insert_instruction_before(before, st);
-                                        // consecutive_skip = true;
-                                } else {
-                                        /* Special writes already have their move spilled in */
-                                        consecutive_index = spill_slot;
-                                }
+        /* Must be lowered right before RA */
+        mir_squeeze_index(ctx);
+        mir_lower_special_reads(ctx);
+        /* Lowering can introduce some dead moves */
-                                /* Rewrite to use */
-                                mir_rewrite_index_src_single(ins, spill_node, consecutive_index);
+        mir_foreach_block(ctx, block) {
+                midgard_opt_dead_move_eliminate(ctx, block);
+        }
-                                if (!is_special)
-                                        ctx->fills++;
-                        }
-                }
-        }
+        do {
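+                /* The last attempt failed to allocate, so spill and retry */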
+                if (spilled)
+                        mir_spill_register(ctx, g, &spill_count);
                mir_squeeze_index(ctx);