bool midgard_opt_varying_projection(compiler_context *ctx, midgard_block *block);
bool midgard_opt_dead_code_eliminate(compiler_context *ctx, midgard_block *block);
bool midgard_opt_dead_move_eliminate(compiler_context *ctx, midgard_block *block);
+void midgard_opt_post_move_eliminate(compiler_context *ctx, midgard_block *block, struct ra_graph *g);
#endif
return progress;
}
+
+/* A further special case - run after RA but before scheduling, eliminating
+ * moves that become useless post-allocation even though they appeared
+ * meaningful in SSA form. Part #2 of register coalescing. */
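+/* For instance, suppose RA happens to place both the destination and the
+ * source of a copy in the same work register (say r4): the resulting
+ * "mov r4, r4" is a no-op and can simply be dropped. */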
+
+void
+midgard_opt_post_move_eliminate(compiler_context *ctx, midgard_block *block, struct ra_graph *g)
+{
+ mir_foreach_instr_in_block_safe(block, ins) {
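+ /* Only plain (non-branch) ALU moves qualify; everything else is
+ * left alone */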
+ if (ins->type != TAG_ALU_4) continue;
+ if (ins->compact_branch) continue;
+ if (!OP_IS_MOVE(ins->alu.op)) continue;
+
+ /* Inline constants carry no source register to coalesce, and src1
+ * would not name a valid RA node below, so skip them up front */
+ if (ins->ssa_args.inline_constant) continue;
+
+ /* Check both operands map to the same register post-RA (signed, so
+ * negative non-register arguments are filtered out below) */
+ int iA = ins->ssa_args.dest;
+ int iB = ins->ssa_args.src1;
+
+ if ((iA < 0) || (iB < 0)) continue;
+
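+ /* Nodes at or above SSA_FIXED_MINIMUM encode a physical register
+ * directly; anything else takes its assignment from the RA graph */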
+ unsigned A = iA >= SSA_FIXED_MINIMUM ?
+ SSA_REG_FROM_FIXED(iA) :
+ ra_get_node_reg(g, iA);
+
+ unsigned B = iB >= SSA_FIXED_MINIMUM ?
+ SSA_REG_FROM_FIXED(iB) :
+ ra_get_node_reg(g, iB);
+
+ if (A != B) continue;
+
+ /* Only coalesce within the work register zone (registers 0-15).
+ * TODO: promoted uniforms? */
+ if (A >= 16) continue;
+
+ /* Ensure there aren't side effects */
+ if (mir_nontrivial_source2_mod(ins)) continue;
+ if (mir_nontrivial_outmod(ins)) continue;
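+ /* A partial write mask would leave the other components of the
+ * destination intact, so the move is not a plain copy */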
+ if (ins->mask != 0xF) continue;
+
+ /* Rewrite writers of src1 so they write dest directly, keeping the
+ * graph sane for pipeline register creation (TODO: is this the best
+ * approach?) */
+ mir_rewrite_index_dst(ctx, ins->ssa_args.src1, ins->ssa_args.dest);
+
+ /* The move is now redundant, so drop it */
+ mir_remove_instruction(ins);
+ }
+}
g = allocate_registers(ctx, &spilled);
} while(spilled && ((iter_count--) > 0));
+ /* RA can leave moves whose source and destination ended up in the
+ * same register; clean those up before scheduling */
+
+ mir_foreach_block(ctx, block) {
+ midgard_opt_post_move_eliminate(ctx, block, g);
+ }
+
/* After RA finishes, we schedule all at once */
mir_foreach_block(ctx, block) {