/* Forward declarations for the Midgard peephole/optimization passes.
 * The bool-returning passes report whether they made progress so the
 * driver loop can iterate to a fixed point; the void passes run once. */
bool midgard_opt_varying_projection(compiler_context *ctx, midgard_block *block);
bool midgard_opt_dead_code_eliminate(compiler_context *ctx, midgard_block *block);
bool midgard_opt_dead_move_eliminate(compiler_context *ctx, midgard_block *block);
void midgard_opt_post_move_eliminate(compiler_context *ctx, midgard_block *block, struct ra_graph *g);
void midgard_lower_invert(compiler_context *ctx, midgard_block *block);
bool midgard_opt_not_propagate(compiler_context *ctx, midgard_block *block);
return progress;
}
-
-/* An even further special case - to be run after RA runs but before
- * scheduling, eliminating moves that end up being useless even though they
- * appeared meaningful in the SSA. Part #2 of register coalescing. */
-
-void
-midgard_opt_post_move_eliminate(compiler_context *ctx, midgard_block *block, struct ra_graph *g)
-{
- mir_foreach_instr_in_block_safe(block, ins) {
- if (ins->type != TAG_ALU_4) continue;
- if (ins->compact_branch) continue;
- if (!OP_IS_MOVE(ins->alu.op)) continue;
- if (ins->dont_eliminate) continue;
-
- /* Check we're to the same place post-RA */
- unsigned iA = ins->dest;
- unsigned iB = ins->src[1];
-
- if ((iA == ~0) || (iB == ~0)) continue;
-
- unsigned A = iA >= SSA_FIXED_MINIMUM ?
- SSA_REG_FROM_FIXED(iA) :
- ra_get_node_reg(g, iA);
-
- unsigned B = iB >= SSA_FIXED_MINIMUM ?
- SSA_REG_FROM_FIXED(iB) :
- ra_get_node_reg(g, iB);
-
- if (A != B) continue;
-
- /* Check we're in the work zone. TODO: promoted
- * uniforms? */
- if (A >= 16) continue;
-
- /* Ensure there aren't side effects */
- if (mir_nontrivial_source2_mod(ins)) continue;
- if (mir_nontrivial_outmod(ins)) continue;
- if (ins->mask != 0xF) continue;
-
- /* We do need to rewrite to facilitate pipelining/scheduling */
- mir_rewrite_index(ctx, ins->src[1], ins->dest);
-
- /* We're good to go */
- mir_remove_instruction(ins);
-
- }
-
-}