}
static bool
-add_parallel_copy_to_end_of_block(nir_block *block, void *void_state)
+add_parallel_copy_to_end_of_block(nir_block *block, void *dead_ctx)
{
- struct from_ssa_state *state = void_state;
bool need_end_copy = false;
if (block->successors[0]) {
* (if there is one).
*/
nir_parallel_copy_instr *pcopy =
- nir_parallel_copy_instr_create(state->dead_ctx);
+ nir_parallel_copy_instr_create(dead_ctx);
nir_instr_insert(nir_after_block_before_jump(block), &pcopy->instr);
}
* time because of potential back-edges in the CFG.
*/
static bool
-isolate_phi_nodes_block(nir_block *block, void *void_state)
+isolate_phi_nodes_block(nir_block *block, void *dead_ctx)
{
- struct from_ssa_state *state = void_state;
-
nir_instr *last_phi_instr = NULL;
nir_foreach_instr(block, instr) {
/* Phi nodes only ever come at the start of a block */
* start of this block but after the phi nodes.
*/
nir_parallel_copy_instr *block_pcopy =
- nir_parallel_copy_instr_create(state->dead_ctx);
+ nir_parallel_copy_instr_create(dead_ctx);
nir_instr_insert_after(last_phi_instr, &block_pcopy->instr);
nir_foreach_instr(block, instr) {
get_parallel_copy_at_end_of_block(src->pred);
assert(pcopy);
- nir_parallel_copy_entry *entry = rzalloc(state->dead_ctx,
+ nir_parallel_copy_entry *entry = rzalloc(dead_ctx,
nir_parallel_copy_entry);
nir_ssa_dest_init(&pcopy->instr, &entry->dest,
phi->dest.ssa.num_components,
nir_src_for_ssa(&entry->dest.ssa));
}
- nir_parallel_copy_entry *entry = rzalloc(state->dead_ctx,
+ nir_parallel_copy_entry *entry = rzalloc(dead_ctx,
nir_parallel_copy_entry);
nir_ssa_dest_init(&block_pcopy->instr, &entry->dest,
phi->dest.ssa.num_components, phi->dest.ssa.bit_size,
}
static bool
-coalesce_phi_nodes_block(nir_block *block, void *void_state)
+coalesce_phi_nodes_block(nir_block *block, struct from_ssa_state *state)
{
- struct from_ssa_state *state = void_state;
-
nir_foreach_instr(block, instr) {
/* Phi nodes only ever come at the start of a block */
if (instr->type != nir_instr_type_phi)
}
static bool
-aggressive_coalesce_block(nir_block *block, void *void_state)
+aggressive_coalesce_block(nir_block *block, struct from_ssa_state *state)
{
- struct from_ssa_state *state = void_state;
-
nir_parallel_copy_instr *start_pcopy = NULL;
nir_foreach_instr(block, instr) {
/* Phi nodes only ever come at the start of a block */
* remove phi nodes.
*/
static bool
-resolve_registers_block(nir_block *block, void *void_state)
+resolve_registers_block(nir_block *block, struct from_ssa_state *state)
{
- struct from_ssa_state *state = void_state;
-
nir_foreach_instr_safe(block, instr) {
state->instr = instr;
nir_foreach_ssa_def(instr, rewrite_ssa_def, state);
* the end (or right before the final jump if it exists).
*/
static bool
-resolve_parallel_copies_block(nir_block *block, void *void_state)
+resolve_parallel_copies_block(nir_block *block, struct from_ssa_state *state)
{
- struct from_ssa_state *state = void_state;
-
/* At this point, we have removed all of the phi nodes. If a parallel
* copy existed right after the phi nodes in this block, it is now the
* first instruction.
state.merge_node_table = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
_mesa_key_pointer_equal);
- nir_foreach_block_call(impl, add_parallel_copy_to_end_of_block, &state);
- nir_foreach_block_call(impl, isolate_phi_nodes_block, &state);
+ nir_foreach_block(block, impl) {
+ add_parallel_copy_to_end_of_block(block, state.dead_ctx);
+ }
+
+ nir_foreach_block(block, impl) {
+ isolate_phi_nodes_block(block, state.dead_ctx);
+ }
/* Mark metadata as dirty before we ask for liveness analysis */
nir_metadata_preserve(impl, nir_metadata_block_index |
nir_metadata_require(impl, nir_metadata_live_ssa_defs |
nir_metadata_dominance);
- nir_foreach_block_call(impl, coalesce_phi_nodes_block, &state);
- nir_foreach_block_call(impl, aggressive_coalesce_block, &state);
+ nir_foreach_block(block, impl) {
+    coalesce_phi_nodes_block(block, &state);
+ }
+
+ nir_foreach_block(block, impl) {
+    aggressive_coalesce_block(block, &state);
+ }

- nir_foreach_block_call(impl, resolve_registers_block, &state);
+ nir_foreach_block(block, impl) {
+    resolve_registers_block(block, &state);
+ }

- nir_foreach_block_call(impl, resolve_parallel_copies_block, &state);
+ nir_foreach_block(block, impl) {
+    resolve_parallel_copies_block(block, &state);
+ }
nir_metadata_preserve(impl, nir_metadata_block_index |
nir_metadata_dominance);
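
For context on the conversion above, here is a minimal sketch contrasting the two iteration styles, using only helpers already visible in this patch (nir_foreach_block_call, the nir_foreach_block macro, and nir_foreach_instr). The count_instrs_* functions are hypothetical and exist purely for illustration; they are not part of the patch or of NIR.

#include "nir.h"

/* Old style: the walker owns the loop and invokes a callback once per
 * block, threading user data through an opaque void pointer.
 */
static bool
count_instrs_block(nir_block *block, void *void_state)
{
   unsigned *count = void_state;

   nir_foreach_instr(block, instr)
      (*count)++;

   return true; /* keep walking */
}

static unsigned
count_instrs_with_callback(nir_function_impl *impl)
{
   unsigned count = 0;
   nir_foreach_block_call(impl, count_instrs_block, &count);
   return count;
}

/* New style: the caller owns the loop via nir_foreach_block(), so a
 * per-block helper can take exactly the arguments it needs (as the patch
 * does with dead_ctx and struct from_ssa_state *) instead of a void *.
 */
static unsigned
count_instrs_with_macro(nir_function_impl *impl)
{
   unsigned count = 0;
   nir_foreach_block(block, impl) {
      nir_foreach_instr(block, instr)
         count++;
   }
   return count;
}

Beyond readability, the macro form lets each per-block helper drop its "struct from_ssa_state *state = void_state;" cast, which is exactly what the hunks above do.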