2 * Copyright © 2020 Julian Winkler
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 #include "nir_builder.h"
28 #define NIR_LOWER_GOTO_IFS_DEBUG 0
31 /** Set of blocks which this path represents
33 * It's "reachable" not in the sense that these are all the nodes reachable
34 * through this path but in the sense that, when you see one of these
35 * blocks, you know you've reached this path.
37 struct set
*reachable
;
39 /** Fork in the path, if reachable->entries > 1 */
40 struct path_fork
*fork
;
46 nir_variable
*path_var
;
47 nir_ssa_def
*path_ssa
;
57 struct routes
*loop_backup
;
61 struct list_head link
;
63 /** Set of blocks at the current level */
66 /** Path for the next level */
69 /** Reach set from inside_outside if irreducable */
72 /** Outside set from inside_outside if irreducable */
75 /** True if a skip region starts with this level */
78 /** True if a skip region ends with this level */
81 /** True if this level is irreducable */
86 nir_block_ptr_cmp(const void *_a
, const void *_b
)
88 nir_block
*const *a
= _a
;
89 nir_block
*const *b
= _b
;
90 return (int)(*a
)->index
- (int)(*b
)->index
;
94 print_block_set(const struct set
*set
)
99 set_foreach(set
, entry
) {
102 printf("%u", ((nir_block
*)entry
->key
)->index
);
108 /** Return a sorted array of blocks for a set
110 * Hash set ordering is non-deterministic. We hash based on pointers and so,
111 * if any pointer ever changes from one run to another, the order of the set
112 * may change. Any time we're going to make decisions which may affect the
113 * final structure which may depend on ordering, we should first sort the
117 sorted_block_arr_for_set(const struct set
*block_set
, void *mem_ctx
)
119 const unsigned num_blocks
= block_set
->entries
;
120 nir_block
**block_arr
= ralloc_array(mem_ctx
, nir_block
*, num_blocks
);
122 set_foreach(block_set
, entry
)
123 block_arr
[i
++] = (nir_block
*)entry
->key
;
124 assert(i
== num_blocks
);
125 qsort(block_arr
, num_blocks
, sizeof(*block_arr
), nir_block_ptr_cmp
);
130 block_for_singular_set(const struct set
*block_set
)
132 assert(block_set
->entries
== 1);
133 return (nir_block
*)_mesa_set_next_entry(block_set
, NULL
)->key
;
137 * Sets all path variables to reach the target block via a fork
140 set_path_vars(nir_builder
*b
, struct path_fork
*fork
, nir_block
*target
)
143 for (int i
= 0; i
< 2; i
++) {
144 if (_mesa_set_search(fork
->paths
[i
].reachable
, target
)) {
146 nir_store_var(b
, fork
->path_var
, nir_imm_bool(b
, i
), 1);
148 assert(fork
->path_ssa
== NULL
);
149 fork
->path_ssa
= nir_imm_bool(b
, i
);
151 fork
= fork
->paths
[i
].fork
;
159 * Sets all path variables to reach the both target blocks via a fork.
160 * If the blocks are in different fork paths, the condition will be used.
161 * As the fork is already created, the then and else blocks may be swapped,
162 * in this case the condition is inverted
165 set_path_vars_cond(nir_builder
*b
, struct path_fork
*fork
, nir_src condition
,
166 nir_block
*then_block
, nir_block
*else_block
)
170 for (i
= 0; i
< 2; i
++) {
171 if (_mesa_set_search(fork
->paths
[i
].reachable
, then_block
)) {
172 if (_mesa_set_search(fork
->paths
[i
].reachable
, else_block
)) {
174 nir_store_var(b
, fork
->path_var
, nir_imm_bool(b
, i
), 1);
176 fork
->path_ssa
= nir_imm_bool(b
, i
);
177 fork
= fork
->paths
[i
].fork
;
181 assert(condition
.is_ssa
);
182 nir_ssa_def
*ssa_def
= condition
.ssa
;
183 assert(ssa_def
->bit_size
== 1);
184 assert(ssa_def
->num_components
== 1);
186 ssa_def
= nir_inot(b
, ssa_def
);
188 nir_store_var(b
, fork
->path_var
, ssa_def
, 1);
190 fork
->path_ssa
= ssa_def
;
191 set_path_vars(b
, fork
->paths
[i
].fork
, then_block
);
192 set_path_vars(b
, fork
->paths
[!i
].fork
, else_block
);
202 * Sets all path variables and places the right jump instruction to reach the
206 route_to(nir_builder
*b
, struct routes
*routing
, nir_block
*target
)
208 if (_mesa_set_search(routing
->regular
.reachable
, target
)) {
209 set_path_vars(b
, routing
->regular
.fork
, target
);
211 else if (_mesa_set_search(routing
->brk
.reachable
, target
)) {
212 set_path_vars(b
, routing
->brk
.fork
, target
);
213 nir_jump(b
, nir_jump_break
);
215 else if (_mesa_set_search(routing
->cont
.reachable
, target
)) {
216 set_path_vars(b
, routing
->cont
.fork
, target
);
217 nir_jump(b
, nir_jump_continue
);
220 assert(!target
->successors
[0]); /* target is endblock */
221 nir_jump(b
, nir_jump_return
);
226 * Sets path vars and places the right jump instr to reach one of the two
227 * target blocks based on the condition. If the targets need different jump
228 * istructions, they will be placed into an if else statement.
229 * This can happen if one target is the loop head
237 route_to_cond(nir_builder
*b
, struct routes
*routing
, nir_src condition
,
238 nir_block
*then_block
, nir_block
*else_block
)
240 if (_mesa_set_search(routing
->regular
.reachable
, then_block
)) {
241 if (_mesa_set_search(routing
->regular
.reachable
, else_block
)) {
242 set_path_vars_cond(b
, routing
->regular
.fork
, condition
,
243 then_block
, else_block
);
246 } else if (_mesa_set_search(routing
->brk
.reachable
, then_block
)) {
247 if (_mesa_set_search(routing
->brk
.reachable
, else_block
)) {
248 set_path_vars_cond(b
, routing
->brk
.fork
, condition
,
249 then_block
, else_block
);
250 nir_jump(b
, nir_jump_break
);
253 } else if (_mesa_set_search(routing
->cont
.reachable
, then_block
)) {
254 if (_mesa_set_search(routing
->cont
.reachable
, else_block
)) {
255 set_path_vars_cond(b
, routing
->cont
.fork
, condition
,
256 then_block
, else_block
);
257 nir_jump(b
, nir_jump_continue
);
262 /* then and else blocks are in different routes */
263 nir_push_if_src(b
, condition
);
264 route_to(b
, routing
, then_block
);
265 nir_push_else(b
, NULL
);
266 route_to(b
, routing
, else_block
);
271 * Merges the reachable sets of both fork subpaths into the forks entire
275 fork_reachable(struct path_fork
*fork
)
277 struct set
*reachable
= _mesa_set_clone(fork
->paths
[0].reachable
, fork
);
278 set_foreach(fork
->paths
[1].reachable
, entry
)
279 _mesa_set_add_pre_hashed(reachable
, entry
->hash
, entry
->key
);
284 * Modifies the routing to be the routing inside a loop. The old regular path
285 * becomes the new break path. The loop in path becomes the new regular and
287 * The lost routing information is stacked into the loop_backup stack.
288 * Also creates helper vars for multilevel loop jumping if needed.
289 * Also calls the nir builder to build the loop
292 loop_routing_start(struct routes
*routing
, nir_builder
*b
,
293 struct path loop_path
, struct set
*reach
,
294 struct set
*outside
, void *mem_ctx
)
296 if (NIR_LOWER_GOTO_IFS_DEBUG
) {
297 printf("loop_routing_start:\n");
299 print_block_set(reach
);
300 printf(" outside = ");
301 print_block_set(outside
);
302 printf(" loop_path.reachable = ");
303 print_block_set(loop_path
.reachable
);
304 printf(" routing->outside = ");
305 print_block_set(routing
->outside
);
306 printf(" routing->regular.reachable = ");
307 print_block_set(routing
->regular
.reachable
);
308 printf(" routing->brk.reachable = ");
309 print_block_set(routing
->brk
.reachable
);
310 printf(" routing->cont.reachable = ");
311 print_block_set(routing
->cont
.reachable
);
315 struct routes
*routing_backup
= ralloc(mem_ctx
, struct routes
);
316 *routing_backup
= *routing
;
317 bool break_needed
= false;
318 bool continue_needed
= false;
320 set_foreach(reach
, entry
) {
321 if (_mesa_set_search(loop_path
.reachable
, entry
->key
))
323 if (_mesa_set_search(routing
->regular
.reachable
, entry
->key
))
325 if (_mesa_set_search(routing
->brk
.reachable
, entry
->key
)) {
329 assert(_mesa_set_search(routing
->cont
.reachable
, entry
->key
));
330 continue_needed
= true;
333 if (outside
&& outside
->entries
) {
334 routing
->outside
= _mesa_set_clone(routing
->outside
, routing
);
335 set_foreach(outside
, entry
)
336 _mesa_set_add_pre_hashed(routing
->outside
, entry
->hash
, entry
->key
);
339 routing
->brk
= routing_backup
->regular
;
340 routing
->cont
= loop_path
;
341 routing
->regular
= loop_path
;
342 routing
->loop_backup
= routing_backup
;
345 struct path_fork
*fork
= ralloc(mem_ctx
, struct path_fork
);
347 fork
->path_var
= nir_local_variable_create(b
->impl
, glsl_bool_type(),
349 fork
->paths
[0] = routing
->brk
;
350 fork
->paths
[1] = routing_backup
->brk
;
351 routing
->brk
.fork
= fork
;
352 routing
->brk
.reachable
= fork_reachable(fork
);
354 if (continue_needed
) {
355 struct path_fork
*fork
= ralloc(mem_ctx
, struct path_fork
);
357 fork
->path_var
= nir_local_variable_create(b
->impl
, glsl_bool_type(),
359 fork
->paths
[0] = routing
->brk
;
360 fork
->paths
[1] = routing_backup
->cont
;
361 routing
->brk
.fork
= fork
;
362 routing
->brk
.reachable
= fork_reachable(fork
);
368 * Gets a forks condition as ssa def if the condition is inside a helper var,
369 * the variable will be read into an ssa def
372 fork_condition(nir_builder
*b
, struct path_fork
*fork
)
376 ret
= nir_load_var(b
, fork
->path_var
);
379 ret
= fork
->path_ssa
;
384 * Restores the routing after leaving a loop based on the loop_backup stack.
385 * Also handles multi level jump helper vars if existing and calls the nir
386 * builder to pop the nir loop
389 loop_routing_end(struct routes
*routing
, nir_builder
*b
)
391 struct routes
*routing_backup
= routing
->loop_backup
;
392 assert(routing
->cont
.fork
== routing
->regular
.fork
);
393 assert(routing
->cont
.reachable
== routing
->regular
.reachable
);
394 nir_pop_loop(b
, NULL
);
395 if (routing
->brk
.fork
&& routing
->brk
.fork
->paths
[1].reachable
==
396 routing_backup
->cont
.reachable
) {
397 assert(!(routing
->brk
.fork
->is_var
&&
398 strcmp(routing
->brk
.fork
->path_var
->name
, "path_continue")));
399 nir_push_if_src(b
, nir_src_for_ssa(
400 fork_condition(b
, routing
->brk
.fork
)));
401 nir_jump(b
, nir_jump_continue
);
403 routing
->brk
= routing
->brk
.fork
->paths
[0];
405 if (routing
->brk
.fork
&& routing
->brk
.fork
->paths
[1].reachable
==
406 routing_backup
->brk
.reachable
) {
407 assert(!(routing
->brk
.fork
->is_var
&&
408 strcmp(routing
->brk
.fork
->path_var
->name
, "path_break")));
409 nir_push_if_src(b
, nir_src_for_ssa(
410 fork_condition(b
, routing
->brk
.fork
)));
411 nir_jump(b
, nir_jump_break
);
413 routing
->brk
= routing
->brk
.fork
->paths
[0];
415 assert(routing
->brk
.fork
== routing_backup
->regular
.fork
);
416 assert(routing
->brk
.reachable
== routing_backup
->regular
.reachable
);
417 *routing
= *routing_backup
;
418 ralloc_free(routing_backup
);
422 * generates a list of all blocks dominated by the loop header, but the
423 * control flow can't go back to the loop header from the block.
424 * also generates a list of all blocks that can be reached from within the
432 * here B and C are directly dominated by A but only C can reach back to the
433 * loop head A. B will be added to the outside set and to the reach set.
434 * \param loop_heads set of loop heads. All blocks inside the loop will be
436 * \param outside all blocks directly outside the loop will be added
437 * \param reach all blocks reachable from the loop will be added
440 inside_outside(nir_block
*block
, struct set
*loop_heads
, struct set
*outside
,
441 struct set
*reach
, struct set
*brk_reachable
, void *mem_ctx
)
443 assert(_mesa_set_search(loop_heads
, block
));
444 struct set
*remaining
= _mesa_pointer_set_create(mem_ctx
);
445 for (int i
= 0; i
< block
->num_dom_children
; i
++) {
446 if (!_mesa_set_search(brk_reachable
, block
->dom_children
[i
]))
447 _mesa_set_add(remaining
, block
->dom_children
[i
]);
451 if (NIR_LOWER_GOTO_IFS_DEBUG
) {
452 printf("inside_outside(%u):\n", block
->index
);
453 printf(" loop_heads = ");
454 print_block_set(loop_heads
);
456 print_block_set(reach
);
457 printf(" brk_reach = ");
458 print_block_set(brk_reachable
);
459 printf(" remaining = ");
460 print_block_set(remaining
);
464 bool progress
= true;
465 while (remaining
->entries
&& progress
) {
467 set_foreach(remaining
, child_entry
) {
468 nir_block
*dom_child
= (nir_block
*) child_entry
->key
;
469 bool can_jump_back
= false;
470 set_foreach(dom_child
->dom_frontier
, entry
) {
471 if (entry
->key
== dom_child
)
473 if (_mesa_set_search_pre_hashed(remaining
, entry
->hash
,
475 can_jump_back
= true;
478 if (_mesa_set_search_pre_hashed(loop_heads
, entry
->hash
,
480 can_jump_back
= true;
484 if (!can_jump_back
) {
485 _mesa_set_add_pre_hashed(outside
, child_entry
->hash
,
487 _mesa_set_remove(remaining
, child_entry
);
493 /* Add everything remaining to loop_heads */
494 set_foreach(remaining
, entry
)
495 _mesa_set_add_pre_hashed(loop_heads
, entry
->hash
, entry
->key
);
497 /* Recurse for each remaining */
498 set_foreach(remaining
, entry
) {
499 inside_outside((nir_block
*) entry
->key
, loop_heads
, outside
, reach
,
500 brk_reachable
, mem_ctx
);
503 for (int i
= 0; i
< 2; i
++) {
504 if (block
->successors
[i
] && block
->successors
[i
]->successors
[0] &&
505 !_mesa_set_search(loop_heads
, block
->successors
[i
])) {
506 _mesa_set_add(reach
, block
->successors
[i
]);
510 if (NIR_LOWER_GOTO_IFS_DEBUG
) {
511 printf("outside(%u) = ", block
->index
);
512 print_block_set(outside
);
513 printf("reach(%u) = ", block
->index
);
514 print_block_set(reach
);
518 static struct path_fork
*
519 select_fork_recur(struct nir_block
**blocks
, unsigned start
, unsigned end
,
520 nir_function_impl
*impl
, bool need_var
, void *mem_ctx
)
522 if (start
== end
- 1)
525 struct path_fork
*fork
= ralloc(mem_ctx
, struct path_fork
);
526 fork
->is_var
= need_var
;
528 fork
->path_var
= nir_local_variable_create(impl
, glsl_bool_type(),
531 unsigned mid
= start
+ (end
- start
) / 2;
533 fork
->paths
[0].reachable
= _mesa_pointer_set_create(fork
);
534 for (unsigned i
= start
; i
< mid
; i
++)
535 _mesa_set_add(fork
->paths
[0].reachable
, blocks
[i
]);
536 fork
->paths
[0].fork
=
537 select_fork_recur(blocks
, start
, mid
, impl
, need_var
, mem_ctx
);
539 fork
->paths
[1].reachable
= _mesa_pointer_set_create(fork
);
540 for (unsigned i
= mid
; i
< end
; i
++)
541 _mesa_set_add(fork
->paths
[1].reachable
, blocks
[i
]);
542 fork
->paths
[1].fork
=
543 select_fork_recur(blocks
, mid
, end
, impl
, need_var
, mem_ctx
);
549 * Gets a set of blocks organized into the same level by the organize_levels
550 * function and creates enough forks to be able to route to them.
551 * If the set only contains one block, the function has nothing to do.
552 * The set should almost never contain more than two blocks, but if so,
553 * then the function calls itself recursively
555 static struct path_fork
*
556 select_fork(struct set
*reachable
, nir_function_impl
*impl
, bool need_var
,
559 assert(reachable
->entries
> 0);
560 if (reachable
->entries
<= 1)
563 /* Hash set ordering is non-deterministic. We're about to turn a set into
564 * a tree so we really want things to be in a deterministic ordering.
566 return select_fork_recur(sorted_block_arr_for_set(reachable
, mem_ctx
),
567 0, reachable
->entries
, impl
, need_var
, mem_ctx
);
571 * gets called when the organize_levels functions fails to find blocks that
572 * can't be reached by the other remaining blocks. This means, at least two
573 * dominance sibling blocks can reach each other. So we have a multi entry
574 * loop. This function tries to find the smallest possible set of blocks that
575 * must be part of the multi entry loop.
582 * The function choses a random block as candidate. for example C
583 * The function checks which remaining blocks can reach C, in this case A.
584 * So A becomes the new candidate and C is removed from the result set.
586 * So B becomes the new candidate and A is removed from the set.
588 * A was an old candidate. So it is added to the set containing B.
589 * No other remaining blocks can reach A or B.
590 * So only A and B must be part of the multi entry loop.
593 handle_irreducible(struct set
*remaining
, struct strct_lvl
*curr_level
,
594 struct set
*brk_reachable
, void *mem_ctx
)
596 nir_block
*candidate
= (nir_block
*)
597 _mesa_set_next_entry(remaining
, NULL
)->key
;
598 struct set
*old_candidates
= _mesa_pointer_set_create(mem_ctx
);
600 _mesa_set_add(old_candidates
, candidate
);
602 /* Start with just the candidate block */
603 _mesa_set_clear(curr_level
->blocks
, NULL
);
604 _mesa_set_add(curr_level
->blocks
, candidate
);
607 set_foreach(remaining
, entry
) {
608 nir_block
*remaining_block
= (nir_block
*) entry
->key
;
609 if (!_mesa_set_search(curr_level
->blocks
, remaining_block
) &&
610 _mesa_set_intersects(remaining_block
->dom_frontier
,
611 curr_level
->blocks
)) {
612 if (_mesa_set_search(old_candidates
, remaining_block
)) {
613 _mesa_set_add(curr_level
->blocks
, remaining_block
);
615 candidate
= remaining_block
;
621 _mesa_set_destroy(old_candidates
, NULL
);
622 old_candidates
= NULL
;
624 struct set
*loop_heads
= _mesa_set_clone(curr_level
->blocks
, curr_level
);
625 curr_level
->reach
= _mesa_pointer_set_create(curr_level
);
626 set_foreach(curr_level
->blocks
, entry
) {
627 _mesa_set_remove_key(remaining
, entry
->key
);
628 inside_outside((nir_block
*) entry
->key
, loop_heads
, remaining
,
629 curr_level
->reach
, brk_reachable
, mem_ctx
);
631 curr_level
->outside
= remaining
;
632 _mesa_set_destroy(loop_heads
, NULL
);
636 * organize a set of blocks into a list of levels. Where every level contains
637 * one or more blocks. So that every block is before all blocks it can reach.
638 * Also creates all path variables needed, for the control flow between the
640 * For example if the control flow looks like this:
648 * B, C, E and F are dominance children of A
649 * The level list should look like this:
650 * blocks irreducible conditional
651 * level 0 B, C false false
652 * level 1 E false true
653 * level 2 F false false
654 * The final structure should look like this:
661 * if (path_conditional) {
666 * \param levels uninitialized list
667 * \param is_dominated if true, no helper variables will be created for the
671 organize_levels(struct list_head
*levels
, struct set
*children
,
672 struct set
*reach
, struct routes
*routing
,
673 nir_function_impl
*impl
, bool is_domminated
, void *mem_ctx
)
675 if (NIR_LOWER_GOTO_IFS_DEBUG
) {
676 printf("organize_levels:\n");
677 printf(" children = ");
678 print_block_set(children
);
680 print_block_set(reach
);
683 /* Duplicate remaining because we're going to destroy it */
684 struct set
*remaining
= _mesa_set_clone(children
, mem_ctx
);
686 /* blocks that can be reached by the remaining blocks */
687 struct set
*remaining_frontier
= _mesa_pointer_set_create(mem_ctx
);
689 /* targets of active skip path */
690 struct set
*skip_targets
= _mesa_pointer_set_create(mem_ctx
);
692 list_inithead(levels
);
693 while (remaining
->entries
) {
694 _mesa_set_clear(remaining_frontier
, NULL
);
695 set_foreach(remaining
, entry
) {
696 nir_block
*remain_block
= (nir_block
*) entry
->key
;
697 set_foreach(remain_block
->dom_frontier
, frontier_entry
) {
698 nir_block
*frontier
= (nir_block
*) frontier_entry
->key
;
699 if (frontier
!= remain_block
) {
700 _mesa_set_add(remaining_frontier
, frontier
);
705 struct strct_lvl
*curr_level
= rzalloc(mem_ctx
, struct strct_lvl
);
706 curr_level
->blocks
= _mesa_pointer_set_create(curr_level
);
707 set_foreach(remaining
, entry
) {
708 nir_block
*candidate
= (nir_block
*) entry
->key
;
709 if (!_mesa_set_search(remaining_frontier
, candidate
)) {
710 _mesa_set_add(curr_level
->blocks
, candidate
);
711 _mesa_set_remove_key(remaining
, candidate
);
715 curr_level
->irreducible
= !curr_level
->blocks
->entries
;
716 if (curr_level
->irreducible
) {
717 handle_irreducible(remaining
, curr_level
,
718 routing
->brk
.reachable
, mem_ctx
);
720 assert(curr_level
->blocks
->entries
);
722 struct strct_lvl
*prev_level
= NULL
;
723 if (!list_is_empty(levels
))
724 prev_level
= list_last_entry(levels
, struct strct_lvl
, link
);
726 set_foreach(skip_targets
, entry
) {
727 if (_mesa_set_search_pre_hashed(curr_level
->blocks
,
728 entry
->hash
, entry
->key
)) {
729 _mesa_set_remove(skip_targets
, entry
);
730 prev_level
->skip_end
= 1;
733 curr_level
->skip_start
= skip_targets
->entries
!= 0;
735 struct set
*prev_frontier
= NULL
;
737 prev_frontier
= reach
;
738 } else if (prev_level
->irreducible
) {
739 prev_frontier
= prev_level
->reach
;
741 set_foreach(curr_level
->blocks
, blocks_entry
) {
742 nir_block
*level_block
= (nir_block
*) blocks_entry
->key
;
743 if (curr_level
->blocks
->entries
== 1) {
744 /* If we only have one block, there's no union operation and we
745 * can just use the one from the one block.
747 prev_frontier
= level_block
->dom_frontier
;
751 if (prev_frontier
== NULL
) {
753 _mesa_set_clone(level_block
->dom_frontier
, prev_level
);
755 set_foreach(level_block
->dom_frontier
, entry
)
756 _mesa_set_add_pre_hashed(prev_frontier
, entry
->hash
,
762 bool is_in_skip
= skip_targets
->entries
!= 0;
763 set_foreach(prev_frontier
, entry
) {
764 if (_mesa_set_search(remaining
, entry
->key
) ||
765 (_mesa_set_search(routing
->regular
.reachable
, entry
->key
) &&
766 !_mesa_set_search(routing
->brk
.reachable
, entry
->key
) &&
767 !_mesa_set_search(routing
->cont
.reachable
, entry
->key
))) {
768 _mesa_set_add_pre_hashed(skip_targets
, entry
->hash
, entry
->key
);
770 prev_level
->skip_end
= 1;
771 curr_level
->skip_start
= 1;
775 curr_level
->skip_end
= 0;
776 list_addtail(&curr_level
->link
, levels
);
779 if (NIR_LOWER_GOTO_IFS_DEBUG
) {
780 printf(" levels:\n");
781 list_for_each_entry(struct strct_lvl
, level
, levels
, link
) {
783 print_block_set(level
->blocks
);
788 if (skip_targets
->entries
)
789 list_last_entry(levels
, struct strct_lvl
, link
)->skip_end
= 1;
791 /* Iterate throught all levels reverse and create all the paths and forks */
792 struct path path_after_skip
;
794 list_for_each_entry_rev(struct strct_lvl
, level
, levels
, link
) {
795 bool need_var
= !(is_domminated
&& level
->link
.prev
== levels
);
796 level
->out_path
= routing
->regular
;
797 if (level
->skip_end
) {
798 path_after_skip
= routing
->regular
;
800 routing
->regular
.reachable
= level
->blocks
;
801 routing
->regular
.fork
= select_fork(routing
->regular
.reachable
, impl
,
803 if (level
->skip_start
) {
804 struct path_fork
*fork
= ralloc(mem_ctx
, struct path_fork
);
805 fork
->is_var
= need_var
;
807 fork
->path_var
= nir_local_variable_create(impl
, glsl_bool_type(),
809 fork
->paths
[0] = path_after_skip
;
810 fork
->paths
[1] = routing
->regular
;
811 routing
->regular
.fork
= fork
;
812 routing
->regular
.reachable
= fork_reachable(fork
);
818 nir_structurize(struct routes
*routing
, nir_builder
*b
,
819 nir_block
*block
, void *mem_ctx
);
822 * Places all the if else statements to select between all blocks in a select
826 select_blocks(struct routes
*routing
, nir_builder
*b
,
827 struct path in_path
, void *mem_ctx
)
830 nir_block
*block
= block_for_singular_set(in_path
.reachable
);
831 nir_structurize(routing
, b
, block
, mem_ctx
);
833 assert(!(in_path
.fork
->is_var
&&
834 strcmp(in_path
.fork
->path_var
->name
, "path_select")));
835 nir_push_if_src(b
, nir_src_for_ssa(fork_condition(b
, in_path
.fork
)));
836 select_blocks(routing
, b
, in_path
.fork
->paths
[1], mem_ctx
);
837 nir_push_else(b
, NULL
);
838 select_blocks(routing
, b
, in_path
.fork
->paths
[0], mem_ctx
);
844 * Builds the structurized nir code by the final level list.
847 plant_levels(struct list_head
*levels
, struct routes
*routing
,
848 nir_builder
*b
, void *mem_ctx
)
850 /* Place all dominated blocks and build the path forks */
851 list_for_each_entry(struct strct_lvl
, level
, levels
, link
) {
852 if (level
->skip_start
) {
853 assert(routing
->regular
.fork
);
854 assert(!(routing
->regular
.fork
->is_var
&& strcmp(
855 routing
->regular
.fork
->path_var
->name
, "path_conditional")));
856 nir_push_if_src(b
, nir_src_for_ssa(
857 fork_condition(b
, routing
->regular
.fork
)));
858 routing
->regular
= routing
->regular
.fork
->paths
[1];
860 struct path in_path
= routing
->regular
;
861 routing
->regular
= level
->out_path
;
862 if (level
->irreducible
) {
863 loop_routing_start(routing
, b
, in_path
, level
->reach
,
864 level
->outside
, mem_ctx
);
866 select_blocks(routing
, b
, in_path
, mem_ctx
);
867 if (level
->irreducible
)
868 loop_routing_end(routing
, b
);
875 * builds the control flow of a block and all its dominance children
876 * \param routing the routing after the block and all dominated blocks
879 nir_structurize(struct routes
*routing
, nir_builder
*b
, nir_block
*block
,
882 struct set
*remaining
= _mesa_pointer_set_create(mem_ctx
);
883 for (int i
= 0; i
< block
->num_dom_children
; i
++) {
884 if (!_mesa_set_search(routing
->outside
, block
->dom_children
[i
]))
885 _mesa_set_add(remaining
, block
->dom_children
[i
]);
888 /* If the block can reach back to itself, it is a loop head */
889 int is_looped
= _mesa_set_search(block
->dom_frontier
, block
) != NULL
;
890 struct list_head outside_levels
;
892 struct set
*loop_heads
= _mesa_pointer_set_create(mem_ctx
);
893 _mesa_set_add(loop_heads
, block
);
895 struct set
*outside
= _mesa_pointer_set_create(mem_ctx
);
896 struct set
*reach
= _mesa_pointer_set_create(mem_ctx
);
897 inside_outside(block
, loop_heads
, outside
, reach
,
898 routing
->brk
.reachable
, mem_ctx
);
900 set_foreach(outside
, entry
)
901 _mesa_set_remove_key(remaining
, entry
->key
);
903 organize_levels(&outside_levels
, outside
, reach
, routing
, b
->impl
,
906 struct path loop_path
= {
907 .reachable
= _mesa_pointer_set_create(mem_ctx
),
910 _mesa_set_add(loop_path
.reachable
, block
);
912 loop_routing_start(routing
, b
, loop_path
, reach
, outside
, mem_ctx
);
915 struct set
*reach
= _mesa_pointer_set_create(mem_ctx
);
916 if (block
->successors
[0]->successors
[0]) /* it is not the end_block */
917 _mesa_set_add(reach
, block
->successors
[0]);
918 if (block
->successors
[1] && block
->successors
[1]->successors
[0])
919 _mesa_set_add(reach
, block
->successors
[1]);
921 struct list_head levels
;
922 organize_levels(&levels
, remaining
, reach
, routing
, b
->impl
, true, mem_ctx
);
924 /* Push all instructions of this block, without the jump instr */
925 nir_jump_instr
*jump_instr
= NULL
;
926 nir_foreach_instr_safe(instr
, block
) {
927 if (instr
->type
== nir_instr_type_jump
) {
928 jump_instr
= nir_instr_as_jump(instr
);
931 nir_instr_remove(instr
);
932 nir_builder_instr_insert(b
, instr
);
935 /* Find path to the successor blocks */
936 if (jump_instr
->type
== nir_jump_goto_if
) {
937 route_to_cond(b
, routing
, jump_instr
->condition
,
938 jump_instr
->target
, jump_instr
->else_target
);
940 route_to(b
, routing
, block
->successors
[0]);
943 plant_levels(&levels
, routing
, b
, mem_ctx
);
945 loop_routing_end(routing
, b
);
946 plant_levels(&outside_levels
, routing
, b
, mem_ctx
);
951 nir_lower_goto_ifs_impl(nir_function_impl
*impl
)
953 if (impl
->structured
) {
954 nir_metadata_preserve(impl
, nir_metadata_all
);
958 nir_metadata_require(impl
, nir_metadata_dominance
);
961 nir_cf_extract(&cf_list
, nir_before_cf_list(&impl
->body
),
962 nir_after_cf_list(&impl
->body
));
964 /* From this point on, it's structured */
965 impl
->structured
= true;
968 nir_builder_init(&b
, impl
);
969 b
.cursor
= nir_before_block(nir_start_block(impl
));
971 void *mem_ctx
= ralloc_context(b
.shader
);
973 struct set
*end_set
= _mesa_pointer_set_create(mem_ctx
);
974 _mesa_set_add(end_set
, impl
->end_block
);
975 struct set
*empty_set
= _mesa_pointer_set_create(mem_ctx
);
977 nir_cf_node
*start_node
=
978 exec_node_data(nir_cf_node
, exec_list_get_head(&cf_list
.list
), node
);
979 nir_block
*start_block
= nir_cf_node_as_block(start_node
);
981 struct routes
*routing
= ralloc(mem_ctx
, struct routes
);
982 *routing
= (struct routes
) {
983 .outside
= empty_set
,
984 .regular
.reachable
= end_set
,
985 .brk
.reachable
= empty_set
,
986 .cont
.reachable
= empty_set
,
988 nir_structurize(routing
, &b
, start_block
, mem_ctx
);
989 assert(routing
->regular
.fork
== NULL
);
990 assert(routing
->brk
.fork
== NULL
);
991 assert(routing
->cont
.fork
== NULL
);
992 assert(routing
->brk
.reachable
== empty_set
);
993 assert(routing
->cont
.reachable
== empty_set
);
995 ralloc_free(mem_ctx
);
996 nir_cf_delete(&cf_list
);
998 nir_metadata_preserve(impl
, nir_metadata_none
);
1004 nir_lower_goto_ifs(nir_shader
*shader
)
1006 bool progress
= true;
1008 nir_foreach_function(function
, shader
) {
1009 if (function
->impl
&& nir_lower_goto_ifs_impl(function
->impl
))