/*
 * Copyright © 2020 Julian Winkler
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
25 #include "nir_builder.h"
28 /** Set of blocks which this path represents
30 * It's "reachable" not in the sense that these are all the nodes reachable
31 * through this path but in the sense that, when you see one of these
32 * blocks, you know you've reached this path.
34 struct set
*reachable
;
36 /** Fork in the path, if reachable->entries > 1 */
37 struct path_fork
*fork
;
43 nir_variable
*path_var
;
44 nir_ssa_def
*path_ssa
;
53 struct routes
*loop_backup
;
57 struct list_head link
;
59 /** Set of blocks at the current level */
62 /** Path for the next level */
65 /** Reach set from inside_outside if irreducable */
68 /** True if a skip region starts with this level */
71 /** True if a skip region ends with this level */
74 /** True if this level is irreducable */
79 * Sets all path variables to reach the target block via a fork
82 set_path_vars(nir_builder
*b
, struct path_fork
*fork
, nir_block
*target
)
85 for (int i
= 0; i
< 2; i
++) {
86 if (_mesa_set_search(fork
->paths
[i
].reachable
, target
)) {
88 nir_store_var(b
, fork
->path_var
, nir_imm_bool(b
, i
), 1);
90 assert(fork
->path_ssa
== NULL
);
91 fork
->path_ssa
= nir_imm_bool(b
, i
);
93 fork
= fork
->paths
[i
].fork
;
101 * Sets all path variables to reach the both target blocks via a fork.
102 * If the blocks are in different fork paths, the condition will be used.
103 * As the fork is already created, the then and else blocks may be swapped,
104 * in this case the condition is inverted
107 set_path_vars_cond(nir_builder
*b
, struct path_fork
*fork
, nir_src condition
,
108 nir_block
*then_block
, nir_block
*else_block
)
112 for (i
= 0; i
< 2; i
++) {
113 if (_mesa_set_search(fork
->paths
[i
].reachable
, then_block
)) {
114 if (_mesa_set_search(fork
->paths
[i
].reachable
, else_block
)) {
116 nir_store_var(b
, fork
->path_var
, nir_imm_bool(b
, i
), 1);
118 fork
->path_ssa
= nir_imm_bool(b
, i
);
119 fork
= fork
->paths
[i
].fork
;
123 assert(condition
.is_ssa
);
124 nir_ssa_def
*ssa_def
= condition
.ssa
;
125 assert(ssa_def
->bit_size
== 1);
126 assert(ssa_def
->num_components
== 1);
128 ssa_def
= nir_inot(b
, ssa_def
);
130 nir_store_var(b
, fork
->path_var
, ssa_def
, 1);
132 fork
->path_ssa
= ssa_def
;
133 set_path_vars(b
, fork
->paths
[i
].fork
, then_block
);
134 set_path_vars(b
, fork
->paths
[!i
].fork
, else_block
);
144 * Sets all path variables and places the right jump instruction to reach the
148 route_to(nir_builder
*b
, struct routes
*routing
, nir_block
*target
)
150 if (_mesa_set_search(routing
->regular
.reachable
, target
)) {
151 set_path_vars(b
, routing
->regular
.fork
, target
);
153 else if (_mesa_set_search(routing
->brk
.reachable
, target
)) {
154 set_path_vars(b
, routing
->brk
.fork
, target
);
155 nir_jump(b
, nir_jump_break
);
157 else if (_mesa_set_search(routing
->cont
.reachable
, target
)) {
158 set_path_vars(b
, routing
->cont
.fork
, target
);
159 nir_jump(b
, nir_jump_continue
);
162 assert(!target
->successors
[0]); /* target is endblock */
163 nir_jump(b
, nir_jump_return
);
168 * Sets path vars and places the right jump instr to reach one of the two
169 * target blocks based on the condition. If the targets need different jump
170 * istructions, they will be placed into an if else statement.
171 * This can happen if one target is the loop head
179 route_to_cond(nir_builder
*b
, struct routes
*routing
, nir_src condition
,
180 nir_block
*then_block
, nir_block
*else_block
)
182 if (_mesa_set_search(routing
->regular
.reachable
, then_block
)) {
183 if (_mesa_set_search(routing
->regular
.reachable
, else_block
)) {
184 set_path_vars_cond(b
, routing
->regular
.fork
, condition
,
185 then_block
, else_block
);
188 } else if (_mesa_set_search(routing
->brk
.reachable
, then_block
)) {
189 if (_mesa_set_search(routing
->brk
.reachable
, else_block
)) {
190 set_path_vars_cond(b
, routing
->brk
.fork
, condition
,
191 then_block
, else_block
);
192 nir_jump(b
, nir_jump_break
);
195 } else if (_mesa_set_search(routing
->cont
.reachable
, then_block
)) {
196 if (_mesa_set_search(routing
->cont
.reachable
, else_block
)) {
197 set_path_vars_cond(b
, routing
->cont
.fork
, condition
,
198 then_block
, else_block
);
199 nir_jump(b
, nir_jump_continue
);
204 /* then and else blocks are in different routes */
205 nir_push_if_src(b
, condition
);
206 route_to(b
, routing
, then_block
);
207 nir_push_else(b
, NULL
);
208 route_to(b
, routing
, else_block
);
213 * Merges the reachable sets of both fork subpaths into the forks entire
217 fork_reachable(struct path_fork
*fork
)
219 struct set
*reachable
= _mesa_set_clone(fork
->paths
[0].reachable
, fork
);
220 set_foreach(fork
->paths
[1].reachable
, entry
)
221 _mesa_set_add_pre_hashed(reachable
, entry
->hash
, entry
->key
);
226 * Modifies the routing to be the routing inside a loop. The old regular path
227 * becomes the new break path. The loop in path becomes the new regular and
229 * The lost routing information is stacked into the loop_backup stack.
230 * Also creates helper vars for multilevel loop jumping if needed.
231 * Also calls the nir builder to build the loop
234 loop_routing_start(struct routes
*routing
, nir_builder
*b
,
235 struct path loop_path
, struct set
*reach
,
238 struct routes
*routing_backup
= ralloc(mem_ctx
, struct routes
);
239 *routing_backup
= *routing
;
240 bool break_needed
= false;
241 bool continue_needed
= false;
243 set_foreach(reach
, entry
) {
244 if (_mesa_set_search(loop_path
.reachable
, entry
->key
))
246 if (_mesa_set_search(routing
->regular
.reachable
, entry
->key
))
248 if (_mesa_set_search(routing
->brk
.reachable
, entry
->key
)) {
252 assert(_mesa_set_search(routing
->cont
.reachable
, entry
->key
));
253 continue_needed
= true;
256 routing
->brk
= routing_backup
->regular
;
257 routing
->cont
= loop_path
;
258 routing
->regular
= loop_path
;
259 routing
->loop_backup
= routing_backup
;
262 struct path_fork
*fork
= ralloc(mem_ctx
, struct path_fork
);
264 fork
->path_var
= nir_local_variable_create(b
->impl
, glsl_bool_type(),
266 fork
->paths
[0] = routing
->brk
;
267 fork
->paths
[1] = routing_backup
->brk
;
268 routing
->brk
.fork
= fork
;
269 routing
->brk
.reachable
= fork_reachable(fork
);
271 if (continue_needed
) {
272 struct path_fork
*fork
= ralloc(mem_ctx
, struct path_fork
);
274 fork
->path_var
= nir_local_variable_create(b
->impl
, glsl_bool_type(),
276 fork
->paths
[0] = routing
->brk
;
277 fork
->paths
[1] = routing_backup
->cont
;
278 routing
->brk
.fork
= fork
;
279 routing
->brk
.reachable
= fork_reachable(fork
);
285 * Gets a forks condition as ssa def if the condition is inside a helper var,
286 * the variable will be read into an ssa def
289 fork_condition(nir_builder
*b
, struct path_fork
*fork
)
293 ret
= nir_load_var(b
, fork
->path_var
);
296 ret
= fork
->path_ssa
;
301 * Restores the routing after leaving a loop based on the loop_backup stack.
302 * Also handles multi level jump helper vars if existing and calls the nir
303 * builder to pop the nir loop
306 loop_routing_end(struct routes
*routing
, nir_builder
*b
)
308 struct routes
*routing_backup
= routing
->loop_backup
;
309 assert(routing
->cont
.fork
== routing
->regular
.fork
);
310 assert(routing
->cont
.reachable
== routing
->regular
.reachable
);
311 nir_pop_loop(b
, NULL
);
312 if (routing
->brk
.fork
&& routing
->brk
.fork
->paths
[1].reachable
==
313 routing_backup
->cont
.reachable
) {
314 assert(!(routing
->brk
.fork
->is_var
&&
315 strcmp(routing
->brk
.fork
->path_var
->name
, "path_continue")));
316 nir_push_if_src(b
, nir_src_for_ssa(
317 fork_condition(b
, routing
->brk
.fork
)));
318 nir_jump(b
, nir_jump_continue
);
320 routing
->brk
= routing
->brk
.fork
->paths
[0];
322 if (routing
->brk
.fork
&& routing
->brk
.fork
->paths
[1].reachable
==
323 routing_backup
->brk
.reachable
) {
324 assert(!(routing
->brk
.fork
->is_var
&&
325 strcmp(routing
->brk
.fork
->path_var
->name
, "path_break")));
326 nir_push_if_src(b
, nir_src_for_ssa(
327 fork_condition(b
, routing
->brk
.fork
)));
328 nir_jump(b
, nir_jump_break
);
330 routing
->brk
= routing
->brk
.fork
->paths
[0];
332 assert(routing
->brk
.fork
== routing_backup
->regular
.fork
);
333 assert(routing
->brk
.reachable
== routing_backup
->regular
.reachable
);
334 *routing
= *routing_backup
;
335 ralloc_free(routing_backup
);
339 * generates a list of all blocks dominated by the loop header, but the
340 * control flow can't go back to the loop header from the block.
341 * also generates a list of all blocks that can be reached from within the
349 * here B and C are directly dominated by A but only C can reach back to the
350 * loop head A. B will be added to the outside set and to the reach set.
351 * \param loop_heads set of loop heads. All blocks inside the loop will be
353 * \param outside all blocks directly outside the loop will be added
354 * \param reach all blocks reachable from the loop will be added
357 inside_outside(nir_block
*block
, struct set
*loop_heads
, struct set
*outside
,
358 struct set
*reach
, struct set
*brk_reachable
, void *mem_ctx
)
360 assert(_mesa_set_search(loop_heads
, block
));
361 struct set
*remaining
= _mesa_pointer_set_create(mem_ctx
);
362 for (int i
= 0; i
< block
->num_dom_children
; i
++) {
363 if (!_mesa_set_search(brk_reachable
, block
->dom_children
[i
]))
364 _mesa_set_add(remaining
, block
->dom_children
[i
]);
367 bool progress
= true;
368 while (remaining
->entries
&& progress
) {
370 set_foreach(remaining
, child_entry
) {
371 nir_block
*dom_child
= (nir_block
*) child_entry
->key
;
372 bool can_jump_back
= false;
373 set_foreach(dom_child
->dom_frontier
, entry
) {
374 if (entry
->key
== dom_child
)
376 if (_mesa_set_search_pre_hashed(remaining
, entry
->hash
,
378 can_jump_back
= true;
381 if (_mesa_set_search_pre_hashed(loop_heads
, entry
->hash
,
383 can_jump_back
= true;
387 if (!can_jump_back
) {
388 _mesa_set_add_pre_hashed(outside
, child_entry
->hash
,
390 _mesa_set_remove(remaining
, child_entry
);
396 /* Add everything remaining to loop_heads */
397 set_foreach(remaining
, entry
)
398 _mesa_set_add_pre_hashed(loop_heads
, entry
->hash
, entry
->key
);
400 /* Recurse for each remaining */
401 set_foreach(remaining
, entry
) {
402 inside_outside((nir_block
*) entry
->key
, loop_heads
, outside
, reach
,
403 brk_reachable
, mem_ctx
);
406 for (int i
= 0; i
< 2; i
++) {
407 if (block
->successors
[i
] && block
->successors
[i
]->successors
[0] &&
408 !_mesa_set_search(loop_heads
, block
->successors
[i
])) {
409 _mesa_set_add(reach
, block
->successors
[i
]);
415 * Gets a set of blocks organized into the same level by the organize_levels
416 * function and creates enough forks to be able to route to them.
417 * If the set only contains one block, the function has nothing to do.
418 * The set should almost never contain more than two blocks, but if so,
419 * then the function calls itself recursively
421 static struct path_fork
*
422 select_fork(struct set
*reachable
, nir_function_impl
*impl
, bool need_var
,
425 struct path_fork
*fork
= NULL
;
426 if (reachable
->entries
> 1) {
427 fork
= ralloc(mem_ctx
, struct path_fork
);
428 fork
->is_var
= need_var
;
430 fork
->path_var
= nir_local_variable_create(impl
, glsl_bool_type(),
432 fork
->paths
[0].reachable
= _mesa_pointer_set_create(fork
);
433 struct set_entry
*entry
= NULL
;
434 while (fork
->paths
[0].reachable
->entries
< reachable
->entries
/ 2 &&
435 (entry
= _mesa_set_next_entry(reachable
, entry
))) {
436 _mesa_set_add_pre_hashed(fork
->paths
[0].reachable
,
437 entry
->hash
, entry
->key
);
439 fork
->paths
[0].fork
= select_fork(fork
->paths
[0].reachable
, impl
,
441 fork
->paths
[1].reachable
= _mesa_pointer_set_create(fork
);
442 while ((entry
= _mesa_set_next_entry(reachable
, entry
))) {
443 _mesa_set_add_pre_hashed(fork
->paths
[1].reachable
,
444 entry
->hash
, entry
->key
);
446 fork
->paths
[1].fork
= select_fork(fork
->paths
[1].reachable
, impl
,
453 * gets called when the organize_levels functions fails to find blocks that
454 * can't be reached by the other remaining blocks. This means, at least two
455 * dominance sibling blocks can reach each other. So we have a multi entry
456 * loop. This function tries to find the smallest possible set of blocks that
457 * must be part of the multi entry loop.
464 * The function choses a random block as candidate. for example C
465 * The function checks which remaining blocks can reach C, in this case A.
466 * So A becomes the new candidate and C is removed from the result set.
468 * So B becomes the new candidate and A is removed from the set.
470 * A was an old candidate. So it is added to the set containing B.
471 * No other remaining blocks can reach A or B.
472 * So only A and B must be part of the multi entry loop.
475 handle_irreducible(struct set
*remaining
, struct strct_lvl
*curr_level
,
476 struct set
*brk_reachable
, void *mem_ctx
)
478 nir_block
*candidate
= (nir_block
*)
479 _mesa_set_next_entry(remaining
, NULL
)->key
;
480 struct set
*old_candidates
= _mesa_pointer_set_create(mem_ctx
);
482 _mesa_set_add(old_candidates
, candidate
);
483 nir_block
*to_be_added
= candidate
;
486 _mesa_set_clear(curr_level
->blocks
, NULL
);
487 while (to_be_added
) {
488 _mesa_set_add(curr_level
->blocks
, to_be_added
);
491 set_foreach(remaining
, entry
) {
492 nir_block
*remaining_block
= (nir_block
*) entry
->key
;
493 if (!_mesa_set_search(curr_level
->blocks
, remaining_block
)
494 && _mesa_set_intersects(remaining_block
->dom_frontier
,
495 curr_level
->blocks
)) {
496 if (_mesa_set_search(old_candidates
, remaining_block
))
497 to_be_added
= remaining_block
;
499 candidate
= remaining_block
;
505 _mesa_set_destroy(old_candidates
, NULL
);
506 old_candidates
= NULL
;
508 struct set
*loop_heads
= _mesa_set_clone(curr_level
->blocks
, curr_level
);
509 curr_level
->reach
= _mesa_pointer_set_create(curr_level
);
510 set_foreach(curr_level
->blocks
, entry
) {
511 _mesa_set_remove_key(remaining
, entry
->key
);
512 inside_outside((nir_block
*) entry
->key
, loop_heads
, remaining
,
513 curr_level
->reach
, brk_reachable
, mem_ctx
);
515 _mesa_set_destroy(loop_heads
, NULL
);
519 * organize a set of blocks into a list of levels. Where every level contains
520 * one or more blocks. So that every block is before all blocks it can reach.
521 * Also creates all path variables needed, for the control flow between the
523 * For example if the control flow looks like this:
531 * B, C, E and F are dominance children of A
532 * The level list should look like this:
533 * blocks irreducible conditional
534 * level 0 B, C false false
535 * level 1 E false true
536 * level 2 F false false
537 * The final structure should look like this:
544 * if (path_conditional) {
549 * \param levels uninitialized list
550 * \param is_dominated if true, no helper variables will be created for the
554 organize_levels(struct list_head
*levels
, struct set
*remaining
,
555 struct set
*reach
, struct routes
*routing
,
556 nir_function_impl
*impl
, bool is_domminated
, void *mem_ctx
)
558 /* blocks that can be reached by the remaining blocks */
559 struct set
*remaining_frontier
= _mesa_pointer_set_create(mem_ctx
);
561 /* targets of active skip path */
562 struct set
*skip_targets
= _mesa_pointer_set_create(mem_ctx
);
564 list_inithead(levels
);
565 while (remaining
->entries
) {
566 _mesa_set_clear(remaining_frontier
, NULL
);
567 set_foreach(remaining
, entry
) {
568 nir_block
*remain_block
= (nir_block
*) entry
->key
;
569 set_foreach(remain_block
->dom_frontier
, frontier_entry
) {
570 nir_block
*frontier
= (nir_block
*) frontier_entry
->key
;
571 if (frontier
!= remain_block
) {
572 _mesa_set_add(remaining_frontier
, frontier
);
577 struct strct_lvl
*curr_level
= rzalloc(mem_ctx
, struct strct_lvl
);
578 curr_level
->blocks
= _mesa_pointer_set_create(curr_level
);
579 set_foreach(remaining
, entry
) {
580 nir_block
*candidate
= (nir_block
*) entry
->key
;
581 if (!_mesa_set_search(remaining_frontier
, candidate
)) {
582 _mesa_set_add(curr_level
->blocks
, candidate
);
583 _mesa_set_remove_key(remaining
, candidate
);
587 curr_level
->irreducible
= !curr_level
->blocks
->entries
;
588 if (curr_level
->irreducible
) {
589 handle_irreducible(remaining
, curr_level
,
590 routing
->brk
.reachable
, mem_ctx
);
592 assert(curr_level
->blocks
->entries
);
594 struct strct_lvl
*prev_level
= NULL
;
595 if (!list_is_empty(levels
))
596 prev_level
= list_last_entry(levels
, struct strct_lvl
, link
);
598 set_foreach(skip_targets
, entry
) {
599 if (_mesa_set_search_pre_hashed(curr_level
->blocks
,
600 entry
->hash
, entry
->key
)) {
601 _mesa_set_remove(skip_targets
, entry
);
602 prev_level
->skip_end
= 1;
605 curr_level
->skip_start
= skip_targets
->entries
!= 0;
607 struct set
*prev_frontier
= NULL
;
609 prev_frontier
= reach
;
610 } else if (prev_level
->irreducible
) {
611 prev_frontier
= prev_level
->reach
;
613 set_foreach(curr_level
->blocks
, blocks_entry
) {
614 nir_block
*level_block
= (nir_block
*) blocks_entry
->key
;
615 if (!prev_frontier
) {
616 prev_frontier
= curr_level
->blocks
->entries
== 1 ?
617 level_block
->dom_frontier
:
618 _mesa_set_clone(level_block
->dom_frontier
, prev_level
);
620 set_foreach(level_block
->dom_frontier
, entry
)
621 _mesa_set_add_pre_hashed(prev_frontier
, entry
->hash
,
627 bool is_in_skip
= skip_targets
->entries
!= 0;
628 set_foreach(prev_frontier
, entry
) {
629 if (_mesa_set_search(remaining
, entry
->key
) ||
630 (_mesa_set_search(routing
->regular
.reachable
, entry
->key
) &&
631 !_mesa_set_search(routing
->brk
.reachable
, entry
->key
) &&
632 !_mesa_set_search(routing
->cont
.reachable
, entry
->key
))) {
633 _mesa_set_add_pre_hashed(skip_targets
, entry
->hash
, entry
->key
);
635 prev_level
->skip_end
= 1;
636 curr_level
->skip_start
= 1;
640 curr_level
->skip_end
= 0;
641 list_addtail(&curr_level
->link
, levels
);
644 if (skip_targets
->entries
)
645 list_last_entry(levels
, struct strct_lvl
, link
)->skip_end
= 1;
647 /* Iterate throught all levels reverse and create all the paths and forks */
648 struct path path_after_skip
;
650 list_for_each_entry_rev(struct strct_lvl
, level
, levels
, link
) {
651 bool need_var
= !(is_domminated
&& level
->link
.prev
== levels
);
652 level
->out_path
= routing
->regular
;
653 if (level
->skip_end
) {
654 path_after_skip
= routing
->regular
;
656 routing
->regular
.reachable
= level
->blocks
;
657 routing
->regular
.fork
= select_fork(routing
->regular
.reachable
, impl
,
659 if (level
->skip_start
) {
660 struct path_fork
*fork
= ralloc(mem_ctx
, struct path_fork
);
661 fork
->is_var
= need_var
;
663 fork
->path_var
= nir_local_variable_create(impl
, glsl_bool_type(),
665 fork
->paths
[0] = path_after_skip
;
666 fork
->paths
[1] = routing
->regular
;
667 routing
->regular
.fork
= fork
;
668 routing
->regular
.reachable
= fork_reachable(fork
);
674 nir_structurize(struct routes
*routing
, nir_builder
*b
,
675 nir_block
*block
, void *mem_ctx
);
678 * Places all the if else statements to select between all blocks in a select
682 select_blocks(struct routes
*routing
, nir_builder
*b
,
683 struct path in_path
, void *mem_ctx
)
686 nir_structurize(routing
, b
, (nir_block
*)
687 _mesa_set_next_entry(in_path
.reachable
, NULL
)->key
,
690 assert(!(in_path
.fork
->is_var
&&
691 strcmp(in_path
.fork
->path_var
->name
, "path_select")));
692 nir_push_if_src(b
, nir_src_for_ssa(fork_condition(b
, in_path
.fork
)));
693 select_blocks(routing
, b
, in_path
.fork
->paths
[1], mem_ctx
);
694 nir_push_else(b
, NULL
);
695 select_blocks(routing
, b
, in_path
.fork
->paths
[0], mem_ctx
);
701 * Builds the structurized nir code by the final level list.
704 plant_levels(struct list_head
*levels
, struct routes
*routing
,
705 nir_builder
*b
, void *mem_ctx
)
707 /* Place all dominated blocks and build the path forks */
708 list_for_each_entry(struct strct_lvl
, level
, levels
, link
) {
709 if (level
->skip_start
) {
710 assert(routing
->regular
.fork
);
711 assert(!(routing
->regular
.fork
->is_var
&& strcmp(
712 routing
->regular
.fork
->path_var
->name
, "path_conditional")));
713 nir_push_if_src(b
, nir_src_for_ssa(
714 fork_condition(b
, routing
->regular
.fork
)));
715 routing
->regular
= routing
->regular
.fork
->paths
[1];
717 struct path in_path
= routing
->regular
;
718 routing
->regular
= level
->out_path
;
719 if (level
->irreducible
)
720 loop_routing_start(routing
, b
, in_path
, level
->reach
, mem_ctx
);
721 select_blocks(routing
, b
, in_path
, mem_ctx
);
722 if (level
->irreducible
)
723 loop_routing_end(routing
, b
);
730 * builds the control flow of a block and all its dominance children
731 * \param routing the routing after the block and all dominated blocks
734 nir_structurize(struct routes
*routing
, nir_builder
*b
, nir_block
*block
,
737 struct set
*remaining
= _mesa_pointer_set_create(mem_ctx
);
738 for (int i
= 0; i
< block
->num_dom_children
; i
++) {
739 if (!_mesa_set_search(routing
->brk
.reachable
, block
->dom_children
[i
]))
740 _mesa_set_add(remaining
, block
->dom_children
[i
]);
743 /* If the block can reach back to itself, it is a loop head */
744 int is_looped
= _mesa_set_search(block
->dom_frontier
, block
) != NULL
;
745 struct list_head outside_levels
;
747 struct set
*loop_heads
= _mesa_pointer_set_create(mem_ctx
);
748 _mesa_set_add(loop_heads
, block
);
750 struct set
*outside
= _mesa_pointer_set_create(mem_ctx
);
751 struct set
*reach
= _mesa_pointer_set_create(mem_ctx
);
752 inside_outside(block
, loop_heads
, outside
, reach
,
753 routing
->brk
.reachable
, mem_ctx
);
755 set_foreach(outside
, entry
)
756 _mesa_set_remove_key(remaining
, entry
->key
);
758 organize_levels(&outside_levels
, outside
, reach
, routing
, b
->impl
,
761 struct path loop_path
= {
762 .reachable
= _mesa_pointer_set_create(mem_ctx
),
765 _mesa_set_add(loop_path
.reachable
, block
);
767 loop_routing_start(routing
, b
, loop_path
, reach
, mem_ctx
);
770 struct set
*reach
= _mesa_pointer_set_create(mem_ctx
);
771 if (block
->successors
[0]->successors
[0]) /* it is not the end_block */
772 _mesa_set_add(reach
, block
->successors
[0]);
773 if (block
->successors
[1] && block
->successors
[1]->successors
[0])
774 _mesa_set_add(reach
, block
->successors
[1]);
776 struct list_head levels
;
777 organize_levels(&levels
, remaining
, reach
, routing
, b
->impl
, true, mem_ctx
);
779 /* Push all instructions of this block, without the jump instr */
780 nir_jump_instr
*jump_instr
= NULL
;
781 nir_foreach_instr_safe(instr
, block
) {
782 if (instr
->type
== nir_instr_type_jump
) {
783 jump_instr
= nir_instr_as_jump(instr
);
786 nir_instr_remove(instr
);
787 nir_builder_instr_insert(b
, instr
);
790 /* Find path to the successor blocks */
791 if (jump_instr
->type
== nir_jump_goto_if
) {
792 route_to_cond(b
, routing
, jump_instr
->condition
,
793 jump_instr
->target
, jump_instr
->else_target
);
795 route_to(b
, routing
, block
->successors
[0]);
798 plant_levels(&levels
, routing
, b
, mem_ctx
);
800 loop_routing_end(routing
, b
);
801 plant_levels(&outside_levels
, routing
, b
, mem_ctx
);
806 nir_lower_goto_ifs_impl(nir_function_impl
*impl
)
808 if (impl
->structured
) {
809 nir_metadata_preserve(impl
, nir_metadata_all
);
813 nir_metadata_require(impl
, nir_metadata_dominance
);
816 nir_cf_extract(&cf_list
, nir_before_cf_list(&impl
->body
),
817 nir_after_cf_list(&impl
->body
));
819 /* From this point on, it's structured */
820 impl
->structured
= true;
823 nir_builder_init(&b
, impl
);
824 b
.cursor
= nir_before_block(nir_start_block(impl
));
826 void *mem_ctx
= ralloc_context(b
.shader
);
828 struct set
*end_set
= _mesa_pointer_set_create(mem_ctx
);
829 _mesa_set_add(end_set
, impl
->end_block
);
830 struct set
*empty_set
= _mesa_pointer_set_create(mem_ctx
);
832 nir_cf_node
*start_node
=
833 exec_node_data(nir_cf_node
, exec_list_get_head(&cf_list
.list
), node
);
834 nir_block
*start_block
= nir_cf_node_as_block(start_node
);
836 struct routes
*routing
= ralloc(mem_ctx
, struct routes
);
837 *routing
= (struct routes
) {
838 .regular
.reachable
= end_set
,
839 .brk
.reachable
= empty_set
,
840 .cont
.reachable
= empty_set
,
842 nir_structurize(routing
, &b
, start_block
, mem_ctx
);
843 assert(routing
->regular
.fork
== NULL
);
844 assert(routing
->brk
.fork
== NULL
);
845 assert(routing
->cont
.fork
== NULL
);
846 assert(routing
->brk
.reachable
== empty_set
);
847 assert(routing
->cont
.reachable
== empty_set
);
849 ralloc_free(mem_ctx
);
850 nir_cf_delete(&cf_list
);
852 nir_metadata_preserve(impl
, nir_metadata_none
);
858 nir_lower_goto_ifs(nir_shader
*shader
)
860 bool progress
= true;
862 nir_foreach_function(function
, shader
) {
863 if (function
->impl
&& nir_lower_goto_ifs_impl(function
->impl
))