/*
 * Copyright © 2020 Julian Winkler
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_vla.h"

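/* This pass rebuilds structured control flow (ifs and loops) from an
 * unstructured CFG whose blocks end in goto and goto_if jumps. It walks the
 * dominance tree, organizes each block's dominance children into levels, and
 * introduces boolean "path" variables to route control flow wherever the
 * structured constructs alone cannot.
 *
 * As a rough illustration (a sketch, not literal output of the pass), a
 * diamond CFG
 *
 *       A
 *      / \
 *     B   C
 *      \ /
 *       D
 *
 * where A ends in a goto_if and both B and C goto D, becomes approximately:
 *
 *    A
 *    if (condition) { B } else { C }
 *    D
 */
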
struct path {
   /** Set of blocks which this path represents
    *
    * It's "reachable" not in the sense that these are all the nodes reachable
    * through this path but in the sense that, when you see one of these
    * blocks, you know you've reached this path.
    */
   struct set *reachable;

   /** Fork in the path, if reachable->entries > 1 */
   struct path_fork *fork;
};

struct path_fork {
   bool is_var;
   union {
      nir_variable *path_var;
      nir_ssa_def *path_ssa;
   };
   struct path paths[2];
};

struct routes {
   struct set *outside;
   struct path regular;
   struct path brk;
   struct path cont;
   struct routes *loop_backup;
};
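
/* Illustrative sketch (hypothetical, not taken from a real shader): to route
 * control flow to one of three blocks {B, C, D}, a path may carry a fork
 * tree such as
 *
 *          fork0 (path_var p0)
 *         /                  \
 *       {B}                 fork1 (path_var p1)
 *                           /     \
 *                         {C}     {D}
 *
 * Writing p0 and p1 before jumping along the path selects which of the three
 * blocks the structured code executes next; p0 and p1 are hypothetical names
 * for the "path_select"-style booleans created below.
 */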

struct strct_lvl {
   struct list_head link;

   /** Set of blocks at the current level */
   struct set *blocks;

   /** Path for the next level */
   struct path out_path;

   /** Reach set from inside_outside if irreducible */
   struct set *reach;

   /** Outside set from inside_outside if irreducible */
   struct set *outside;

   /** True if a skip region starts with this level */
   bool skip_start;

   /** True if a skip region ends with this level */
   bool skip_end;

   /** True if this level is irreducible */
   bool irreducible;
};

static int
nir_block_ptr_cmp(const void *_a, const void *_b)
{
   nir_block *const *a = _a;
   nir_block *const *b = _b;
   return (int)(*a)->index - (int)(*b)->index;
}

/** Return a sorted array of blocks for a set
 *
 * Hash set ordering is non-deterministic. We hash based on pointers and so,
 * if any pointer ever changes from one run to another, the order of the set
 * may change. Any time we're going to make decisions which may affect the
 * final structure which may depend on ordering, we should first sort the
 * blocks.
 */
static nir_block **
sorted_block_arr_for_set(const struct set *block_set, void *mem_ctx)
{
   const unsigned num_blocks = block_set->entries;
   nir_block **block_arr = ralloc_array(mem_ctx, nir_block *, num_blocks);
   unsigned i = 0;
   set_foreach(block_set, entry)
      block_arr[i++] = (nir_block *)entry->key;
   assert(i == num_blocks);
   qsort(block_arr, num_blocks, sizeof(*block_arr), nir_block_ptr_cmp);
   return block_arr;
}

static nir_block *
block_for_singular_set(const struct set *block_set)
{
   assert(block_set->entries == 1);
   return (nir_block *)_mesa_set_next_entry(block_set, NULL)->key;
}

/**
 * Sets all path variables to reach the target block via a fork
 */
static void
set_path_vars(nir_builder *b, struct path_fork *fork, nir_block *target)
{
   while (fork) {
      for (int i = 0; i < 2; i++) {
         if (_mesa_set_search(fork->paths[i].reachable, target)) {
            if (fork->is_var) {
               nir_store_var(b, fork->path_var, nir_imm_bool(b, i), 1);
            } else {
               assert(fork->path_ssa == NULL);
               fork->path_ssa = nir_imm_bool(b, i);
            }
            fork = fork->paths[i].fork;
            break;
         }
      }
   }
}
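
/* For example (sketch, reusing the hypothetical fork tree above): routing to
 * D first finds D in fork0's paths[1].reachable, so it stores true to p0 and
 * descends into fork1, where it stores true to p1. Routing to B stores false
 * to p0 and stops, since path {B} has no further fork.
 */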

/**
 * Sets all path variables to reach both target blocks via a fork.
 * If the blocks are in different fork paths, the condition will be used.
 * As the fork is already created, the then and else blocks may be swapped;
 * in that case the condition is inverted.
 */
static void
set_path_vars_cond(nir_builder *b, struct path_fork *fork, nir_src condition,
                   nir_block *then_block, nir_block *else_block)
{
   int i;
   while (fork) {
      for (i = 0; i < 2; i++) {
         if (_mesa_set_search(fork->paths[i].reachable, then_block)) {
            if (_mesa_set_search(fork->paths[i].reachable, else_block)) {
               if (fork->is_var)
                  nir_store_var(b, fork->path_var, nir_imm_bool(b, i), 1);
               else
                  fork->path_ssa = nir_imm_bool(b, i);
               fork = fork->paths[i].fork;
               break;
            } else {
               assert(condition.is_ssa);
               nir_ssa_def *ssa_def = condition.ssa;
               assert(ssa_def->bit_size == 1);
               assert(ssa_def->num_components == 1);
               if (!i)
                  ssa_def = nir_inot(b, ssa_def);
               if (fork->is_var)
                  nir_store_var(b, fork->path_var, ssa_def, 1);
               else
                  fork->path_ssa = ssa_def;
               set_path_vars(b, fork->paths[i].fork, then_block);
               set_path_vars(b, fork->paths[!i].fork, else_block);
               return;
            }
         }
      }
      assert(i < 2);
   }
}

/**
 * Sets all path variables and places the right jump instruction to reach the
 * target block
 */
static void
route_to(nir_builder *b, struct routes *routing, nir_block *target)
{
   if (_mesa_set_search(routing->regular.reachable, target)) {
      set_path_vars(b, routing->regular.fork, target);
   } else if (_mesa_set_search(routing->brk.reachable, target)) {
      set_path_vars(b, routing->brk.fork, target);
      nir_jump(b, nir_jump_break);
   } else if (_mesa_set_search(routing->cont.reachable, target)) {
      set_path_vars(b, routing->cont.fork, target);
      nir_jump(b, nir_jump_continue);
   } else {
      assert(!target->successors[0]); /* target is endblock */
      nir_jump(b, nir_jump_return);
   }
}

/**
 * Sets path vars and places the right jump instr to reach one of the two
 * target blocks based on the condition. If the targets need different jump
 * instructions, they will be placed into an if-else statement.
 * This can happen if one target is the loop head:
 *    A __
 *    |   \
 *    B    |
 *    |\___/
 *    C
 */
static void
route_to_cond(nir_builder *b, struct routes *routing, nir_src condition,
              nir_block *then_block, nir_block *else_block)
{
   if (_mesa_set_search(routing->regular.reachable, then_block)) {
      if (_mesa_set_search(routing->regular.reachable, else_block)) {
         set_path_vars_cond(b, routing->regular.fork, condition,
                            then_block, else_block);
         return;
      }
   } else if (_mesa_set_search(routing->brk.reachable, then_block)) {
      if (_mesa_set_search(routing->brk.reachable, else_block)) {
         set_path_vars_cond(b, routing->brk.fork, condition,
                            then_block, else_block);
         nir_jump(b, nir_jump_break);
         return;
      }
   } else if (_mesa_set_search(routing->cont.reachable, then_block)) {
      if (_mesa_set_search(routing->cont.reachable, else_block)) {
         set_path_vars_cond(b, routing->cont.fork, condition,
                            then_block, else_block);
         nir_jump(b, nir_jump_continue);
         return;
      }
   }

   /* then and else blocks are in different routes */
   nir_push_if_src(b, condition);
   route_to(b, routing, then_block);
   nir_push_else(b, NULL);
   route_to(b, routing, else_block);
   nir_pop_if(b, NULL);
}
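
/* For example (sketch): if then_block lies on the continue path and
 * else_block on the break path, the fallback at the end of route_to_cond()
 * emits roughly
 *
 *    if (condition) {
 *       <set continue-path vars>
 *       continue;
 *    } else {
 *       <set break-path vars>
 *       break;
 *    }
 */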

/**
 * Merges the reachable sets of both fork subpaths into the fork's entire
 * reachable set
 */
static struct set *
fork_reachable(struct path_fork *fork)
{
   struct set *reachable = _mesa_set_clone(fork->paths[0].reachable, fork);
   set_foreach(fork->paths[1].reachable, entry)
      _mesa_set_add_pre_hashed(reachable, entry->hash, entry->key);
   return reachable;
}

/**
 * Modifies the routing to be the routing inside a loop. The old regular path
 * becomes the new break path. The path into the loop becomes the new regular
 * and continue path.
 * The lost routing information is stacked into the loop_backup stack.
 * Also creates helper vars for multilevel loop jumping if needed.
 * Also calls the nir builder to build the loop.
 */
static void
loop_routing_start(struct routes *routing, nir_builder *b,
                   struct path loop_path, struct set *reach,
                   struct set *outside, void *mem_ctx)
{
   struct routes *routing_backup = ralloc(mem_ctx, struct routes);
   *routing_backup = *routing;
   bool break_needed = false;
   bool continue_needed = false;

   /* Check whether any block reachable from inside the loop lies on the
    * outer break or continue path; those jumps will need helper forks.
    */
   set_foreach(reach, entry) {
      if (_mesa_set_search(loop_path.reachable, entry->key))
         continue;
      if (_mesa_set_search(routing->regular.reachable, entry->key))
         continue;
      if (_mesa_set_search(routing->brk.reachable, entry->key)) {
         break_needed = true;
         continue;
      }
      assert(_mesa_set_search(routing->cont.reachable, entry->key));
      continue_needed = true;
   }

   if (outside && outside->entries) {
      routing->outside = _mesa_set_clone(routing->outside, routing);
      set_foreach(outside, entry)
         _mesa_set_add_pre_hashed(routing->outside, entry->hash, entry->key);
   }

   routing->brk = routing_backup->regular;
   routing->cont = loop_path;
   routing->regular = loop_path;
   routing->loop_backup = routing_backup;

   if (break_needed) {
      struct path_fork *fork = ralloc(mem_ctx, struct path_fork);
      fork->is_var = true;
      fork->path_var = nir_local_variable_create(b->impl, glsl_bool_type(),
                                                 "path_break");
      fork->paths[0] = routing->brk;
      fork->paths[1] = routing_backup->brk;
      routing->brk.fork = fork;
      routing->brk.reachable = fork_reachable(fork);
   }
   if (continue_needed) {
      struct path_fork *fork = ralloc(mem_ctx, struct path_fork);
      fork->is_var = true;
      fork->path_var = nir_local_variable_create(b->impl, glsl_bool_type(),
                                                 "path_continue");
      fork->paths[0] = routing->brk;
      fork->paths[1] = routing_backup->cont;
      routing->brk.fork = fork;
      routing->brk.reachable = fork_reachable(fork);
   }
   nir_push_loop(b);
}
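
/* Sketch of the effect: after loop_routing_start(), a jump that targeted the
 * outer break path is routed as "set path_break = true; break;" inside the
 * new loop, and loop_routing_end() below plants the matching
 *
 *    if (path_break)
 *       break;
 *
 * after the inner loop, so a multi-level break becomes a chain of
 * single-level ones.
 */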

/**
 * Gets a fork's condition as an SSA def. If the condition is stored in a
 * helper var, the variable is read into an SSA def.
 */
static nir_ssa_def *
fork_condition(nir_builder *b, struct path_fork *fork)
{
   nir_ssa_def *ret;
   if (fork->is_var) {
      ret = nir_load_var(b, fork->path_var);
   } else {
      ret = fork->path_ssa;
   }
   return ret;
}

/**
 * Restores the routing after leaving a loop based on the loop_backup stack.
 * Also handles multi-level jump helper vars if they exist and calls the nir
 * builder to pop the nir loop
 */
static void
loop_routing_end(struct routes *routing, nir_builder *b)
{
   struct routes *routing_backup = routing->loop_backup;
   assert(routing->cont.fork == routing->regular.fork);
   assert(routing->cont.reachable == routing->regular.reachable);
   nir_pop_loop(b, NULL);
   if (routing->brk.fork && routing->brk.fork->paths[1].reachable ==
       routing_backup->cont.reachable) {
      assert(!(routing->brk.fork->is_var &&
               strcmp(routing->brk.fork->path_var->name, "path_continue")));
      nir_push_if_src(b, nir_src_for_ssa(
                         fork_condition(b, routing->brk.fork)));
      nir_jump(b, nir_jump_continue);
      nir_pop_if(b, NULL);
      routing->brk = routing->brk.fork->paths[0];
   }
   if (routing->brk.fork && routing->brk.fork->paths[1].reachable ==
       routing_backup->brk.reachable) {
      assert(!(routing->brk.fork->is_var &&
               strcmp(routing->brk.fork->path_var->name, "path_break")));
      nir_push_if_src(b, nir_src_for_ssa(
                         fork_condition(b, routing->brk.fork)));
      nir_jump(b, nir_jump_break);
      nir_pop_if(b, NULL);
      routing->brk = routing->brk.fork->paths[0];
   }
   assert(routing->brk.fork == routing_backup->regular.fork);
   assert(routing->brk.reachable == routing_backup->regular.reachable);
   *routing = *routing_backup;
   ralloc_free(routing_backup);
}
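
/* E.g. (sketch): an inner loop whose break path was forked with both helper
 * vars unwinds to roughly
 *
 *    loop {
 *       ...   // "set path_continue = true; break;" for an outer continue
 *    }
 *    if (path_continue)
 *       continue;
 *    if (path_break)
 *       break;
 */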

/**
 * Generates a set of all blocks dominated by the loop header from which
 * control flow cannot go back to the loop header.
 * Also generates a set of all blocks that can be reached from within the
 * loop.
 *
 *      | __
 *      A´  \
 *      | \  \
 *      B  C-´
 *     /
 *    D
 *
 * Here B and C are directly dominated by A but only C can reach back to the
 * loop head A. B will be added to the outside set and to the reach set.
 * \param loop_heads  set of loop heads. All blocks inside the loop will be
 *                    added to this set
 * \param outside     all blocks directly outside the loop will be added
 * \param reach       all blocks reachable from the loop will be added
 */
static void
inside_outside(nir_block *block, struct set *loop_heads, struct set *outside,
               struct set *reach, struct set *brk_reachable, void *mem_ctx)
{
   assert(_mesa_set_search(loop_heads, block));
   struct set *remaining = _mesa_pointer_set_create(mem_ctx);
   for (int i = 0; i < block->num_dom_children; i++) {
      if (!_mesa_set_search(brk_reachable, block->dom_children[i]))
         _mesa_set_add(remaining, block->dom_children[i]);
   }

   /* Fixed-point iteration: move dominance children that cannot jump back
    * into the loop from "remaining" to "outside".
    */
   bool progress = true;
   while (remaining->entries && progress) {
      progress = false;
      set_foreach(remaining, child_entry) {
         nir_block *dom_child = (nir_block *)child_entry->key;
         bool can_jump_back = false;
         set_foreach(dom_child->dom_frontier, entry) {
            if (entry->key == dom_child)
               continue;
            if (_mesa_set_search_pre_hashed(remaining, entry->hash,
                                            entry->key)) {
               can_jump_back = true;
               break;
            }
            if (_mesa_set_search_pre_hashed(loop_heads, entry->hash,
                                            entry->key)) {
               can_jump_back = true;
               break;
            }
         }
         if (!can_jump_back) {
            _mesa_set_add_pre_hashed(outside, child_entry->hash,
                                     child_entry->key);
            _mesa_set_remove(remaining, child_entry);
            progress = true;
         }
      }
   }

   /* Add everything remaining to loop_heads */
   set_foreach(remaining, entry)
      _mesa_set_add_pre_hashed(loop_heads, entry->hash, entry->key);

   /* Recurse for each remaining */
   set_foreach(remaining, entry) {
      inside_outside((nir_block *)entry->key, loop_heads, outside, reach,
                     brk_reachable, mem_ctx);
   }

   for (int i = 0; i < 2; i++) {
      if (block->successors[i] && block->successors[i]->successors[0] &&
          !_mesa_set_search(loop_heads, block->successors[i])) {
         _mesa_set_add(reach, block->successors[i]);
      }
   }
}

static struct path_fork *
select_fork_recur(struct nir_block **blocks, unsigned start, unsigned end,
                  nir_function_impl *impl, bool need_var, void *mem_ctx)
{
   if (start == end - 1)
      return NULL;

   struct path_fork *fork = ralloc(mem_ctx, struct path_fork);
   fork->is_var = need_var;
   if (need_var)
      fork->path_var = nir_local_variable_create(impl, glsl_bool_type(),
                                                 "path_select");

   unsigned mid = start + (end - start) / 2;

   fork->paths[0].reachable = _mesa_pointer_set_create(fork);
   for (unsigned i = start; i < mid; i++)
      _mesa_set_add(fork->paths[0].reachable, blocks[i]);
   fork->paths[0].fork =
      select_fork_recur(blocks, start, mid, impl, need_var, mem_ctx);

   fork->paths[1].reachable = _mesa_pointer_set_create(fork);
   for (unsigned i = mid; i < end; i++)
      _mesa_set_add(fork->paths[1].reachable, blocks[i]);
   fork->paths[1].fork =
      select_fork_recur(blocks, mid, end, impl, need_var, mem_ctx);

   return fork;
}
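
/* For a sorted array of four blocks [B0, B1, B2, B3], select_fork_recur()
 * builds a balanced binary decision tree (sketch):
 *
 *               fork
 *              /    \
 *          fork      fork
 *          /  \      /  \
 *       {B0} {B1} {B2} {B3}
 *
 * Each fork gets its own "path_select" variable when need_var is set, so
 * routing to any of the blocks costs at most two boolean writes here.
 */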

/**
 * Gets a set of blocks organized into the same level by the organize_levels
 * function and creates enough forks to be able to route to them.
 * If the set only contains one block, the function has nothing to do.
 * The set should almost never contain more than two blocks, but if it does,
 * the forks are built recursively by select_fork_recur.
 */
static struct path_fork *
select_fork(struct set *reachable, nir_function_impl *impl, bool need_var,
            void *mem_ctx)
{
   assert(reachable->entries > 0);
   if (reachable->entries <= 1)
      return NULL;

   /* Hash set ordering is non-deterministic. We're about to turn a set into
    * a tree so we really want things to be in a deterministic ordering.
    */
   return select_fork_recur(sorted_block_arr_for_set(reachable, mem_ctx),
                            0, reachable->entries, impl, need_var, mem_ctx);
}

/**
 * Gets called when the organize_levels function fails to find blocks that
 * can't be reached by the other remaining blocks. This means at least two
 * dominance sibling blocks can reach each other, so we have a multi-entry
 * loop. This function tries to find the smallest possible set of blocks that
 * must be part of the multi-entry loop.
 * Example cf:  |    |
 *              A<---B
 *             / \__,^ \
 *             \       /
 *              \     /
 *                C
 * The function chooses an arbitrary block as a candidate, for example C.
 * The function checks which remaining blocks can reach C, in this case A.
 * So A becomes the new candidate and C is removed from the result set.
 * B can reach A.
 * So B becomes the new candidate and A is removed from the set.
 * A can reach B.
 * A was an old candidate, so it is added to the set containing B.
 * No other remaining blocks can reach A or B.
 * So only A and B must be part of the multi-entry loop.
 */
static void
handle_irreducible(struct set *remaining, struct strct_lvl *curr_level,
                   struct set *brk_reachable, void *mem_ctx)
{
   nir_block *candidate = (nir_block *)
      _mesa_set_next_entry(remaining, NULL)->key;
   struct set *old_candidates = _mesa_pointer_set_create(mem_ctx);
   while (candidate) {
      _mesa_set_add(old_candidates, candidate);

      /* Start with just the candidate block */
      _mesa_set_clear(curr_level->blocks, NULL);
      _mesa_set_add(curr_level->blocks, candidate);

      candidate = NULL;
      set_foreach(remaining, entry) {
         nir_block *remaining_block = (nir_block *)entry->key;
         if (!_mesa_set_search(curr_level->blocks, remaining_block) &&
             _mesa_set_intersects(remaining_block->dom_frontier,
                                  curr_level->blocks)) {
            if (_mesa_set_search(old_candidates, remaining_block)) {
               _mesa_set_add(curr_level->blocks, remaining_block);
            } else {
               candidate = remaining_block;
               break;
            }
         }
      }
   }
   _mesa_set_destroy(old_candidates, NULL);
   old_candidates = NULL;

   struct set *loop_heads = _mesa_set_clone(curr_level->blocks, curr_level);
   curr_level->reach = _mesa_pointer_set_create(curr_level);
   set_foreach(curr_level->blocks, entry) {
      _mesa_set_remove_key(remaining, entry->key);
      inside_outside((nir_block *)entry->key, loop_heads, remaining,
                     curr_level->reach, brk_reachable, mem_ctx);
   }
   curr_level->outside = remaining;
   _mesa_set_destroy(loop_heads, NULL);
}
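
/* An irreducible level found here is later planted by plant_levels() as a
 * single structured loop: loop_routing_start() makes the level's blocks the
 * regular and continue path, and select_blocks() picks the actual entry via
 * the fork's path variables, roughly (sketch):
 *
 *    loop {
 *       if (path_select) { B ... } else { A ... }
 *    }
 */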

/**
 * Organizes a set of blocks into a list of levels, where every level contains
 * one or more blocks, so that every block comes before all blocks it can
 * reach. Also creates all the path variables needed for the control flow
 * between the blocks.
 * For example, if the control flow looks like this:
 *        A
 *       / \
 *      B   C
 *      |  / \
 *      E-´   |
 *       \   /
 *        F
 * B, C, E and F are dominance children of A.
 * The level list should look like this:
 *            blocks   irreducible   conditional
 *   level 0  B, C     false         false
 *   level 1  E        false         true
 *   level 2  F        false         false
 * The final structure should look like this:
 *    A
 *    if (path_select) {
 *       B
 *    } else {
 *       C
 *    }
 *    if (path_conditional) {
 *       E
 *    }
 *    F
 *
 * \param levels        uninitialized list
 * \param is_dominated  if true, no helper variables will be created for the
 *                      zeroth level
 */
static void
organize_levels(struct list_head *levels, struct set *children,
                struct set *reach, struct routes *routing,
                nir_function_impl *impl, bool is_dominated, void *mem_ctx)
{
   /* Duplicate remaining because we're going to destroy it */
   struct set *remaining = _mesa_set_clone(children, mem_ctx);

   /* blocks that can be reached by the remaining blocks */
   struct set *remaining_frontier = _mesa_pointer_set_create(mem_ctx);

   /* targets of active skip path */
   struct set *skip_targets = _mesa_pointer_set_create(mem_ctx);

   list_inithead(levels);
   while (remaining->entries) {
      _mesa_set_clear(remaining_frontier, NULL);
      set_foreach(remaining, entry) {
         nir_block *remain_block = (nir_block *)entry->key;
         set_foreach(remain_block->dom_frontier, frontier_entry) {
            nir_block *frontier = (nir_block *)frontier_entry->key;
            if (frontier != remain_block) {
               _mesa_set_add(remaining_frontier, frontier);
            }
         }
      }

      /* A level consists of all remaining blocks that no other remaining
       * block can reach.
       */
      struct strct_lvl *curr_level = rzalloc(mem_ctx, struct strct_lvl);
      curr_level->blocks = _mesa_pointer_set_create(curr_level);
      set_foreach(remaining, entry) {
         nir_block *candidate = (nir_block *)entry->key;
         if (!_mesa_set_search(remaining_frontier, candidate)) {
            _mesa_set_add(curr_level->blocks, candidate);
            _mesa_set_remove_key(remaining, candidate);
         }
      }

      curr_level->irreducible = !curr_level->blocks->entries;
      if (curr_level->irreducible) {
         handle_irreducible(remaining, curr_level,
                            routing->brk.reachable, mem_ctx);
      }
      assert(curr_level->blocks->entries);

      struct strct_lvl *prev_level = NULL;
      if (!list_is_empty(levels))
         prev_level = list_last_entry(levels, struct strct_lvl, link);

      set_foreach(skip_targets, entry) {
         if (_mesa_set_search_pre_hashed(curr_level->blocks,
                                         entry->hash, entry->key)) {
            _mesa_set_remove(skip_targets, entry);
            prev_level->skip_end = 1;
         }
      }
      curr_level->skip_start = skip_targets->entries != 0;

      struct set *prev_frontier = NULL;
      if (!prev_level) {
         prev_frontier = reach;
      } else if (prev_level->irreducible) {
         prev_frontier = prev_level->reach;
      } else {
         set_foreach(curr_level->blocks, blocks_entry) {
            nir_block *level_block = (nir_block *)blocks_entry->key;
            if (curr_level->blocks->entries == 1) {
               /* If we only have one block, there's no union operation and we
                * can just use the one from the one block.
                */
               prev_frontier = level_block->dom_frontier;
               break;
            }

            if (prev_frontier == NULL) {
               prev_frontier =
                  _mesa_set_clone(level_block->dom_frontier, prev_level);
            } else {
               set_foreach(level_block->dom_frontier, entry)
                  _mesa_set_add_pre_hashed(prev_frontier, entry->hash,
                                           entry->key);
            }
         }
      }

      bool is_in_skip = skip_targets->entries != 0;
      set_foreach(prev_frontier, entry) {
         if (_mesa_set_search(remaining, entry->key) ||
             (_mesa_set_search(routing->regular.reachable, entry->key) &&
              !_mesa_set_search(routing->brk.reachable, entry->key) &&
              !_mesa_set_search(routing->cont.reachable, entry->key))) {
            _mesa_set_add_pre_hashed(skip_targets, entry->hash, entry->key);
            if (is_in_skip)
               prev_level->skip_end = 1;
            curr_level->skip_start = 1;
         }
      }

      curr_level->skip_end = 0;
      list_addtail(&curr_level->link, levels);
   }

   if (skip_targets->entries)
      list_last_entry(levels, struct strct_lvl, link)->skip_end = 1;

   /* Iterate through all levels in reverse and create all the paths and
    * forks */
   struct path path_after_skip;

   list_for_each_entry_rev(struct strct_lvl, level, levels, link) {
      bool need_var = !(is_dominated && level->link.prev == levels);
      level->out_path = routing->regular;
      if (level->skip_end) {
         path_after_skip = routing->regular;
      }
      routing->regular.reachable = level->blocks;
      routing->regular.fork = select_fork(routing->regular.reachable, impl,
                                          need_var, mem_ctx);
      if (level->skip_start) {
         struct path_fork *fork = ralloc(mem_ctx, struct path_fork);
         fork->is_var = need_var;
         if (need_var)
            fork->path_var = nir_local_variable_create(impl, glsl_bool_type(),
                                                       "path_conditional");
         fork->paths[0] = path_after_skip;
         fork->paths[1] = routing->regular;
         routing->regular.fork = fork;
         routing->regular.reachable = fork_reachable(fork);
      }
   }
}

static void
nir_structurize(struct routes *routing, nir_builder *b,
                nir_block *block, void *mem_ctx);

/**
 * Places all the if-else statements to select between all blocks in a select
 * path
 */
static void
select_blocks(struct routes *routing, nir_builder *b,
              struct path in_path, void *mem_ctx)
{
   if (!in_path.fork) {
      nir_block *block = block_for_singular_set(in_path.reachable);
      nir_structurize(routing, b, block, mem_ctx);
   } else {
      assert(!(in_path.fork->is_var &&
               strcmp(in_path.fork->path_var->name, "path_select")));
      nir_push_if_src(b, nir_src_for_ssa(fork_condition(b, in_path.fork)));
      select_blocks(routing, b, in_path.fork->paths[1], mem_ctx);
      nir_push_else(b, NULL);
      select_blocks(routing, b, in_path.fork->paths[0], mem_ctx);
      nir_pop_if(b, NULL);
   }
}
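
/* E.g. (sketch): for a level {B, C} whose fork carries the "path_select"
 * variable, select_blocks() emits
 *
 *    if (path_select) {
 *       <code for C>      // paths[1]
 *    } else {
 *       <code for B>      // paths[0]
 *    }
 */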

/**
 * Builds the structurized nir code from the final level list.
 */
static void
plant_levels(struct list_head *levels, struct routes *routing,
             nir_builder *b, void *mem_ctx)
{
   /* Place all dominated blocks and build the path forks */
   list_for_each_entry(struct strct_lvl, level, levels, link) {
      if (level->skip_start) {
         assert(routing->regular.fork);
         assert(!(routing->regular.fork->is_var && strcmp(
                     routing->regular.fork->path_var->name,
                     "path_conditional")));
         nir_push_if_src(b, nir_src_for_ssa(
                            fork_condition(b, routing->regular.fork)));
         routing->regular = routing->regular.fork->paths[1];
      }
      struct path in_path = routing->regular;
      routing->regular = level->out_path;
      if (level->irreducible) {
         loop_routing_start(routing, b, in_path, level->reach,
                            level->outside, mem_ctx);
      }
      select_blocks(routing, b, in_path, mem_ctx);
      if (level->irreducible)
         loop_routing_end(routing, b);
      if (level->skip_end)
         nir_pop_if(b, NULL);
   }
}

/**
 * Builds the control flow of a block and all its dominance children.
 * \param routing  the routing after the block and all dominated blocks
 */
static void
nir_structurize(struct routes *routing, nir_builder *b, nir_block *block,
                void *mem_ctx)
{
   struct set *remaining = _mesa_pointer_set_create(mem_ctx);
   for (int i = 0; i < block->num_dom_children; i++) {
      if (!_mesa_set_search(routing->outside, block->dom_children[i]))
         _mesa_set_add(remaining, block->dom_children[i]);
   }

   /* If the block can reach back to itself, it is a loop head */
   int is_looped = _mesa_set_search(block->dom_frontier, block) != NULL;
   struct list_head outside_levels;
   if (is_looped) {
      struct set *loop_heads = _mesa_pointer_set_create(mem_ctx);
      _mesa_set_add(loop_heads, block);

      struct set *outside = _mesa_pointer_set_create(mem_ctx);
      struct set *reach = _mesa_pointer_set_create(mem_ctx);
      inside_outside(block, loop_heads, outside, reach,
                     routing->brk.reachable, mem_ctx);

      set_foreach(outside, entry)
         _mesa_set_remove_key(remaining, entry->key);

      organize_levels(&outside_levels, outside, reach, routing, b->impl,
                      false, mem_ctx);

      struct path loop_path = {
         .reachable = _mesa_pointer_set_create(mem_ctx),
         .fork = NULL,
      };
      _mesa_set_add(loop_path.reachable, block);

      loop_routing_start(routing, b, loop_path, reach, outside, mem_ctx);
   }

   struct set *reach = _mesa_pointer_set_create(mem_ctx);
   if (block->successors[0]->successors[0]) /* it is not the end_block */
      _mesa_set_add(reach, block->successors[0]);
   if (block->successors[1] && block->successors[1]->successors[0])
      _mesa_set_add(reach, block->successors[1]);

   struct list_head levels;
   organize_levels(&levels, remaining, reach, routing, b->impl, true, mem_ctx);

   /* Push all instructions of this block, without the jump instr */
   nir_jump_instr *jump_instr = NULL;
   nir_foreach_instr_safe(instr, block) {
      if (instr->type == nir_instr_type_jump) {
         jump_instr = nir_instr_as_jump(instr);
         break;
      }
      nir_instr_remove(instr);
      nir_builder_instr_insert(b, instr);
   }

   /* Find path to the successor blocks */
   if (jump_instr->type == nir_jump_goto_if) {
      route_to_cond(b, routing, jump_instr->condition,
                    jump_instr->target, jump_instr->else_target);
   } else {
      route_to(b, routing, block->successors[0]);
   }

   plant_levels(&levels, routing, b, mem_ctx);
   if (is_looped) {
      loop_routing_end(routing, b);
      plant_levels(&outside_levels, routing, b, mem_ctx);
   }
}

static bool
nir_lower_goto_ifs_impl(nir_function_impl *impl)
{
   if (impl->structured) {
      nir_metadata_preserve(impl, nir_metadata_all);
      return false;
   }

   nir_metadata_require(impl, nir_metadata_dominance);

   nir_cf_list cf_list;
   nir_cf_extract(&cf_list, nir_before_cf_list(&impl->body),
                  nir_after_cf_list(&impl->body));

   /* From this point on, it's structured */
   impl->structured = true;

   nir_builder b;
   nir_builder_init(&b, impl);
   b.cursor = nir_before_block(nir_start_block(impl));

   void *mem_ctx = ralloc_context(b.shader);

   struct set *end_set = _mesa_pointer_set_create(mem_ctx);
   _mesa_set_add(end_set, impl->end_block);
   struct set *empty_set = _mesa_pointer_set_create(mem_ctx);

   nir_cf_node *start_node =
      exec_node_data(nir_cf_node, exec_list_get_head(&cf_list.list), node);
   nir_block *start_block = nir_cf_node_as_block(start_node);

   struct routes *routing = ralloc(mem_ctx, struct routes);
   *routing = (struct routes) {
      .outside = empty_set,
      .regular.reachable = end_set,
      .brk.reachable = empty_set,
      .cont.reachable = empty_set,
   };
   nir_structurize(routing, &b, start_block, mem_ctx);
   assert(routing->regular.fork == NULL);
   assert(routing->brk.fork == NULL);
   assert(routing->cont.fork == NULL);
   assert(routing->brk.reachable == empty_set);
   assert(routing->cont.reachable == empty_set);

   ralloc_free(mem_ctx);
   nir_cf_delete(&cf_list);

   nir_metadata_preserve(impl, nir_metadata_none);

   return true;
}
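
/* Typical usage (a sketch; the exact call site depends on the driver): a
 * front-end that emits an unstructured CFG runs this pass before any pass
 * that requires structured control flow, e.g.
 *
 *    NIR_PASS(progress, shader, nir_lower_goto_ifs);
 */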

bool
nir_lower_goto_ifs(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl && nir_lower_goto_ifs_impl(function->impl))
         progress = true;
   }

   return progress;
}