nir: Lower unstructured goto/goto_if control flow to structured ifs and loops
[mesa.git] / src / compiler / nir / nir_lower_goto_ifs.c
1 /*
2 * Copyright © 2020 Julian Winkler
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "nir.h"
25 #include "nir_builder.h"
26 #include "nir_vla.h"
27
28 #define NIR_LOWER_GOTO_IFS_DEBUG 0
29
/** A destination that structured control flow can be routed to.
 *
 * If the path can lead to more than one block, a tree of forks
 * (struct path_fork) selects which concrete block is reached.
 */
struct path {
   /** Set of blocks which this path represents
    *
    * It's "reachable" not in the sense that these are all the nodes reachable
    * through this path but in the sense that, when you see one of these
    * blocks, you know you've reached this path.
    */
   struct set *reachable;

   /** Fork in the path, if reachable->entries > 1 */
   struct path_fork *fork;
};
42
/** Binary decision node selecting between two sub-paths.
 *
 * The deciding boolean lives either in a helper variable (is_var == true,
 * written with nir_store_var) or directly as a 1-bit SSA def.
 */
struct path_fork {
   bool is_var;
   union {
      nir_variable *path_var; /* valid when is_var is true */
      nir_ssa_def *path_ssa;  /* valid when is_var is false */
   };
   struct path paths[2];
};
51
/** The set of paths available from the current point in the program.
 *
 * "regular" continues in the current scope; "brk" and "cont" are reached via
 * nir break/continue jumps of the innermost structured loop.  While inside a
 * loop, the previous routing is stacked in loop_backup (see
 * loop_routing_start / loop_routing_end).
 */
struct routes {
   /* Blocks handled by an enclosing invocation; excluded from "remaining"
    * in nir_structurize so they aren't structured twice.
    */
   struct set *outside;
   struct path regular;
   struct path brk;
   struct path cont;
   struct routes *loop_backup;
};
59
/** One level in the ordered level list built by organize_levels() */
struct strct_lvl {
   struct list_head link;

   /** Set of blocks at the current level */
   struct set *blocks;

   /** Path for the next level */
   struct path out_path;

   /** Reach set from inside_outside if irreducible */
   struct set *reach;

   /** Outside set from inside_outside if irreducible */
   struct set *outside;

   /** True if a skip region starts with this level */
   bool skip_start;

   /** True if a skip region ends with this level */
   bool skip_end;

   /** True if this level is irreducible (a multi-entry loop) */
   bool irreducible;
};
84
85 static int
86 nir_block_ptr_cmp(const void *_a, const void *_b)
87 {
88 nir_block *const *a = _a;
89 nir_block *const *b = _b;
90 return (int)(*a)->index - (int)(*b)->index;
91 }
92
93 static void
94 print_block_set(const struct set *set)
95 {
96 printf("{ ");
97 if (set != NULL) {
98 unsigned count = 0;
99 set_foreach(set, entry) {
100 if (count++)
101 printf(", ");
102 printf("%u", ((nir_block *)entry->key)->index);
103 }
104 }
105 printf(" }\n");
106 }
107
108 /** Return a sorted array of blocks for a set
109 *
110 * Hash set ordering is non-deterministic. We hash based on pointers and so,
111 * if any pointer ever changes from one run to another, the order of the set
112 * may change. Any time we're going to make decisions which may affect the
113 * final structure which may depend on ordering, we should first sort the
114 * blocks.
115 */
116 static nir_block **
117 sorted_block_arr_for_set(const struct set *block_set, void *mem_ctx)
118 {
119 const unsigned num_blocks = block_set->entries;
120 nir_block **block_arr = ralloc_array(mem_ctx, nir_block *, num_blocks);
121 unsigned i = 0;
122 set_foreach(block_set, entry)
123 block_arr[i++] = (nir_block *)entry->key;
124 assert(i == num_blocks);
125 qsort(block_arr, num_blocks, sizeof(*block_arr), nir_block_ptr_cmp);
126 return block_arr;
127 }
128
129 static nir_block *
130 block_for_singular_set(const struct set *block_set)
131 {
132 assert(block_set->entries == 1);
133 return (nir_block *)_mesa_set_next_entry(block_set, NULL)->key;
134 }
135
/**
 * Sets all path variables to reach the target block via a fork
 *
 * Walks down the fork tree, at each level picking the side whose reachable
 * set contains \p target and recording that choice either in the fork's
 * helper variable or as an immediate SSA boolean.
 *
 * NOTE(review): this assumes \p target is reachable through one of the two
 * sub-paths at every fork level; if it were in neither, the while loop would
 * never advance `fork` and would spin forever.
 */
static void
set_path_vars(nir_builder *b, struct path_fork *fork, nir_block *target)
{
   while (fork) {
      for (int i = 0; i < 2; i++) {
         if (_mesa_set_search(fork->paths[i].reachable, target)) {
            if (fork->is_var) {
               nir_store_var(b, fork->path_var, nir_imm_bool(b, i), 1);
            } else {
               /* An SSA fork condition may only be written once. */
               assert(fork->path_ssa == NULL);
               fork->path_ssa = nir_imm_bool(b, i);
            }
            /* Descend into the chosen side of the fork. */
            fork = fork->paths[i].fork;
            break;
         }
      }
   }
}
157
/**
 * Sets all path variables to reach both target blocks via a fork.
 * If the blocks are in different fork paths, the condition will be used.
 * As the fork is already created, the then and else blocks may be swapped,
 * in this case the condition is inverted
 */
static void
set_path_vars_cond(nir_builder *b, struct path_fork *fork, nir_src condition,
                   nir_block *then_block, nir_block *else_block)
{
   int i;
   while (fork) {
      for (i = 0; i < 2; i++) {
         if (_mesa_set_search(fork->paths[i].reachable, then_block)) {
            if (_mesa_set_search(fork->paths[i].reachable, else_block)) {
               /* Both targets lie on the same side: record the constant
                * choice and keep descending.
                */
               if (fork->is_var)
                  nir_store_var(b, fork->path_var, nir_imm_bool(b, i), 1);
               else
                  fork->path_ssa = nir_imm_bool(b, i);
               fork = fork->paths[i].fork;
               break;
            }
            else {
               /* The targets diverge here: this fork is decided by the
                * run-time condition.  Invert it if then_block sits on
                * side 0 (the "false" side of the fork).
                */
               assert(condition.is_ssa);
               nir_ssa_def *ssa_def = condition.ssa;
               assert(ssa_def->bit_size == 1);
               assert(ssa_def->num_components == 1);
               if (!i)
                  ssa_def = nir_inot(b, ssa_def);
               if (fork->is_var)
                  nir_store_var(b, fork->path_var, ssa_def, 1);
               else
                  fork->path_ssa = ssa_def;
               /* Below the divergence each target has a unique side, so
                * the single-target helper finishes the job.
                */
               set_path_vars(b, fork->paths[i].fork, then_block);
               set_path_vars(b, fork->paths[!i].fork, else_block);
               return;
            }
         }
      }
      /* then_block must be reachable through one of the two sides. */
      assert(i < 2);
   }
}
200
201 /**
202 * Sets all path variables and places the right jump instruction to reach the
203 * target block
204 */
205 static void
206 route_to(nir_builder *b, struct routes *routing, nir_block *target)
207 {
208 if (_mesa_set_search(routing->regular.reachable, target)) {
209 set_path_vars(b, routing->regular.fork, target);
210 }
211 else if (_mesa_set_search(routing->brk.reachable, target)) {
212 set_path_vars(b, routing->brk.fork, target);
213 nir_jump(b, nir_jump_break);
214 }
215 else if (_mesa_set_search(routing->cont.reachable, target)) {
216 set_path_vars(b, routing->cont.fork, target);
217 nir_jump(b, nir_jump_continue);
218 }
219 else {
220 assert(!target->successors[0]); /* target is endblock */
221 nir_jump(b, nir_jump_return);
222 }
223 }
224
/**
 * Sets path vars and places the right jump instr to reach one of the two
 * target blocks based on the condition. If the targets need different jump
 * instructions, they will be placed into an if else statement.
 * This can happen if one target is the loop head
 *               A __
 *               |   \
 *               B    |
 *               |\__/
 *               C
 *
 * Note the asymmetry: membership is keyed on then_block.  If then_block is
 * on a path but else_block is not, we fall through to the generic if/else
 * at the bottom rather than checking the remaining paths.
 */
static void
route_to_cond(nir_builder *b, struct routes *routing, nir_src condition,
              nir_block *then_block, nir_block *else_block)
{
   if (_mesa_set_search(routing->regular.reachable, then_block)) {
      if (_mesa_set_search(routing->regular.reachable, else_block)) {
         /* Both targets on the regular path: no jump needed at all. */
         set_path_vars_cond(b, routing->regular.fork, condition,
                            then_block, else_block);
         return;
      }
   } else if (_mesa_set_search(routing->brk.reachable, then_block)) {
      if (_mesa_set_search(routing->brk.reachable, else_block)) {
         /* Both targets on the break path: one unconditional break. */
         set_path_vars_cond(b, routing->brk.fork, condition,
                            then_block, else_block);
         nir_jump(b, nir_jump_break);
         return;
      }
   } else if (_mesa_set_search(routing->cont.reachable, then_block)) {
      if (_mesa_set_search(routing->cont.reachable, else_block)) {
         /* Both targets on the continue path: one unconditional continue. */
         set_path_vars_cond(b, routing->cont.fork, condition,
                            then_block, else_block);
         nir_jump(b, nir_jump_continue);
         return;
      }
   }

   /* then and else blocks are in different routes */
   nir_push_if_src(b, condition);
   route_to(b, routing, then_block);
   nir_push_else(b, NULL);
   route_to(b, routing, else_block);
   nir_pop_if(b, NULL);
}
269
270 /**
271 * Merges the reachable sets of both fork subpaths into the forks entire
272 * reachable set
273 */
274 static struct set *
275 fork_reachable(struct path_fork *fork)
276 {
277 struct set *reachable = _mesa_set_clone(fork->paths[0].reachable, fork);
278 set_foreach(fork->paths[1].reachable, entry)
279 _mesa_set_add_pre_hashed(reachable, entry->hash, entry->key);
280 return reachable;
281 }
282
/**
 * Modifies the routing to be the routing inside a loop. The old regular path
 * becomes the new break path. The loop in path becomes the new regular and
 * continue path.
 * The lost routing information is stacked into the loop_backup stack.
 * Also creates helper vars for multilevel loop jumping if needed.
 * Also calls the nir builder to build the loop
 */
static void
loop_routing_start(struct routes *routing, nir_builder *b,
                   struct path loop_path, struct set *reach,
                   struct set *outside, void *mem_ctx)
{
   if (NIR_LOWER_GOTO_IFS_DEBUG) {
      printf("loop_routing_start:\n");
      printf(" reach = ");
      print_block_set(reach);
      printf(" outside = ");
      print_block_set(outside);
      printf(" loop_path.reachable = ");
      print_block_set(loop_path.reachable);
      printf(" routing->outside = ");
      print_block_set(routing->outside);
      printf(" routing->regular.reachable = ");
      print_block_set(routing->regular.reachable);
      printf(" routing->brk.reachable = ");
      print_block_set(routing->brk.reachable);
      printf(" routing->cont.reachable = ");
      print_block_set(routing->cont.reachable);
      printf("\n");
   }

   /* Snapshot the current routing; restored in loop_routing_end(). */
   struct routes *routing_backup = ralloc(mem_ctx, struct routes);
   *routing_backup = *routing;
   bool break_needed = false;
   bool continue_needed = false;

   /* Find out whether any block reachable from inside the loop needs a
    * jump to the *old* break or continue target.  Those jumps must pass
    * through the new loop's break path plus a helper variable.
    */
   set_foreach(reach, entry) {
      if (_mesa_set_search(loop_path.reachable, entry->key))
         continue;
      if (_mesa_set_search(routing->regular.reachable, entry->key))
         continue;
      if (_mesa_set_search(routing->brk.reachable, entry->key)) {
         break_needed = true;
         continue;
      }
      assert(_mesa_set_search(routing->cont.reachable, entry->key));
      continue_needed = true;
   }

   /* Blocks outside the loop are handled by the caller's level list; make
    * sure nested structurization skips them.
    */
   if (outside && outside->entries) {
      routing->outside = _mesa_set_clone(routing->outside, routing);
      set_foreach(outside, entry)
         _mesa_set_add_pre_hashed(routing->outside, entry->hash, entry->key);
   }

   /* Inside the loop: break leaves to the old regular path, while both
    * regular and continue lead back to the loop header path.
    */
   routing->brk = routing_backup->regular;
   routing->cont = loop_path;
   routing->regular = loop_path;
   routing->loop_backup = routing_backup;

   if (break_needed) {
      /* Layer a fork over the break path: path 0 breaks out of this loop
       * normally, path 1 re-dispatches to the outer loop's break target
       * (handled in loop_routing_end via the "path_break" variable).
       */
      struct path_fork *fork = ralloc(mem_ctx, struct path_fork);
      fork->is_var = true;
      fork->path_var = nir_local_variable_create(b->impl, glsl_bool_type(),
                                                 "path_break");
      fork->paths[0] = routing->brk;
      fork->paths[1] = routing_backup->brk;
      routing->brk.fork = fork;
      routing->brk.reachable = fork_reachable(fork);
   }
   if (continue_needed) {
      /* Same idea for the outer loop's continue target ("path_continue").
       * Note this stacks on top of the break fork if both are needed.
       */
      struct path_fork *fork = ralloc(mem_ctx, struct path_fork);
      fork->is_var = true;
      fork->path_var = nir_local_variable_create(b->impl, glsl_bool_type(),
                                                 "path_continue");
      fork->paths[0] = routing->brk;
      fork->paths[1] = routing_backup->cont;
      routing->brk.fork = fork;
      routing->brk.reachable = fork_reachable(fork);
   }
   nir_push_loop(b);
}
366
367 /**
368 * Gets a forks condition as ssa def if the condition is inside a helper var,
369 * the variable will be read into an ssa def
370 */
371 static nir_ssa_def *
372 fork_condition(nir_builder *b, struct path_fork *fork)
373 {
374 nir_ssa_def *ret;
375 if (fork->is_var) {
376 ret = nir_load_var(b, fork->path_var);
377 }
378 else
379 ret = fork->path_ssa;
380 return ret;
381 }
382
/**
 * Restores the routing after leaving a loop based on the loop_backup stack.
 * Also handles multi level jump helper vars if existing and calls the nir
 * builder to pop the nir loop
 */
static void
loop_routing_end(struct routes *routing, nir_builder *b)
{
   struct routes *routing_backup = routing->loop_backup;
   /* loop_routing_start set cont and regular to the same loop path and
    * nothing inside the loop may have changed that.
    */
   assert(routing->cont.fork == routing->regular.fork);
   assert(routing->cont.reachable == routing->regular.reachable);
   nir_pop_loop(b, NULL);
   /* Unwind the helper forks in reverse creation order: continue first
    * (it was stacked last in loop_routing_start), then break.  Each emits
    * an if after the loop that re-dispatches the jump to the outer loop.
    */
   if (routing->brk.fork && routing->brk.fork->paths[1].reachable ==
       routing_backup->cont.reachable) {
      assert(!(routing->brk.fork->is_var &&
               strcmp(routing->brk.fork->path_var->name, "path_continue")));
      nir_push_if_src(b, nir_src_for_ssa(
                         fork_condition(b, routing->brk.fork)));
      nir_jump(b, nir_jump_continue);
      nir_pop_if(b, NULL);
      routing->brk = routing->brk.fork->paths[0];
   }
   if (routing->brk.fork && routing->brk.fork->paths[1].reachable ==
       routing_backup->brk.reachable) {
      assert(!(routing->brk.fork->is_var &&
               strcmp(routing->brk.fork->path_var->name, "path_break")));
      nir_push_if_src(b, nir_src_for_ssa(
                         fork_condition(b, routing->brk.fork)));
      nir_jump(b, nir_jump_break);
      nir_pop_if(b, NULL);
      routing->brk = routing->brk.fork->paths[0];
   }
   /* After unwinding, the break path must be the pre-loop regular path. */
   assert(routing->brk.fork == routing_backup->regular.fork);
   assert(routing->brk.reachable == routing_backup->regular.reachable);
   *routing = *routing_backup;
   ralloc_free(routing_backup);
}
420
/**
 * generates a list of all blocks dominated by the loop header, but the
 * control flow can't go back to the loop header from the block.
 * also generates a list of all blocks that can be reached from within the
 * loop
 *    | __
 *    A´  \
 *    | \  \
 *    B  C-´
 *   /
 *  D
 * here B and C are directly dominated by A but only C can reach back to the
 * loop head A. B will be added to the outside set and to the reach set.
 * \param  loop_heads  set of loop heads. All blocks inside the loop will be
 *                     added to this set
 * \param  outside  all blocks directly outside the loop will be added
 * \param  reach  all blocks reachable from the loop will be added
 */
static void
inside_outside(nir_block *block, struct set *loop_heads, struct set *outside,
               struct set *reach, struct set *brk_reachable, void *mem_ctx)
{
   assert(_mesa_set_search(loop_heads, block));
   /* Start from the dominance children that aren't already known to be
    * outside (reachable via the current break path).
    */
   struct set *remaining = _mesa_pointer_set_create(mem_ctx);
   for (int i = 0; i < block->num_dom_children; i++) {
      if (!_mesa_set_search(brk_reachable, block->dom_children[i]))
         _mesa_set_add(remaining, block->dom_children[i]);
   }

   if (NIR_LOWER_GOTO_IFS_DEBUG) {
      printf("inside_outside(%u):\n", block->index);
      printf(" loop_heads = ");
      print_block_set(loop_heads);
      printf(" reach = ");
      print_block_set(reach);
      printf(" brk_reach = ");
      print_block_set(brk_reachable);
      printf(" remaining = ");
      print_block_set(remaining);
      printf("\n");
   }

   /* Fixed-point iteration: repeatedly move to "outside" any block whose
    * dominance frontier contains neither a remaining block nor a loop head,
    * i.e. a block from which control flow can't jump back into the loop.
    */
   bool progress = true;
   while (remaining->entries && progress) {
      progress = false;
      set_foreach(remaining, child_entry) {
         nir_block *dom_child = (nir_block *) child_entry->key;
         bool can_jump_back = false;
         set_foreach(dom_child->dom_frontier, entry) {
            if (entry->key == dom_child)
               continue;
            if (_mesa_set_search_pre_hashed(remaining, entry->hash,
                                            entry->key)) {
               can_jump_back = true;
               break;
            }
            if (_mesa_set_search_pre_hashed(loop_heads, entry->hash,
                                            entry->key)) {
               can_jump_back = true;
               break;
            }
         }
         if (!can_jump_back) {
            _mesa_set_add_pre_hashed(outside, child_entry->hash,
                                     child_entry->key);
            _mesa_set_remove(remaining, child_entry);
            progress = true;
         }
      }
   }

   /* Add everything remaining to loop_heads */
   set_foreach(remaining, entry)
      _mesa_set_add_pre_hashed(loop_heads, entry->hash, entry->key);

   /* Recurse for each remaining */
   set_foreach(remaining, entry) {
      inside_outside((nir_block *) entry->key, loop_heads, outside, reach,
                     brk_reachable, mem_ctx);
   }

   /* Record this block's successors as reachable from the loop, skipping
    * blocks inside the loop and the end block (no successors).
    */
   for (int i = 0; i < 2; i++) {
      if (block->successors[i] && block->successors[i]->successors[0] &&
          !_mesa_set_search(loop_heads, block->successors[i])) {
         _mesa_set_add(reach, block->successors[i]);
      }
   }

   if (NIR_LOWER_GOTO_IFS_DEBUG) {
      printf("outside(%u) = ", block->index);
      print_block_set(outside);
      printf("reach(%u) = ", block->index);
      print_block_set(reach);
   }
}
517
518 static struct path_fork *
519 select_fork_recur(struct nir_block **blocks, unsigned start, unsigned end,
520 nir_function_impl *impl, bool need_var, void *mem_ctx)
521 {
522 if (start == end - 1)
523 return NULL;
524
525 struct path_fork *fork = ralloc(mem_ctx, struct path_fork);
526 fork->is_var = need_var;
527 if (need_var)
528 fork->path_var = nir_local_variable_create(impl, glsl_bool_type(),
529 "path_select");
530
531 unsigned mid = start + (end - start) / 2;
532
533 fork->paths[0].reachable = _mesa_pointer_set_create(fork);
534 for (unsigned i = start; i < mid; i++)
535 _mesa_set_add(fork->paths[0].reachable, blocks[i]);
536 fork->paths[0].fork =
537 select_fork_recur(blocks, start, mid, impl, need_var, mem_ctx);
538
539 fork->paths[1].reachable = _mesa_pointer_set_create(fork);
540 for (unsigned i = mid; i < end; i++)
541 _mesa_set_add(fork->paths[1].reachable, blocks[i]);
542 fork->paths[1].fork =
543 select_fork_recur(blocks, mid, end, impl, need_var, mem_ctx);
544
545 return fork;
546 }
547
548 /**
549 * Gets a set of blocks organized into the same level by the organize_levels
550 * function and creates enough forks to be able to route to them.
551 * If the set only contains one block, the function has nothing to do.
552 * The set should almost never contain more than two blocks, but if so,
553 * then the function calls itself recursively
554 */
555 static struct path_fork *
556 select_fork(struct set *reachable, nir_function_impl *impl, bool need_var,
557 void *mem_ctx)
558 {
559 assert(reachable->entries > 0);
560 if (reachable->entries <= 1)
561 return NULL;
562
563 /* Hash set ordering is non-deterministic. We're about to turn a set into
564 * a tree so we really want things to be in a deterministic ordering.
565 */
566 return select_fork_recur(sorted_block_arr_for_set(reachable, mem_ctx),
567 0, reachable->entries, impl, need_var, mem_ctx);
568 }
569
/**
 * gets called when the organize_levels functions fails to find blocks that
 * can't be reached by the other remaining blocks. This means, at least two
 * dominance sibling blocks can reach each other. So we have a multi entry
 * loop. This function tries to find the smallest possible set of blocks that
 * must be part of the multi entry loop.
 * example cf:  |   |
 *              A<---B
 *             / \__,^ \
 *             \       /
 *               \   /
 *                 C
 * The function choses a random block as candidate. for example C
 * The function checks which remaining blocks can reach C, in this case A.
 * So A becomes the new candidate and C is removed from the result set.
 * B can reach A.
 * So B becomes the new candidate and A is removed from the set.
 * A can reach B.
 * A was an old candidate. So it is added to the set containing B.
 * No other remaining blocks can reach A or B.
 * So only A and B must be part of the multi entry loop.
 */
static void
handle_irreducible(struct set *remaining, struct strct_lvl *curr_level,
                   struct set *brk_reachable, void *mem_ctx)
{
   /* Arbitrary starting candidate (first entry of the hash set). */
   nir_block *candidate = (nir_block *)
      _mesa_set_next_entry(remaining, NULL)->key;
   struct set *old_candidates = _mesa_pointer_set_create(mem_ctx);
   while (candidate) {
      _mesa_set_add(old_candidates, candidate);

      /* Start with just the candidate block */
      _mesa_set_clear(curr_level->blocks, NULL);
      _mesa_set_add(curr_level->blocks, candidate);

      candidate = NULL;
      /* Look for a remaining block that can reach the current level set
       * (its dominance frontier intersects it).  A previously-seen
       * candidate is folded into the set; a fresh one restarts the scan.
       */
      set_foreach(remaining, entry) {
         nir_block *remaining_block = (nir_block *) entry->key;
         if (!_mesa_set_search(curr_level->blocks, remaining_block) &&
             _mesa_set_intersects(remaining_block->dom_frontier,
                                  curr_level->blocks)) {
            if (_mesa_set_search(old_candidates, remaining_block)) {
               _mesa_set_add(curr_level->blocks, remaining_block);
            } else {
               candidate = remaining_block;
               break;
            }
         }
      }
   }
   _mesa_set_destroy(old_candidates, NULL);
   old_candidates = NULL;

   /* Treat every block of the multi-entry loop as a loop head and compute
    * its inside/outside/reach sets for the forced loop we'll build.
    */
   struct set *loop_heads = _mesa_set_clone(curr_level->blocks, curr_level);
   curr_level->reach = _mesa_pointer_set_create(curr_level);
   set_foreach(curr_level->blocks, entry) {
      _mesa_set_remove_key(remaining, entry->key);
      inside_outside((nir_block *) entry->key, loop_heads, remaining,
                     curr_level->reach, brk_reachable, mem_ctx);
   }
   curr_level->outside = remaining;
   _mesa_set_destroy(loop_heads, NULL);
}
634
/**
 * organize a set of blocks into a list of levels. Where every level contains
 * one or more blocks. So that every block is before all blocks it can reach.
 * Also creates all path variables needed, for the control flow between the
 * block.
 * For example if the control flow looks like this:
 *       A
 *     / |
 *    B  C
 *    |  / \
 *    E |
 *     \ /
 *      F
 * B, C, E and F are dominance children of A
 * The level list should look like this:
 *          blocks  irreducible conditional
 * level 0   B, C     false       false
 * level 1    E       false       true
 * level 2    F       false       false
 * The final structure should look like this:
 * A
 * if (path_select) {
 *    B
 * } else {
 *    C
 * }
 * if (path_conditional) {
 *    E
 * }
 * F
 *
 * \param levels  uninitialized list
 * \param is_domminated  if true, no helper variables will be created for the
 *                       zeroth level
 */
static void
organize_levels(struct list_head *levels, struct set *children,
                struct set *reach, struct routes *routing,
                nir_function_impl *impl, bool is_domminated, void *mem_ctx)
{
   if (NIR_LOWER_GOTO_IFS_DEBUG) {
      printf("organize_levels:\n");
      printf(" children = ");
      print_block_set(children);
      printf(" reach = ");
      print_block_set(reach);
   }

   /* Duplicate remaining because we're going to destroy it */
   struct set *remaining = _mesa_set_clone(children, mem_ctx);

   /* blocks that can be reached by the remaining blocks */
   struct set *remaining_frontier = _mesa_pointer_set_create(mem_ctx);

   /* targets of active skip path */
   struct set *skip_targets = _mesa_pointer_set_create(mem_ctx);

   list_inithead(levels);
   while (remaining->entries) {
      /* Compute the union of the dominance frontiers of all remaining
       * blocks (excluding self-loops).
       */
      _mesa_set_clear(remaining_frontier, NULL);
      set_foreach(remaining, entry) {
         nir_block *remain_block = (nir_block *) entry->key;
         set_foreach(remain_block->dom_frontier, frontier_entry) {
            nir_block *frontier = (nir_block *) frontier_entry->key;
            if (frontier != remain_block) {
               _mesa_set_add(remaining_frontier, frontier);
            }
         }
      }

      /* The next level consists of every remaining block that no other
       * remaining block can reach.
       */
      struct strct_lvl *curr_level = rzalloc(mem_ctx, struct strct_lvl);
      curr_level->blocks = _mesa_pointer_set_create(curr_level);
      set_foreach(remaining, entry) {
         nir_block *candidate = (nir_block *) entry->key;
         if (!_mesa_set_search(remaining_frontier, candidate)) {
            _mesa_set_add(curr_level->blocks, candidate);
            _mesa_set_remove_key(remaining, candidate);
         }
      }

      /* If no block qualified, the remaining blocks form a multi-entry
       * (irreducible) loop; force a level out of it.
       */
      curr_level->irreducible = !curr_level->blocks->entries;
      if (curr_level->irreducible) {
         handle_irreducible(remaining, curr_level,
                            routing->brk.reachable, mem_ctx);
      }
      assert(curr_level->blocks->entries);

      struct strct_lvl *prev_level = NULL;
      if (!list_is_empty(levels))
         prev_level = list_last_entry(levels, struct strct_lvl, link);

      /* Any active skip target placed in this level closes its skip region
       * at the previous level.
       */
      set_foreach(skip_targets, entry) {
         if (_mesa_set_search_pre_hashed(curr_level->blocks,
                                         entry->hash, entry->key)) {
            _mesa_set_remove(skip_targets, entry);
            prev_level->skip_end = 1;
         }
      }
      curr_level->skip_start = skip_targets->entries != 0;

      /* Gather the set of blocks the previous level can branch to. */
      struct set *prev_frontier = NULL;
      if (!prev_level) {
         prev_frontier = reach;
      } else if (prev_level->irreducible) {
         prev_frontier = prev_level->reach;
      } else {
         set_foreach(curr_level->blocks, blocks_entry) {
            nir_block *level_block = (nir_block *) blocks_entry->key;
            if (curr_level->blocks->entries == 1) {
               /* If we only have one block, there's no union operation and we
                * can just use the one from the one block.
                */
               prev_frontier = level_block->dom_frontier;
               break;
            }

            if (prev_frontier == NULL) {
               prev_frontier =
                  _mesa_set_clone(level_block->dom_frontier, prev_level);
            } else {
               set_foreach(level_block->dom_frontier, entry)
                  _mesa_set_add_pre_hashed(prev_frontier, entry->hash,
                                           entry->key);
            }
         }
      }

      /* A branch target that is a later level (still remaining) or on the
       * regular path (but not break/continue) must be skipped over: open a
       * skip region for it.
       */
      bool is_in_skip = skip_targets->entries != 0;
      set_foreach(prev_frontier, entry) {
         if (_mesa_set_search(remaining, entry->key) ||
             (_mesa_set_search(routing->regular.reachable, entry->key) &&
              !_mesa_set_search(routing->brk.reachable, entry->key) &&
              !_mesa_set_search(routing->cont.reachable, entry->key))) {
            _mesa_set_add_pre_hashed(skip_targets, entry->hash, entry->key);
            if (is_in_skip)
               prev_level->skip_end = 1;
            curr_level->skip_start = 1;
         }
      }

      /* skip_end is decided by later iterations (or the tail fix-up below),
       * never by the level itself.
       */
      curr_level->skip_end = 0;
      list_addtail(&curr_level->link, levels);
   }

   if (NIR_LOWER_GOTO_IFS_DEBUG) {
      printf(" levels:\n");
      list_for_each_entry(struct strct_lvl, level, levels, link) {
         printf(" ");
         print_block_set(level->blocks);
      }
      printf("\n");
   }

   /* Any skip region still open ends at the last level. */
   if (skip_targets->entries)
      list_last_entry(levels, struct strct_lvl, link)->skip_end = 1;

   /* Iterate through all levels in reverse and create all the paths and
    * forks.
    */
   struct path path_after_skip;

   list_for_each_entry_rev(struct strct_lvl, level, levels, link) {
      /* The zeroth level of a dominated region is always entered, so it
       * needs no helper variable to be selected.
       */
      bool need_var = !(is_domminated && level->link.prev == levels);
      level->out_path = routing->regular;
      if (level->skip_end) {
         path_after_skip = routing->regular;
      }
      routing->regular.reachable = level->blocks;
      routing->regular.fork = select_fork(routing->regular.reachable, impl,
                                          need_var, mem_ctx);
      if (level->skip_start) {
         /* path 1 enters this level, path 0 jumps past the skip region. */
         struct path_fork *fork = ralloc(mem_ctx, struct path_fork);
         fork->is_var = need_var;
         if (need_var)
            fork->path_var = nir_local_variable_create(impl, glsl_bool_type(),
                                                       "path_conditional");
         fork->paths[0] = path_after_skip;
         fork->paths[1] = routing->regular;
         routing->regular.fork = fork;
         routing->regular.reachable = fork_reachable(fork);
      }
   }
}
816
817 static void
818 nir_structurize(struct routes *routing, nir_builder *b,
819 nir_block *block, void *mem_ctx);
820
821 /**
822 * Places all the if else statements to select between all blocks in a select
823 * path
824 */
825 static void
826 select_blocks(struct routes *routing, nir_builder *b,
827 struct path in_path, void *mem_ctx)
828 {
829 if (!in_path.fork) {
830 nir_block *block = block_for_singular_set(in_path.reachable);
831 nir_structurize(routing, b, block, mem_ctx);
832 } else {
833 assert(!(in_path.fork->is_var &&
834 strcmp(in_path.fork->path_var->name, "path_select")));
835 nir_push_if_src(b, nir_src_for_ssa(fork_condition(b, in_path.fork)));
836 select_blocks(routing, b, in_path.fork->paths[1], mem_ctx);
837 nir_push_else(b, NULL);
838 select_blocks(routing, b, in_path.fork->paths[0], mem_ctx);
839 nir_pop_if(b, NULL);
840 }
841 }
842
/**
 * Builds the structurized nir code by the final level list.
 */
static void
plant_levels(struct list_head *levels, struct routes *routing,
             nir_builder *b, void *mem_ctx)
{
   /* Place all dominated blocks and build the path forks */
   list_for_each_entry(struct strct_lvl, level, levels, link) {
      if (level->skip_start) {
         /* Open the skip region: path 1 of the "path_conditional" fork
          * enters this level, path 0 bypasses it.
          */
         assert(routing->regular.fork);
         assert(!(routing->regular.fork->is_var && strcmp(
                     routing->regular.fork->path_var->name,
                     "path_conditional")));
         nir_push_if_src(b, nir_src_for_ssa(
                            fork_condition(b, routing->regular.fork)));
         routing->regular = routing->regular.fork->paths[1];
      }
      /* While planting this level, the regular path points at the next
       * level (out_path was set up by organize_levels).
       */
      struct path in_path = routing->regular;
      routing->regular = level->out_path;
      if (level->irreducible) {
         /* Multi-entry loops are wrapped in a forced nir loop. */
         loop_routing_start(routing, b, in_path, level->reach,
                            level->outside, mem_ctx);
      }
      select_blocks(routing, b, in_path, mem_ctx);
      if (level->irreducible)
         loop_routing_end(routing, b);
      if (level->skip_end)
         nir_pop_if(b, NULL);
   }
}
873
/**
 * builds the control flow of a block and all its dominance children
 * \param  routing  the routing after the block and all dominated blocks
 */
static void
nir_structurize(struct routes *routing, nir_builder *b, nir_block *block,
                void *mem_ctx)
{
   /* Dominance children not already claimed by an outer level. */
   struct set *remaining = _mesa_pointer_set_create(mem_ctx);
   for (int i = 0; i < block->num_dom_children; i++) {
      if (!_mesa_set_search(routing->outside, block->dom_children[i]))
         _mesa_set_add(remaining, block->dom_children[i]);
   }

   /* If the block can reach back to itself, it is a loop head */
   int is_looped = _mesa_set_search(block->dom_frontier, block) != NULL;
   struct list_head outside_levels;
   if (is_looped) {
      struct set *loop_heads = _mesa_pointer_set_create(mem_ctx);
      _mesa_set_add(loop_heads, block);

      /* Split the dominated blocks into those inside and outside the
       * loop; outside blocks get their own level list after the loop.
       */
      struct set *outside = _mesa_pointer_set_create(mem_ctx);
      struct set *reach = _mesa_pointer_set_create(mem_ctx);
      inside_outside(block, loop_heads, outside, reach,
                     routing->brk.reachable, mem_ctx);

      set_foreach(outside, entry)
         _mesa_set_remove_key(remaining, entry->key);

      organize_levels(&outside_levels, outside, reach, routing, b->impl,
                      false, mem_ctx);

      /* Inside the loop, "continue"-ing to this block restarts it. */
      struct path loop_path = {
         .reachable = _mesa_pointer_set_create(mem_ctx),
         .fork = NULL,
      };
      _mesa_set_add(loop_path.reachable, block);

      loop_routing_start(routing, b, loop_path, reach, outside, mem_ctx);
   }

   /* Blocks reachable from this block's own successors (skipping the end
    * block, which has no successors).
    */
   struct set *reach = _mesa_pointer_set_create(mem_ctx);
   if (block->successors[0]->successors[0]) /* it is not the end_block */
      _mesa_set_add(reach, block->successors[0]);
   if (block->successors[1] && block->successors[1]->successors[0])
      _mesa_set_add(reach, block->successors[1]);

   struct list_head levels;
   organize_levels(&levels, remaining, reach, routing, b->impl, true, mem_ctx);

   /* Push all instructions of this block, without the jump instr */
   nir_jump_instr *jump_instr = NULL;
   nir_foreach_instr_safe(instr, block) {
      if (instr->type == nir_instr_type_jump) {
         jump_instr = nir_instr_as_jump(instr);
         break;
      }
      nir_instr_remove(instr);
      nir_builder_instr_insert(b, instr);
   }

   /* Find path to the successor blocks */
   /* NOTE(review): assumes every block in unstructured NIR ends in a jump
    * instruction, so jump_instr is non-NULL here — confirm against the NIR
    * unstructured-CF invariants.
    */
   if (jump_instr->type == nir_jump_goto_if) {
      route_to_cond(b, routing, jump_instr->condition,
                    jump_instr->target, jump_instr->else_target);
   } else {
      route_to(b, routing, block->successors[0]);
   }

   plant_levels(&levels, routing, b, mem_ctx);
   if (is_looped) {
      /* Pop the loop, then plant the blocks that live after it. */
      loop_routing_end(routing, b);
      plant_levels(&outside_levels, routing, b, mem_ctx);
   }
}
949
/* Structurize one function: extract the unstructured CF list, rebuild it as
 * structured NIR at the start of the impl, then delete the old blocks.
 * Returns false (no progress) if the impl is already structured.
 */
static bool
nir_lower_goto_ifs_impl(nir_function_impl *impl)
{
   if (impl->structured) {
      nir_metadata_preserve(impl, nir_metadata_all);
      return false;
   }

   /* The pass is driven entirely by the dominance tree and frontiers. */
   nir_metadata_require(impl, nir_metadata_dominance);

   nir_cf_list cf_list;
   nir_cf_extract(&cf_list, nir_before_cf_list(&impl->body),
                  nir_after_cf_list(&impl->body));

   /* From this point on, it's structured */
   impl->structured = true;

   nir_builder b;
   nir_builder_init(&b, impl);
   b.cursor = nir_before_block(nir_start_block(impl));

   void *mem_ctx = ralloc_context(b.shader);

   /* The initial regular path leads only to the end block. */
   struct set *end_set = _mesa_pointer_set_create(mem_ctx);
   _mesa_set_add(end_set, impl->end_block);
   struct set *empty_set = _mesa_pointer_set_create(mem_ctx);

   nir_cf_node *start_node =
      exec_node_data(nir_cf_node, exec_list_get_head(&cf_list.list), node);
   nir_block *start_block = nir_cf_node_as_block(start_node);

   struct routes *routing = ralloc(mem_ctx, struct routes);
   *routing = (struct routes) {
      .outside = empty_set,
      .regular.reachable = end_set,
      .brk.reachable = empty_set,
      .cont.reachable = empty_set,
   };
   nir_structurize(routing, &b, start_block, mem_ctx);
   /* At top level there is no loop, so no forks and no break/continue
    * targets may remain.
    */
   assert(routing->regular.fork == NULL);
   assert(routing->brk.fork == NULL);
   assert(routing->cont.fork == NULL);
   assert(routing->brk.reachable == empty_set);
   assert(routing->cont.reachable == empty_set);

   ralloc_free(mem_ctx);
   nir_cf_delete(&cf_list);

   nir_metadata_preserve(impl, nir_metadata_none);

   return true;
}
1002
1003 bool
1004 nir_lower_goto_ifs(nir_shader *shader)
1005 {
1006 bool progress = true;
1007
1008 nir_foreach_function(function, shader) {
1009 if (function->impl && nir_lower_goto_ifs_impl(function->impl))
1010 progress = true;
1011 }
1012
1013 return progress;
1014 }