e0f4b100d57cd45c2feb4404d8ea2658d2964be1
[mesa.git] / src / compiler / nir / nir_lower_goto_ifs.c
1 /*
2 * Copyright © 2020 Julian Winkler
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "nir.h"
25 #include "nir_builder.h"
26
/**
 * A routable destination during structurization: a set of blocks plus the
 * fork tree needed to pick between them.
 */
struct path {
   /** Set of blocks which this path represents
    *
    * It's "reachable" not in the sense that these are all the nodes reachable
    * through this path but in the sense that, when you see one of these
    * blocks, you know you've reached this path.
    */
   struct set *reachable;

   /** Fork in the path, if reachable->entries > 1 */
   struct path_fork *fork;
};
39
/**
 * A binary decision point between the two sub-paths of a path.
 *
 * is_var selects which union member is valid: a helper nir_variable that is
 * stored/loaded, or a plain 1-bit SSA value.
 */
struct path_fork {
   bool is_var;
   union {
      nir_variable *path_var; /* valid if is_var */
      nir_ssa_def *path_ssa;  /* valid if !is_var */
   };
   struct path paths[2];
};
48
/**
 * The three paths that can currently be taken out of the code being built:
 * regular fallthrough, loop break, and loop continue. loop_backup stacks the
 * outer routing while inside a loop (see loop_routing_start/_end).
 */
struct routes {
   struct path regular;
   struct path brk;
   struct path cont;
   struct routes *loop_backup;
};
55
/**
 * One level in the list produced by organize_levels: a set of blocks that
 * can be emitted at the same position, plus bookkeeping for skip
 * ("conditional") regions and irreducible (multi-entry loop) levels.
 */
struct strct_lvl {
   struct exec_node node;

   /** Set of blocks at the current level */
   struct set *blocks;

   /** Path for the next level */
   struct path out_path;

   /** Reach set from inside_outside if irreducible */
   struct set *reach;

   /** True if a skip region starts with this level */
   bool skip_start;

   /** True if a skip region ends with this level */
   bool skip_end;

   /** True if this level is irreducible */
   bool irreducible;
};
77
78 /**
79 * Sets all path variables to reach the target block via a fork
80 */
81 static void
82 set_path_vars(nir_builder *b, struct path_fork *fork, nir_block *target)
83 {
84 while (fork) {
85 for (int i = 0; i < 2; i++) {
86 if (_mesa_set_search(fork->paths[i].reachable, target)) {
87 if (fork->is_var) {
88 nir_store_var(b, fork->path_var, nir_imm_bool(b, i), 1);
89 } else {
90 assert(fork->path_ssa == NULL);
91 fork->path_ssa = nir_imm_bool(b, i);
92 }
93 fork = fork->paths[i].fork;
94 break;
95 }
96 }
97 }
98 }
99
/**
 * Sets all path variables to reach the both target blocks via a fork.
 * If the blocks are in different fork paths, the condition will be used.
 * As the fork is already created, the then and else blocks may be swapped,
 * in this case the condition is inverted
 */
static void
set_path_vars_cond(nir_builder *b, struct path_fork *fork, nir_src condition,
                   nir_block *then_block, nir_block *else_block)
{
   int i;
   while (fork) {
      for (i = 0; i < 2; i++) {
         if (_mesa_set_search(fork->paths[i].reachable, then_block)) {
            if (_mesa_set_search(fork->paths[i].reachable, else_block)) {
               /* Both targets lie on the same side of this fork: choose that
                * side unconditionally and descend to the next fork.
                */
               if (fork->is_var)
                  nir_store_var(b, fork->path_var, nir_imm_bool(b, i), 1);
               else
                  fork->path_ssa = nir_imm_bool(b, i);
               fork = fork->paths[i].fork;
               break;
            }
            else {
               /* This fork is where the two targets diverge, so its decision
                * value is the branch condition itself.  A stored "true"
                * selects paths[1]; if then_block lies on paths[0] the
                * condition has to be inverted.
                */
               assert(condition.is_ssa);
               nir_ssa_def *ssa_def = condition.ssa;
               assert(ssa_def->bit_size == 1);
               assert(ssa_def->num_components == 1);
               if (!i)
                  ssa_def = nir_inot(b, ssa_def);
               if (fork->is_var)
                  nir_store_var(b, fork->path_var, ssa_def, 1);
               else
                  fork->path_ssa = ssa_def;
               /* Below the divergence each target is routed independently */
               set_path_vars(b, fork->paths[i].fork, then_block);
               set_path_vars(b, fork->paths[!i].fork, else_block);
               return;
            }
         }
      }
      /* then_block must always be found on one of the two sub-paths */
      assert(i < 2);
   }
}
142
143 /**
144 * Sets all path variables and places the right jump instruction to reach the
145 * target block
146 */
147 static void
148 route_to(nir_builder *b, struct routes *routing, nir_block *target)
149 {
150 if (_mesa_set_search(routing->regular.reachable, target)) {
151 set_path_vars(b, routing->regular.fork, target);
152 }
153 else if (_mesa_set_search(routing->brk.reachable, target)) {
154 set_path_vars(b, routing->brk.fork, target);
155 nir_jump(b, nir_jump_break);
156 }
157 else if (_mesa_set_search(routing->cont.reachable, target)) {
158 set_path_vars(b, routing->cont.fork, target);
159 nir_jump(b, nir_jump_continue);
160 }
161 else {
162 assert(!target->successors[0]); /* target is endblock */
163 nir_jump(b, nir_jump_return);
164 }
165 }
166
/**
 * Sets path vars and places the right jump instr to reach one of the two
 * target blocks based on the condition. If the targets need different jump
 * instructions, they will be placed into an if else statement.
 * This can happen if one target is the loop head
 *    A __
 *    |   \
 *    B    |
 *    |\__/
 *    C
 */
static void
route_to_cond(nir_builder *b, struct routes *routing, nir_src condition,
              nir_block *then_block, nir_block *else_block)
{
   /* If both targets are on the same route, one jump instruction (or none,
    * for the regular route) suffices and the fork decides between them.
    * Note the nested ifs without else: when only then_block is on a route,
    * control falls through to the generic if/else emission below.
    */
   if (_mesa_set_search(routing->regular.reachable, then_block)) {
      if (_mesa_set_search(routing->regular.reachable, else_block)) {
         set_path_vars_cond(b, routing->regular.fork, condition,
                            then_block, else_block);
         return;
      }
   } else if (_mesa_set_search(routing->brk.reachable, then_block)) {
      if (_mesa_set_search(routing->brk.reachable, else_block)) {
         set_path_vars_cond(b, routing->brk.fork, condition,
                            then_block, else_block);
         nir_jump(b, nir_jump_break);
         return;
      }
   } else if (_mesa_set_search(routing->cont.reachable, then_block)) {
      if (_mesa_set_search(routing->cont.reachable, else_block)) {
         set_path_vars_cond(b, routing->cont.fork, condition,
                            then_block, else_block);
         nir_jump(b, nir_jump_continue);
         return;
      }
   }

   /* then and else blocks are in different routes */
   nir_push_if_src(b, condition);
   route_to(b, routing, then_block);
   nir_push_else(b, NULL);
   route_to(b, routing, else_block);
   nir_pop_if(b, NULL);
}
211
212 /**
213 * Merges the reachable sets of both fork subpaths into the forks entire
214 * reachable set
215 */
216 static struct set *
217 fork_reachable(struct path_fork *fork)
218 {
219 struct set *reachable = _mesa_set_clone(fork->paths[0].reachable, fork);
220 set_foreach(fork->paths[1].reachable, entry)
221 _mesa_set_add_pre_hashed(reachable, entry->hash, entry->key);
222 return reachable;
223 }
224
/**
 * Modifies the routing to be the routing inside a loop. The old regular path
 * becomes the new break path. The loop in path becomes the new regular and
 * continue path.
 * The lost routing information is stacked into the loop_backup stack.
 * Also creates helper vars for multilevel loop jumping if needed.
 * Also calls the nir builder to build the loop
 */
static void
loop_routing_start(struct routes *routing, nir_builder *b,
                   struct path loop_path, struct set *reach)
{
   /* Snapshot the outer routing; restored by loop_routing_end() */
   struct routes *routing_backup = ralloc(routing, struct routes);
   *routing_backup = *routing;
   bool break_needed = false;
   bool continue_needed = false;

   /* Decide which multi-level jumps the new loop needs: every block the loop
    * can reach that is neither inside the loop nor on the new regular path
    * must be reached through the outer break or continue path.
    */
   set_foreach(reach, entry) {
      if (_mesa_set_search(loop_path.reachable, entry->key))
         continue;
      if (_mesa_set_search(routing->regular.reachable, entry->key))
         continue;
      if (_mesa_set_search(routing->brk.reachable, entry->key)) {
         break_needed = true;
         continue;
      }
      assert(_mesa_set_search(routing->cont.reachable, entry->key));
      continue_needed = true;
   }

   routing->brk = routing_backup->regular;
   routing->cont = loop_path;
   routing->regular = loop_path;
   routing->loop_backup = routing_backup;

   if (break_needed) {
      /* An outer break is routed as a break out of this loop with the
       * "path_break" helper var set; loop_routing_end() emits the second
       * break after popping the loop.
       */
      struct path_fork *fork = ralloc(routing_backup, struct path_fork);
      fork->is_var = true;
      fork->path_var = nir_local_variable_create(b->impl, glsl_bool_type(),
                                                 "path_break");
      fork->paths[0] = routing->brk;
      fork->paths[1] = routing_backup->brk;
      routing->brk.fork = fork;
      routing->brk.reachable = fork_reachable(fork);
   }
   if (continue_needed) {
      /* Same trick for an outer continue: break out of this loop with
       * "path_continue" set; loop_routing_end() emits the continue.  Note
       * this fork is chained onto the break path, on top of any path_break
       * fork created above.
       */
      struct path_fork *fork = ralloc(routing_backup, struct path_fork);
      fork->is_var = true;
      fork->path_var = nir_local_variable_create(b->impl, glsl_bool_type(),
                                                 "path_continue");
      fork->paths[0] = routing->brk;
      fork->paths[1] = routing_backup->cont;
      routing->brk.fork = fork;
      routing->brk.reachable = fork_reachable(fork);
   }
   nir_push_loop(b);
}
282
283 /**
284 * Gets a forks condition as ssa def if the condition is inside a helper var,
285 * the variable will be read into an ssa def
286 */
287 static nir_ssa_def *
288 fork_condition(nir_builder *b, struct path_fork *fork)
289 {
290 nir_ssa_def *ret;
291 if (fork->is_var) {
292 ret = nir_load_var(b, fork->path_var);
293 }
294 else
295 ret = fork->path_ssa;
296 return ret;
297 }
298
/**
 * Restores the routing after leaving a loop based on the loop_backup stack.
 * Also handles multi level jump helper vars if existing and calls the nir
 * builder to pop the nir loop
 */
static void
loop_routing_end(struct routes *routing, nir_builder *b)
{
   struct routes *routing_backup = routing->loop_backup;
   assert(routing->cont.fork == routing->regular.fork);
   assert(routing->cont.reachable == routing->regular.reachable);
   nir_pop_loop(b, NULL);

   /* Unwind the helper forks in the reverse order loop_routing_start()
    * created them: first the path_continue fork (if any), then path_break.
    */
   if (routing->brk.fork && routing->brk.fork->paths[1].reachable ==
       routing_backup->cont.reachable) {
      assert(!(routing->brk.fork->is_var &&
               strcmp(routing->brk.fork->path_var->name, "path_continue")));
      /* if (path_continue) continue; -- continue of the outer loop */
      nir_push_if_src(b, nir_src_for_ssa(
                         fork_condition(b, routing->brk.fork)));
      nir_jump(b, nir_jump_continue);
      nir_pop_if(b, NULL);
      routing->brk = routing->brk.fork->paths[0];
   }
   if (routing->brk.fork && routing->brk.fork->paths[1].reachable ==
       routing_backup->brk.reachable) {
      assert(!(routing->brk.fork->is_var &&
               strcmp(routing->brk.fork->path_var->name, "path_break")));
      /* if (path_break) break; -- break of the outer loop */
      nir_push_if_src(b, nir_src_for_ssa(
                         fork_condition(b, routing->brk.fork)));
      nir_jump(b, nir_jump_break);
      nir_pop_if(b, NULL);
      routing->brk = routing->brk.fork->paths[0];
   }
   /* What's left of the break path must be the outer regular path */
   assert(routing->brk.fork == routing_backup->regular.fork);
   assert(routing->brk.reachable == routing_backup->regular.reachable);
   *routing = *routing_backup;
   ralloc_free(routing_backup);
}
336
/**
 * generates a list of all blocks dominated by the loop header, but the
 * control flow can't go back to the loop header from the block.
 * also generates a list of all blocks that can be reached from within the
 * loop
 *    |   __
 *    A´\   \
 *    |  \   \
 *    B   C--´
 *   /
 *  D
 * here B and C are directly dominated by A but only C can reach back to the
 * loop head A. B will be added to the outside set and to the reach set.
 * \param loop_heads set of loop heads. All blocks inside the loop will be
 *                   added to this set
 * \param outside all blocks directly outside the loop will be added
 * \param reach all blocks reachable from the loop will be added
 */
static void
inside_outside(nir_block *block, struct set *loop_heads, struct set *outside,
               struct set *reach, struct set *brk_reachable)
{
   assert(_mesa_set_search(loop_heads, block));

   /* Candidates: dominance children not already handled by the break path */
   struct set *remaining = _mesa_pointer_set_create(NULL);
   for (int i = 0; i < block->num_dom_children; i++) {
      if (!_mesa_set_search(brk_reachable, block->dom_children[i]))
         _mesa_set_add(remaining, block->dom_children[i]);
   }

   /* Fixed point: repeatedly move children that cannot jump back into the
    * loop (i.e. whose dominance frontier touches neither a remaining sibling
    * nor a loop head) over to the outside set.
    */
   bool progress = true;
   while (remaining->entries && progress) {
      progress = false;
      set_foreach(remaining, child_entry) {
         nir_block *dom_child = (nir_block *) child_entry->key;
         bool can_jump_back = false;
         set_foreach(dom_child->dom_frontier, entry) {
            if (entry->key == dom_child)
               continue;
            if (_mesa_set_search_pre_hashed(remaining, entry->hash,
                                            entry->key)) {
               can_jump_back = true;
               break;
            }
            if (_mesa_set_search_pre_hashed(loop_heads, entry->hash,
                                            entry->key)) {
               can_jump_back = true;
               break;
            }
         }
         if (!can_jump_back) {
            _mesa_set_add_pre_hashed(outside, child_entry->hash,
                                     child_entry->key);
            _mesa_set_remove(remaining, child_entry);
            progress = true;
         }
      }
   }

   /* Add everything remaining to loop_heads */
   set_foreach(remaining, entry)
      _mesa_set_add_pre_hashed(loop_heads, entry->hash, entry->key);

   /* Recurse for each remaining */
   set_foreach(remaining, entry) {
      inside_outside((nir_block *) entry->key, loop_heads, outside, reach,
                     brk_reachable);
   }

   _mesa_set_destroy(remaining, NULL);
   remaining = NULL;

   /* Successors outside the loop are reachable from it.  The check on
    * successors[i]->successors[0] excludes the end block, which is the only
    * block without successors.
    */
   for (int i = 0; i < 2; i++) {
      if (block->successors[i] && block->successors[i]->successors[0] &&
          !_mesa_set_search(loop_heads, block->successors[i])) {
         _mesa_set_add(reach, block->successors[i]);
      }
   }
}
415
/**
 * Gets a set of blocks organized into the same level by the organize_levels
 * function and creates enough forks to be able to route to them.
 * If the set only contains one block, the function has nothing to do.
 * The set should almost never contain more than two blocks, but if so,
 * then the function calls itself recursively
 */
static struct path_fork *
select_fork(struct set *reachable, nir_function_impl *impl, bool need_var)
{
   struct path_fork *fork = NULL;
   if (reachable->entries > 1) {
      fork = ralloc(reachable, struct path_fork);
      fork->is_var = need_var;
      if (need_var)
         fork->path_var = nir_local_variable_create(impl, glsl_bool_type(),
                                                    "path_select");
      /* Put (roughly) the first half of the blocks into paths[0]...  Note
       * that `entry` carries the iteration state over into the second loop.
       */
      fork->paths[0].reachable = _mesa_pointer_set_create(fork);
      struct set_entry *entry = NULL;
      while (fork->paths[0].reachable->entries < reachable->entries / 2 &&
             (entry = _mesa_set_next_entry(reachable, entry))) {
         _mesa_set_add_pre_hashed(fork->paths[0].reachable,
                                  entry->hash, entry->key);
      }
      fork->paths[0].fork = select_fork(fork->paths[0].reachable, impl,
                                        need_var);
      /* ...and the rest into paths[1], recursing until each side holds a
       * single block.
       */
      fork->paths[1].reachable = _mesa_pointer_set_create(fork);
      while ((entry = _mesa_set_next_entry(reachable, entry))) {
         _mesa_set_add_pre_hashed(fork->paths[1].reachable,
                                  entry->hash, entry->key);
      }
      fork->paths[1].fork = select_fork(fork->paths[1].reachable, impl,
                                        need_var);
   }
   return fork;
}
452
/**
 * gets called when the organize_levels functions fails to find blocks that
 * can't be reached by the other remaining blocks. This means, at least two
 * dominance sibling blocks can reach each other. So we have a multi entry
 * loop. This function tries to find the smallest possible set of blocks that
 * must be part of the multi entry loop.
 * example cf:  |    |
 *              A<---B
 *             / \__,^ \
 *             \       /
 *              \     /
 *                 C
 * The function choses a random block as candidate. for example C
 * The function checks which remaining blocks can reach C, in this case A.
 * So A becomes the new candidate and C is removed from the result set.
 * B can reach A.
 * So B becomes the new candidate and A is removed from the set.
 * A can reach B.
 * A was an old candidate. So it is added to the set containing B.
 * No other remaining blocks can reach A or B.
 * So only A and B must be part of the multi entry loop.
 */
static void
handle_irreducible(struct set *remaining, struct strct_lvl *curr_level,
                   struct set *brk_reachable) {
   /* Start from an arbitrary remaining block */
   nir_block *candidate = (nir_block *)
      _mesa_set_next_entry(remaining, NULL)->key;
   struct set *old_candidates = _mesa_pointer_set_create(curr_level);
   while (candidate) {
      _mesa_set_add(old_candidates, candidate);
      nir_block *to_be_added = candidate;
      candidate = NULL;

      /* Rebuild the level from the current candidate; blocks that were
       * candidates before get merged in (to_be_added), fresh reachers
       * restart the outer loop as the new candidate.
       */
      _mesa_set_clear(curr_level->blocks, NULL);
      while (to_be_added) {
         _mesa_set_add(curr_level->blocks, to_be_added);
         to_be_added = NULL;

         set_foreach(remaining, entry) {
            nir_block *remaining_block = (nir_block *) entry->key;
            if (!_mesa_set_search(curr_level->blocks, remaining_block)
                && _mesa_set_intersects(remaining_block->dom_frontier,
                                        curr_level->blocks)) {
               if (_mesa_set_search(old_candidates, remaining_block))
                  to_be_added = remaining_block;
               else
                  candidate = remaining_block;
               break;
            }
         }
      }
   }
   _mesa_set_destroy(old_candidates, NULL);
   old_candidates = NULL;

   /* All blocks of the multi-entry loop act as loop heads; pull everything
    * the loop dominates out of `remaining` and collect what it can reach.
    */
   struct set *loop_heads = _mesa_set_clone(curr_level->blocks, curr_level);
   curr_level->reach = _mesa_pointer_set_create(curr_level);
   set_foreach(curr_level->blocks, entry) {
      _mesa_set_remove_key(remaining, entry->key);
      inside_outside((nir_block *) entry->key, loop_heads, remaining,
                     curr_level->reach, brk_reachable);
   }
   _mesa_set_destroy(loop_heads, NULL);
}
517
/**
 * organize a set of blocks into a list of levels. Where every level contains
 * one or more blocks. So that every block is before all blocks it can reach.
 * Also creates all path variables needed, for the control flow between the
 * block.
 * For example if the control flow looks like this:
 *       A
 *     /   |
 *    B    C
 *    |   / \
 *    E  |   |
 *     \ |  /
 *       F
 * B, C, E and F are dominance children of A
 * The level list should look like this:
 *          blocks irreducible conditional
 * level 0  B, C   false       false
 * level 1  E      false       true
 * level 2  F      false       false
 * The final structure should look like this:
 * A
 * if (path_select) {
 *    B
 * } else {
 *    C
 * }
 * if (path_conditional) {
 *    E
 * }
 * F
 *
 * \param levels uninitialized list
 * \param is_dominated if true, no helper variables will be created for the
 *                     zeroth level
 */
static void
organize_levels(struct exec_list *levels, struct set *remaining,
                struct set *reach, struct routes *routing,
                nir_function_impl *impl, bool is_domminated)
{
   void *mem_ctx = ralloc_parent(remaining);

   /* blocks that can be reached by the remaining blocks */
   struct set *remaining_frontier = _mesa_pointer_set_create(mem_ctx);

   /* targets of active skip path */
   struct set *skip_targets = _mesa_pointer_set_create(mem_ctx);

   exec_list_make_empty(levels);
   while (remaining->entries) {
      /* Compute the union of the dominance frontiers of all remaining
       * blocks (a block's own frontier entry for itself is ignored).
       */
      _mesa_set_clear(remaining_frontier, NULL);
      set_foreach(remaining, entry) {
         nir_block *remain_block = (nir_block *) entry->key;
         set_foreach(remain_block->dom_frontier, frontier_entry) {
            nir_block *frontier = (nir_block *) frontier_entry->key;
            if (frontier != remain_block) {
               _mesa_set_add(remaining_frontier, frontier);
            }
         }
      }

      /* The next level is every remaining block that no other remaining
       * block can reach.
       */
      struct strct_lvl *curr_level = ralloc(mem_ctx, struct strct_lvl);
      curr_level->blocks = _mesa_pointer_set_create(curr_level);
      set_foreach(remaining, entry) {
         nir_block *candidate = (nir_block *) entry->key;
         if (!_mesa_set_search(remaining_frontier, candidate)) {
            _mesa_set_add(curr_level->blocks, candidate);
            _mesa_set_remove_key(remaining, candidate);
         }
      }

      /* No such block exists: the siblings reach each other, i.e. a
       * multi-entry loop.  handle_irreducible extracts it into this level.
       */
      curr_level->irreducible = !curr_level->blocks->entries;
      if (curr_level->irreducible)
         handle_irreducible(remaining, curr_level, routing->brk.reachable);
      assert(curr_level->blocks->entries);
      curr_level->skip_start = 0;

      struct strct_lvl *prev_level = NULL;
      struct exec_node *tail;
      if ((tail = exec_list_get_tail(levels)))
         prev_level = exec_node_data(struct strct_lvl, tail, node);

      /* Close skip regions whose target blocks are placed at this level */
      if (skip_targets->entries) {
         set_foreach(skip_targets, entry) {
            if (_mesa_set_search_pre_hashed(curr_level->blocks,
                                            entry->hash, entry->key)) {
               _mesa_set_remove(skip_targets, entry);
               prev_level->skip_end = 1;
               curr_level->skip_start = !!skip_targets->entries;
            }
         }
      }

      /* Which blocks can the previous level reach?  Used to detect edges
       * that jump over this level and therefore need a skip region.
       */
      struct set *prev_frontier = NULL;
      if (!prev_level) {
         prev_frontier = reach;
      } else if (prev_level->irreducible) {
         prev_frontier = prev_level->reach;
      } else {
         set_foreach(curr_level->blocks, blocks_entry) {
            nir_block *level_block = (nir_block *) blocks_entry->key;
            if (!prev_frontier) {
               /* Single-block levels borrow the block's frontier directly;
                * it is only read below, never modified.
                */
               prev_frontier = curr_level->blocks->entries == 1 ?
                  level_block->dom_frontier :
                  _mesa_set_clone(level_block->dom_frontier, prev_level);
            } else {
               set_foreach(level_block->dom_frontier, entry)
                  _mesa_set_add_pre_hashed(prev_frontier, entry->hash,
                                           entry->key);
            }
         }
      }

      /* Open a skip region for every frontier block that is placed later
       * (still remaining) or lies on the regular path out of this subtree.
       */
      bool is_in_skip = skip_targets->entries != 0;
      set_foreach(prev_frontier, entry) {
         if (_mesa_set_search(remaining, entry->key) ||
             (_mesa_set_search(routing->regular.reachable, entry->key) &&
              !_mesa_set_search(routing->brk.reachable, entry->key) &&
              !_mesa_set_search(routing->cont.reachable, entry->key))) {
            _mesa_set_add_pre_hashed(skip_targets, entry->hash, entry->key);
            if (is_in_skip)
               prev_level->skip_end = 1;
            curr_level->skip_start = 1;
         }
      }

      curr_level->skip_end = 0;
      exec_list_push_tail(levels, &curr_level->node);
   }

   /* Skips to blocks on the regular path end after the last level */
   if (skip_targets->entries)
      exec_node_data(struct strct_lvl, exec_list_get_tail(levels), node)
         ->skip_end = 1;
   _mesa_set_destroy(remaining_frontier, NULL);
   remaining_frontier = NULL;
   _mesa_set_destroy(skip_targets, NULL);
   skip_targets = NULL;

   /* Iterate through all levels in reverse and create all the paths and
    * forks.  Each level's out_path is the routing that applies after the
    * level; the regular path is rebuilt to target the level's blocks.
    */
   struct path path_after_skip;

   foreach_list_typed_reverse(struct strct_lvl, level, node, levels) {
      bool need_var = !(is_domminated && exec_node_get_prev(&level->node)
                        == &levels->head_sentinel);
      level->out_path = routing->regular;
      if (level->skip_end) {
         path_after_skip = routing->regular;
      }
      routing->regular.reachable = level->blocks;
      routing->regular.fork = select_fork(routing->regular.reachable, impl,
                                          need_var);
      if (level->skip_start) {
         /* "path_conditional" decides between entering this level (paths[1])
          * and skipping ahead to where the skip region ends (paths[0]).
          */
         struct path_fork *fork = ralloc(level, struct path_fork);
         fork->is_var = need_var;
         if (need_var)
            fork->path_var = nir_local_variable_create(impl, glsl_bool_type(),
                                                       "path_conditional");
         fork->paths[0] = path_after_skip;
         fork->paths[1] = routing->regular;
         routing->regular.fork = fork;
         routing->regular.reachable = fork_reachable(fork);
      }
   }
}
681
682 static void
683 nir_structurize(struct routes *routing, nir_builder *b, nir_block *block);
684
685 /**
686 * Places all the if else statements to select between all blocks in a select
687 * path
688 */
689 static void
690 select_blocks(struct routes *routing, nir_builder *b, struct path in_path) {
691 if (!in_path.fork) {
692 nir_structurize(routing, b, (nir_block *)
693 _mesa_set_next_entry(in_path.reachable, NULL)->key);
694 } else {
695 assert(!(in_path.fork->is_var &&
696 strcmp(in_path.fork->path_var->name, "path_select")));
697 nir_push_if_src(b, nir_src_for_ssa(fork_condition(b, in_path.fork)));
698 select_blocks(routing, b, in_path.fork->paths[1]);
699 nir_push_else(b, NULL);
700 select_blocks(routing, b, in_path.fork->paths[0]);
701 nir_pop_if(b, NULL);
702 }
703 }
704
/**
 * Builds the structurized nir code by the final level list.
 */
static void
plant_levels(struct exec_list *levels, struct routes *routing,
             nir_builder *b)
{
   /* Place all dominated blocks and build the path forks */
   struct exec_node *list_node;
   while ((list_node = exec_list_pop_head(levels))) {
      struct strct_lvl *curr_level =
         exec_node_data(struct strct_lvl, list_node, node);
      if (curr_level->skip_start) {
         /* Open "if (path_conditional)"; the matching nir_pop_if is emitted
          * by the level that carries skip_end.
          */
         assert(routing->regular.fork);
         assert(!(routing->regular.fork->is_var && strcmp(
                     routing->regular.fork->path_var->name,
                     "path_conditional")));
         nir_push_if_src(b, nir_src_for_ssa(
                            fork_condition(b, routing->regular.fork)));
         routing->regular = routing->regular.fork->paths[1];
      }
      /* After this level, route regularly to the next one */
      struct path in_path = routing->regular;
      routing->regular = curr_level->out_path;
      /* Irreducible levels (multi-entry loops) are wrapped in a nir loop */
      if (curr_level->irreducible)
         loop_routing_start(routing, b, in_path, curr_level->reach);
      select_blocks(routing, b, in_path);
      if (curr_level->irreducible)
         loop_routing_end(routing, b);
      if (curr_level->skip_end)
         nir_pop_if(b, NULL);
      ralloc_free(curr_level);
   }
}
737
/**
 * builds the control flow of a block and all its dominance children
 * \param routing the routing after the block and all dominated blocks
 */
static void
nir_structurize(struct routes *routing, nir_builder *b, nir_block *block)
{
   /* Mem context for this function; freed at the end */
   void *mem_ctx = ralloc_context(routing);

   /* Dominance children not already covered by the break path */
   struct set *remaining = _mesa_pointer_set_create(mem_ctx);
   for (int i = 0; i < block->num_dom_children; i++) {
      if (!_mesa_set_search(routing->brk.reachable, block->dom_children[i]))
         _mesa_set_add(remaining, block->dom_children[i]);
   }

   /* If the block can reach back to itself, it is a loop head */
   int is_looped = _mesa_set_search(block->dom_frontier, block) != NULL;
   struct exec_list outside_levels;
   if (is_looped) {
      /* Split the dominance children into inside/outside the loop; the
       * outside blocks get their own level list planted after the loop.
       */
      struct set *loop_heads = _mesa_pointer_set_create(mem_ctx);
      _mesa_set_add(loop_heads, block);

      struct set *outside = _mesa_pointer_set_create(mem_ctx);
      struct set *reach = _mesa_pointer_set_create(mem_ctx);
      inside_outside(block, loop_heads, outside, reach,
                     routing->brk.reachable);

      _mesa_set_destroy(loop_heads, NULL);
      loop_heads = NULL;

      set_foreach(outside, entry)
         _mesa_set_remove_key(remaining, entry->key);

      organize_levels(&outside_levels, outside, reach, routing, b->impl,
                      false);

      /* Continuing the loop means routing back to this block */
      struct path loop_path = {
         .reachable = _mesa_pointer_set_create(mem_ctx),
         .fork = NULL,
      };
      _mesa_set_add(loop_path.reachable, block);

      loop_routing_start(routing, b, loop_path, reach);
      _mesa_set_destroy(reach, NULL);
      reach = NULL;
   }

   struct set *reach = _mesa_pointer_set_create(mem_ctx);
   if (block->successors[0]->successors[0]) /* it is not the end_block */
      _mesa_set_add(reach, block->successors[0]);
   if (block->successors[1] && block->successors[1]->successors[0])
      _mesa_set_add(reach, block->successors[1]);

   struct exec_list levels;
   organize_levels(&levels, remaining, reach, routing, b->impl, true);
   _mesa_set_destroy(remaining, NULL);
   remaining = NULL;
   _mesa_set_destroy(reach, NULL);
   reach = NULL;

   /* Push all instructions of this block, without the jump instr */
   nir_jump_instr *jump_instr = NULL;
   nir_foreach_instr_safe(instr, block) {
      if (instr->type == nir_instr_type_jump) {
         jump_instr = nir_instr_as_jump(instr);
         break;
      }
      nir_instr_remove(instr);
      nir_builder_instr_insert(b, instr);
   }

   /* Find path to the successor blocks.
    * NOTE(review): jump_instr is assumed non-NULL here, i.e. every block fed
    * into this pass (other than the end block, which is never structurized)
    * ends in a jump instruction -- confirm against the unstructured-NIR
    * invariants.
    */
   if (jump_instr->type == nir_jump_goto_if) {
      route_to_cond(b, routing, jump_instr->condition,
                    jump_instr->target, jump_instr->else_target);
   } else {
      route_to(b, routing, block->successors[0]);
   }

   plant_levels(&levels, routing, b);
   if (is_looped) {
      loop_routing_end(routing, b);
      plant_levels(&outside_levels, routing, b);
   }

   ralloc_free(mem_ctx);
}
826
/**
 * Lowers one unstructured function impl to structured control flow.
 *
 * Extracts the whole body, then rebuilds it with nir_structurize starting at
 * the first block.  Returns false (and preserves all metadata) if the impl
 * is already structured.
 */
static bool
nir_lower_goto_ifs_impl(nir_function_impl *impl)
{
   if (impl->structured) {
      nir_metadata_preserve(impl, nir_metadata_all);
      return false;
   }

   nir_metadata_require(impl, nir_metadata_dominance);

   nir_cf_list cf_list;
   nir_cf_extract(&cf_list, nir_before_cf_list(&impl->body),
                  nir_after_cf_list(&impl->body));

   /* From this point on, it's structured */
   impl->structured = true;

   nir_builder b;
   nir_builder_init(&b, impl);
   b.cursor = nir_before_block(nir_start_block(impl));

   /* Initial routing: the regular path leads to the end block; break and
    * continue paths are empty until a loop is entered.
    */
   struct routes *routing = ralloc(b.shader, struct routes);
   routing->regular.reachable = _mesa_pointer_set_create(routing);
   _mesa_set_add(routing->regular.reachable, impl->end_block);
   struct set *empty = _mesa_pointer_set_create(routing);
   routing->regular.fork = NULL;
   routing->brk.reachable = empty;
   routing->brk.fork = NULL;
   routing->cont.reachable = empty;
   routing->cont.fork = NULL;
   nir_structurize(routing, &b,
                   nir_cf_node_as_block(exec_node_data
                                        (nir_cf_node,
                                         exec_list_get_head(&cf_list.list),
                                         node)));
   /* Structurization must consume exactly the routing it was given */
   assert(routing->regular.fork == NULL);
   assert(routing->brk.fork == NULL);
   assert(routing->cont.fork == NULL);
   assert(routing->brk.reachable == empty);
   assert(routing->cont.reachable == empty);
   _mesa_set_destroy(routing->regular.reachable, NULL);
   _mesa_set_destroy(empty, NULL);
   ralloc_free(routing);
   nir_cf_delete(&cf_list);

   nir_metadata_preserve(impl, nir_metadata_none);

   return true;
}
876
877 bool
878 nir_lower_goto_ifs(nir_shader *shader)
879 {
880 bool progress = true;
881
882 nir_foreach_function(function, shader) {
883 if (function->impl && nir_lower_goto_ifs_impl(function->impl))
884 progress = true;
885 }
886
887 return progress;
888 }