b944b4c3164dd5f264ae6a03afaf6b3e42b3732f
[mesa.git] / src / compiler / nir / nir_lower_goto_ifs.c
1 /*
2 * Copyright © 2020 Julian Winkler
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "nir.h"
25 #include "nir_builder.h"
26
/** One route the structurized control flow can take to a set of blocks */
struct path {
   /** Set of blocks which this path represents
    *
    * It's "reachable" not in the sense that these are all the nodes reachable
    * through this path but in the sense that, when you see one of these
    * blocks, you know you've reached this path.
    */
   struct set *reachable;

   /** Fork in the path, if reachable->entries > 1 */
   struct path_fork *fork;
};
39
/** Binary decision point inside a path
 *
 * Selects between paths[0] and paths[1].  The boolean steering the choice is
 * kept either in a helper variable (path_var) or directly as an SSA value
 * (path_ssa), depending on is_var.  A value of true selects paths[1]
 * (see select_blocks(), which emits paths[1] in the then branch).
 */
struct path_fork {
   /** True if the condition lives in path_var, false if it is path_ssa */
   bool is_var;
   union {
      nir_variable *path_var;
      nir_ssa_def *path_ssa;
   };
   struct path paths[2];
};
48
/** The three ways control flow can currently continue */
struct routes {
   /** Path taken by simply falling through */
   struct path regular;
   /** Path taken after emitting a nir_jump_break */
   struct path brk;
   /** Path taken after emitting a nir_jump_continue */
   struct path cont;
   /** Routing that was in effect outside the current loop; saved by
    *  loop_routing_start() and restored by loop_routing_end() */
   struct routes *loop_backup;
};
55
/** One level in the ordered list built by organize_levels() */
struct strct_lvl {
   struct list_head link;

   /** Set of blocks at the current level */
   struct set *blocks;

   /** Path for the next level */
   struct path out_path;

   /** Reach set from inside_outside if irreducible */
   struct set *reach;

   /** True if a skip region starts with this level */
   bool skip_start;

   /** True if a skip region ends with this level */
   bool skip_end;

   /** True if this level is irreducible */
   bool irreducible;
};
77
78 /**
79 * Sets all path variables to reach the target block via a fork
80 */
81 static void
82 set_path_vars(nir_builder *b, struct path_fork *fork, nir_block *target)
83 {
84 while (fork) {
85 for (int i = 0; i < 2; i++) {
86 if (_mesa_set_search(fork->paths[i].reachable, target)) {
87 if (fork->is_var) {
88 nir_store_var(b, fork->path_var, nir_imm_bool(b, i), 1);
89 } else {
90 assert(fork->path_ssa == NULL);
91 fork->path_ssa = nir_imm_bool(b, i);
92 }
93 fork = fork->paths[i].fork;
94 break;
95 }
96 }
97 }
98 }
99
/**
 * Sets all path variables to reach the both target blocks via a fork.
 * If the blocks are in different fork paths, the condition will be used.
 * As the fork is already created, the then and else blocks may be swapped,
 * in this case the condition is inverted
 */
static void
set_path_vars_cond(nir_builder *b, struct path_fork *fork, nir_src condition,
                   nir_block *then_block, nir_block *else_block)
{
   int i;
   while (fork) {
      for (i = 0; i < 2; i++) {
         if (_mesa_set_search(fork->paths[i].reachable, then_block)) {
            if (_mesa_set_search(fork->paths[i].reachable, else_block)) {
               /* Both targets are on the same side of this fork: select
                * that side unconditionally and descend to the next fork.
                */
               if (fork->is_var)
                  nir_store_var(b, fork->path_var, nir_imm_bool(b, i), 1);
               else
                  fork->path_ssa = nir_imm_bool(b, i);
               fork = fork->paths[i].fork;
               break;
            }
            else {
               /* This fork is where the routes to then_block and else_block
                * diverge, so steer it with the branch condition itself.
                */
               assert(condition.is_ssa);
               nir_ssa_def *ssa_def = condition.ssa;
               assert(ssa_def->bit_size == 1);
               assert(ssa_def->num_components == 1);
               /* A stored value of true selects paths[1]; if then_block is
                * on paths[0] (i == 0) the condition must be inverted.
                */
               if (!i)
                  ssa_def = nir_inot(b, ssa_def);
               if (fork->is_var)
                  nir_store_var(b, fork->path_var, ssa_def, 1);
               else
                  fork->path_ssa = ssa_def;
               /* From here on the targets are in disjoint subtrees and can
                * be routed independently.
                */
               set_path_vars(b, fork->paths[i].fork, then_block);
               set_path_vars(b, fork->paths[!i].fork, else_block);
               return;
            }
         }
      }
      /* then_block must be reachable through one of the two paths */
      assert(i < 2);
   }
}
142
143 /**
144 * Sets all path variables and places the right jump instruction to reach the
145 * target block
146 */
147 static void
148 route_to(nir_builder *b, struct routes *routing, nir_block *target)
149 {
150 if (_mesa_set_search(routing->regular.reachable, target)) {
151 set_path_vars(b, routing->regular.fork, target);
152 }
153 else if (_mesa_set_search(routing->brk.reachable, target)) {
154 set_path_vars(b, routing->brk.fork, target);
155 nir_jump(b, nir_jump_break);
156 }
157 else if (_mesa_set_search(routing->cont.reachable, target)) {
158 set_path_vars(b, routing->cont.fork, target);
159 nir_jump(b, nir_jump_continue);
160 }
161 else {
162 assert(!target->successors[0]); /* target is endblock */
163 nir_jump(b, nir_jump_return);
164 }
165 }
166
/**
 * Sets path vars and places the right jump instr to reach one of the two
 * target blocks based on the condition. If the targets need different jump
 * instructions, they will be placed into an if else statement.
 * This can happen if one target is the loop head
 * A __
 * |  \
 * B   |
 * |\__/
 * C
 */
static void
route_to_cond(nir_builder *b, struct routes *routing, nir_src condition,
              nir_block *then_block, nir_block *else_block)
{
   /* When both targets lie on the same route, one shared jump (or none for
    * the regular route) suffices and the condition only steers path forks.
    * Note the else-if chain: if then_block matches a route but else_block
    * does not, every case below fails and control falls through to the
    * generic if/else at the bottom.
    */
   if (_mesa_set_search(routing->regular.reachable, then_block)) {
      if (_mesa_set_search(routing->regular.reachable, else_block)) {
         set_path_vars_cond(b, routing->regular.fork, condition,
                            then_block, else_block);
         return;
      }
   } else if (_mesa_set_search(routing->brk.reachable, then_block)) {
      if (_mesa_set_search(routing->brk.reachable, else_block)) {
         set_path_vars_cond(b, routing->brk.fork, condition,
                            then_block, else_block);
         nir_jump(b, nir_jump_break);
         return;
      }
   } else if (_mesa_set_search(routing->cont.reachable, then_block)) {
      if (_mesa_set_search(routing->cont.reachable, else_block)) {
         set_path_vars_cond(b, routing->cont.fork, condition,
                            then_block, else_block);
         nir_jump(b, nir_jump_continue);
         return;
      }
   }

   /* then and else blocks are in different routes */
   nir_push_if_src(b, condition);
   route_to(b, routing, then_block);
   nir_push_else(b, NULL);
   route_to(b, routing, else_block);
   nir_pop_if(b, NULL);
}
211
212 /**
213 * Merges the reachable sets of both fork subpaths into the forks entire
214 * reachable set
215 */
216 static struct set *
217 fork_reachable(struct path_fork *fork)
218 {
219 struct set *reachable = _mesa_set_clone(fork->paths[0].reachable, fork);
220 set_foreach(fork->paths[1].reachable, entry)
221 _mesa_set_add_pre_hashed(reachable, entry->hash, entry->key);
222 return reachable;
223 }
224
/**
 * Modifies the routing to be the routing inside a loop. The old regular path
 * becomes the new break path. The loop in path becomes the new regular and
 * continue path.
 * The lost routing information is stacked into the loop_backup stack.
 * Also creates helper vars for multilevel loop jumping if needed.
 * Also calls the nir builder to build the loop
 */
static void
loop_routing_start(struct routes *routing, nir_builder *b,
                   struct path loop_path, struct set *reach,
                   void *mem_ctx)
{
   struct routes *routing_backup = ralloc(mem_ctx, struct routes);
   *routing_backup = *routing;
   bool break_needed = false;
   bool continue_needed = false;

   /* Find blocks reachable from inside the loop that are only reachable
    * through the outer break/continue routes, i.e. need a multilevel jump.
    */
   set_foreach(reach, entry) {
      if (_mesa_set_search(loop_path.reachable, entry->key))
         continue;
      if (_mesa_set_search(routing->regular.reachable, entry->key))
         continue;
      if (_mesa_set_search(routing->brk.reachable, entry->key)) {
         break_needed = true;
         continue;
      }
      assert(_mesa_set_search(routing->cont.reachable, entry->key));
      continue_needed = true;
   }

   /* Inside the loop: break leads to what used to be the regular path;
    * falling through and continuing both lead back to the loop head.
    */
   routing->brk = routing_backup->regular;
   routing->cont = loop_path;
   routing->regular = loop_path;
   routing->loop_backup = routing_backup;

   if (break_needed) {
      /* Fork the new break path between "just after this loop" (paths[0])
       * and the outer break route (paths[1]), steered by path_break;
       * loop_routing_end() emits the outer break after this loop.
       */
      struct path_fork *fork = ralloc(mem_ctx, struct path_fork);
      fork->is_var = true;
      fork->path_var = nir_local_variable_create(b->impl, glsl_bool_type(),
                                                 "path_break");
      fork->paths[0] = routing->brk;
      fork->paths[1] = routing_backup->brk;
      routing->brk.fork = fork;
      routing->brk.reachable = fork_reachable(fork);
   }
   if (continue_needed) {
      /* An outer-loop continue is reached by breaking out of this loop with
       * path_continue set; loop_routing_end() emits the actual continue.
       * paths[0] is the break path built so far (possibly already forked
       * for the multilevel break above), so the forks form a chain.
       */
      struct path_fork *fork = ralloc(mem_ctx, struct path_fork);
      fork->is_var = true;
      fork->path_var = nir_local_variable_create(b->impl, glsl_bool_type(),
                                                 "path_continue");
      fork->paths[0] = routing->brk;
      fork->paths[1] = routing_backup->cont;
      routing->brk.fork = fork;
      routing->brk.reachable = fork_reachable(fork);
   }
   nir_push_loop(b);
}
283
284 /**
285 * Gets a forks condition as ssa def if the condition is inside a helper var,
286 * the variable will be read into an ssa def
287 */
288 static nir_ssa_def *
289 fork_condition(nir_builder *b, struct path_fork *fork)
290 {
291 nir_ssa_def *ret;
292 if (fork->is_var) {
293 ret = nir_load_var(b, fork->path_var);
294 }
295 else
296 ret = fork->path_ssa;
297 return ret;
298 }
299
/**
 * Restores the routing after leaving a loop based on the loop_backup stack.
 * Also handles multi level jump helper vars if existing and calls the nir
 * builder to pop the nir loop
 */
static void
loop_routing_end(struct routes *routing, nir_builder *b)
{
   struct routes *routing_backup = routing->loop_backup;
   assert(routing->cont.fork == routing->regular.fork);
   assert(routing->cont.reachable == routing->regular.reachable);
   nir_pop_loop(b, NULL);
   /* Peel off the helper forks created by loop_routing_start() in reverse
    * order of creation: the continue fork was chained on top of the break
    * fork.  Each becomes an if after the loop that emits the jump which
    * was deferred by breaking out of the loop.
    */
   if (routing->brk.fork && routing->brk.fork->paths[1].reachable ==
       routing_backup->cont.reachable) {
      assert(!(routing->brk.fork->is_var &&
               strcmp(routing->brk.fork->path_var->name, "path_continue")));
      nir_push_if_src(b, nir_src_for_ssa(
                         fork_condition(b, routing->brk.fork)));
      nir_jump(b, nir_jump_continue);
      nir_pop_if(b, NULL);
      routing->brk = routing->brk.fork->paths[0];
   }
   if (routing->brk.fork && routing->brk.fork->paths[1].reachable ==
       routing_backup->brk.reachable) {
      assert(!(routing->brk.fork->is_var &&
               strcmp(routing->brk.fork->path_var->name, "path_break")));
      nir_push_if_src(b, nir_src_for_ssa(
                         fork_condition(b, routing->brk.fork)));
      nir_jump(b, nir_jump_break);
      nir_pop_if(b, NULL);
      routing->brk = routing->brk.fork->paths[0];
   }
   /* With the helper forks gone, what remains must be exactly the regular
    * path that was turned into the break path on loop entry.
    */
   assert(routing->brk.fork == routing_backup->regular.fork);
   assert(routing->brk.reachable == routing_backup->regular.reachable);
   *routing = *routing_backup;
   ralloc_free(routing_backup);
}
337
/**
 * generates a list of all blocks dominated by the loop header, but the
 * control flow can't go back to the loop header from the block.
 * also generates a list of all blocks that can be reached from within the
 * loop
 *    |   __
 *    A´   \
 *    | \   \
 *    B  C-´
 *   /
 *  D
 * here B and C are directly dominated by A but only C can reach back to the
 * loop head A. B will be added to the outside set and to the reach set.
 * \param  loop_heads  set of loop heads. All blocks inside the loop will be
 *                     added to this set
 * \param  outside  all blocks directly outside the loop will be added
 * \param  reach  all blocks reachable from the loop will be added
 */
static void
inside_outside(nir_block *block, struct set *loop_heads, struct set *outside,
               struct set *reach, struct set *brk_reachable, void *mem_ctx)
{
   assert(_mesa_set_search(loop_heads, block));
   /* Candidate set: dominance children not already handled by a break */
   struct set *remaining = _mesa_pointer_set_create(mem_ctx);
   for (int i = 0; i < block->num_dom_children; i++) {
      if (!_mesa_set_search(brk_reachable, block->dom_children[i]))
         _mesa_set_add(remaining, block->dom_children[i]);
   }

   /* Fixed point: repeatedly move every child whose dominance frontier
    * cannot jump back into the loop (neither to a loop head nor to another
    * remaining child) over to the outside set.
    */
   bool progress = true;
   while (remaining->entries && progress) {
      progress = false;
      set_foreach(remaining, child_entry) {
         nir_block *dom_child = (nir_block *) child_entry->key;
         bool can_jump_back = false;
         set_foreach(dom_child->dom_frontier, entry) {
            if (entry->key == dom_child)
               continue;
            if (_mesa_set_search_pre_hashed(remaining, entry->hash,
                                            entry->key)) {
               can_jump_back = true;
               break;
            }
            if (_mesa_set_search_pre_hashed(loop_heads, entry->hash,
                                            entry->key)) {
               can_jump_back = true;
               break;
            }
         }
         if (!can_jump_back) {
            _mesa_set_add_pre_hashed(outside, child_entry->hash,
                                     child_entry->key);
            _mesa_set_remove(remaining, child_entry);
            progress = true;
         }
      }
   }

   /* Add everything remaining to loop_heads */
   set_foreach(remaining, entry)
      _mesa_set_add_pre_hashed(loop_heads, entry->hash, entry->key);

   /* Recurse for each remaining */
   set_foreach(remaining, entry) {
      inside_outside((nir_block *) entry->key, loop_heads, outside, reach,
                     brk_reachable, mem_ctx);
   }

   /* Successors of this block that lie outside the loop, and are not the
    * end block, are reachable from inside the loop.
    */
   for (int i = 0; i < 2; i++) {
      if (block->successors[i] && block->successors[i]->successors[0] &&
          !_mesa_set_search(loop_heads, block->successors[i])) {
         _mesa_set_add(reach, block->successors[i]);
      }
   }
}
413
414 /**
415 * Gets a set of blocks organized into the same level by the organize_levels
416 * function and creates enough forks to be able to route to them.
417 * If the set only contains one block, the function has nothing to do.
418 * The set should almost never contain more than two blocks, but if so,
419 * then the function calls itself recursively
420 */
421 static struct path_fork *
422 select_fork(struct set *reachable, nir_function_impl *impl, bool need_var,
423 void *mem_ctx)
424 {
425 struct path_fork *fork = NULL;
426 if (reachable->entries > 1) {
427 fork = ralloc(mem_ctx, struct path_fork);
428 fork->is_var = need_var;
429 if (need_var)
430 fork->path_var = nir_local_variable_create(impl, glsl_bool_type(),
431 "path_select");
432 fork->paths[0].reachable = _mesa_pointer_set_create(fork);
433 struct set_entry *entry = NULL;
434 while (fork->paths[0].reachable->entries < reachable->entries / 2 &&
435 (entry = _mesa_set_next_entry(reachable, entry))) {
436 _mesa_set_add_pre_hashed(fork->paths[0].reachable,
437 entry->hash, entry->key);
438 }
439 fork->paths[0].fork = select_fork(fork->paths[0].reachable, impl,
440 need_var, mem_ctx);
441 fork->paths[1].reachable = _mesa_pointer_set_create(fork);
442 while ((entry = _mesa_set_next_entry(reachable, entry))) {
443 _mesa_set_add_pre_hashed(fork->paths[1].reachable,
444 entry->hash, entry->key);
445 }
446 fork->paths[1].fork = select_fork(fork->paths[1].reachable, impl,
447 need_var, mem_ctx);
448 }
449 return fork;
450 }
451
/**
 * gets called when the organize_levels functions fails to find blocks that
 * can't be reached by the other remaining blocks. This means, at least two
 * dominance sibling blocks can reach each other. So we have a multi entry
 * loop. This function tries to find the smallest possible set of blocks that
 * must be part of the multi entry loop.
 * example cf:  |    |
 *              A<---B
 *             / \__,^ \
 *             \       /
 *               \   /
 *                 C
 * The function chooses a random block as candidate. for example C
 * The function checks which remaining blocks can reach C, in this case A.
 * So A becomes the new candidate and C is removed from the result set.
 * B can reach A.
 * So B becomes the new candidate and A is removed from the set.
 * A can reach B.
 * A was an old candidate. So it is added to the set containing B.
 * No other remaining blocks can reach A or B.
 * So only A and B must be part of the multi entry loop.
 */
static void
handle_irreducible(struct set *remaining, struct strct_lvl *curr_level,
                   struct set *brk_reachable, void *mem_ctx)
{
   /* Start from an arbitrary remaining block */
   nir_block *candidate = (nir_block *)
      _mesa_set_next_entry(remaining, NULL)->key;
   struct set *old_candidates = _mesa_pointer_set_create(mem_ctx);
   while (candidate) {
      _mesa_set_add(old_candidates, candidate);
      nir_block *to_be_added = candidate;
      candidate = NULL;

      /* Rebuild the level around the current candidate: blocks whose
       * dominance frontier touches the level join it if they were already
       * candidates; a fresh block restarts the search as the new candidate.
       */
      _mesa_set_clear(curr_level->blocks, NULL);
      while (to_be_added) {
         _mesa_set_add(curr_level->blocks, to_be_added);
         to_be_added = NULL;

         set_foreach(remaining, entry) {
            nir_block *remaining_block = (nir_block *) entry->key;
            if (!_mesa_set_search(curr_level->blocks, remaining_block)
                && _mesa_set_intersects(remaining_block->dom_frontier,
                                        curr_level->blocks)) {
               if (_mesa_set_search(old_candidates, remaining_block))
                  to_be_added = remaining_block;
               else
                  candidate = remaining_block;
               break;
            }
         }
      }
   }
   _mesa_set_destroy(old_candidates, NULL);
   old_candidates = NULL;

   /* Every entry block of the multi entry loop becomes a loop head; sort
    * the blocks they dominate into inside (loop_heads) and outside
    * (back into remaining), collecting the loop's reach set on the way.
    */
   struct set *loop_heads = _mesa_set_clone(curr_level->blocks, curr_level);
   curr_level->reach = _mesa_pointer_set_create(curr_level);
   set_foreach(curr_level->blocks, entry) {
      _mesa_set_remove_key(remaining, entry->key);
      inside_outside((nir_block *) entry->key, loop_heads, remaining,
                     curr_level->reach, brk_reachable, mem_ctx);
   }
   _mesa_set_destroy(loop_heads, NULL);
}
517
518 /**
519 * organize a set of blocks into a list of levels. Where every level contains
520 * one or more blocks. So that every block is before all blocks it can reach.
521 * Also creates all path variables needed, for the control flow between the
522 * block.
523 * For example if the control flow looks like this:
524 * A
525 * / |
526 * B C
527 * | / \
528 * E |
529 * \ /
530 * F
531 * B, C, E and F are dominance children of A
532 * The level list should look like this:
533 * blocks irreducible conditional
534 * level 0 B, C false false
535 * level 1 E false true
536 * level 2 F false false
537 * The final structure should look like this:
538 * A
539 * if (path_select) {
540 * B
541 * } else {
542 * C
543 * }
544 * if (path_conditional) {
545 * E
546 * }
547 * F
548 *
549 * \param levels uninitialized list
550 * \param is_dominated if true, no helper variables will be created for the
551 * zeroth level
552 */
553 static void
554 organize_levels(struct list_head *levels, struct set *remaining,
555 struct set *reach, struct routes *routing,
556 nir_function_impl *impl, bool is_domminated, void *mem_ctx)
557 {
558 /* blocks that can be reached by the remaining blocks */
559 struct set *remaining_frontier = _mesa_pointer_set_create(mem_ctx);
560
561 /* targets of active skip path */
562 struct set *skip_targets = _mesa_pointer_set_create(mem_ctx);
563
564 list_inithead(levels);
565 while (remaining->entries) {
566 _mesa_set_clear(remaining_frontier, NULL);
567 set_foreach(remaining, entry) {
568 nir_block *remain_block = (nir_block *) entry->key;
569 set_foreach(remain_block->dom_frontier, frontier_entry) {
570 nir_block *frontier = (nir_block *) frontier_entry->key;
571 if (frontier != remain_block) {
572 _mesa_set_add(remaining_frontier, frontier);
573 }
574 }
575 }
576
577 struct strct_lvl *curr_level = rzalloc(mem_ctx, struct strct_lvl);
578 curr_level->blocks = _mesa_pointer_set_create(curr_level);
579 set_foreach(remaining, entry) {
580 nir_block *candidate = (nir_block *) entry->key;
581 if (!_mesa_set_search(remaining_frontier, candidate)) {
582 _mesa_set_add(curr_level->blocks, candidate);
583 _mesa_set_remove_key(remaining, candidate);
584 }
585 }
586
587 curr_level->irreducible = !curr_level->blocks->entries;
588 if (curr_level->irreducible) {
589 handle_irreducible(remaining, curr_level,
590 routing->brk.reachable, mem_ctx);
591 }
592 assert(curr_level->blocks->entries);
593
594 struct strct_lvl *prev_level = NULL;
595 if (!list_is_empty(levels))
596 prev_level = list_last_entry(levels, struct strct_lvl, link);
597
598 set_foreach(skip_targets, entry) {
599 if (_mesa_set_search_pre_hashed(curr_level->blocks,
600 entry->hash, entry->key)) {
601 _mesa_set_remove(skip_targets, entry);
602 prev_level->skip_end = 1;
603 }
604 }
605 curr_level->skip_start = skip_targets->entries != 0;
606
607 struct set *prev_frontier = NULL;
608 if (!prev_level) {
609 prev_frontier = reach;
610 } else if (prev_level->irreducible) {
611 prev_frontier = prev_level->reach;
612 } else {
613 set_foreach(curr_level->blocks, blocks_entry) {
614 nir_block *level_block = (nir_block *) blocks_entry->key;
615 if (!prev_frontier) {
616 prev_frontier = curr_level->blocks->entries == 1 ?
617 level_block->dom_frontier :
618 _mesa_set_clone(level_block->dom_frontier, prev_level);
619 } else {
620 set_foreach(level_block->dom_frontier, entry)
621 _mesa_set_add_pre_hashed(prev_frontier, entry->hash,
622 entry->key);
623 }
624 }
625 }
626
627 bool is_in_skip = skip_targets->entries != 0;
628 set_foreach(prev_frontier, entry) {
629 if (_mesa_set_search(remaining, entry->key) ||
630 (_mesa_set_search(routing->regular.reachable, entry->key) &&
631 !_mesa_set_search(routing->brk.reachable, entry->key) &&
632 !_mesa_set_search(routing->cont.reachable, entry->key))) {
633 _mesa_set_add_pre_hashed(skip_targets, entry->hash, entry->key);
634 if (is_in_skip)
635 prev_level->skip_end = 1;
636 curr_level->skip_start = 1;
637 }
638 }
639
640 curr_level->skip_end = 0;
641 list_addtail(&curr_level->link, levels);
642 }
643
644 if (skip_targets->entries)
645 list_last_entry(levels, struct strct_lvl, link)->skip_end = 1;
646
647 /* Iterate throught all levels reverse and create all the paths and forks */
648 struct path path_after_skip;
649
650 list_for_each_entry_rev(struct strct_lvl, level, levels, link) {
651 bool need_var = !(is_domminated && level->link.prev == levels);
652 level->out_path = routing->regular;
653 if (level->skip_end) {
654 path_after_skip = routing->regular;
655 }
656 routing->regular.reachable = level->blocks;
657 routing->regular.fork = select_fork(routing->regular.reachable, impl,
658 need_var, mem_ctx);
659 if (level->skip_start) {
660 struct path_fork *fork = ralloc(mem_ctx, struct path_fork);
661 fork->is_var = need_var;
662 if (need_var)
663 fork->path_var = nir_local_variable_create(impl, glsl_bool_type(),
664 "path_conditional");
665 fork->paths[0] = path_after_skip;
666 fork->paths[1] = routing->regular;
667 routing->regular.fork = fork;
668 routing->regular.reachable = fork_reachable(fork);
669 }
670 }
671 }
672
/* Forward declaration: nir_structurize() and select_blocks() are mutually
 * recursive (nir_structurize -> plant_levels -> select_blocks).
 */
static void
nir_structurize(struct routes *routing, nir_builder *b,
                nir_block *block, void *mem_ctx);
676
/**
 * Places all the if else statements to select between all blocks in a select
 * path
 */
static void
select_blocks(struct routes *routing, nir_builder *b,
              struct path in_path, void *mem_ctx)
{
   if (!in_path.fork) {
      /* No fork means exactly one reachable block: structurize it directly */
      nir_structurize(routing, b, (nir_block *)
                      _mesa_set_next_entry(in_path.reachable, NULL)->key,
                      mem_ctx);
   } else {
      assert(!(in_path.fork->is_var &&
               strcmp(in_path.fork->path_var->name, "path_select")));
      /* A true fork condition selects paths[1], hence it is the then branch */
      nir_push_if_src(b, nir_src_for_ssa(fork_condition(b, in_path.fork)));
      select_blocks(routing, b, in_path.fork->paths[1], mem_ctx);
      nir_push_else(b, NULL);
      select_blocks(routing, b, in_path.fork->paths[0], mem_ctx);
      nir_pop_if(b, NULL);
   }
}
699
/**
 * Builds the structurized nir code by the final level list.
 */
static void
plant_levels(struct list_head *levels, struct routes *routing,
             nir_builder *b, void *mem_ctx)
{
   /* Place all dominated blocks and build the path forks */
   list_for_each_entry(struct strct_lvl, level, levels, link) {
      if (level->skip_start) {
         /* Open a skip region: the if wraps every level up to the one
          * marked skip_end, and a false path_conditional jumps past them.
          */
         assert(routing->regular.fork);
         assert(!(routing->regular.fork->is_var && strcmp(
                  routing->regular.fork->path_var->name, "path_conditional")));
         nir_push_if_src(b, nir_src_for_ssa(
                            fork_condition(b, routing->regular.fork)));
         routing->regular = routing->regular.fork->paths[1];
      }
      struct path in_path = routing->regular;
      routing->regular = level->out_path;
      /* Irreducible levels were turned into single-entry loops by
       * handle_irreducible(); wrap them in loop routing.
       */
      if (level->irreducible)
         loop_routing_start(routing, b, in_path, level->reach, mem_ctx);
      select_blocks(routing, b, in_path, mem_ctx);
      if (level->irreducible)
         loop_routing_end(routing, b);
      if (level->skip_end)
         nir_pop_if(b, NULL);
   }
}
728
/**
 * builds the control flow of a block and all its dominance children
 * \param routing  the routing after the block and all dominated blocks
 */
static void
nir_structurize(struct routes *routing, nir_builder *b, nir_block *block,
                void *mem_ctx)
{
   /* Dominance children already reachable through a break are handled by an
    * outer level and are excluded here.
    */
   struct set *remaining = _mesa_pointer_set_create(mem_ctx);
   for (int i = 0; i < block->num_dom_children; i++) {
      if (!_mesa_set_search(routing->brk.reachable, block->dom_children[i]))
         _mesa_set_add(remaining, block->dom_children[i]);
   }

   /* If the block can reach back to itself, it is a loop head */
   int is_looped = _mesa_set_search(block->dom_frontier, block) != NULL;
   struct list_head outside_levels;
   if (is_looped) {
      struct set *loop_heads = _mesa_pointer_set_create(mem_ctx);
      _mesa_set_add(loop_heads, block);

      struct set *outside = _mesa_pointer_set_create(mem_ctx);
      struct set *reach = _mesa_pointer_set_create(mem_ctx);
      inside_outside(block, loop_heads, outside, reach,
                     routing->brk.reachable, mem_ctx);

      /* Blocks outside the loop are planted after the loop, not inside it */
      set_foreach(outside, entry)
         _mesa_set_remove_key(remaining, entry->key);

      organize_levels(&outside_levels, outside, reach, routing, b->impl,
                      false, mem_ctx);

      struct path loop_path = {
         .reachable = _mesa_pointer_set_create(mem_ctx),
         .fork = NULL,
      };
      _mesa_set_add(loop_path.reachable, block);

      loop_routing_start(routing, b, loop_path, reach, mem_ctx);
   }

   /* Direct successors (minus the end block) seed the level organization */
   struct set *reach = _mesa_pointer_set_create(mem_ctx);
   if (block->successors[0]->successors[0]) /* it is not the end_block */
      _mesa_set_add(reach, block->successors[0]);
   if (block->successors[1] && block->successors[1]->successors[0])
      _mesa_set_add(reach, block->successors[1]);

   struct list_head levels;
   organize_levels(&levels, remaining, reach, routing, b->impl, true, mem_ctx);

   /* Push all instructions of this block, without the jump instr */
   nir_jump_instr *jump_instr = NULL;
   nir_foreach_instr_safe(instr, block) {
      if (instr->type == nir_instr_type_jump) {
         jump_instr = nir_instr_as_jump(instr);
         break;
      }
      nir_instr_remove(instr);
      nir_builder_instr_insert(b, instr);
   }

   /* Find path to the successor blocks */
   /* NOTE(review): jump_instr is dereferenced unconditionally — this assumes
    * every block of an unstructured impl ends in a jump instruction; confirm
    * against the unstructured-NIR invariants.
    */
   if (jump_instr->type == nir_jump_goto_if) {
      route_to_cond(b, routing, jump_instr->condition,
                    jump_instr->target, jump_instr->else_target);
   } else {
      route_to(b, routing, block->successors[0]);
   }

   plant_levels(&levels, routing, b, mem_ctx);
   if (is_looped) {
      /* Plant the levels of the blocks after the loop outside the loop */
      loop_routing_end(routing, b);
      plant_levels(&outside_levels, routing, b, mem_ctx);
   }
}
804
/**
 * Lowers the unstructured CFG of one function impl to structured control
 * flow (ifs and loops).  Returns true if the impl was changed.
 */
static bool
nir_lower_goto_ifs_impl(nir_function_impl *impl)
{
   if (impl->structured) {
      nir_metadata_preserve(impl, nir_metadata_all);
      return false;
   }

   nir_metadata_require(impl, nir_metadata_dominance);

   /* Pull the entire unstructured CFG out of the impl; it is rebuilt in
    * structured form by nir_structurize() below.
    */
   nir_cf_list cf_list;
   nir_cf_extract(&cf_list, nir_before_cf_list(&impl->body),
                  nir_after_cf_list(&impl->body));

   /* From this point on, it's structured */
   impl->structured = true;

   nir_builder b;
   nir_builder_init(&b, impl);
   b.cursor = nir_before_block(nir_start_block(impl));

   void *mem_ctx = ralloc_context(b.shader);

   /* Initially, only the end block is reachable via the regular route;
    * the break and continue routes start out empty.
    */
   struct set *end_set = _mesa_pointer_set_create(mem_ctx);
   _mesa_set_add(end_set, impl->end_block);
   struct set *empty_set = _mesa_pointer_set_create(mem_ctx);

   nir_cf_node *start_node =
      exec_node_data(nir_cf_node, exec_list_get_head(&cf_list.list), node);
   nir_block *start_block = nir_cf_node_as_block(start_node);

   struct routes *routing = ralloc(mem_ctx, struct routes);
   *routing = (struct routes) {
      .regular.reachable = end_set,
      .brk.reachable = empty_set,
      .cont.reachable = empty_set,
   };
   nir_structurize(routing, &b, start_block, mem_ctx);
   /* Structurization must consume every fork and leave the routes as they
    * were set up above.
    */
   assert(routing->regular.fork == NULL);
   assert(routing->brk.fork == NULL);
   assert(routing->cont.fork == NULL);
   assert(routing->brk.reachable == empty_set);
   assert(routing->cont.reachable == empty_set);

   ralloc_free(mem_ctx);
   nir_cf_delete(&cf_list);

   nir_metadata_preserve(impl, nir_metadata_none);

   return true;
}
856
857 bool
858 nir_lower_goto_ifs(nir_shader *shader)
859 {
860 bool progress = true;
861
862 nir_foreach_function(function, shader) {
863 if (function->impl && nir_lower_goto_ifs_impl(function->impl))
864 progress = true;
865 }
866
867 return progress;
868 }