nir: Add a structurizer
[mesa.git] / src / compiler / nir / nir_lower_goto_ifs.c
1 /*
2 * Copyright © 2020 Julian Winkler
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "nir.h"
25 #include "nir_builder.h"
26
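/* This pass turns unstructured control flow (blocks ending in goto or
 * goto_if) into structured NIR built out of ifs and loops.  The central idea
 * is "routing": at every point the builder knows which blocks are still
 * reachable via the regular path, via a break, or via a continue.  When a
 * jump targets a block that cannot be reached directly, boolean path
 * variables are written and tested later so that the structured code ends up
 * in the right block.
 *
 * A struct path is one such route: the set of blocks reachable on it and, if
 * that set has more than one element, a tree of path_forks whose boolean
 * conditions (either an SSA value or a helper variable) select between the
 * two sub-paths.  struct routes bundles the regular, break and continue
 * paths that are live at the current position.
 */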
27 struct path {
28 struct set *reachable;
29 struct path_fork *fork;
30 };
31
32 struct path_fork {
33 bool is_var;
34 union {
35 nir_variable *path_var;
36 nir_ssa_def *path_ssa;
37 };
38 struct path paths[2];
39 };
40
41 struct routes {
42 struct path regular;
43 struct path brk;
44 struct path cont;
45 struct routes *loop_backup;
46 };
47
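/* One level as produced by organize_levels(): the blocks placed at this
 * level, the regular routing used when leaving those blocks (out_path) and,
 * for irreducible levels, the set of blocks reachable from the multi-entry
 * loop (reach).  skip_start and skip_end mark whether a "path_conditional"
 * if is opened before, respectively closed after, this level so that control
 * flow can skip over it.
 */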
48 struct strct_lvl {
49 struct exec_node node;
50 struct set *blocks;
51 struct path out_path;
52 struct set *reach;
53 bool skip_start;
54 bool skip_end;
55 bool irreducible;
56 };
57
58 /**
59 * Sets all path variables to reach the target block via a fork
60 */
61 static void
62 set_path_vars(nir_builder *b, struct path_fork *fork, nir_block *target)
63 {
64 while (fork) {
65 for (int i = 0; i < 2; i++) {
66 if (_mesa_set_search(fork->paths[i].reachable, target)) {
67 if (fork->is_var) {
68 nir_store_var(b, fork->path_var, nir_imm_bool(b, i), 1);
69 } else {
70 assert(fork->path_ssa == NULL);
71 fork->path_ssa = nir_imm_bool(b, i);
72 }
73 fork = fork->paths[i].fork;
74 break;
75 }
76 }
77 }
78 }
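
/* Illustrative example (which block lands on which side of a fork depends on
 * set iteration order in select_fork()): for a path that can reach the three
 * blocks {B, C, D} the fork tree may look like
 *
 *            fork0 ("path_select")
 *        false /          \ true
 *            {B}         fork1 ("path_select")
 *                     false /      \ true
 *                         {C}      {D}
 *
 * set_path_vars(b, fork0, D) then stores true into both forks' variables, so
 * the if-ladder later emitted by select_blocks() reaches D.
 */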
79
80 /**
 81  * Sets all path variables to reach both target blocks via a fork.
 82  * If the blocks are in different fork paths, the condition will be used.
 83  * As the fork is already created, the then and else blocks may be swapped;
 84  * in this case the condition is inverted.
85 */
86 static void
87 set_path_vars_cond(nir_builder *b, struct path_fork *fork, nir_src condition,
88 nir_block *then_block, nir_block *else_block)
89 {
90 int i;
91 while (fork) {
92 for (i = 0; i < 2; i++) {
93 if (_mesa_set_search(fork->paths[i].reachable, then_block)) {
94 if (_mesa_set_search(fork->paths[i].reachable, else_block)) {
95 if (fork->is_var)
96 nir_store_var(b, fork->path_var, nir_imm_bool(b, i), 1);
97 else
98 fork->path_ssa = nir_imm_bool(b, i);
99 fork = fork->paths[i].fork;
100 break;
101 }
102 else {
103 assert(condition.is_ssa);
104 nir_ssa_def *ssa_def = condition.ssa;
105 assert(ssa_def->bit_size == 1);
106 assert(ssa_def->num_components == 1);
107 if (!i)
108 ssa_def = nir_inot(b, ssa_def);
109 if (fork->is_var)
110 nir_store_var(b, fork->path_var, ssa_def, 1);
111 else
112 fork->path_ssa = ssa_def;
113 set_path_vars(b, fork->paths[i].fork, then_block);
114 set_path_vars(b, fork->paths[!i].fork, else_block);
115 return;
116 }
117 }
118 }
119 assert(i < 2);
120 }
121 }
122
123 /**
124 * Sets all path variables and places the right jump instruction to reach the
125 * target block
126 */
127 static void
128 route_to(nir_builder *b, struct routes *routing, nir_block *target)
129 {
130 if (_mesa_set_search(routing->regular.reachable, target)) {
131 set_path_vars(b, routing->regular.fork, target);
132 }
133 else if (_mesa_set_search(routing->brk.reachable, target)) {
134 set_path_vars(b, routing->brk.fork, target);
135 nir_jump(b, nir_jump_break);
136 }
137 else if (_mesa_set_search(routing->cont.reachable, target)) {
138 set_path_vars(b, routing->cont.fork, target);
139 nir_jump(b, nir_jump_continue);
140 }
141 else {
142 assert(!target->successors[0]); /* target is endblock */
143 nir_jump(b, nir_jump_return);
144 }
145 }
146
147 /**
 148  * Sets path vars and places the right jump instruction to reach one of the
 149  * two target blocks based on the condition. If the targets need different
 150  * jump instructions, they will be placed into an if/else statement.
 151  * This can happen if one target is the loop head:
152 * A __
153 * | \
154 * B |
155 * |\__/
156 * C
157 */
158 static void
159 route_to_cond(nir_builder *b, struct routes *routing, nir_src condition,
160 nir_block *then_block, nir_block *else_block)
161 {
162 if (_mesa_set_search(routing->regular.reachable, then_block)) {
163 if (_mesa_set_search(routing->regular.reachable, else_block)) {
164 set_path_vars_cond(b, routing->regular.fork, condition,
165 then_block, else_block);
166 return;
167 }
168 } else if (_mesa_set_search(routing->brk.reachable, then_block)) {
169 if (_mesa_set_search(routing->brk.reachable, else_block)) {
170 set_path_vars_cond(b, routing->brk.fork, condition,
171 then_block, else_block);
172 nir_jump(b, nir_jump_break);
173 return;
174 }
175 } else if (_mesa_set_search(routing->cont.reachable, then_block)) {
176 if (_mesa_set_search(routing->cont.reachable, else_block)) {
177 set_path_vars_cond(b, routing->cont.fork, condition,
178 then_block, else_block);
179 nir_jump(b, nir_jump_continue);
180 return;
181 }
182 }
183
184 /* then and else blocks are in different routes */
185 nir_push_if_src(b, condition);
186 route_to(b, routing, then_block);
187 nir_push_else(b, NULL);
188 route_to(b, routing, else_block);
189 nir_pop_if(b, NULL);
190 }
191
192 /**
 193  * Merges the reachable sets of both fork subpaths into the fork's entire
 194  * reachable set.
195 */
196 static struct set *
197 fork_reachable(struct path_fork *fork)
198 {
199 struct set *reachable = _mesa_set_clone(fork->paths[0].reachable, fork);
200 set_foreach(fork->paths[1].reachable, entry)
201 _mesa_set_add_pre_hashed(reachable, entry->hash, entry->key);
202 return reachable;
203 }
204
205 /**
 206  * Modifies the routing to be the routing inside a loop. The old regular path
 207  * becomes the new break path. The loop path passed in becomes the new regular
 208  * and continue path.
 209  * The previous routing information is saved on the loop_backup stack.
 210  * Also creates helper vars for multilevel loop jumping if needed, and calls
 211  * the NIR builder to push the loop.
212 */
213 static void
214 loop_routing_start(struct routes *routing, nir_builder *b,
215 struct path loop_path, struct set *reach)
216 {
217 struct routes *routing_backup = ralloc(routing, struct routes);
218 *routing_backup = *routing;
219 bool break_needed = false;
220 bool continue_needed = false;
221
222 set_foreach(reach, entry) {
223 if (_mesa_set_search(loop_path.reachable, entry->key))
224 continue;
225 if (_mesa_set_search(routing->regular.reachable, entry->key))
226 continue;
227 if (_mesa_set_search(routing->brk.reachable, entry->key)) {
228 break_needed = true;
229 continue;
230 }
231 assert(_mesa_set_search(routing->cont.reachable, entry->key));
232 continue_needed = true;
233 }
234
235 routing->brk = routing_backup->regular;
236 routing->cont = loop_path;
237 routing->regular = loop_path;
238 routing->loop_backup = routing_backup;
239
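   /* Targets that were reachable through the old break or continue path can
    * no longer be jumped to directly from inside the new loop; the code first
    * has to break out of it.  The helper forks below funnel such jumps
    * through this loop's break path and record in a boolean helper variable
    * ("path_break" / "path_continue") whether the code after the loop must
    * re-emit a break or continue for the outer loop (see loop_routing_end()).
    */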
240 if (break_needed) {
241 struct path_fork *fork = ralloc(routing_backup, struct path_fork);
242 fork->is_var = true;
243 fork->path_var = nir_local_variable_create(b->impl, glsl_bool_type(),
244 "path_break");
245 fork->paths[0] = routing->brk;
246 fork->paths[1] = routing_backup->brk;
247 routing->brk.fork = fork;
248 routing->brk.reachable = fork_reachable(fork);
249 }
250 if (continue_needed) {
251 struct path_fork *fork = ralloc(routing_backup, struct path_fork);
252 fork->is_var = true;
253 fork->path_var = nir_local_variable_create(b->impl, glsl_bool_type(),
254 "path_continue");
255 fork->paths[0] = routing->brk;
256 fork->paths[1] = routing_backup->cont;
257 routing->brk.fork = fork;
258 routing->brk.reachable = fork_reachable(fork);
259 }
260 nir_push_loop(b);
261 }
262
263 /**
 264  * Gets a fork's condition as an SSA def. If the condition is stored in a
 265  * helper var, the variable is loaded into an SSA def.
266 */
267 static nir_ssa_def *
268 fork_condition(nir_builder *b, struct path_fork *fork)
269 {
270 nir_ssa_def *ret;
271 if (fork->is_var) {
272 ret = nir_load_var(b, fork->path_var);
273 }
274 else
275 ret = fork->path_ssa;
276 return ret;
277 }
278
279 /**
280 * Restores the routing after leaving a loop based on the loop_backup stack.
 281  * Also handles multi-level jump helper vars, if present, and calls the NIR
 282  * builder to pop the loop.
283 */
284 static void
285 loop_routing_end(struct routes *routing, nir_builder *b)
286 {
287 struct routes *routing_backup = routing->loop_backup;
288 assert(routing->cont.fork == routing->regular.fork);
289 assert(routing->cont.reachable == routing->regular.reachable);
290 nir_pop_loop(b, NULL);
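   /* Unwind the helper forks pushed by loop_routing_start() in reverse order:
    * if a "path_continue" or "path_break" helper was set inside the loop,
    * re-emit the corresponding jump for the outer loop now that we are
    * outside the inner one.
    */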
291 if (routing->brk.fork && routing->brk.fork->paths[1].reachable ==
292 routing_backup->cont.reachable) {
293 assert(!(routing->brk.fork->is_var &&
294 strcmp(routing->brk.fork->path_var->name, "path_continue")));
295 nir_push_if_src(b, nir_src_for_ssa(
296 fork_condition(b, routing->brk.fork)));
297 nir_jump(b, nir_jump_continue);
298 nir_pop_if(b, NULL);
299 routing->brk = routing->brk.fork->paths[0];
300 }
301 if (routing->brk.fork && routing->brk.fork->paths[1].reachable ==
302 routing_backup->brk.reachable) {
303 assert(!(routing->brk.fork->is_var &&
304 strcmp(routing->brk.fork->path_var->name, "path_break")));
305 nir_push_if_src(b, nir_src_for_ssa(
306 fork_condition(b, routing->brk.fork)));
307 nir_jump(b, nir_jump_break);
308 nir_pop_if(b, NULL);
309 routing->brk = routing->brk.fork->paths[0];
310 }
311 assert(routing->brk.fork == routing_backup->regular.fork);
312 assert(routing->brk.reachable == routing_backup->regular.reachable);
313 *routing = *routing_backup;
314 ralloc_free(routing_backup);
315 }
316
317 /**
 318  * Generates the set of all blocks dominated by the loop header from which
 319  * control flow cannot return to the loop header.
 320  * Also generates the set of all blocks that can be reached from within the
 321  * loop.
322 * | __
323 * A´ \
324 * | \ \
325 * B C-´
326 * /
327 * D
 328  * Here B and C are directly dominated by A, but only C can reach back to the
 329  * loop head A. B will be added to the outside set and to the reach set.
330 * \param loop_heads set of loop heads. All blocks inside the loop will be
331 * added to this set
332 * \param outside all blocks directly outside the loop will be added
333 * \param reach all blocks reachable from the loop will be added
334 */
335 static void
336 inside_outside(nir_block *block, struct set *loop_heads, struct set *outside,
337 struct set *reach, struct set *brk_reachable)
338 {
339 assert(_mesa_set_search(loop_heads, block));
340 struct set *remaining = _mesa_pointer_set_create(NULL);
341 for (int i = 0; i < block->num_dom_children; i++) {
342 if (!_mesa_set_search(brk_reachable, block->dom_children[i]))
343 _mesa_set_add(remaining, block->dom_children[i]);
344 }
345
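   /* Fixed-point iteration: a dominance child belongs outside the loop if
    * none of its dominance-frontier blocks is a loop head or still a loop
    * candidate, i.e. control flow starting at it can never get back into the
    * loop.
    */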
346 bool progress = true;
347 while (remaining->entries && progress) {
348 progress = false;
349 set_foreach(remaining, child_entry) {
350 nir_block *dom_child = (nir_block *) child_entry->key;
351 bool can_jump_back = false;
352 set_foreach(dom_child->dom_frontier, entry) {
353 if (entry->key == dom_child)
354 continue;
355 if (_mesa_set_search_pre_hashed(remaining, entry->hash,
356 entry->key)) {
357 can_jump_back = true;
358 break;
359 }
360 if (_mesa_set_search_pre_hashed(loop_heads, entry->hash,
361 entry->key)) {
362 can_jump_back = true;
363 break;
364 }
365 }
366 if (!can_jump_back) {
367 _mesa_set_add_pre_hashed(outside, child_entry->hash,
368 child_entry->key);
369 _mesa_set_remove(remaining, child_entry);
370 progress = true;
371 }
372 }
373 }
374
375 /* Add everything remaining to loop_heads */
376 set_foreach(remaining, entry)
377 _mesa_set_add_pre_hashed(loop_heads, entry->hash, entry->key);
378
379 /* Recurse for each remaining */
380 set_foreach(remaining, entry) {
381 inside_outside((nir_block *) entry->key, loop_heads, outside, reach,
382 brk_reachable);
383 }
384
385 _mesa_set_destroy(remaining, NULL);
386 remaining = NULL;
387
388 for (int i = 0; i < 2; i++) {
389 if (block->successors[i] && block->successors[i]->successors[0] &&
390 !_mesa_set_search(loop_heads, block->successors[i])) {
391 _mesa_set_add(reach, block->successors[i]);
392 }
393 }
394 }
395
396 /**
 397  * Gets a set of blocks organized into the same level by the organize_levels
 398  * function and creates enough forks to be able to route to them.
 399  * If the set only contains one block, the function has nothing to do.
 400  * The set should almost never contain more than two blocks, but if it does,
 401  * the function calls itself recursively.
402 */
403 static struct path_fork *
404 select_fork(struct set *reachable, nir_function_impl *impl, bool need_var)
405 {
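   /* Split the reachable set roughly in half and recurse on both halves,
    * building a binary tree of forks (with "path_select" helper variables
    * when need_var is set) that can distinguish every block.
    */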
406 struct path_fork *fork = NULL;
407 if (reachable->entries > 1) {
408 fork = ralloc(reachable, struct path_fork);
409 fork->is_var = need_var;
410 if (need_var)
411 fork->path_var = nir_local_variable_create(impl, glsl_bool_type(),
412 "path_select");
413 fork->paths[0].reachable = _mesa_pointer_set_create(fork);
414 struct set_entry *entry = NULL;
415 while (fork->paths[0].reachable->entries < reachable->entries / 2 &&
416 (entry = _mesa_set_next_entry(reachable, entry))) {
417 _mesa_set_add_pre_hashed(fork->paths[0].reachable,
418 entry->hash, entry->key);
419 }
420 fork->paths[0].fork = select_fork(fork->paths[0].reachable, impl,
421 need_var);
422 fork->paths[1].reachable = _mesa_pointer_set_create(fork);
423 while ((entry = _mesa_set_next_entry(reachable, entry))) {
424 _mesa_set_add_pre_hashed(fork->paths[1].reachable,
425 entry->hash, entry->key);
426 }
427 fork->paths[1].fork = select_fork(fork->paths[1].reachable, impl,
428 need_var);
429 }
430 return fork;
431 }
432
433 /**
 434  * Gets called when the organize_levels function fails to find blocks that
 435  * can't be reached by the other remaining blocks. This means at least two
 436  * dominance sibling blocks can reach each other, so we have a multi-entry
 437  * loop. This function tries to find the smallest possible set of blocks that
 438  * must be part of the multi-entry loop.
439 * example cf: | |
440 * A<---B
441 * / \__,^ \
442 * \ /
443 * \ /
444 * C
 445  * The function chooses an arbitrary block as candidate, for example C.
446 * The function checks which remaining blocks can reach C, in this case A.
447 * So A becomes the new candidate and C is removed from the result set.
448 * B can reach A.
449 * So B becomes the new candidate and A is removed from the set.
450 * A can reach B.
451 * A was an old candidate. So it is added to the set containing B.
452 * No other remaining blocks can reach A or B.
453 * So only A and B must be part of the multi entry loop.
454 */
455 static void
456 handle_irreducible(struct set *remaining, struct strct_lvl *curr_level,
457 struct set *brk_reachable) {
458 nir_block *candidate = (nir_block *)
459 _mesa_set_next_entry(remaining, NULL)->key;
460 struct set *old_candidates = _mesa_pointer_set_create(curr_level);
461 while (candidate) {
462 _mesa_set_add(old_candidates, candidate);
463 nir_block *to_be_added = candidate;
464 candidate = NULL;
465
466 _mesa_set_clear(curr_level->blocks, NULL);
467 while (to_be_added) {
468 _mesa_set_add(curr_level->blocks, to_be_added);
469 to_be_added = NULL;
470
471 set_foreach(remaining, entry) {
472 nir_block *remaining_block = (nir_block *) entry->key;
473 if (!_mesa_set_search(curr_level->blocks, remaining_block)
474 && _mesa_set_intersects(remaining_block->dom_frontier,
475 curr_level->blocks)) {
476 if (_mesa_set_search(old_candidates, remaining_block))
477 to_be_added = remaining_block;
478 else
479 candidate = remaining_block;
480 break;
481 }
482 }
483 }
484 }
485 _mesa_set_destroy(old_candidates, NULL);
486 old_candidates = NULL;
487
488 struct set *loop_heads = _mesa_set_clone(curr_level->blocks, curr_level);
489 curr_level->reach = _mesa_pointer_set_create(curr_level);
490 set_foreach(curr_level->blocks, entry) {
491 _mesa_set_remove_key(remaining, entry->key);
492 inside_outside((nir_block *) entry->key, loop_heads, remaining,
493 curr_level->reach, brk_reachable);
494 }
495 _mesa_set_destroy(loop_heads, NULL);
496 }
497
498 /**
 499  * Organizes a set of blocks into a list of levels, where every level contains
 500  * one or more blocks, so that every block comes before all blocks it can
 501  * reach. Also creates all path variables needed for the control flow between
 502  * the blocks.
503 * For example if the control flow looks like this:
504 * A
505 * / |
506 * B C
507 * | / \
508 * E |
509 * \ /
510 * F
511 * B, C, E and F are dominance children of A
512 * The level list should look like this:
513 * blocks irreducible conditional
514 * level 0 B, C false false
515 * level 1 E false true
516 * level 2 F false false
517 * The final structure should look like this:
518 * A
519 * if (path_select) {
520 * B
521 * } else {
522 * C
523 * }
524 * if (path_conditional) {
525 * E
526 * }
527 * F
528 *
529 * \param levels uninitialized list
530 * \param is_dominated if true, no helper variables will be created for the
531 * zeroth level
532 */
533 static void
534 organize_levels(struct exec_list *levels, struct set *remaining,
535 struct set *reach, struct routes *routing,
 536                 nir_function_impl *impl, bool is_dominated)
537 {
538 void *mem_ctx = ralloc_parent(remaining);
539
540 /* blocks that can be reached by the remaining blocks */
541 struct set *remaining_frontier = _mesa_pointer_set_create(mem_ctx);
542
543 /* targets of active skip path */
544 struct set *skip_targets = _mesa_pointer_set_create(mem_ctx);
545
546 exec_list_make_empty(levels);
547 while (remaining->entries) {
548 _mesa_set_clear(remaining_frontier, NULL);
549 set_foreach(remaining, entry) {
550 nir_block *remain_block = (nir_block *) entry->key;
551 set_foreach(remain_block->dom_frontier, frontier_entry) {
552 nir_block *frontier = (nir_block *) frontier_entry->key;
553 if (frontier != remain_block) {
554 _mesa_set_add(remaining_frontier, frontier);
555 }
556 }
557 }
558
559 struct strct_lvl *curr_level = ralloc(mem_ctx, struct strct_lvl);
560 curr_level->blocks = _mesa_pointer_set_create(curr_level);
561 set_foreach(remaining, entry) {
562 nir_block *candidate = (nir_block *) entry->key;
563 if (!_mesa_set_search(remaining_frontier, candidate)) {
564 _mesa_set_add(curr_level->blocks, candidate);
565 _mesa_set_remove_key(remaining, candidate);
566 }
567 }
568
569 curr_level->irreducible = !curr_level->blocks->entries;
570 if (curr_level->irreducible)
571 handle_irreducible(remaining, curr_level, routing->brk.reachable);
572 assert(curr_level->blocks->entries);
573 curr_level->skip_start = 0;
574
575 struct strct_lvl *prev_level = NULL;
576 struct exec_node *tail;
577 if ((tail = exec_list_get_tail(levels)))
578 prev_level = exec_node_data(struct strct_lvl, tail, node);
579
580 if (skip_targets->entries) {
581 set_foreach(skip_targets, entry) {
582 if (_mesa_set_search_pre_hashed(curr_level->blocks,
583 entry->hash, entry->key)) {
584 _mesa_set_remove(skip_targets, entry);
585 prev_level->skip_end = 1;
586 curr_level->skip_start = !!skip_targets->entries;
587 }
588 }
589 }
590 struct set *prev_frontier = NULL;
591 if (!prev_level) {
592 prev_frontier = reach;
593 } else if (prev_level->irreducible) {
594 prev_frontier = prev_level->reach;
595 } else {
596 set_foreach(curr_level->blocks, blocks_entry) {
597 nir_block *level_block = (nir_block *) blocks_entry->key;
598 if (!prev_frontier) {
599 prev_frontier = curr_level->blocks->entries == 1 ?
600 level_block->dom_frontier :
601 _mesa_set_clone(level_block->dom_frontier, prev_level);
602 } else {
603 set_foreach(level_block->dom_frontier, entry)
604 _mesa_set_add_pre_hashed(prev_frontier, entry->hash,
605 entry->key);
606 }
607 }
608 }
609
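      /* Any block in the previous frontier that has not been placed in a
       * level yet, or that is reachable only via the regular path, may be
       * entered from above the current level, so control flow might have to
       * skip over this level to reach it.  Record it as a skip target and
       * mark the boundaries of the skip region on the affected levels.
       */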
610 bool is_in_skip = skip_targets->entries != 0;
611 set_foreach(prev_frontier, entry) {
612 if (_mesa_set_search(remaining, entry->key) ||
613 (_mesa_set_search(routing->regular.reachable, entry->key) &&
614 !_mesa_set_search(routing->brk.reachable, entry->key) &&
615 !_mesa_set_search(routing->cont.reachable, entry->key))) {
616 _mesa_set_add_pre_hashed(skip_targets, entry->hash, entry->key);
617 if (is_in_skip)
618 prev_level->skip_end = 1;
619 curr_level->skip_start = 1;
620 }
621 }
622
623 curr_level->skip_end = 0;
624 exec_list_push_tail(levels, &curr_level->node);
625 }
626
627 if (skip_targets->entries)
628 exec_node_data(struct strct_lvl, exec_list_get_tail(levels), node)
629 ->skip_end = 1;
630 _mesa_set_destroy(remaining_frontier, NULL);
631 remaining_frontier = NULL;
632 _mesa_set_destroy(skip_targets, NULL);
633 skip_targets = NULL;
634
 635    /* Iterate through all levels in reverse and create all the paths and forks */
636 struct path path_after_skip;
637
638 foreach_list_typed_reverse(struct strct_lvl, level, node, levels) {
 639       bool need_var = !(is_dominated && exec_node_get_prev(&level->node)
640 == &levels->head_sentinel);
641 level->out_path = routing->regular;
642 if (level->skip_end) {
643 path_after_skip = routing->regular;
644 }
645 routing->regular.reachable = level->blocks;
646 routing->regular.fork = select_fork(routing->regular.reachable, impl,
647 need_var);
648 if (level->skip_start) {
649 struct path_fork *fork = ralloc(level, struct path_fork);
650 fork->is_var = need_var;
651 if (need_var)
652 fork->path_var = nir_local_variable_create(impl, glsl_bool_type(),
653 "path_conditional");
654 fork->paths[0] = path_after_skip;
655 fork->paths[1] = routing->regular;
656 routing->regular.fork = fork;
657 routing->regular.reachable = fork_reachable(fork);
658 }
659 }
660 }
661
662 static void
663 nir_structurize(struct routes *routing, nir_builder *b, nir_block *block);
664
665 /**
 666  * Places all the if/else statements needed to select between all blocks in a
 667  * select path.
668 */
669 static void
670 select_blocks(struct routes *routing, nir_builder *b, struct path in_path) {
671 if (!in_path.fork) {
672 nir_structurize(routing, b, (nir_block *)
673 _mesa_set_next_entry(in_path.reachable, NULL)->key);
674 } else {
675 assert(!(in_path.fork->is_var &&
676 strcmp(in_path.fork->path_var->name, "path_select")));
677 nir_push_if_src(b, nir_src_for_ssa(fork_condition(b, in_path.fork)));
678 select_blocks(routing, b, in_path.fork->paths[1]);
679 nir_push_else(b, NULL);
680 select_blocks(routing, b, in_path.fork->paths[0]);
681 nir_pop_if(b, NULL);
682 }
683 }
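
/* For the example fork tree sketched above set_path_vars() this recursion
 * would emit roughly:
 *
 *    if (fork0's path_select) {
 *       if (fork1's path_select) { ...D... } else { ...C... }
 *    } else {
 *       ...B...
 *    }
 */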
684
685 /**
 686  * Builds the structurized NIR code from the final level list.
687 */
688 static void
689 plant_levels(struct exec_list *levels, struct routes *routing,
690 nir_builder *b)
691 {
692 /* Place all dominated blocks and build the path forks */
693 struct exec_node *list_node;
694 while ((list_node = exec_list_pop_head(levels))) {
695 struct strct_lvl *curr_level =
696 exec_node_data(struct strct_lvl, list_node, node);
697 if (curr_level->skip_start) {
698 assert(routing->regular.fork);
699 assert(!(routing->regular.fork->is_var && strcmp(
700 routing->regular.fork->path_var->name, "path_conditional")));
701 nir_push_if_src(b, nir_src_for_ssa(
702 fork_condition(b, routing->regular.fork)));
703 routing->regular = routing->regular.fork->paths[1];
704 }
705 struct path in_path = routing->regular;
706 routing->regular = curr_level->out_path;
707 if (curr_level->irreducible)
708 loop_routing_start(routing, b, in_path, curr_level->reach);
709 select_blocks(routing, b, in_path);
710 if (curr_level->irreducible)
711 loop_routing_end(routing, b);
712 if (curr_level->skip_end)
713 nir_pop_if(b, NULL);
714 ralloc_free(curr_level);
715 }
716 }
717
718 /**
 719  * Builds the control flow of a block and all its dominance children.
720 * \param routing the routing after the block and all dominated blocks
721 */
722 static void
723 nir_structurize(struct routes *routing, nir_builder *b, nir_block *block)
724 {
725 /* Mem context for this function; freed at the end */
726 void *mem_ctx = ralloc_context(routing);
727
728 struct set *remaining = _mesa_pointer_set_create(mem_ctx);
729 for (int i = 0; i < block->num_dom_children; i++) {
730 if (!_mesa_set_search(routing->brk.reachable, block->dom_children[i]))
731 _mesa_set_add(remaining, block->dom_children[i]);
732 }
733
734 /* If the block can reach back to itself, it is a loop head */
735 int is_looped = _mesa_set_search(block->dom_frontier, block) != NULL;
736 struct exec_list outside_levels;
737 if (is_looped) {
738 struct set *loop_heads = _mesa_pointer_set_create(mem_ctx);
739 _mesa_set_add(loop_heads, block);
740
741 struct set *outside = _mesa_pointer_set_create(mem_ctx);
742 struct set *reach = _mesa_pointer_set_create(mem_ctx);
743 inside_outside(block, loop_heads, outside, reach,
744 routing->brk.reachable);
745
746 _mesa_set_destroy(loop_heads, NULL);
747 loop_heads = NULL;
748
749 set_foreach(outside, entry)
750 _mesa_set_remove_key(remaining, entry->key);
751
752 organize_levels(&outside_levels, outside, reach, routing, b->impl,
753 false);
754
755 struct path loop_path = {
756 .reachable = _mesa_pointer_set_create(mem_ctx),
757 .fork = NULL,
758 };
759 _mesa_set_add(loop_path.reachable, block);
760
761 loop_routing_start(routing, b, loop_path, reach);
762 _mesa_set_destroy(reach, NULL);
763 reach = NULL;
764 }
765
766 struct set *reach = _mesa_pointer_set_create(mem_ctx);
767 if (block->successors[0]->successors[0]) /* it is not the end_block */
768 _mesa_set_add(reach, block->successors[0]);
769 if (block->successors[1] && block->successors[1]->successors[0])
770 _mesa_set_add(reach, block->successors[1]);
771
772 struct exec_list levels;
773 organize_levels(&levels, remaining, reach, routing, b->impl, true);
774 _mesa_set_destroy(remaining, NULL);
775 remaining = NULL;
776 _mesa_set_destroy(reach, NULL);
777 reach = NULL;
778
779 /* Push all instructions of this block, without the jump instr */
780 nir_jump_instr *jump_instr = NULL;
781 nir_foreach_instr_safe(instr, block) {
782 if (instr->type == nir_instr_type_jump) {
783 jump_instr = nir_instr_as_jump(instr);
784 break;
785 }
786 nir_instr_remove(instr);
787 nir_builder_instr_insert(b, instr);
788 }
789
790 /* Find path to the successor blocks */
791 if (jump_instr->type == nir_jump_goto_if) {
792 route_to_cond(b, routing, jump_instr->condition,
793 jump_instr->target, jump_instr->else_target);
794 } else {
795 route_to(b, routing, block->successors[0]);
796 }
797
798 plant_levels(&levels, routing, b);
799 if (is_looped) {
800 loop_routing_end(routing, b);
801 plant_levels(&outside_levels, routing, b);
802 }
803
804 ralloc_free(mem_ctx);
805 }
806
807 static bool
808 nir_lower_goto_ifs_impl(nir_function_impl *impl)
809 {
810 if (impl->structured) {
811 nir_metadata_preserve(impl, nir_metadata_all);
812 return false;
813 }
814
815 nir_metadata_require(impl, nir_metadata_dominance);
816
817 nir_cf_list cf_list;
818 nir_cf_extract(&cf_list, nir_before_cf_list(&impl->body),
819 nir_after_cf_list(&impl->body));
820
821 /* From this point on, it's structured */
822 impl->structured = true;
823
824 nir_builder b;
825 nir_builder_init(&b, impl);
826 b.cursor = nir_before_block(nir_start_block(impl));
827
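   /* At the top level the only block reachable on the regular path is the end
    * block, i.e. simply falling out of the structurized code ends the
    * function.  The break and continue paths start out empty and only become
    * meaningful once nir_structurize() enters a loop.
    */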
828 struct routes *routing = ralloc(b.shader, struct routes);
829 routing->regular.reachable = _mesa_pointer_set_create(routing);
830 _mesa_set_add(routing->regular.reachable, impl->end_block);
831 struct set *empty = _mesa_pointer_set_create(routing);
832 routing->regular.fork = NULL;
833 routing->brk.reachable = empty;
834 routing->brk.fork = NULL;
835 routing->cont.reachable = empty;
836 routing->cont.fork = NULL;
837 nir_structurize(routing, &b,
838 nir_cf_node_as_block(exec_node_data
839 (nir_cf_node,
840 exec_list_get_head(&cf_list.list),
841 node)));
842 assert(routing->regular.fork == NULL);
843 assert(routing->brk.fork == NULL);
844 assert(routing->cont.fork == NULL);
845 assert(routing->brk.reachable == empty);
846 assert(routing->cont.reachable == empty);
847 _mesa_set_destroy(routing->regular.reachable, NULL);
848 _mesa_set_destroy(empty, NULL);
849 ralloc_free(routing);
850 nir_cf_delete(&cf_list);
851
852 nir_metadata_preserve(impl, nir_metadata_none);
853
854 return true;
855 }
856
857 bool
858 nir_lower_goto_ifs(nir_shader *shader)
859 {
860 bool progress = true;
861
862 nir_foreach_function(function, shader) {
863 if (function->impl && nir_lower_goto_ifs_impl(function->impl))
864 progress = true;
865 }
866
867 return progress;
868 }