[mesa.git] / src / compiler / nir / nir_opt_if.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir/nir_builder.h"
#include "nir_constant_expressions.h"
#include "nir_control_flow.h"
#include "nir_loop_analyze.h"

static nir_ssa_def *clone_alu_and_replace_src_defs(nir_builder *b,
                                                   const nir_alu_instr *alu,
                                                   nir_ssa_def **src_defs);

/**
 * Gets the single block that jumps back to the loop header. Already assumes
 * there is exactly one such block.
 */
static nir_block*
find_continue_block(nir_loop *loop)
{
   nir_block *header_block = nir_loop_first_block(loop);
   nir_block *prev_block =
      nir_cf_node_as_block(nir_cf_node_prev(&loop->cf_node));

   assert(header_block->predecessors->entries == 2);

   set_foreach(header_block->predecessors, pred_entry) {
      if (pred_entry->key != prev_block)
         return (nir_block*)pred_entry->key;
   }

   unreachable("Continue block not found!");
}

/**
 * Does a phi have one constant value from outside a loop and one from inside?
 */
static bool
phi_has_constant_from_outside_and_one_from_inside_loop(nir_phi_instr *phi,
                                                       const nir_block *entry_block,
                                                       bool *entry_val,
                                                       bool *continue_val)
{
   /* We already know we have exactly one continue */
   assert(exec_list_length(&phi->srcs) == 2);

   *entry_val = false;
   *continue_val = false;

   nir_foreach_phi_src(src, phi) {
      if (!nir_src_is_const(src->src))
         return false;

      if (src->pred != entry_block) {
         *continue_val = nir_src_as_bool(src->src);
      } else {
         *entry_val = nir_src_as_bool(src->src);
      }
   }

   return true;
}

/**
 * This optimization detects if statements at the tops of loops where the
 * condition is a phi node of two constants and moves half of the if to above
 * the loop and the other half of the if to the end of the loop. A simple for
 * loop "for (int i = 0; i < 4; i++)", when run through the SPIR-V front-end,
 * ends up looking something like this:
 *
 *    vec1 32 ssa_0 = load_const (0x00000000)
 *    vec1 32 ssa_1 = load_const (0xffffffff)
 *    loop {
 *       block block_1:
 *       vec1 32 ssa_2 = phi block_0: ssa_0, block_7: ssa_5
 *       vec1 32 ssa_3 = phi block_0: ssa_0, block_7: ssa_1
 *       if ssa_3 {
 *          block block_2:
 *          vec1 32 ssa_4 = load_const (0x00000001)
 *          vec1 32 ssa_5 = iadd ssa_2, ssa_4
 *       } else {
 *          block block_3:
 *       }
 *       block block_4:
 *       vec1 32 ssa_6 = load_const (0x00000004)
 *       vec1 32 ssa_7 = ilt ssa_5, ssa_6
 *       if ssa_7 {
 *          block block_5:
 *       } else {
 *          block block_6:
 *          break
 *       }
 *       block block_7:
 *    }
 *
 * This turns it into something like this:
 *
 *    // Stuff from block 1
 *    // Stuff from block 3
 *    loop {
 *       block block_1:
 *       vec1 32 ssa_2 = phi block_0: ssa_0, block_7: ssa_5
 *       vec1 32 ssa_6 = load_const (0x00000004)
 *       vec1 32 ssa_7 = ilt ssa_2, ssa_6
 *       if ssa_7 {
 *          block block_5:
 *       } else {
 *          block block_6:
 *          break
 *       }
 *       block block_7:
 *       // Stuff from block 1
 *       // Stuff from block 2
 *       vec1 32 ssa_4 = load_const (0x00000001)
 *       vec1 32 ssa_5 = iadd ssa_2, ssa_4
 *    }
 */
static bool
opt_peel_loop_initial_if(nir_loop *loop)
{
   nir_block *header_block = nir_loop_first_block(loop);
   nir_block *const prev_block =
      nir_cf_node_as_block(nir_cf_node_prev(&loop->cf_node));

   /* It would be insane if this were not true */
   assert(_mesa_set_search(header_block->predecessors, prev_block));

   /* The loop must have exactly one continue block which could be a block
    * ending in a continue instruction or the "natural" continue from the
    * last block in the loop back to the top.
    */
   if (header_block->predecessors->entries != 2)
      return false;

   nir_cf_node *if_node = nir_cf_node_next(&header_block->cf_node);
   if (!if_node || if_node->type != nir_cf_node_if)
      return false;

   nir_if *nif = nir_cf_node_as_if(if_node);
   assert(nif->condition.is_ssa);

   nir_ssa_def *cond = nif->condition.ssa;
   if (cond->parent_instr->type != nir_instr_type_phi)
      return false;

   nir_phi_instr *cond_phi = nir_instr_as_phi(cond->parent_instr);
   if (cond->parent_instr->block != header_block)
      return false;

   bool entry_val = false, continue_val = false;
   if (!phi_has_constant_from_outside_and_one_from_inside_loop(cond_phi,
                                                               prev_block,
                                                               &entry_val,
                                                               &continue_val))
      return false;

   /* If they both execute or both don't execute, this is a job for
    * nir_dead_cf, not this pass.
    */
   if ((entry_val && continue_val) || (!entry_val && !continue_val))
      return false;

   struct exec_list *continue_list, *entry_list;
   if (continue_val) {
      continue_list = &nif->then_list;
      entry_list = &nif->else_list;
   } else {
      continue_list = &nif->else_list;
      entry_list = &nif->then_list;
   }

   /* We want to be moving the contents of entry_list to above the loop so it
    * can't contain any break or continue instructions.
    */
   foreach_list_typed(nir_cf_node, cf_node, node, entry_list) {
      nir_foreach_block_in_cf_node(block, cf_node) {
         nir_instr *last_instr = nir_block_last_instr(block);
         if (last_instr && last_instr->type == nir_instr_type_jump)
            return false;
      }
   }

   /* We're about to re-arrange a bunch of blocks so make sure that we don't
    * have deref uses which cross block boundaries. We don't want a deref
    * accidentally ending up in a phi.
    */
   nir_rematerialize_derefs_in_use_blocks_impl(
      nir_cf_node_get_function(&loop->cf_node));

   /* Before we do anything, convert the loop to LCSSA. We're about to
    * replace a bunch of SSA defs with registers and this will prevent any of
    * it from leaking outside the loop.
    */
   nir_convert_loop_to_lcssa(loop);

   nir_block *after_if_block =
      nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node));

   /* Get rid of phis in the header block since we will be duplicating it */
   nir_lower_phis_to_regs_block(header_block);
   /* Get rid of phis after the if since dominance will change */
   nir_lower_phis_to_regs_block(after_if_block);

   /* Get rid of SSA defs in the pieces we're about to move around */
   nir_lower_ssa_defs_to_regs_block(header_block);
   nir_foreach_block_in_cf_node(block, &nif->cf_node)
      nir_lower_ssa_defs_to_regs_block(block);

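   /* The transformation below proceeds as three CF-list moves: a clone of
    * the loop header is placed before the loop, the contents of entry_list
    * follow it, and the original header is moved to the bottom of the loop,
    * just before the back edge.
    */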
   nir_cf_list header, tmp;
   nir_cf_extract(&header, nir_before_block(header_block),
                  nir_after_block(header_block));

   nir_cf_list_clone(&tmp, &header, &loop->cf_node, NULL);
   nir_cf_reinsert(&tmp, nir_before_cf_node(&loop->cf_node));
   nir_cf_extract(&tmp, nir_before_cf_list(entry_list),
                  nir_after_cf_list(entry_list));
   nir_cf_reinsert(&tmp, nir_before_cf_node(&loop->cf_node));

   nir_cf_reinsert(&header,
                   nir_after_block_before_jump(find_continue_block(loop)));

   bool continue_list_jumps =
      nir_block_ends_in_jump(exec_node_data(nir_block,
                                            exec_list_get_tail(continue_list),
                                            cf_node.node));

   nir_cf_extract(&tmp, nir_before_cf_list(continue_list),
                  nir_after_cf_list(continue_list));

   /* Get the continue block again as the previous reinsert might have
    * removed the block. Also, if both the continue list and the continue
    * block end in jump instructions, remove the jump from the latter, as it
    * will not be executed if we insert the continue list before it. */

   nir_block *continue_block = find_continue_block(loop);

   if (continue_list_jumps) {
      nir_instr *last_instr = nir_block_last_instr(continue_block);
      if (last_instr && last_instr->type == nir_instr_type_jump)
         nir_instr_remove(last_instr);
   }

   nir_cf_reinsert(&tmp,
                   nir_after_block_before_jump(continue_block));

   nir_cf_node_remove(&nif->cf_node);

   return true;
}

static bool
alu_instr_is_comparison(const nir_alu_instr *alu)
{
   switch (alu->op) {
   case nir_op_flt32:
   case nir_op_fge32:
   case nir_op_feq32:
   case nir_op_fneu32:
   case nir_op_ilt32:
   case nir_op_ult32:
   case nir_op_ige32:
   case nir_op_uge32:
   case nir_op_ieq32:
   case nir_op_ine32:
      return true;
   default:
      return nir_alu_instr_is_comparison(alu);
   }
}

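/* A single-input opcode whose base output type differs from its base input
 * type is a type conversion, e.g. f2i32 or i2f32.
 */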
static bool
alu_instr_is_type_conversion(const nir_alu_instr *alu)
{
   return nir_op_infos[alu->op].num_inputs == 1 &&
          nir_alu_type_get_base_type(nir_op_infos[alu->op].output_type) !=
          nir_alu_type_get_base_type(nir_op_infos[alu->op].input_types[0]);
}

/**
 * Splits ALU instructions that have a source that is a phi node
 *
 * ALU instructions in the header block of a loop that meet the following
 * criteria can be split.
 *
 * - The loop has no continue instructions other than the "natural" continue
 *   at the bottom of the loop.
 *
 * - At least one source of the instruction is a phi node from the header block.
 *
 * - The phi node selects a constant or undef from the block before the loop.
 *
 * - Any non-phi sources of the ALU instruction come from a block that
 *   dominates the block before the loop. The most common failure mode for
 *   this check is sources that are generated in the loop header block.
 *
 * The split process splits the original ALU instruction into two, one at the
 * bottom of the loop and one at the block before the loop. The instruction
 * before the loop computes the value on the first iteration, and the
 * instruction at the bottom computes the value on the second, third, and so
 * on. A new phi node is added to the header block that selects either the
 * instruction before the loop or the one at the end, and uses of the original
 * instruction are replaced by this phi.
 *
 * The splitting transforms a loop like:
 *
 *    vec1 32 ssa_8 = load_const (0x00000001)
 *    vec1 32 ssa_10 = load_const (0x00000000)
 *    // succs: block_1
 *    loop {
 *       block block_1:
 *       // preds: block_0 block_4
 *       vec1 32 ssa_11 = phi block_0: ssa_10, block_4: ssa_15
 *       vec1 32 ssa_12 = phi block_0: ssa_1, block_4: ssa_15
 *       vec1 32 ssa_13 = phi block_0: ssa_10, block_4: ssa_16
 *       vec1 32 ssa_14 = iadd ssa_11, ssa_8
 *       vec1 32 ssa_15 = b32csel ssa_13, ssa_14, ssa_12
 *       ...
 *       // succs: block_1
 *    }
 *
 * into:
 *
 *    vec1 32 ssa_8 = load_const (0x00000001)
 *    vec1 32 ssa_10 = load_const (0x00000000)
 *    vec1 32 ssa_22 = iadd ssa_10, ssa_8
 *    // succs: block_1
 *    loop {
 *       block block_1:
 *       // preds: block_0 block_4
 *       vec1 32 ssa_11 = phi block_0: ssa_10, block_4: ssa_15
 *       vec1 32 ssa_12 = phi block_0: ssa_1, block_4: ssa_15
 *       vec1 32 ssa_13 = phi block_0: ssa_10, block_4: ssa_16
 *       vec1 32 ssa_21 = phi block_0: ssa_22, block_4: ssa_20
 *       vec1 32 ssa_15 = b32csel ssa_13, ssa_21, ssa_12
 *       ...
 *       vec1 32 ssa_20 = iadd ssa_15, ssa_8
 *       // succs: block_1
 *    }
 */
static bool
opt_split_alu_of_phi(nir_builder *b, nir_loop *loop)
{
   bool progress = false;
   nir_block *header_block = nir_loop_first_block(loop);
   nir_block *const prev_block =
      nir_cf_node_as_block(nir_cf_node_prev(&loop->cf_node));

   /* It would be insane if this were not true */
   assert(_mesa_set_search(header_block->predecessors, prev_block));

   /* The loop must have exactly one continue block which could be a block
    * ending in a continue instruction or the "natural" continue from the
    * last block in the loop back to the top.
    */
   if (header_block->predecessors->entries != 2)
      return false;

   nir_foreach_instr_safe(instr, header_block) {
      if (instr->type != nir_instr_type_alu)
         continue;

      nir_alu_instr *const alu = nir_instr_as_alu(instr);

      /* nir_op_vec{2,3,4} and nir_op_mov are excluded because they can easily
       * lead to infinite optimization loops. Splitting comparisons can lead
       * to loop unrolling not recognizing loop terminators, and type
       * conversions also lead to regressions.
       */
      if (nir_op_is_vec(alu->op) ||
          alu_instr_is_comparison(alu) ||
          alu_instr_is_type_conversion(alu))
         continue;

      bool has_phi_src_from_prev_block = false;
      bool all_non_phi_exist_in_prev_block = true;
      bool is_prev_result_undef = true;
      bool is_prev_result_const = true;
      nir_ssa_def *prev_srcs[8];     // FINISHME: Array size?
      nir_ssa_def *continue_srcs[8]; // FINISHME: Array size?

      for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
         nir_instr *const src_instr = alu->src[i].src.ssa->parent_instr;

         /* If the source is a phi in the loop header block, then the
          * prev_srcs and continue_srcs will come from the different sources
          * of the phi.
          */
         if (src_instr->type == nir_instr_type_phi &&
             src_instr->block == header_block) {
            nir_phi_instr *const phi = nir_instr_as_phi(src_instr);

            /* Only strictly need to NULL out the pointers when the assertions
             * (below) are compiled in. Debugging a NULL pointer deref in the
             * wild is easier than debugging a random pointer deref, so set
             * NULL unconditionally just to be safe.
             */
            prev_srcs[i] = NULL;
            continue_srcs[i] = NULL;

            nir_foreach_phi_src(src_of_phi, phi) {
               if (src_of_phi->pred == prev_block) {
                  if (src_of_phi->src.ssa->parent_instr->type !=
                      nir_instr_type_ssa_undef) {
                     is_prev_result_undef = false;
                  }

                  if (src_of_phi->src.ssa->parent_instr->type !=
                      nir_instr_type_load_const) {
                     is_prev_result_const = false;
                  }

                  prev_srcs[i] = src_of_phi->src.ssa;
                  has_phi_src_from_prev_block = true;
               } else
                  continue_srcs[i] = src_of_phi->src.ssa;
            }

            assert(prev_srcs[i] != NULL);
            assert(continue_srcs[i] != NULL);
         } else {
            /* If the source is not a phi (or a phi in a block other than the
             * loop header), then the value must exist in prev_block.
             */
            if (!nir_block_dominates(src_instr->block, prev_block)) {
               all_non_phi_exist_in_prev_block = false;
               break;
            }

            prev_srcs[i] = alu->src[i].src.ssa;
            continue_srcs[i] = alu->src[i].src.ssa;
         }
      }

      if (has_phi_src_from_prev_block && all_non_phi_exist_in_prev_block &&
          (is_prev_result_undef || is_prev_result_const)) {
         nir_block *const continue_block = find_continue_block(loop);

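         /* Emit a clone of the ALU instruction before the loop that computes
          * the first-iteration value from the phi sources that come from
          * prev_block.
          */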
         b->cursor = nir_after_block(prev_block);
         nir_ssa_def *prev_value = clone_alu_and_replace_src_defs(b, alu, prev_srcs);

         /* Make a copy of the original ALU instruction. Replace the sources
          * of the new instruction that read a phi with an undef source from
          * prev_block with the non-undef source of that phi.
          *
          * Insert the new instruction at the end of the continue block.
          */
         b->cursor = nir_after_block_before_jump(continue_block);

         nir_ssa_def *const alu_copy =
            clone_alu_and_replace_src_defs(b, alu, continue_srcs);

         /* Make a new phi node that selects a value from prev_block and the
          * result of the new instruction from continue_block.
          */
         nir_phi_instr *const phi = nir_phi_instr_create(b->shader);
         nir_phi_src *phi_src;

         phi_src = ralloc(phi, nir_phi_src);
         phi_src->pred = prev_block;
         phi_src->src = nir_src_for_ssa(prev_value);
         exec_list_push_tail(&phi->srcs, &phi_src->node);

         phi_src = ralloc(phi, nir_phi_src);
         phi_src->pred = continue_block;
         phi_src->src = nir_src_for_ssa(alu_copy);
         exec_list_push_tail(&phi->srcs, &phi_src->node);

         nir_ssa_dest_init(&phi->instr, &phi->dest,
                           alu_copy->num_components, alu_copy->bit_size, NULL);

         b->cursor = nir_after_phis(header_block);
         nir_builder_instr_insert(b, &phi->instr);

         /* Modify all readers of the original ALU instruction to read the
          * result of the phi.
          */
         nir_foreach_use_safe(use_src, &alu->dest.dest.ssa) {
            nir_instr_rewrite_src(use_src->parent_instr,
                                  use_src,
                                  nir_src_for_ssa(&phi->dest.ssa));
         }

         nir_foreach_if_use_safe(use_src, &alu->dest.dest.ssa) {
            nir_if_rewrite_condition(use_src->parent_if,
                                     nir_src_for_ssa(&phi->dest.ssa));
         }

         /* Since the original ALU instruction no longer has any readers, just
          * remove it.
          */
         nir_instr_remove_v(&alu->instr);
         ralloc_free(alu);

         progress = true;
      }
   }

   return progress;
}

/**
 * Get the SSA value from a phi node that corresponds to a specific block
 */
static nir_ssa_def *
ssa_for_phi_from_block(nir_phi_instr *phi, nir_block *block)
{
   nir_foreach_phi_src(src, phi) {
      if (src->pred == block)
         return src->src.ssa;
   }

   assert(!"Block is not a predecessor of phi.");
   return NULL;
}

/**
 * Simplify a bcsel whose sources are all phi nodes from the loop header block
 *
 * bcsel instructions in a loop that meet the following criteria can be
 * converted to phi nodes:
 *
 * - The loop has no continue instructions other than the "natural" continue
 *   at the bottom of the loop.
 *
 * - All of the sources of the bcsel are phi nodes in the header block of the
 *   loop.
 *
 * - The phi node representing the condition of the bcsel instruction chooses
 *   only constant values.
 *
 * The constant value from the condition will select one of the other sources
 * when entered from outside the loop and the remaining source when entered
 * from the continue block. Since each of these sources is also a phi node in
 * the header block, the value of the phi node can be "evaluated." These
 * evaluated phi nodes provide the sources for a new phi node. All users of
 * the bcsel result are updated to use the phi node result.
 *
 * The replacement transforms loops like:
 *
 *    vec1 32 ssa_7 = undefined
 *    vec1 32 ssa_8 = load_const (0x00000001)
 *    vec1 32 ssa_9 = load_const (0x000000c8)
 *    vec1 32 ssa_10 = load_const (0x00000000)
 *    // succs: block_1
 *    loop {
 *       block block_1:
 *       // preds: block_0 block_4
 *       vec1 32 ssa_11 = phi block_0: ssa_1, block_4: ssa_14
 *       vec1 32 ssa_12 = phi block_0: ssa_10, block_4: ssa_15
 *       vec1 32 ssa_13 = phi block_0: ssa_7, block_4: ssa_25
 *       vec1 32 ssa_14 = b32csel ssa_12, ssa_13, ssa_11
 *       vec1 32 ssa_16 = ige32 ssa_14, ssa_9
 *       ...
 *       vec1 32 ssa_15 = load_const (0xffffffff)
 *       ...
 *       vec1 32 ssa_25 = iadd ssa_14, ssa_8
 *       // succs: block_1
 *    }
 *
 * into:
 *
 *    vec1 32 ssa_7 = undefined
 *    vec1 32 ssa_8 = load_const (0x00000001)
 *    vec1 32 ssa_9 = load_const (0x000000c8)
 *    vec1 32 ssa_10 = load_const (0x00000000)
 *    // succs: block_1
 *    loop {
 *       block block_1:
 *       // preds: block_0 block_4
 *       vec1 32 ssa_11 = phi block_0: ssa_1, block_4: ssa_14
 *       vec1 32 ssa_12 = phi block_0: ssa_10, block_4: ssa_15
 *       vec1 32 ssa_13 = phi block_0: ssa_7, block_4: ssa_25
 *       vec1 32 ssa_26 = phi block_0: ssa_1, block_4: ssa_25
 *       vec1 32 ssa_16 = ige32 ssa_26, ssa_9
 *       ...
 *       vec1 32 ssa_15 = load_const (0xffffffff)
 *       ...
 *       vec1 32 ssa_25 = iadd ssa_26, ssa_8
 *       // succs: block_1
 *    }
 *
 * \note
 * It may be possible to modify this function to not require a phi node as the
 * source of the bcsel that is selected when entering from outside the loop.
 * The only restriction is that the source must be generated outside the loop
 * (since it will become the source of a phi node in the header block of the
 * loop).
 */
static bool
opt_simplify_bcsel_of_phi(nir_builder *b, nir_loop *loop)
{
   bool progress = false;
   nir_block *header_block = nir_loop_first_block(loop);
   nir_block *const prev_block =
      nir_cf_node_as_block(nir_cf_node_prev(&loop->cf_node));

   /* It would be insane if this were not true */
   assert(_mesa_set_search(header_block->predecessors, prev_block));

   /* The loop must have exactly one continue block which could be a block
    * ending in a continue instruction or the "natural" continue from the
    * last block in the loop back to the top.
    */
   if (header_block->predecessors->entries != 2)
      return false;

   /* We can move any bcsel that is guaranteed to execute on every iteration
    * of a loop. For now this is accomplished by only taking bcsels from the
    * header_block. In the future, this could be expanded to include any
    * bcsel that must come before any break.
    *
    * For more details, see
    * https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/170#note_110305
    */
   nir_foreach_instr_safe(instr, header_block) {
      if (instr->type != nir_instr_type_alu)
         continue;

      nir_alu_instr *const bcsel = nir_instr_as_alu(instr);
      if (bcsel->op != nir_op_bcsel &&
          bcsel->op != nir_op_b32csel &&
          bcsel->op != nir_op_fcsel)
         continue;

      bool match = true;
      for (unsigned i = 0; i < 3; i++) {
         /* FINISHME: The abs and negate cases could be handled by adding
          * move instructions at the bottom of the continue block and more
          * phi nodes in the header_block.
          */
         if (!bcsel->src[i].src.is_ssa ||
             bcsel->src[i].src.ssa->parent_instr->type != nir_instr_type_phi ||
             bcsel->src[i].src.ssa->parent_instr->block != header_block ||
             bcsel->src[i].negate || bcsel->src[i].abs) {
            match = false;
            break;
         }
      }

      if (!match)
         continue;

      nir_phi_instr *const cond_phi =
         nir_instr_as_phi(bcsel->src[0].src.ssa->parent_instr);

      bool entry_val = false, continue_val = false;
      if (!phi_has_constant_from_outside_and_one_from_inside_loop(cond_phi,
                                                                  prev_block,
                                                                  &entry_val,
                                                                  &continue_val))
         continue;

      /* If they both execute or both don't execute, this is a job for
       * nir_dead_cf, not this pass.
       */
      if ((entry_val && continue_val) || (!entry_val && !continue_val))
         continue;

      const unsigned entry_src = entry_val ? 1 : 2;
      const unsigned continue_src = entry_val ? 2 : 1;

      /* Create a new phi node that selects the value for prev_block from
       * the bcsel source that is selected by entry_val and the value for
       * continue_block from the other bcsel source. Both sources have
       * already been verified to be phi nodes.
       */
      nir_block *const continue_block = find_continue_block(loop);
      nir_phi_instr *const phi = nir_phi_instr_create(b->shader);
      nir_phi_src *phi_src;

      phi_src = ralloc(phi, nir_phi_src);
      phi_src->pred = prev_block;
      phi_src->src =
         nir_src_for_ssa(ssa_for_phi_from_block(nir_instr_as_phi(bcsel->src[entry_src].src.ssa->parent_instr),
                                                prev_block));
      exec_list_push_tail(&phi->srcs, &phi_src->node);

      phi_src = ralloc(phi, nir_phi_src);
      phi_src->pred = continue_block;
      phi_src->src =
         nir_src_for_ssa(ssa_for_phi_from_block(nir_instr_as_phi(bcsel->src[continue_src].src.ssa->parent_instr),
                                                continue_block));
      exec_list_push_tail(&phi->srcs, &phi_src->node);

      nir_ssa_dest_init(&phi->instr,
                        &phi->dest,
                        nir_dest_num_components(bcsel->dest.dest),
                        nir_dest_bit_size(bcsel->dest.dest),
                        NULL);

      b->cursor = nir_after_phis(header_block);
      nir_builder_instr_insert(b, &phi->instr);

      /* Modify all readers of the bcsel instruction to read the result of
       * the phi.
       */
      nir_foreach_use_safe(use_src, &bcsel->dest.dest.ssa) {
         nir_instr_rewrite_src(use_src->parent_instr,
                               use_src,
                               nir_src_for_ssa(&phi->dest.ssa));
      }

      nir_foreach_if_use_safe(use_src, &bcsel->dest.dest.ssa) {
         nir_if_rewrite_condition(use_src->parent_if,
                                  nir_src_for_ssa(&phi->dest.ssa));
      }

      /* Since the original bcsel instruction no longer has any readers,
       * just remove it.
       */
      nir_instr_remove_v(&bcsel->instr);
      ralloc_free(bcsel);

      progress = true;
   }

   return progress;
}

static bool
is_block_empty(nir_block *block)
{
   return nir_cf_node_is_last(&block->cf_node) &&
          exec_list_is_empty(&block->instr_list);
}

static bool
nir_block_ends_in_continue(nir_block *block)
{
   if (exec_list_is_empty(&block->instr_list))
      return false;

   nir_instr *instr = nir_block_last_instr(block);
   return instr->type == nir_instr_type_jump &&
          nir_instr_as_jump(instr)->type == nir_jump_continue;
}

/**
 * This optimization turns:
 *
 *    loop {
 *       ...
 *       if (cond) {
 *          do_work_1();
 *          continue;
 *       } else {
 *       }
 *       do_work_2();
 *    }
 *
 * into:
 *
 *    loop {
 *       ...
 *       if (cond) {
 *          do_work_1();
 *          continue;
 *       } else {
 *          do_work_2();
 *       }
 *    }
 *
 * The continue should then be removed by nir_opt_trivial_continues() and the
 * loop can potentially be unrolled.
 *
 * Note: Unless the function param aggressive_last_continue==true, do_work_2()
 * is only ever blocks and nested loops. We avoid nesting other if-statements
 * in the branch as this can result in increased register pressure, and in
 * the i965 driver it causes a large amount of spilling in shader-db.
 * For RADV, however, nesting these if-statements allows further continues to
 * be removed and provides a significant FPS boost in Doom, which is why we
 * have opted for this special bool to enable more aggressive optimisations.
 * TODO: The GCM pass solves most of the spilling regressions in i965, if it
 * is ever enabled we should consider removing the aggressive_last_continue
 * param.
 */
static bool
opt_if_loop_last_continue(nir_loop *loop, bool aggressive_last_continue)
{
   nir_if *nif;
   bool then_ends_in_continue = false;
   bool else_ends_in_continue = false;

   /* Scan the control flow of the loop from the last to the first node
    * looking for an if-statement we can optimise.
    */
   nir_block *last_block = nir_loop_last_block(loop);
   nir_cf_node *if_node = nir_cf_node_prev(&last_block->cf_node);
   while (if_node) {
      if (if_node->type == nir_cf_node_if) {
         nif = nir_cf_node_as_if(if_node);
         nir_block *then_block = nir_if_last_then_block(nif);
         nir_block *else_block = nir_if_last_else_block(nif);

         then_ends_in_continue = nir_block_ends_in_continue(then_block);
         else_ends_in_continue = nir_block_ends_in_continue(else_block);

         /* If both branches end in a jump do nothing, this should be handled
          * by nir_opt_dead_cf().
          */
         if ((then_ends_in_continue || nir_block_ends_in_break(then_block)) &&
             (else_ends_in_continue || nir_block_ends_in_break(else_block)))
            return false;

         /* If we found a continue, or we are not scanning aggressively, stop
          * scanning and attempt the optimisation on this if-statement.
          */
         if (then_ends_in_continue || else_ends_in_continue ||
             !aggressive_last_continue)
            break;
      }

      if_node = nir_cf_node_prev(if_node);
   }

   /* If we didn't find an if to optimise return */
   if (!then_ends_in_continue && !else_ends_in_continue)
      return false;

   /* If there is nothing after the if-statement we bail */
   if (&nif->cf_node == nir_cf_node_prev(&last_block->cf_node) &&
       exec_list_is_empty(&last_block->instr_list))
      return false;

   /* Move the last block of the loop inside the last if-statement */
   nir_cf_list tmp;
   nir_cf_extract(&tmp, nir_after_cf_node(if_node),
                  nir_after_block(last_block));
   if (then_ends_in_continue)
      nir_cf_reinsert(&tmp, nir_after_cf_list(&nif->else_list));
   else
      nir_cf_reinsert(&tmp, nir_after_cf_list(&nif->then_list));

   /* In order to avoid running nir_lower_regs_to_ssa_impl() every time an if
    * opt makes progress we leave nir_opt_trivial_continues() to remove the
    * continue now that the end of the loop has been simplified.
    */

   return true;
}

/* Walk all the phis in the block immediately following the if statement and
 * swap the blocks.
 */
static void
rewrite_phi_predecessor_blocks(nir_if *nif,
                               nir_block *old_then_block,
                               nir_block *old_else_block,
                               nir_block *new_then_block,
                               nir_block *new_else_block)
{
   nir_block *after_if_block =
      nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node));

   nir_foreach_instr(instr, after_if_block) {
      if (instr->type != nir_instr_type_phi)
         continue;

      nir_phi_instr *phi = nir_instr_as_phi(instr);

      foreach_list_typed(nir_phi_src, src, node, &phi->srcs) {
         if (src->pred == old_then_block) {
            src->pred = new_then_block;
         } else if (src->pred == old_else_block) {
            src->pred = new_else_block;
         }
      }
   }
}

/**
 * This optimization turns:
 *
 *    if (cond) {
 *    } else {
 *       do_work();
 *    }
 *
 * into:
 *
 *    if (!cond) {
 *       do_work();
 *    } else {
 *    }
 */
static bool
opt_if_simplification(nir_builder *b, nir_if *nif)
{
   /* Only simplify if the then block is empty and the else block is not. */
   if (!is_block_empty(nir_if_first_then_block(nif)) ||
       is_block_empty(nir_if_first_else_block(nif)))
      return false;

   /* Make sure the condition is a comparison operation. */
   nir_instr *src_instr = nif->condition.ssa->parent_instr;
   if (src_instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *alu_instr = nir_instr_as_alu(src_instr);
   if (!nir_alu_instr_is_comparison(alu_instr))
      return false;

   /* Insert the inverted instruction and rewrite the condition. */
   b->cursor = nir_after_instr(&alu_instr->instr);

   nir_ssa_def *new_condition =
      nir_inot(b, &alu_instr->dest.dest.ssa);

   nir_if_rewrite_condition(nif, nir_src_for_ssa(new_condition));

   /* Grab pointers to the last then/else blocks for fixing up the phis. */
   nir_block *then_block = nir_if_last_then_block(nif);
   nir_block *else_block = nir_if_last_else_block(nif);

   if (nir_block_ends_in_jump(else_block)) {
      /* Even though this if statement has a jump on one side, we may still
       * have phis afterwards. Single-source phis can be produced by loop
       * unrolling or dead control-flow passes and are perfectly legal. Run
       * a quick phi removal on the block after the if to clean up any such
       * phis.
       */
      nir_block *const next_block =
         nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node));
      nir_opt_remove_phis_block(next_block);
   }

   rewrite_phi_predecessor_blocks(nif, then_block, else_block, else_block,
                                  then_block);

   /* Finally, move the else block to the then block. */
   nir_cf_list tmp;
   nir_cf_extract(&tmp, nir_before_cf_list(&nif->else_list),
                  nir_after_cf_list(&nif->else_list));
   nir_cf_reinsert(&tmp, nir_before_cf_list(&nif->then_list));

   return true;
}

/**
 * This optimization simplifies potential loop terminators which then allows
 * other passes such as opt_if_simplification() and loop unrolling to progress
 * further:
 *
 *    if (cond) {
 *       ... then block instructions ...
 *    } else {
 *       ...
 *       break;
 *    }
 *
 * into:
 *
 *    if (cond) {
 *    } else {
 *       ...
 *       break;
 *    }
 *    ... then block instructions ...
 */
static bool
opt_if_loop_terminator(nir_if *nif)
{
   nir_block *break_blk = NULL;
   nir_block *continue_from_blk = NULL;
   bool continue_from_then = true;

   nir_block *last_then = nir_if_last_then_block(nif);
   nir_block *last_else = nir_if_last_else_block(nif);

   if (nir_block_ends_in_break(last_then)) {
      break_blk = last_then;
      continue_from_blk = last_else;
      continue_from_then = false;
   } else if (nir_block_ends_in_break(last_else)) {
      break_blk = last_else;
      continue_from_blk = last_then;
   }

   /* Continue if the if-statement contained no jumps at all */
   if (!break_blk)
      return false;

   /* If the continue from block is empty then return as there is nothing to
    * move.
    */
   nir_block *first_continue_from_blk = continue_from_then ?
      nir_if_first_then_block(nif) :
      nir_if_first_else_block(nif);
   if (is_block_empty(first_continue_from_blk))
      return false;

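   /* If the continue-from block itself ends in a jump, it is not safe to
    * move its contents to after the if-statement, so bail.
    */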
   if (nir_block_ends_in_jump(continue_from_blk))
      return false;

   /* Even though this if statement has a jump on one side, we may still have
    * phis afterwards. Single-source phis can be produced by loop unrolling
    * or dead control-flow passes and are perfectly legal. Run a quick phi
    * removal on the block after the if to clean up any such phis.
    */
   nir_opt_remove_phis_block(nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node)));

   /* Finally, move the continue from branch after the if-statement. */
   nir_cf_list tmp;
   nir_cf_extract(&tmp, nir_before_block(first_continue_from_blk),
                  nir_after_block(continue_from_blk));
   nir_cf_reinsert(&tmp, nir_after_cf_node(&nif->cf_node));

   return true;
}

static bool
evaluate_if_condition(nir_if *nif, nir_cursor cursor, bool *value)
{
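   /* If the cursor's block is dominated by the first block of one branch of
    * the if, then the condition is known to have the corresponding constant
    * value at that point in the program.
    */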
   nir_block *use_block = nir_cursor_current_block(cursor);
   if (nir_block_dominates(nir_if_first_then_block(nif), use_block)) {
      *value = true;
      return true;
   } else if (nir_block_dominates(nir_if_first_else_block(nif), use_block)) {
      *value = false;
      return true;
   } else {
      return false;
   }
}

static nir_ssa_def *
clone_alu_and_replace_src_defs(nir_builder *b, const nir_alu_instr *alu,
                               nir_ssa_def **src_defs)
{
   nir_alu_instr *nalu = nir_alu_instr_create(b->shader, alu->op);
   nalu->exact = alu->exact;

   nir_ssa_dest_init(&nalu->instr, &nalu->dest.dest,
                     alu->dest.dest.ssa.num_components,
                     alu->dest.dest.ssa.bit_size, alu->dest.dest.ssa.name);

   nalu->dest.saturate = alu->dest.saturate;
   nalu->dest.write_mask = alu->dest.write_mask;

   for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
      assert(alu->src[i].src.is_ssa);
      nalu->src[i].src = nir_src_for_ssa(src_defs[i]);
      nalu->src[i].negate = alu->src[i].negate;
      nalu->src[i].abs = alu->src[i].abs;
      memcpy(nalu->src[i].swizzle, alu->src[i].swizzle,
             sizeof(nalu->src[i].swizzle));
   }

   nir_builder_instr_insert(b, &nalu->instr);

   return &nalu->dest.dest.ssa;
}

/*
 * This propagates if condition evaluation down the chain of some alu
 * instructions. For example by checking the use of some of the following alu
 * instruction we can eventually replace ssa_107 with NIR_TRUE.
 *
 *    loop {
 *       block block_1:
 *       vec1 32 ssa_85 = load_const (0x00000002)
 *       vec1 32 ssa_86 = ieq ssa_48, ssa_85
 *       vec1 32 ssa_87 = load_const (0x00000001)
 *       vec1 32 ssa_88 = ieq ssa_48, ssa_87
 *       vec1 32 ssa_89 = ior ssa_86, ssa_88
 *       vec1 32 ssa_90 = ieq ssa_48, ssa_0
 *       vec1 32 ssa_91 = ior ssa_89, ssa_90
 *       if ssa_86 {
 *          block block_2:
 *          ...
 *          break
 *       } else {
 *          block block_3:
 *       }
 *       block block_4:
 *       if ssa_88 {
 *          block block_5:
 *          ...
 *          break
 *       } else {
 *          block block_6:
 *       }
 *       block block_7:
 *       if ssa_90 {
 *          block block_8:
 *          ...
 *          break
 *       } else {
 *          block block_9:
 *       }
 *       block block_10:
 *       vec1 32 ssa_107 = inot ssa_91
 *       if ssa_107 {
 *          block block_11:
 *          break
 *       } else {
 *          block block_12:
 *       }
 *    }
 */
static bool
propagate_condition_eval(nir_builder *b, nir_if *nif, nir_src *use_src,
                         nir_src *alu_use, nir_alu_instr *alu,
                         bool is_if_condition)
{
   bool bool_value;
   b->cursor = nir_before_src(alu_use, is_if_condition);
   if (!evaluate_if_condition(nif, b->cursor, &bool_value))
      return false;

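   /* Clone the ALU instruction, swapping the use of the if condition for the
    * constant boolean value it is known to have at this point.
    */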
   nir_ssa_def *def[4] = {0};
   for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
      if (alu->src[i].src.ssa == use_src->ssa) {
         def[i] = nir_imm_bool(b, bool_value);
      } else {
         def[i] = alu->src[i].src.ssa;
      }
   }

   nir_ssa_def *nalu = clone_alu_and_replace_src_defs(b, alu, def);

   /* Rewrite use to use new alu instruction */
   nir_src new_src = nir_src_for_ssa(nalu);

   if (is_if_condition)
      nir_if_rewrite_condition(alu_use->parent_if, new_src);
   else
      nir_instr_rewrite_src(alu_use->parent_instr, alu_use, new_src);

   return true;
}

static bool
can_propagate_through_alu(nir_src *src)
{
   if (src->parent_instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *alu = nir_instr_as_alu(src->parent_instr);
   switch (alu->op) {
   case nir_op_ior:
   case nir_op_iand:
   case nir_op_inot:
   case nir_op_b2i32:
      return true;
   case nir_op_bcsel:
      return src == &alu->src[0].src;
   default:
      return false;
   }
}

static bool
evaluate_condition_use(nir_builder *b, nir_if *nif, nir_src *use_src,
                       bool is_if_condition)
{
   bool progress = false;

   b->cursor = nir_before_src(use_src, is_if_condition);

   bool bool_value;
   if (evaluate_if_condition(nif, b->cursor, &bool_value)) {
      /* Rewrite use to use const */
      nir_src imm_src = nir_src_for_ssa(nir_imm_bool(b, bool_value));
      if (is_if_condition)
         nir_if_rewrite_condition(use_src->parent_if, imm_src);
      else
         nir_instr_rewrite_src(use_src->parent_instr, use_src, imm_src);

      progress = true;
   }

   if (!is_if_condition && can_propagate_through_alu(use_src)) {
      nir_alu_instr *alu = nir_instr_as_alu(use_src->parent_instr);

      nir_foreach_use_safe(alu_use, &alu->dest.dest.ssa) {
         progress |= propagate_condition_eval(b, nif, use_src, alu_use, alu,
                                              false);
      }

      nir_foreach_if_use_safe(alu_use, &alu->dest.dest.ssa) {
         progress |= propagate_condition_eval(b, nif, use_src, alu_use, alu,
                                              true);
      }
   }

   return progress;
}

static bool
opt_if_evaluate_condition_use(nir_builder *b, nir_if *nif)
{
   bool progress = false;

   /* Evaluate any uses of the if condition inside the if branches */
   assert(nif->condition.is_ssa);
   nir_foreach_use_safe(use_src, nif->condition.ssa) {
      progress |= evaluate_condition_use(b, nif, use_src, false);
   }

   nir_foreach_if_use_safe(use_src, nif->condition.ssa) {
      if (use_src->parent_if != nif)
         progress |= evaluate_condition_use(b, nif, use_src, true);
   }

   return progress;
}

static void
simple_merge_if(nir_if *dest_if, nir_if *src_if, bool dest_if_then,
                bool src_if_then)
{
   /* Now merge the if branch */
   nir_block *dest_blk = dest_if_then ? nir_if_last_then_block(dest_if)
                                      : nir_if_last_else_block(dest_if);

   struct exec_list *list = src_if_then ? &src_if->then_list
                                        : &src_if->else_list;

   nir_cf_list if_cf_list;
   nir_cf_extract(&if_cf_list, nir_before_cf_list(list),
                  nir_after_cf_list(list));
   nir_cf_reinsert(&if_cf_list, nir_after_block(dest_blk));
}

static bool
opt_if_merge(nir_if *nif)
{
   bool progress = false;

   nir_block *next_blk = nir_cf_node_cf_tree_next(&nif->cf_node);
   if (next_blk && nif->condition.is_ssa) {
      nir_if *next_if = nir_block_get_following_if(next_blk);
      if (next_if && next_if->condition.is_ssa) {

         /* Here we merge two consecutive ifs that have the same
          * condition e.g:
          *
          *    if ssa_12 {
          *       ...
          *    } else {
          *       ...
          *    }
          *    if ssa_12 {
          *       ...
          *    } else {
          *       ...
          *    }
          *
          * Note: This only merges if-statements when the block between them
          * is empty. The reason we don't try to merge ifs that just have
          * phis between them is because this can result in increased
          * register pressure. For example when merging if ladders created
          * by indirect indexing.
          */
         if (nif->condition.ssa == next_if->condition.ssa &&
             exec_list_is_empty(&next_blk->instr_list)) {

            simple_merge_if(nif, next_if, true, true);
            simple_merge_if(nif, next_if, false, false);

            nir_block *new_then_block = nir_if_last_then_block(nif);
            nir_block *new_else_block = nir_if_last_else_block(nif);

            nir_block *old_then_block = nir_if_last_then_block(next_if);
            nir_block *old_else_block = nir_if_last_else_block(next_if);

            /* Rewrite the predecessor block for any phis following the
             * second if-statement.
             */
            rewrite_phi_predecessor_blocks(next_if, old_then_block,
                                           old_else_block,
                                           new_then_block,
                                           new_else_block);

            /* Move phis after merged if to avoid them being deleted when we
             * remove the merged if-statement.
             */
            nir_block *after_next_if_block =
               nir_cf_node_as_block(nir_cf_node_next(&next_if->cf_node));

            nir_foreach_instr_safe(instr, after_next_if_block) {
               if (instr->type != nir_instr_type_phi)
                  break;

               exec_node_remove(&instr->node);
               exec_list_push_tail(&next_blk->instr_list, &instr->node);
               instr->block = next_blk;
            }

            nir_cf_node_remove(&next_if->cf_node);

            progress = true;
         }
      }
   }

   return progress;
}

static bool
opt_if_cf_list(nir_builder *b, struct exec_list *cf_list,
               bool aggressive_last_continue)
{
   bool progress = false;
   foreach_list_typed(nir_cf_node, cf_node, node, cf_list) {
      switch (cf_node->type) {
      case nir_cf_node_block:
         break;

      case nir_cf_node_if: {
         nir_if *nif = nir_cf_node_as_if(cf_node);
         progress |= opt_if_cf_list(b, &nif->then_list,
                                    aggressive_last_continue);
         progress |= opt_if_cf_list(b, &nif->else_list,
                                    aggressive_last_continue);
         progress |= opt_if_loop_terminator(nif);
         progress |= opt_if_merge(nif);
         progress |= opt_if_simplification(b, nif);
         break;
      }

      case nir_cf_node_loop: {
         nir_loop *loop = nir_cf_node_as_loop(cf_node);
         progress |= opt_if_cf_list(b, &loop->body,
                                    aggressive_last_continue);
         progress |= opt_simplify_bcsel_of_phi(b, loop);
         progress |= opt_if_loop_last_continue(loop,
                                               aggressive_last_continue);
         break;
      }

      case nir_cf_node_function:
         unreachable("Invalid cf type");
      }
   }

   return progress;
}

static bool
opt_peel_loop_initial_if_cf_list(struct exec_list *cf_list)
{
   bool progress = false;
   foreach_list_typed(nir_cf_node, cf_node, node, cf_list) {
      switch (cf_node->type) {
      case nir_cf_node_block:
         break;

      case nir_cf_node_if: {
         nir_if *nif = nir_cf_node_as_if(cf_node);
         progress |= opt_peel_loop_initial_if_cf_list(&nif->then_list);
         progress |= opt_peel_loop_initial_if_cf_list(&nif->else_list);
         break;
      }

      case nir_cf_node_loop: {
         nir_loop *loop = nir_cf_node_as_loop(cf_node);
         progress |= opt_peel_loop_initial_if_cf_list(&loop->body);
         progress |= opt_peel_loop_initial_if(loop);
         break;
      }

      case nir_cf_node_function:
         unreachable("Invalid cf type");
      }
   }

   return progress;
}

/**
 * These optimisations depend on nir_metadata_block_index and therefore must
 * not do anything to cause the metadata to become invalid.
 */
static bool
opt_if_safe_cf_list(nir_builder *b, struct exec_list *cf_list)
{
   bool progress = false;
   foreach_list_typed(nir_cf_node, cf_node, node, cf_list) {
      switch (cf_node->type) {
      case nir_cf_node_block:
         break;

      case nir_cf_node_if: {
         nir_if *nif = nir_cf_node_as_if(cf_node);
         progress |= opt_if_safe_cf_list(b, &nif->then_list);
         progress |= opt_if_safe_cf_list(b, &nif->else_list);
         progress |= opt_if_evaluate_condition_use(b, nif);
         break;
      }

      case nir_cf_node_loop: {
         nir_loop *loop = nir_cf_node_as_loop(cf_node);
         progress |= opt_if_safe_cf_list(b, &loop->body);
         progress |= opt_split_alu_of_phi(b, loop);
         break;
      }

      case nir_cf_node_function:
         unreachable("Invalid cf type");
      }
   }

   return progress;
}

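/* A minimal sketch of how a driver might drive this pass from its
 * optimization loop (hypothetical driver code, not part of this file):
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       progress |= nir_opt_if(shader, false);
 *       progress |= nir_opt_dead_cf(shader);
 *       progress |= nir_opt_trivial_continues(shader);
 *    } while (progress);
 */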
bool
nir_opt_if(nir_shader *shader, bool aggressive_last_continue)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl == NULL)
         continue;

      nir_builder b;
      nir_builder_init(&b, function->impl);

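      /* Run the metadata-safe optimizations first, while block indices and
       * dominance information are known to be valid, then run the passes
       * that restructure control flow.
       */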
      nir_metadata_require(function->impl, nir_metadata_block_index |
                           nir_metadata_dominance);
      progress |= opt_if_safe_cf_list(&b, &function->impl->body);
      nir_metadata_preserve(function->impl, nir_metadata_block_index |
                            nir_metadata_dominance);

      bool preserve = true;

      if (opt_if_cf_list(&b, &function->impl->body, aggressive_last_continue)) {
         preserve = false;
         progress = true;
      }

      if (opt_peel_loop_initial_if_cf_list(&function->impl->body)) {
         preserve = false;
         progress = true;

         /* If that made progress, we're no longer really in SSA form. We
          * need to convert registers back into SSA defs and clean up SSA
          * defs that don't dominate their uses.
          */
         nir_lower_regs_to_ssa_impl(function->impl);
      }

      if (preserve) {
         nir_metadata_preserve(function->impl, nir_metadata_all);
      } else {
         nir_metadata_preserve(function->impl, nir_metadata_none);
      }
   }

   return progress;
}