src/compiler/nir/nir_opt_if.c
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir.h"
#include "nir/nir_builder.h"
#include "nir_constant_expressions.h"
#include "nir_control_flow.h"
#include "nir_loop_analyze.h"

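/* Forward declaration: this helper is defined near the bottom of the file
 * but is used by both opt_split_alu_of_phi() and propagate_condition_eval().
 */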
static nir_ssa_def *clone_alu_and_replace_src_defs(nir_builder *b,
                                                   const nir_alu_instr *alu,
                                                   nir_ssa_def **src_defs);

/**
 * Gets the single block that jumps back to the loop header. Assumes there is
 * exactly one such block.
 */
static nir_block*
find_continue_block(nir_loop *loop)
{
   nir_block *header_block = nir_loop_first_block(loop);
   nir_block *prev_block =
      nir_cf_node_as_block(nir_cf_node_prev(&loop->cf_node));

   assert(header_block->predecessors->entries == 2);

   set_foreach(header_block->predecessors, pred_entry) {
      if (pred_entry->key != prev_block)
         return (nir_block*)pred_entry->key;
   }

   unreachable("Continue block not found!");
}

/**
 * Does a phi have one constant value from outside a loop and one from inside?
 */
static bool
phi_has_constant_from_outside_and_one_from_inside_loop(nir_phi_instr *phi,
                                                       const nir_block *entry_block,
                                                       uint32_t *entry_val,
                                                       uint32_t *continue_val)
{
   /* We already know we have exactly one continue */
   assert(exec_list_length(&phi->srcs) == 2);

   *entry_val = 0;
   *continue_val = 0;

   nir_foreach_phi_src(src, phi) {
      assert(src->src.is_ssa);
      nir_const_value *const_src = nir_src_as_const_value(src->src);
      if (!const_src)
         return false;

      if (src->pred != entry_block) {
         *continue_val = const_src[0].u32;
      } else {
         *entry_val = const_src[0].u32;
      }
   }

   return true;
}
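
/* Worked example: for the loop in the comment below, the condition phi
 *    vec1 32 ssa_3 = phi block_0: ssa_0, block_7: ssa_1
 * selects ssa_0 (0x00000000) from outside the loop and ssa_1 (0xffffffff)
 * from inside, so the helper above returns true with entry_val = 0 and
 * continue_val = 0xffffffff.
 */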

/**
 * This optimization detects if statements at the tops of loops where the
 * condition is a phi node of two constants and moves half of the if to above
 * the loop and the other half of the if to the end of the loop. A simple for
 * loop "for (int i = 0; i < 4; i++)", when run through the SPIR-V front-end,
 * ends up looking something like this:
 *
 *    vec1 32 ssa_0 = load_const (0x00000000)
 *    vec1 32 ssa_1 = load_const (0xffffffff)
 *    loop {
 *       block block_1:
 *       vec1 32 ssa_2 = phi block_0: ssa_0, block_7: ssa_5
 *       vec1 32 ssa_3 = phi block_0: ssa_0, block_7: ssa_1
 *       if ssa_3 {
 *          block block_2:
 *          vec1 32 ssa_4 = load_const (0x00000001)
 *          vec1 32 ssa_5 = iadd ssa_2, ssa_4
 *       } else {
 *          block block_3:
 *       }
 *       block block_4:
 *       vec1 32 ssa_6 = load_const (0x00000004)
 *       vec1 32 ssa_7 = ilt ssa_5, ssa_6
 *       if ssa_7 {
 *          block block_5:
 *       } else {
 *          block block_6:
 *          break
 *       }
 *       block block_7:
 *    }
 *
 * This turns it into something like this:
 *
 *    // Stuff from block 1
 *    // Stuff from block 3
 *    loop {
 *       block block_1:
 *       vec1 32 ssa_2 = phi block_0: ssa_0, block_7: ssa_5
 *       vec1 32 ssa_6 = load_const (0x00000004)
 *       vec1 32 ssa_7 = ilt ssa_2, ssa_6
 *       if ssa_7 {
 *          block block_5:
 *       } else {
 *          block block_6:
 *          break
 *       }
 *       block block_7:
 *       // Stuff from block 1
 *       // Stuff from block 2
 *       vec1 32 ssa_4 = load_const (0x00000001)
 *       vec1 32 ssa_5 = iadd ssa_2, ssa_4
 *    }
 */
static bool
opt_peel_loop_initial_if(nir_loop *loop)
{
   nir_block *header_block = nir_loop_first_block(loop);
   nir_block *const prev_block =
      nir_cf_node_as_block(nir_cf_node_prev(&loop->cf_node));

   /* It would be insane if this were not true */
   assert(_mesa_set_search(header_block->predecessors, prev_block));

   /* The loop must have exactly one continue block which could be a block
    * ending in a continue instruction or the "natural" continue from the
    * last block in the loop back to the top.
    */
   if (header_block->predecessors->entries != 2)
      return false;

   nir_cf_node *if_node = nir_cf_node_next(&header_block->cf_node);
   if (!if_node || if_node->type != nir_cf_node_if)
      return false;

   nir_if *nif = nir_cf_node_as_if(if_node);
   assert(nif->condition.is_ssa);

   nir_ssa_def *cond = nif->condition.ssa;
   if (cond->parent_instr->type != nir_instr_type_phi)
      return false;

   nir_phi_instr *cond_phi = nir_instr_as_phi(cond->parent_instr);
   if (cond->parent_instr->block != header_block)
      return false;

   uint32_t entry_val = 0, continue_val = 0;
   if (!phi_has_constant_from_outside_and_one_from_inside_loop(cond_phi,
                                                               prev_block,
                                                               &entry_val,
                                                               &continue_val))
      return false;

   /* If they both execute or both don't execute, this is a job for
    * nir_dead_cf, not this pass.
    */
   if ((entry_val && continue_val) || (!entry_val && !continue_val))
      return false;

   struct exec_list *continue_list, *entry_list;
   if (continue_val) {
      continue_list = &nif->then_list;
      entry_list = &nif->else_list;
   } else {
      continue_list = &nif->else_list;
      entry_list = &nif->then_list;
   }

   /* We want to move the contents of entry_list to above the loop, so it
    * must not contain any break or continue instructions.
    */
   foreach_list_typed(nir_cf_node, cf_node, node, entry_list) {
      nir_foreach_block_in_cf_node(block, cf_node) {
         nir_instr *last_instr = nir_block_last_instr(block);
         if (last_instr && last_instr->type == nir_instr_type_jump)
            return false;
      }
   }

   /* We're about to re-arrange a bunch of blocks so make sure that we don't
    * have deref uses which cross block boundaries. We don't want a deref
    * accidentally ending up in a phi.
    */
   nir_rematerialize_derefs_in_use_blocks_impl(
      nir_cf_node_get_function(&loop->cf_node));

   /* Before we do anything, convert the loop to LCSSA. We're about to
    * replace a bunch of SSA defs with registers and this will prevent any of
    * it from leaking outside the loop.
    */
   nir_convert_loop_to_lcssa(loop);

   nir_block *after_if_block =
      nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node));

   /* Get rid of phis in the header block since we will be duplicating it */
   nir_lower_phis_to_regs_block(header_block);
   /* Get rid of phis after the if since dominance will change */
   nir_lower_phis_to_regs_block(after_if_block);

   /* Get rid of SSA defs in the pieces we're about to move around */
   nir_lower_ssa_defs_to_regs_block(header_block);
   nir_foreach_block_in_cf_node(block, &nif->cf_node)
      nir_lower_ssa_defs_to_regs_block(block);

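   /* The transformation proper: extract the original header, insert a clone
    * of it before the loop, hoist entry_list out in front of the loop as
    * well, and finally re-insert the original header at the bottom of the
    * loop, just before the back edge.
    */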
   nir_cf_list header, tmp;
   nir_cf_extract(&header, nir_before_block(header_block),
                  nir_after_block(header_block));

   nir_cf_list_clone(&tmp, &header, &loop->cf_node, NULL);
   nir_cf_reinsert(&tmp, nir_before_cf_node(&loop->cf_node));
   nir_cf_extract(&tmp, nir_before_cf_list(entry_list),
                  nir_after_cf_list(entry_list));
   nir_cf_reinsert(&tmp, nir_before_cf_node(&loop->cf_node));

   nir_cf_reinsert(&header,
                   nir_after_block_before_jump(find_continue_block(loop)));

   bool continue_list_jumps =
      nir_block_ends_in_jump(exec_node_data(nir_block,
                                            exec_list_get_tail(continue_list),
                                            cf_node.node));

   nir_cf_extract(&tmp, nir_before_cf_list(continue_list),
                  nir_after_cf_list(continue_list));

   /* Get the continue block again, as the previous reinsert might have
    * removed the block.  Also, if both the continue list and the continue
    * block end in jump instructions, remove the jump from the latter, as it
    * will not be executed if we insert the continue list before it.
    */
   nir_block *continue_block = find_continue_block(loop);

   if (continue_list_jumps) {
      nir_instr *last_instr = nir_block_last_instr(continue_block);
      if (last_instr && last_instr->type == nir_instr_type_jump)
         nir_instr_remove(last_instr);
   }

   nir_cf_reinsert(&tmp,
                   nir_after_block_before_jump(continue_block));

   nir_cf_node_remove(&nif->cf_node);

   return true;
}

static bool
alu_instr_is_comparison(const nir_alu_instr *alu)
{
   switch (alu->op) {
   case nir_op_flt32:
   case nir_op_fge32:
   case nir_op_feq32:
   case nir_op_fne32:
   case nir_op_ilt32:
   case nir_op_ult32:
   case nir_op_ige32:
   case nir_op_uge32:
   case nir_op_ieq32:
   case nir_op_ine32:
      return true;
   default:
      return nir_alu_instr_is_comparison(alu);
   }
}

static bool
alu_instr_is_type_conversion(const nir_alu_instr *alu)
{
   return nir_op_infos[alu->op].num_inputs == 1 &&
          nir_alu_type_get_base_type(nir_op_infos[alu->op].output_type) !=
          nir_alu_type_get_base_type(nir_op_infos[alu->op].input_types[0]);
}
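
/* For example, nir_op_i2f32 has a single input whose base type (int) differs
 * from its output base type (float), so it counts as a conversion here,
 * while nir_op_mov (same base type) and nir_op_iadd (two inputs) do not.
 */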

/**
 * Splits ALU instructions that have a source that is a phi node
 *
 * ALU instructions in the header block of a loop that meet the following
 * criteria can be split.
 *
 * - The loop has no continue instructions other than the "natural" continue
 *   at the bottom of the loop.
 *
 * - At least one source of the instruction is a phi node from the header block.
 *
 * - The phi node selects a constant or undef from the block before the loop.
 *
 * - Any non-phi sources of the ALU instruction come from a block that
 *   dominates the block before the loop.  The most common failure mode for
 *   this check is sources that are generated in the loop header block.
 *
 * The split process splits the original ALU instruction into two, one at the
 * bottom of the loop and one at the block before the loop.  The instruction
 * before the loop computes the value on the first iteration, and the
 * instruction at the bottom computes the value on the second, third, and so
 * on.  A new phi node is added to the header block that selects either the
 * instruction before the loop or the one at the end, and uses of the original
 * instruction are replaced by this phi.
 *
 * The splitting transforms a loop like:
 *
 *    vec1 32 ssa_8 = load_const (0x00000001)
 *    vec1 32 ssa_10 = load_const (0x00000000)
 *    // succs: block_1
 *    loop {
 *       block block_1:
 *       // preds: block_0 block_4
 *       vec1 32 ssa_11 = phi block_0: ssa_10, block_4: ssa_15
 *       vec1 32 ssa_12 = phi block_0: ssa_1, block_4: ssa_15
 *       vec1 32 ssa_13 = phi block_0: ssa_10, block_4: ssa_16
 *       vec1 32 ssa_14 = iadd ssa_11, ssa_8
 *       vec1 32 ssa_15 = b32csel ssa_13, ssa_14, ssa_12
 *       ...
 *       // succs: block_1
 *    }
 *
 * into:
 *
 *    vec1 32 ssa_8 = load_const (0x00000001)
 *    vec1 32 ssa_10 = load_const (0x00000000)
 *    vec1 32 ssa_22 = iadd ssa_10, ssa_8
 *    // succs: block_1
 *    loop {
 *       block block_1:
 *       // preds: block_0 block_4
 *       vec1 32 ssa_11 = phi block_0: ssa_10, block_4: ssa_15
 *       vec1 32 ssa_12 = phi block_0: ssa_1, block_4: ssa_15
 *       vec1 32 ssa_13 = phi block_0: ssa_10, block_4: ssa_16
 *       vec1 32 ssa_21 = phi block_0: ssa_22, block_4: ssa_20
 *       vec1 32 ssa_15 = b32csel ssa_13, ssa_21, ssa_12
 *       ...
 *       vec1 32 ssa_20 = iadd ssa_15, ssa_8
 *       // succs: block_1
 *    }
 */
static bool
opt_split_alu_of_phi(nir_builder *b, nir_loop *loop)
{
   bool progress = false;
   nir_block *header_block = nir_loop_first_block(loop);
   nir_block *const prev_block =
      nir_cf_node_as_block(nir_cf_node_prev(&loop->cf_node));

   /* It would be insane if this were not true */
   assert(_mesa_set_search(header_block->predecessors, prev_block));

   /* The loop must have exactly one continue block which could be a block
    * ending in a continue instruction or the "natural" continue from the
    * last block in the loop back to the top.
    */
   if (header_block->predecessors->entries != 2)
      return false;

   nir_foreach_instr_safe(instr, header_block) {
      if (instr->type != nir_instr_type_alu)
         continue;

      nir_alu_instr *const alu = nir_instr_as_alu(instr);

      /* nir_op_vec{2,3,4} and nir_op_mov are excluded because they can easily
       * lead to infinite optimization loops.  Splitting comparisons can lead
       * to loop unrolling not recognizing loop terminators, and type
       * conversions also lead to regressions.
       */
      if (alu->op == nir_op_vec2 ||
          alu->op == nir_op_vec3 ||
          alu->op == nir_op_vec4 ||
          alu->op == nir_op_mov ||
          alu_instr_is_comparison(alu) ||
          alu_instr_is_type_conversion(alu))
         continue;

      bool has_phi_src_from_prev_block = false;
      bool all_non_phi_exist_in_prev_block = true;
      bool is_prev_result_undef = true;
      bool is_prev_result_const = true;
      nir_ssa_def *prev_srcs[8];     // FINISHME: Array size?
      nir_ssa_def *continue_srcs[8]; // FINISHME: Array size?

      for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
         nir_instr *const src_instr = alu->src[i].src.ssa->parent_instr;

         /* If the source is a phi in the loop header block, then the
          * prev_srcs and continue_srcs will come from the different sources
          * of the phi.
          */
         if (src_instr->type == nir_instr_type_phi &&
             src_instr->block == header_block) {
            nir_phi_instr *const phi = nir_instr_as_phi(src_instr);

            /* Only strictly need to NULL out the pointers when the assertions
             * (below) are compiled in.  Debugging a NULL pointer deref in the
             * wild is easier than debugging a random pointer deref, so set
             * NULL unconditionally just to be safe.
             */
            prev_srcs[i] = NULL;
            continue_srcs[i] = NULL;

            nir_foreach_phi_src(src_of_phi, phi) {
               if (src_of_phi->pred == prev_block) {
                  if (src_of_phi->src.ssa->parent_instr->type !=
                      nir_instr_type_ssa_undef) {
                     is_prev_result_undef = false;
                  }

                  if (src_of_phi->src.ssa->parent_instr->type !=
                      nir_instr_type_load_const) {
                     is_prev_result_const = false;
                  }

                  prev_srcs[i] = src_of_phi->src.ssa;
                  has_phi_src_from_prev_block = true;
               } else
                  continue_srcs[i] = src_of_phi->src.ssa;
            }

            assert(prev_srcs[i] != NULL);
            assert(continue_srcs[i] != NULL);
         } else {
            /* If the source is not a phi (or a phi in a block other than the
             * loop header), then the value must exist in prev_block.
             */
            if (!nir_block_dominates(src_instr->block, prev_block)) {
               all_non_phi_exist_in_prev_block = false;
               break;
            }

            prev_srcs[i] = alu->src[i].src.ssa;
            continue_srcs[i] = alu->src[i].src.ssa;
         }
      }

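      /* The split is only safe when at least one source is a phi from the
       * header block, every non-phi source already exists before the loop,
       * and the value on the first iteration is known to be a constant or
       * undef.
       */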
      if (has_phi_src_from_prev_block && all_non_phi_exist_in_prev_block &&
          (is_prev_result_undef || is_prev_result_const)) {
         nir_block *const continue_block = find_continue_block(loop);

         b->cursor = nir_after_block(prev_block);
         nir_ssa_def *prev_value = clone_alu_and_replace_src_defs(b, alu, prev_srcs);

         /* Make a copy of the original ALU instruction.  Replace the sources
          * of the new instruction that read a phi with an undef source from
          * prev_block with the non-undef source of that phi.
          *
          * Insert the new instruction at the end of the continue block.
          */
         b->cursor = nir_after_block_before_jump(continue_block);

         nir_ssa_def *const alu_copy =
            clone_alu_and_replace_src_defs(b, alu, continue_srcs);

         /* Make a new phi node that selects a value from prev_block and the
          * result of the new instruction from continue_block.
          */
         nir_phi_instr *const phi = nir_phi_instr_create(b->shader);
         nir_phi_src *phi_src;

         phi_src = ralloc(phi, nir_phi_src);
         phi_src->pred = prev_block;
         phi_src->src = nir_src_for_ssa(prev_value);
         exec_list_push_tail(&phi->srcs, &phi_src->node);

         phi_src = ralloc(phi, nir_phi_src);
         phi_src->pred = continue_block;
         phi_src->src = nir_src_for_ssa(alu_copy);
         exec_list_push_tail(&phi->srcs, &phi_src->node);

         nir_ssa_dest_init(&phi->instr, &phi->dest,
                           alu_copy->num_components, alu_copy->bit_size, NULL);

         b->cursor = nir_after_phis(header_block);
         nir_builder_instr_insert(b, &phi->instr);

         /* Modify all readers of the original ALU instruction to read the
          * result of the phi.
          */
         nir_foreach_use_safe(use_src, &alu->dest.dest.ssa) {
            nir_instr_rewrite_src(use_src->parent_instr,
                                  use_src,
                                  nir_src_for_ssa(&phi->dest.ssa));
         }

         nir_foreach_if_use_safe(use_src, &alu->dest.dest.ssa) {
            nir_if_rewrite_condition(use_src->parent_if,
                                     nir_src_for_ssa(&phi->dest.ssa));
         }

         /* Since the original ALU instruction no longer has any readers, just
          * remove it.
          */
         nir_instr_remove_v(&alu->instr);
         ralloc_free(alu);

         progress = true;
      }
   }

   return progress;
}

/**
 * Get the SSA value from a phi node that corresponds to a specific block
 */
static nir_ssa_def *
ssa_for_phi_from_block(nir_phi_instr *phi, nir_block *block)
{
   nir_foreach_phi_src(src, phi) {
      if (src->pred == block)
         return src->src.ssa;
   }

   assert(!"Block is not a predecessor of phi.");
   return NULL;
}

/**
 * Simplify a bcsel whose sources are all phi nodes from the loop header block
 *
 * bcsel instructions in a loop that meet the following criteria can be
 * converted to phi nodes:
 *
 * - The loop has no continue instructions other than the "natural" continue
 *   at the bottom of the loop.
 *
 * - All of the sources of the bcsel are phi nodes in the header block of the
 *   loop.
 *
 * - The phi node representing the condition of the bcsel instruction chooses
 *   only constant values.
 *
 * The constant value from the condition will select one of the other sources
 * when entered from outside the loop and the remaining source when entered
 * from the continue block.  Since each of these sources is also a phi node in
 * the header block, the value of the phi node can be "evaluated."  These
 * evaluated phi nodes provide the sources for a new phi node.  All users of
 * the bcsel result are updated to use the phi node result.
 *
 * The replacement transforms loops like:
 *
 *    vec1 32 ssa_7 = undefined
 *    vec1 32 ssa_8 = load_const (0x00000001)
 *    vec1 32 ssa_9 = load_const (0x000000c8)
 *    vec1 32 ssa_10 = load_const (0x00000000)
 *    // succs: block_1
 *    loop {
 *       block block_1:
 *       // preds: block_0 block_4
 *       vec1 32 ssa_11 = phi block_0: ssa_1, block_4: ssa_14
 *       vec1 32 ssa_12 = phi block_0: ssa_10, block_4: ssa_15
 *       vec1 32 ssa_13 = phi block_0: ssa_7, block_4: ssa_25
 *       vec1 32 ssa_14 = b32csel ssa_12, ssa_13, ssa_11
 *       vec1 32 ssa_16 = ige32 ssa_14, ssa_9
 *       ...
 *       vec1 32 ssa_15 = load_const (0xffffffff)
 *       ...
 *       vec1 32 ssa_25 = iadd ssa_14, ssa_8
 *       // succs: block_1
 *    }
 *
 * into:
 *
 *    vec1 32 ssa_7 = undefined
 *    vec1 32 ssa_8 = load_const (0x00000001)
 *    vec1 32 ssa_9 = load_const (0x000000c8)
 *    vec1 32 ssa_10 = load_const (0x00000000)
 *    // succs: block_1
 *    loop {
 *       block block_1:
 *       // preds: block_0 block_4
 *       vec1 32 ssa_11 = phi block_0: ssa_1, block_4: ssa_14
 *       vec1 32 ssa_12 = phi block_0: ssa_10, block_4: ssa_15
 *       vec1 32 ssa_13 = phi block_0: ssa_7, block_4: ssa_25
 *       vec1 32 ssa_26 = phi block_0: ssa_1, block_4: ssa_25
 *       vec1 32 ssa_16 = ige32 ssa_26, ssa_9
 *       ...
 *       vec1 32 ssa_15 = load_const (0xffffffff)
 *       ...
 *       vec1 32 ssa_25 = iadd ssa_26, ssa_8
 *       // succs: block_1
 *    }
 *
 * \note
 * It may be possible to modify this function to not require a phi node as the
 * source of the bcsel that is selected when entering from outside the loop.
 * The only restriction is that the source must be generated outside the loop
 * (since it will become the source of a phi node in the header block of the
 * loop).
 */
static bool
opt_simplify_bcsel_of_phi(nir_builder *b, nir_loop *loop)
{
   bool progress = false;
   nir_block *header_block = nir_loop_first_block(loop);
   nir_block *const prev_block =
      nir_cf_node_as_block(nir_cf_node_prev(&loop->cf_node));

   /* It would be insane if this were not true */
   assert(_mesa_set_search(header_block->predecessors, prev_block));

   /* The loop must have exactly one continue block which could be a block
    * ending in a continue instruction or the "natural" continue from the
    * last block in the loop back to the top.
    */
   if (header_block->predecessors->entries != 2)
      return false;

   /* We can move any bcsel that is guaranteed to execute on every iteration
    * of a loop.  For now this is accomplished by only taking bcsels from the
    * header_block.  In the future, this could be expanded to include any
    * bcsel that must come before any break.
    *
    * For more details, see
    * https://gitlab.freedesktop.org/mesa/mesa/merge_requests/170#note_110305
    */
   nir_foreach_instr_safe(instr, header_block) {
      if (instr->type != nir_instr_type_alu)
         continue;

      nir_alu_instr *const bcsel = nir_instr_as_alu(instr);
      if (bcsel->op != nir_op_bcsel &&
          bcsel->op != nir_op_b32csel &&
          bcsel->op != nir_op_fcsel)
         continue;

      bool match = true;
      for (unsigned i = 0; i < 3; i++) {
         /* FINISHME: The abs and negate cases could be handled by adding
          * move instructions at the bottom of the continue block and more
          * phi nodes in the header_block.
          */
         if (!bcsel->src[i].src.is_ssa ||
             bcsel->src[i].src.ssa->parent_instr->type != nir_instr_type_phi ||
             bcsel->src[i].src.ssa->parent_instr->block != header_block ||
             bcsel->src[i].negate || bcsel->src[i].abs) {
            match = false;
            break;
         }
      }

      if (!match)
         continue;

      nir_phi_instr *const cond_phi =
         nir_instr_as_phi(bcsel->src[0].src.ssa->parent_instr);

      uint32_t entry_val = 0, continue_val = 0;
      if (!phi_has_constant_from_outside_and_one_from_inside_loop(cond_phi,
                                                                  prev_block,
                                                                  &entry_val,
                                                                  &continue_val))
         continue;

      /* If they both execute or both don't execute, this is a job for
       * nir_dead_cf, not this pass.
       */
      if ((entry_val && continue_val) || (!entry_val && !continue_val))
         continue;

      const unsigned entry_src = entry_val ? 1 : 2;
      const unsigned continue_src = entry_val ? 2 : 1;
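      /* bcsel chooses src[1] when its condition is true and src[2] when it
       * is false, so a non-zero entry_val means src[1] holds the value used
       * when the loop is entered from outside.
       */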

      /* Create a new phi node that selects the value for prev_block from
       * the bcsel source that is selected by entry_val and the value for
       * continue_block from the other bcsel source.  Both sources have
       * already been verified to be phi nodes.
       */
      nir_block *const continue_block = find_continue_block(loop);
      nir_phi_instr *const phi = nir_phi_instr_create(b->shader);
      nir_phi_src *phi_src;

      phi_src = ralloc(phi, nir_phi_src);
      phi_src->pred = prev_block;
      phi_src->src =
         nir_src_for_ssa(ssa_for_phi_from_block(nir_instr_as_phi(bcsel->src[entry_src].src.ssa->parent_instr),
                                                prev_block));
      exec_list_push_tail(&phi->srcs, &phi_src->node);

      phi_src = ralloc(phi, nir_phi_src);
      phi_src->pred = continue_block;
      phi_src->src =
         nir_src_for_ssa(ssa_for_phi_from_block(nir_instr_as_phi(bcsel->src[continue_src].src.ssa->parent_instr),
                                                continue_block));
      exec_list_push_tail(&phi->srcs, &phi_src->node);

      nir_ssa_dest_init(&phi->instr,
                        &phi->dest,
                        nir_dest_num_components(bcsel->dest.dest),
                        nir_dest_bit_size(bcsel->dest.dest),
                        NULL);

      b->cursor = nir_after_phis(header_block);
      nir_builder_instr_insert(b, &phi->instr);

      /* Modify all readers of the bcsel instruction to read the result of
       * the phi.
       */
      nir_foreach_use_safe(use_src, &bcsel->dest.dest.ssa) {
         nir_instr_rewrite_src(use_src->parent_instr,
                               use_src,
                               nir_src_for_ssa(&phi->dest.ssa));
      }

      nir_foreach_if_use_safe(use_src, &bcsel->dest.dest.ssa) {
         nir_if_rewrite_condition(use_src->parent_if,
                                  nir_src_for_ssa(&phi->dest.ssa));
      }

      /* Since the original bcsel instruction no longer has any readers,
       * just remove it.
       */
      nir_instr_remove_v(&bcsel->instr);
      ralloc_free(bcsel);

      progress = true;
   }

   return progress;
}

static bool
is_block_empty(nir_block *block)
{
   return nir_cf_node_is_last(&block->cf_node) &&
          exec_list_is_empty(&block->instr_list);
}

static bool
nir_block_ends_in_continue(nir_block *block)
{
   if (exec_list_is_empty(&block->instr_list))
      return false;

   nir_instr *instr = nir_block_last_instr(block);
   return instr->type == nir_instr_type_jump &&
          nir_instr_as_jump(instr)->type == nir_jump_continue;
}

/**
 * This optimization turns:
 *
 *     loop {
 *        ...
 *        if (cond) {
 *           do_work_1();
 *           continue;
 *        } else {
 *        }
 *        do_work_2();
 *     }
 *
 * into:
 *
 *     loop {
 *        ...
 *        if (cond) {
 *           do_work_1();
 *           continue;
 *        } else {
 *           do_work_2();
 *        }
 *     }
 *
 * The continue should then be removed by nir_opt_trivial_continues() and the
 * loop can potentially be unrolled.
 *
 * Note: Unless the function param aggressive_last_continue is true,
 * do_work_2() may only contain blocks and nested loops.  We avoid nesting
 * other if-statements in the branch as this can result in increased register
 * pressure, and in the i965 driver it causes a large amount of spilling in
 * shader-db.  For RADV however, nesting these if-statements allows further
 * continues to be removed and provides a significant FPS boost in Doom,
 * which is why we have opted for this special bool to enable more aggressive
 * optimisations.
 * TODO: The GCM pass solves most of the spilling regressions in i965; if it
 * is ever enabled we should consider removing the aggressive_last_continue
 * param.
 */
static bool
opt_if_loop_last_continue(nir_loop *loop, bool aggressive_last_continue)
{
   nir_if *nif;
   bool then_ends_in_continue = false;
   bool else_ends_in_continue = false;

   /* Scan the control flow of the loop from the last to the first node
    * looking for an if-statement we can optimise.
    */
   nir_block *last_block = nir_loop_last_block(loop);
   nir_cf_node *if_node = nir_cf_node_prev(&last_block->cf_node);
   while (if_node) {
      if (if_node->type == nir_cf_node_if) {
         nif = nir_cf_node_as_if(if_node);
         nir_block *then_block = nir_if_last_then_block(nif);
         nir_block *else_block = nir_if_last_else_block(nif);

         then_ends_in_continue = nir_block_ends_in_continue(then_block);
         else_ends_in_continue = nir_block_ends_in_continue(else_block);

         /* If both branches end in a jump do nothing, this should be handled
          * by nir_opt_dead_cf().
          */
         if ((then_ends_in_continue || nir_block_ends_in_break(then_block)) &&
             (else_ends_in_continue || nir_block_ends_in_break(else_block)))
            return false;

         /* Stop scanning and attempt the optimisation if a continue was
          * found, or if we are not being aggressive, in which case only the
          * last if-statement in the loop is considered.
          */
         if (then_ends_in_continue || else_ends_in_continue ||
             !aggressive_last_continue)
            break;
      }

      if_node = nir_cf_node_prev(if_node);
   }

   /* If we didn't find an if to optimise, return */
   if (!then_ends_in_continue && !else_ends_in_continue)
      return false;

   /* If there is nothing after the if-statement we bail */
   if (&nif->cf_node == nir_cf_node_prev(&last_block->cf_node) &&
       exec_list_is_empty(&last_block->instr_list))
      return false;

   /* Move the last block of the loop inside the last if-statement */
   nir_cf_list tmp;
   nir_cf_extract(&tmp, nir_after_cf_node(if_node),
                  nir_after_block(last_block));
   if (then_ends_in_continue)
      nir_cf_reinsert(&tmp, nir_after_cf_list(&nif->else_list));
   else
      nir_cf_reinsert(&tmp, nir_after_cf_list(&nif->then_list));

   /* In order to avoid running nir_lower_regs_to_ssa_impl() every time an if
    * opt makes progress we leave nir_opt_trivial_continues() to remove the
    * continue now that the end of the loop has been simplified.
    */

   return true;
}

/* Walk all the phis in the block immediately following the if-statement and
 * rewrite their predecessor blocks.
 */
static void
rewrite_phi_predecessor_blocks(nir_if *nif,
                               nir_block *old_then_block,
                               nir_block *old_else_block,
                               nir_block *new_then_block,
                               nir_block *new_else_block)
{
   nir_block *after_if_block =
      nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node));

   nir_foreach_instr(instr, after_if_block) {
      if (instr->type != nir_instr_type_phi)
         continue;

      nir_phi_instr *phi = nir_instr_as_phi(instr);

      foreach_list_typed(nir_phi_src, src, node, &phi->srcs) {
         if (src->pred == old_then_block) {
            src->pred = new_then_block;
         } else if (src->pred == old_else_block) {
            src->pred = new_else_block;
         }
      }
   }
}

/**
 * This optimization turns:
 *
 *     if (cond) {
 *     } else {
 *        do_work();
 *     }
 *
 * into:
 *
 *     if (!cond) {
 *        do_work();
 *     } else {
 *     }
 */
static bool
opt_if_simplification(nir_builder *b, nir_if *nif)
{
   /* Only simplify if the then block is empty and the else block is not. */
   if (!is_block_empty(nir_if_first_then_block(nif)) ||
       is_block_empty(nir_if_first_else_block(nif)))
      return false;

   /* Make sure the condition is a comparison operation. */
   nir_instr *src_instr = nif->condition.ssa->parent_instr;
   if (src_instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *alu_instr = nir_instr_as_alu(src_instr);
   if (!nir_alu_instr_is_comparison(alu_instr))
      return false;

   /* Insert the inverted instruction and rewrite the condition. */
   b->cursor = nir_after_instr(&alu_instr->instr);

   nir_ssa_def *new_condition =
      nir_inot(b, &alu_instr->dest.dest.ssa);

   nir_if_rewrite_condition(nif, nir_src_for_ssa(new_condition));

   /* Grab pointers to the last then/else blocks for fixing up the phis. */
   nir_block *then_block = nir_if_last_then_block(nif);
   nir_block *else_block = nir_if_last_else_block(nif);

   rewrite_phi_predecessor_blocks(nif, then_block, else_block, else_block,
                                  then_block);

   /* Finally, move the else block to the then block. */
   nir_cf_list tmp;
   nir_cf_extract(&tmp, nir_before_cf_list(&nif->else_list),
                  nir_after_cf_list(&nif->else_list));
   nir_cf_reinsert(&tmp, nir_before_cf_list(&nif->then_list));

   return true;
}

/**
 * This optimization simplifies potential loop terminators which then allows
 * other passes such as opt_if_simplification() and loop unrolling to progress
 * further:
 *
 *     if (cond) {
 *        ... then block instructions ...
 *     } else {
 *        ...
 *        break;
 *     }
 *
 * into:
 *
 *     if (cond) {
 *     } else {
 *        ...
 *        break;
 *     }
 *     ... then block instructions ...
 */
static bool
opt_if_loop_terminator(nir_if *nif)
{
   nir_block *break_blk = NULL;
   nir_block *continue_from_blk = NULL;
   bool continue_from_then = true;

   nir_block *last_then = nir_if_last_then_block(nif);
   nir_block *last_else = nir_if_last_else_block(nif);

   if (nir_block_ends_in_break(last_then)) {
      break_blk = last_then;
      continue_from_blk = last_else;
      continue_from_then = false;
   } else if (nir_block_ends_in_break(last_else)) {
      break_blk = last_else;
      continue_from_blk = last_then;
   }

   /* Bail if the if-statement contained no breaks at all */
   if (!break_blk)
      return false;

   /* If the continue from block is empty then return as there is nothing to
    * move.
    */
   nir_block *first_continue_from_blk = continue_from_then ?
      nir_if_first_then_block(nif) :
      nir_if_first_else_block(nif);
   if (is_block_empty(first_continue_from_blk))
      return false;

   if (!nir_is_trivial_loop_if(nif, break_blk))
      return false;

   /* Even though this if statement has a jump on one side, we may still have
    * phis afterwards.  Single-source phis can be produced by loop unrolling
    * or dead control-flow passes and are perfectly legal.  Run a quick phi
    * removal on the block after the if to clean up any such phis.
    */
   nir_opt_remove_phis_block(nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node)));

   /* Finally, move the continue from branch after the if-statement. */
   nir_cf_list tmp;
   nir_cf_extract(&tmp, nir_before_block(first_continue_from_blk),
                  nir_after_block(continue_from_blk));
   nir_cf_reinsert(&tmp, nir_after_cf_node(&nif->cf_node));

   return true;
}

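/* A use dominated by the first block of the then-branch can only be reached
 * when the if condition was true, and likewise for the else-branch, so the
 * condition's value is known at such a use.  Uses dominated by neither
 * branch cannot be evaluated this way.
 */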
static bool
evaluate_if_condition(nir_if *nif, nir_cursor cursor, bool *value)
{
   nir_block *use_block = nir_cursor_current_block(cursor);
   if (nir_block_dominates(nir_if_first_then_block(nif), use_block)) {
      *value = true;
      return true;
   } else if (nir_block_dominates(nir_if_first_else_block(nif), use_block)) {
      *value = false;
      return true;
   } else {
      return false;
   }
}

static nir_ssa_def *
clone_alu_and_replace_src_defs(nir_builder *b, const nir_alu_instr *alu,
                               nir_ssa_def **src_defs)
{
   nir_alu_instr *nalu = nir_alu_instr_create(b->shader, alu->op);
   nalu->exact = alu->exact;

   nir_ssa_dest_init(&nalu->instr, &nalu->dest.dest,
                     alu->dest.dest.ssa.num_components,
                     alu->dest.dest.ssa.bit_size, alu->dest.dest.ssa.name);

   nalu->dest.saturate = alu->dest.saturate;
   nalu->dest.write_mask = alu->dest.write_mask;

   for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
      assert(alu->src[i].src.is_ssa);
      nalu->src[i].src = nir_src_for_ssa(src_defs[i]);
      nalu->src[i].negate = alu->src[i].negate;
      nalu->src[i].abs = alu->src[i].abs;
      memcpy(nalu->src[i].swizzle, alu->src[i].swizzle,
             sizeof(nalu->src[i].swizzle));
   }

   nir_builder_instr_insert(b, &nalu->instr);

   return &nalu->dest.dest.ssa;
}

/*
 * This propagates if condition evaluation down the chain of some alu
 * instructions.  For example, by checking the uses of some of the following
 * ALU instructions, we can eventually replace ssa_107 with NIR_TRUE:
 *
 *    loop {
 *       block block_1:
 *       vec1 32 ssa_85 = load_const (0x00000002)
 *       vec1 32 ssa_86 = ieq ssa_48, ssa_85
 *       vec1 32 ssa_87 = load_const (0x00000001)
 *       vec1 32 ssa_88 = ieq ssa_48, ssa_87
 *       vec1 32 ssa_89 = ior ssa_86, ssa_88
 *       vec1 32 ssa_90 = ieq ssa_48, ssa_0
 *       vec1 32 ssa_91 = ior ssa_89, ssa_90
 *       if ssa_86 {
 *          block block_2:
 *          ...
 *          break
 *       } else {
 *          block block_3:
 *       }
 *       block block_4:
 *       if ssa_88 {
 *          block block_5:
 *          ...
 *          break
 *       } else {
 *          block block_6:
 *       }
 *       block block_7:
 *       if ssa_90 {
 *          block block_8:
 *          ...
 *          break
 *       } else {
 *          block block_9:
 *       }
 *       block block_10:
 *       vec1 32 ssa_107 = inot ssa_91
 *       if ssa_107 {
 *          block block_11:
 *          break
 *       } else {
 *          block block_12:
 *       }
 *    }
 */
static bool
propagate_condition_eval(nir_builder *b, nir_if *nif, nir_src *use_src,
                         nir_src *alu_use, nir_alu_instr *alu,
                         bool is_if_condition)
{
   bool bool_value;
   b->cursor = nir_before_src(alu_use, is_if_condition);
   if (!evaluate_if_condition(nif, b->cursor, &bool_value))
      return false;

   nir_ssa_def *def[4] = {0};
   for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
      if (alu->src[i].src.ssa == use_src->ssa) {
         def[i] = nir_imm_bool(b, bool_value);
      } else {
         def[i] = alu->src[i].src.ssa;
      }
   }

   nir_ssa_def *nalu = clone_alu_and_replace_src_defs(b, alu, def);

   /* Rewrite use to use new alu instruction */
   nir_src new_src = nir_src_for_ssa(nalu);

   if (is_if_condition)
      nir_if_rewrite_condition(alu_use->parent_if, new_src);
   else
      nir_instr_rewrite_src(alu_use->parent_instr, alu_use, new_src);

   return true;
}

static bool
can_propagate_through_alu(nir_src *src)
{
   if (src->parent_instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *alu = nir_instr_as_alu(src->parent_instr);
   switch (alu->op) {
   case nir_op_ior:
   case nir_op_iand:
   case nir_op_inot:
   case nir_op_b2i32:
      return true;
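   /* For bcsel, only a use as the condition (src[0]) is propagated; the
    * other two operands are the values being selected rather than part of
    * the boolean expression being evaluated.
    */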
   case nir_op_bcsel:
      return src == &alu->src[0].src;
   default:
      return false;
   }
}

static bool
evaluate_condition_use(nir_builder *b, nir_if *nif, nir_src *use_src,
                       bool is_if_condition)
{
   bool progress = false;

   b->cursor = nir_before_src(use_src, is_if_condition);

   bool bool_value;
   if (evaluate_if_condition(nif, b->cursor, &bool_value)) {
      /* Rewrite use to use const */
      nir_src imm_src = nir_src_for_ssa(nir_imm_bool(b, bool_value));
      if (is_if_condition)
         nir_if_rewrite_condition(use_src->parent_if, imm_src);
      else
         nir_instr_rewrite_src(use_src->parent_instr, use_src, imm_src);

      progress = true;
   }

   if (!is_if_condition && can_propagate_through_alu(use_src)) {
      nir_alu_instr *alu = nir_instr_as_alu(use_src->parent_instr);

      nir_foreach_use_safe(alu_use, &alu->dest.dest.ssa) {
         progress |= propagate_condition_eval(b, nif, use_src, alu_use, alu,
                                              false);
      }

      nir_foreach_if_use_safe(alu_use, &alu->dest.dest.ssa) {
         progress |= propagate_condition_eval(b, nif, use_src, alu_use, alu,
                                              true);
      }
   }

   return progress;
}

static bool
opt_if_evaluate_condition_use(nir_builder *b, nir_if *nif)
{
   bool progress = false;

   /* Evaluate any uses of the if condition inside the if branches */
   assert(nif->condition.is_ssa);
   nir_foreach_use_safe(use_src, nif->condition.ssa) {
      progress |= evaluate_condition_use(b, nif, use_src, false);
   }

   nir_foreach_if_use_safe(use_src, nif->condition.ssa) {
      if (use_src->parent_if != nif)
         progress |= evaluate_condition_use(b, nif, use_src, true);
   }

   return progress;
}

static void
simple_merge_if(nir_if *dest_if, nir_if *src_if, bool dest_if_then,
                bool src_if_then)
{
   /* Now merge the if branch */
   nir_block *dest_blk = dest_if_then ? nir_if_last_then_block(dest_if)
                                      : nir_if_last_else_block(dest_if);

   struct exec_list *list = src_if_then ? &src_if->then_list
                                        : &src_if->else_list;

   nir_cf_list if_cf_list;
   nir_cf_extract(&if_cf_list, nir_before_cf_list(list),
                  nir_after_cf_list(list));
   nir_cf_reinsert(&if_cf_list, nir_after_block(dest_blk));
}

static bool
opt_if_merge(nir_if *nif)
{
   bool progress = false;

   nir_block *next_blk = nir_cf_node_cf_tree_next(&nif->cf_node);
   if (next_blk && nif->condition.is_ssa) {
      nir_if *next_if = nir_block_get_following_if(next_blk);
      if (next_if && next_if->condition.is_ssa) {

         /* Here we merge two consecutive ifs that have the same condition
          * e.g:
          *
          *    if ssa_12 {
          *       ...
          *    } else {
          *       ...
          *    }
          *    if ssa_12 {
          *       ...
          *    } else {
          *       ...
          *    }
          *
          * Note: This only merges if-statements when the block between them
          * is empty.  The reason we don't try to merge ifs that just have
          * phis between them is that this can result in increased register
          * pressure, for example when merging if ladders created by indirect
          * indexing.
          */
         if (nif->condition.ssa == next_if->condition.ssa &&
             exec_list_is_empty(&next_blk->instr_list)) {

            simple_merge_if(nif, next_if, true, true);
            simple_merge_if(nif, next_if, false, false);

            nir_block *new_then_block = nir_if_last_then_block(nif);
            nir_block *new_else_block = nir_if_last_else_block(nif);

            nir_block *old_then_block = nir_if_last_then_block(next_if);
            nir_block *old_else_block = nir_if_last_else_block(next_if);

            /* Rewrite the predecessor block for any phis following the second
             * if-statement.
             */
            rewrite_phi_predecessor_blocks(next_if, old_then_block,
                                           old_else_block,
                                           new_then_block,
                                           new_else_block);

            /* Move phis after the merged if to avoid them being deleted when
             * we remove the merged if-statement.
             */
            nir_block *after_next_if_block =
               nir_cf_node_as_block(nir_cf_node_next(&next_if->cf_node));

            nir_foreach_instr_safe(instr, after_next_if_block) {
               if (instr->type != nir_instr_type_phi)
                  break;

               exec_node_remove(&instr->node);
               exec_list_push_tail(&next_blk->instr_list, &instr->node);
               instr->block = next_blk;
            }

            nir_cf_node_remove(&next_if->cf_node);

            progress = true;
         }
      }
   }

   return progress;
}

static bool
opt_if_cf_list(nir_builder *b, struct exec_list *cf_list,
               bool aggressive_last_continue)
{
   bool progress = false;
   foreach_list_typed(nir_cf_node, cf_node, node, cf_list) {
      switch (cf_node->type) {
      case nir_cf_node_block:
         break;

      case nir_cf_node_if: {
         nir_if *nif = nir_cf_node_as_if(cf_node);
         progress |= opt_if_cf_list(b, &nif->then_list,
                                    aggressive_last_continue);
         progress |= opt_if_cf_list(b, &nif->else_list,
                                    aggressive_last_continue);
         progress |= opt_if_loop_terminator(nif);
         progress |= opt_if_merge(nif);
         progress |= opt_if_simplification(b, nif);
         break;
      }

      case nir_cf_node_loop: {
         nir_loop *loop = nir_cf_node_as_loop(cf_node);
         progress |= opt_if_cf_list(b, &loop->body,
                                    aggressive_last_continue);
         progress |= opt_simplify_bcsel_of_phi(b, loop);
         progress |= opt_peel_loop_initial_if(loop);
         progress |= opt_if_loop_last_continue(loop,
                                               aggressive_last_continue);
         break;
      }

      case nir_cf_node_function:
         unreachable("Invalid cf type");
      }
   }

   return progress;
}

/**
 * These optimisations depend on nir_metadata_block_index and therefore must
 * not do anything to cause the metadata to become invalid.
 */
static bool
opt_if_safe_cf_list(nir_builder *b, struct exec_list *cf_list)
{
   bool progress = false;
   foreach_list_typed(nir_cf_node, cf_node, node, cf_list) {
      switch (cf_node->type) {
      case nir_cf_node_block:
         break;

      case nir_cf_node_if: {
         nir_if *nif = nir_cf_node_as_if(cf_node);
         progress |= opt_if_safe_cf_list(b, &nif->then_list);
         progress |= opt_if_safe_cf_list(b, &nif->else_list);
         progress |= opt_if_evaluate_condition_use(b, nif);
         break;
      }

      case nir_cf_node_loop: {
         nir_loop *loop = nir_cf_node_as_loop(cf_node);
         progress |= opt_if_safe_cf_list(b, &loop->body);
         progress |= opt_split_alu_of_phi(b, loop);
         break;
      }

      case nir_cf_node_function:
         unreachable("Invalid cf type");
      }
   }

   return progress;
}

bool
nir_opt_if(nir_shader *shader, bool aggressive_last_continue)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl == NULL)
         continue;

      nir_builder b;
      nir_builder_init(&b, function->impl);

      nir_metadata_require(function->impl, nir_metadata_block_index |
                           nir_metadata_dominance);
      /* Accumulate with |= so progress from earlier functions is not lost */
      progress |= opt_if_safe_cf_list(&b, &function->impl->body);
      nir_metadata_preserve(function->impl, nir_metadata_block_index |
                            nir_metadata_dominance);

      if (opt_if_cf_list(&b, &function->impl->body,
                         aggressive_last_continue)) {
         nir_metadata_preserve(function->impl, nir_metadata_none);

         /* If that made progress, we're no longer really in SSA form.  We
          * need to convert registers back into SSA defs and clean up SSA defs
          * that don't dominate their uses.
          */
         nir_lower_regs_to_ssa_impl(function->impl);

         progress = true;
      } else {
#ifndef NDEBUG
         function->impl->valid_metadata &= ~nir_metadata_not_properly_reset;
#endif
      }
   }

   return progress;
}