nir/algebraic: Make algebraic_parser_test.sh executable.
[mesa.git] / src / compiler / nir / nir_opt_if.c
1 /*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "nir.h"
25 #include "nir/nir_builder.h"
26 #include "nir_constant_expressions.h"
27 #include "nir_control_flow.h"
28 #include "nir_loop_analyze.h"
29
30 /**
31 * Gets the single block that jumps back to the loop header. Already assumes
32 * there is exactly one such block.
33 */
34 static nir_block*
35 find_continue_block(nir_loop *loop)
36 {
37 nir_block *header_block = nir_loop_first_block(loop);
38 nir_block *prev_block =
39 nir_cf_node_as_block(nir_cf_node_prev(&loop->cf_node));
40
41 assert(header_block->predecessors->entries == 2);
42
43 set_foreach(header_block->predecessors, pred_entry) {
44 if (pred_entry->key != prev_block)
45 return (nir_block*)pred_entry->key;
46 }
47
48 unreachable("Continue block not found!");
49 }
50
51 /**
52 * This optimization detects if statements at the tops of loops where the
53 * condition is a phi node of two constants and moves half of the if to above
54 * the loop and the other half of the if to the end of the loop. A simple for
55 * loop "for (int i = 0; i < 4; i++)", when run through the SPIR-V front-end,
56 * ends up looking something like this:
57 *
58 * vec1 32 ssa_0 = load_const (0x00000000)
59 * vec1 32 ssa_1 = load_const (0xffffffff)
60 * loop {
61 * block block_1:
62 * vec1 32 ssa_2 = phi block_0: ssa_0, block_7: ssa_5
63 * vec1 32 ssa_3 = phi block_0: ssa_0, block_7: ssa_1
64 * if ssa_2 {
65 * block block_2:
66 * vec1 32 ssa_4 = load_const (0x00000001)
67 * vec1 32 ssa_5 = iadd ssa_2, ssa_4
68 * } else {
69 * block block_3:
70 * }
71 * block block_4:
72 * vec1 32 ssa_6 = load_const (0x00000004)
73 * vec1 32 ssa_7 = ilt ssa_5, ssa_6
74 * if ssa_7 {
75 * block block_5:
76 * } else {
77 * block block_6:
78 * break
79 * }
80 * block block_7:
81 * }
82 *
83 * This turns it into something like this:
84 *
85 * // Stuff from block 1
86 * // Stuff from block 3
87 * loop {
88 * block block_1:
89 * vec1 32 ssa_3 = phi block_0: ssa_0, block_7: ssa_1
90 * vec1 32 ssa_6 = load_const (0x00000004)
91 * vec1 32 ssa_7 = ilt ssa_5, ssa_6
92 * if ssa_7 {
93 * block block_5:
94 * } else {
95 * block block_6:
96 * break
97 * }
98 * block block_7:
99 * // Stuff from block 1
100 * // Stuff from block 2
101 * vec1 32 ssa_4 = load_const (0x00000001)
102 * vec1 32 ssa_5 = iadd ssa_2, ssa_4
103 * }
104 */
static bool
opt_peel_loop_initial_if(nir_loop *loop)
{
   nir_block *header_block = nir_loop_first_block(loop);
   MAYBE_UNUSED nir_block *prev_block =
      nir_cf_node_as_block(nir_cf_node_prev(&loop->cf_node));

   /* It would be insane if this were not true */
   assert(_mesa_set_search(header_block->predecessors, prev_block));

   /* The loop must have exactly one continue block which could be a block
    * ending in a continue instruction or the "natural" continue from the
    * last block in the loop back to the top.
    */
   if (header_block->predecessors->entries != 2)
      return false;

   nir_block *continue_block = find_continue_block(loop);

   /* The first non-block CF node in the loop must be an if whose condition
    * is the phi we are looking for.
    */
   nir_cf_node *if_node = nir_cf_node_next(&header_block->cf_node);
   if (!if_node || if_node->type != nir_cf_node_if)
      return false;

   nir_if *nif = nir_cf_node_as_if(if_node);
   assert(nif->condition.is_ssa);

   nir_ssa_def *cond = nif->condition.ssa;
   if (cond->parent_instr->type != nir_instr_type_phi)
      return false;

   /* The condition phi must live in the header itself so its two sources
    * correspond exactly to the loop entry and the continue edge.
    */
   nir_phi_instr *cond_phi = nir_instr_as_phi(cond->parent_instr);
   if (cond->parent_instr->block != header_block)
      return false;

   /* We already know we have exactly one continue */
   assert(exec_list_length(&cond_phi->srcs) == 2);

   /* Both phi sources must be constants: entry_val is the condition value on
    * first entry to the loop, continue_val on every subsequent iteration.
    */
   uint32_t entry_val = 0, continue_val = 0;
   nir_foreach_phi_src(src, cond_phi) {
      assert(src->src.is_ssa);
      nir_const_value *const_src = nir_src_as_const_value(src->src);
      if (!const_src)
         return false;

      if (src->pred == continue_block) {
         continue_val = const_src->u32[0];
      } else {
         assert(src->pred == prev_block);
         entry_val = const_src->u32[0];
      }
   }

   /* If they both execute or both don't execute, this is a job for
    * nir_dead_cf, not this pass.
    */
   if ((entry_val && continue_val) || (!entry_val && !continue_val))
      return false;

   /* continue_list is the branch taken on iterations after the first;
    * entry_list is the branch taken only on the first iteration.
    */
   struct exec_list *continue_list, *entry_list;
   if (continue_val) {
      continue_list = &nif->then_list;
      entry_list = &nif->else_list;
   } else {
      continue_list = &nif->else_list;
      entry_list = &nif->then_list;
   }

   /* We want to be moving the contents of entry_list to above the loop so it
    * can't contain any break or continue instructions.
    */
   foreach_list_typed(nir_cf_node, cf_node, node, entry_list) {
      nir_foreach_block_in_cf_node(block, cf_node) {
         nir_instr *last_instr = nir_block_last_instr(block);
         if (last_instr && last_instr->type == nir_instr_type_jump)
            return false;
      }
   }

   /* We're about to re-arrange a bunch of blocks so make sure that we don't
    * have deref uses which cross block boundaries. We don't want a deref
    * accidentally ending up in a phi.
    */
   nir_rematerialize_derefs_in_use_blocks_impl(
      nir_cf_node_get_function(&loop->cf_node));

   /* Before we do anything, convert the loop to LCSSA. We're about to
    * replace a bunch of SSA defs with registers and this will prevent any of
    * it from leaking outside the loop.
    */
   nir_convert_loop_to_lcssa(loop);

   nir_block *after_if_block =
      nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node));

   /* Get rid of phis in the header block since we will be duplicating it */
   nir_lower_phis_to_regs_block(header_block);
   /* Get rid of phis after the if since dominance will change */
   nir_lower_phis_to_regs_block(after_if_block);

   /* Get rid of SSA defs in the pieces we're about to move around */
   nir_lower_ssa_defs_to_regs_block(header_block);
   nir_foreach_block_in_cf_node(block, &nif->cf_node)
      nir_lower_ssa_defs_to_regs_block(block);

   /* Pull the header out of the loop, clone a copy of it (plus the first-
    * iteration branch contents) to just before the loop, and put the
    * original header at the bottom of the loop where the continue happens.
    */
   nir_cf_list header, tmp;
   nir_cf_extract(&header, nir_before_block(header_block),
                  nir_after_block(header_block));

   nir_cf_list_clone(&tmp, &header, &loop->cf_node, NULL);
   nir_cf_reinsert(&tmp, nir_before_cf_node(&loop->cf_node));
   nir_cf_extract(&tmp, nir_before_cf_list(entry_list),
                  nir_after_cf_list(entry_list));
   nir_cf_reinsert(&tmp, nir_before_cf_node(&loop->cf_node));

   nir_cf_reinsert(&header, nir_after_block_before_jump(continue_block));

   /* Get continue block again as the previous reinsert might have removed the block. */
   continue_block = find_continue_block(loop);

   /* Move the steady-state branch contents to the end of the loop body. */
   nir_cf_extract(&tmp, nir_before_cf_list(continue_list),
                  nir_after_cf_list(continue_list));
   nir_cf_reinsert(&tmp, nir_after_block_before_jump(continue_block));

   /* Both branches are now empty; the if itself can go away. */
   nir_cf_node_remove(&nif->cf_node);

   return true;
}
232
233 static bool
234 is_block_empty(nir_block *block)
235 {
236 return nir_cf_node_is_last(&block->cf_node) &&
237 exec_list_is_empty(&block->instr_list);
238 }
239
240 /**
241 * This optimization turns:
242 *
243 * if (cond) {
244 * } else {
245 * do_work();
246 * }
247 *
248 * into:
249 *
250 * if (!cond) {
251 * do_work();
252 * } else {
253 * }
254 */
255 static bool
256 opt_if_simplification(nir_builder *b, nir_if *nif)
257 {
258 /* Only simplify if the then block is empty and the else block is not. */
259 if (!is_block_empty(nir_if_first_then_block(nif)) ||
260 is_block_empty(nir_if_first_else_block(nif)))
261 return false;
262
263 /* Make sure the condition is a comparison operation. */
264 nir_instr *src_instr = nif->condition.ssa->parent_instr;
265 if (src_instr->type != nir_instr_type_alu)
266 return false;
267
268 nir_alu_instr *alu_instr = nir_instr_as_alu(src_instr);
269 if (!nir_alu_instr_is_comparison(alu_instr))
270 return false;
271
272 /* Insert the inverted instruction and rewrite the condition. */
273 b->cursor = nir_after_instr(&alu_instr->instr);
274
275 nir_ssa_def *new_condition =
276 nir_inot(b, &alu_instr->dest.dest.ssa);
277
278 nir_if_rewrite_condition(nif, nir_src_for_ssa(new_condition));
279
280 /* Grab pointers to the last then/else blocks for fixing up the phis. */
281 nir_block *then_block = nir_if_last_then_block(nif);
282 nir_block *else_block = nir_if_last_else_block(nif);
283
284 /* Walk all the phis in the block immediately following the if statement and
285 * swap the blocks.
286 */
287 nir_block *after_if_block =
288 nir_cf_node_as_block(nir_cf_node_next(&nif->cf_node));
289
290 nir_foreach_instr(instr, after_if_block) {
291 if (instr->type != nir_instr_type_phi)
292 continue;
293
294 nir_phi_instr *phi = nir_instr_as_phi(instr);
295
296 foreach_list_typed(nir_phi_src, src, node, &phi->srcs) {
297 if (src->pred == else_block) {
298 src->pred = then_block;
299 } else if (src->pred == then_block) {
300 src->pred = else_block;
301 }
302 }
303 }
304
305 /* Finally, move the else block to the then block. */
306 nir_cf_list tmp;
307 nir_cf_extract(&tmp, nir_before_cf_list(&nif->else_list),
308 nir_after_cf_list(&nif->else_list));
309 nir_cf_reinsert(&tmp, nir_before_cf_list(&nif->then_list));
310
311 return true;
312 }
313
314 /**
315 * This optimization simplifies potential loop terminators which then allows
316 * other passes such as opt_if_simplification() and loop unrolling to progress
317 * further:
318 *
319 * if (cond) {
320 * ... then block instructions ...
321 * } else {
322 * ...
323 * break;
324 * }
325 *
326 * into:
327 *
328 * if (cond) {
329 * } else {
330 * ...
331 * break;
332 * }
333 * ... then block instructions ...
334 */
335 static bool
336 opt_if_loop_terminator(nir_if *nif)
337 {
338 nir_block *break_blk = NULL;
339 nir_block *continue_from_blk = NULL;
340 bool continue_from_then = true;
341
342 nir_block *last_then = nir_if_last_then_block(nif);
343 nir_block *last_else = nir_if_last_else_block(nif);
344
345 if (nir_block_ends_in_break(last_then)) {
346 break_blk = last_then;
347 continue_from_blk = last_else;
348 continue_from_then = false;
349 } else if (nir_block_ends_in_break(last_else)) {
350 break_blk = last_else;
351 continue_from_blk = last_then;
352 }
353
354 /* Continue if the if-statement contained no jumps at all */
355 if (!break_blk)
356 return false;
357
358 /* If the continue from block is empty then return as there is nothing to
359 * move.
360 */
361 nir_block *first_continue_from_blk = continue_from_then ?
362 nir_if_first_then_block(nif) :
363 nir_if_first_else_block(nif);
364 if (is_block_empty(first_continue_from_blk))
365 return false;
366
367 if (!nir_is_trivial_loop_if(nif, break_blk))
368 return false;
369
370 /* Finally, move the continue from branch after the if-statement. */
371 nir_cf_list tmp;
372 nir_cf_extract(&tmp, nir_before_block(first_continue_from_blk),
373 nir_after_block(continue_from_blk));
374 nir_cf_reinsert(&tmp, nir_after_cf_node(&nif->cf_node));
375
376 return true;
377 }
378
379 static bool
380 evaluate_if_condition(nir_if *nif, nir_cursor cursor, bool *value)
381 {
382 nir_block *use_block = nir_cursor_current_block(cursor);
383 if (nir_block_dominates(nir_if_first_then_block(nif), use_block)) {
384 *value = true;
385 return true;
386 } else if (nir_block_dominates(nir_if_first_else_block(nif), use_block)) {
387 *value = false;
388 return true;
389 } else {
390 return false;
391 }
392 }
393
394 static nir_ssa_def *
395 clone_alu_and_replace_src_defs(nir_builder *b, const nir_alu_instr *alu,
396 nir_ssa_def **src_defs)
397 {
398 nir_alu_instr *nalu = nir_alu_instr_create(b->shader, alu->op);
399 nalu->exact = alu->exact;
400
401 nir_ssa_dest_init(&nalu->instr, &nalu->dest.dest,
402 alu->dest.dest.ssa.num_components,
403 alu->dest.dest.ssa.bit_size, alu->dest.dest.ssa.name);
404
405 nalu->dest.saturate = alu->dest.saturate;
406 nalu->dest.write_mask = alu->dest.write_mask;
407
408 for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
409 assert(alu->src[i].src.is_ssa);
410 nalu->src[i].src = nir_src_for_ssa(src_defs[i]);
411 nalu->src[i].negate = alu->src[i].negate;
412 nalu->src[i].abs = alu->src[i].abs;
413 memcpy(nalu->src[i].swizzle, alu->src[i].swizzle,
414 sizeof(nalu->src[i].swizzle));
415 }
416
417 nir_builder_instr_insert(b, &nalu->instr);
418
419 return &nalu->dest.dest.ssa;;
420 }
421
422 /*
423 * This propagates if condition evaluation down the chain of some alu
424 * instructions. For example by checking the use of some of the following alu
425 * instruction we can eventually replace ssa_107 with NIR_TRUE.
426 *
427 * loop {
428 * block block_1:
429 * vec1 32 ssa_85 = load_const (0x00000002)
430 * vec1 32 ssa_86 = ieq ssa_48, ssa_85
431 * vec1 32 ssa_87 = load_const (0x00000001)
432 * vec1 32 ssa_88 = ieq ssa_48, ssa_87
433 * vec1 32 ssa_89 = ior ssa_86, ssa_88
434 * vec1 32 ssa_90 = ieq ssa_48, ssa_0
435 * vec1 32 ssa_91 = ior ssa_89, ssa_90
436 * if ssa_86 {
437 * block block_2:
438 * ...
439 * break
440 * } else {
441 * block block_3:
442 * }
443 * block block_4:
444 * if ssa_88 {
445 * block block_5:
446 * ...
447 * break
448 * } else {
449 * block block_6:
450 * }
451 * block block_7:
452 * if ssa_90 {
453 * block block_8:
454 * ...
455 * break
456 * } else {
457 * block block_9:
458 * }
459 * block block_10:
460 * vec1 32 ssa_107 = inot ssa_91
461 * if ssa_107 {
462 * block block_11:
463 * break
464 * } else {
465 * block block_12:
466 * }
467 * }
468 */
469 static bool
470 propagate_condition_eval(nir_builder *b, nir_if *nif, nir_src *use_src,
471 nir_src *alu_use, nir_alu_instr *alu,
472 bool is_if_condition)
473 {
474 bool bool_value;
475 b->cursor = nir_before_src(alu_use, is_if_condition);
476 if (!evaluate_if_condition(nif, b->cursor, &bool_value))
477 return false;
478
479 nir_ssa_def *def[4] = {0};
480 for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
481 if (alu->src[i].src.ssa == use_src->ssa) {
482 def[i] = nir_imm_bool(b, bool_value);
483 } else {
484 def[i] = alu->src[i].src.ssa;
485 }
486 }
487
488 nir_ssa_def *nalu = clone_alu_and_replace_src_defs(b, alu, def);
489
490 /* Rewrite use to use new alu instruction */
491 nir_src new_src = nir_src_for_ssa(nalu);
492
493 if (is_if_condition)
494 nir_if_rewrite_condition(alu_use->parent_if, new_src);
495 else
496 nir_instr_rewrite_src(alu_use->parent_instr, alu_use, new_src);
497
498 return true;
499 }
500
501 static bool
502 can_propagate_through_alu(nir_src *src)
503 {
504 if (src->parent_instr->type != nir_instr_type_alu)
505 return false;
506
507 nir_alu_instr *alu = nir_instr_as_alu(src->parent_instr);
508 switch (alu->op) {
509 case nir_op_ior:
510 case nir_op_iand:
511 case nir_op_inot:
512 case nir_op_b2i32:
513 return true;
514 case nir_op_bcsel:
515 return src == &alu->src[0].src;
516 default:
517 return false;
518 }
519 }
520
521 static bool
522 evaluate_condition_use(nir_builder *b, nir_if *nif, nir_src *use_src,
523 bool is_if_condition)
524 {
525 bool progress = false;
526
527 b->cursor = nir_before_src(use_src, is_if_condition);
528
529 bool bool_value;
530 if (evaluate_if_condition(nif, b->cursor, &bool_value)) {
531 /* Rewrite use to use const */
532 nir_src imm_src = nir_src_for_ssa(nir_imm_bool(b, bool_value));
533 if (is_if_condition)
534 nir_if_rewrite_condition(use_src->parent_if, imm_src);
535 else
536 nir_instr_rewrite_src(use_src->parent_instr, use_src, imm_src);
537
538 progress = true;
539 }
540
541 if (!is_if_condition && can_propagate_through_alu(use_src)) {
542 nir_alu_instr *alu = nir_instr_as_alu(use_src->parent_instr);
543
544 nir_foreach_use_safe(alu_use, &alu->dest.dest.ssa) {
545 progress |= propagate_condition_eval(b, nif, use_src, alu_use, alu,
546 false);
547 }
548
549 nir_foreach_if_use_safe(alu_use, &alu->dest.dest.ssa) {
550 progress |= propagate_condition_eval(b, nif, use_src, alu_use, alu,
551 true);
552 }
553 }
554
555 return progress;
556 }
557
558 static bool
559 opt_if_evaluate_condition_use(nir_builder *b, nir_if *nif)
560 {
561 bool progress = false;
562
563 /* Evaluate any uses of the if condition inside the if branches */
564 assert(nif->condition.is_ssa);
565 nir_foreach_use_safe(use_src, nif->condition.ssa) {
566 progress |= evaluate_condition_use(b, nif, use_src, false);
567 }
568
569 nir_foreach_if_use_safe(use_src, nif->condition.ssa) {
570 if (use_src->parent_if != nif)
571 progress |= evaluate_condition_use(b, nif, use_src, true);
572 }
573
574 return progress;
575 }
576
577 static bool
578 opt_if_cf_list(nir_builder *b, struct exec_list *cf_list)
579 {
580 bool progress = false;
581 foreach_list_typed(nir_cf_node, cf_node, node, cf_list) {
582 switch (cf_node->type) {
583 case nir_cf_node_block:
584 break;
585
586 case nir_cf_node_if: {
587 nir_if *nif = nir_cf_node_as_if(cf_node);
588 progress |= opt_if_cf_list(b, &nif->then_list);
589 progress |= opt_if_cf_list(b, &nif->else_list);
590 progress |= opt_if_loop_terminator(nif);
591 progress |= opt_if_simplification(b, nif);
592 break;
593 }
594
595 case nir_cf_node_loop: {
596 nir_loop *loop = nir_cf_node_as_loop(cf_node);
597 progress |= opt_if_cf_list(b, &loop->body);
598 progress |= opt_peel_loop_initial_if(loop);
599 break;
600 }
601
602 case nir_cf_node_function:
603 unreachable("Invalid cf type");
604 }
605 }
606
607 return progress;
608 }
609
610 /**
611 * These optimisations depend on nir_metadata_block_index and therefore must
612 * not do anything to cause the metadata to become invalid.
613 */
614 static bool
615 opt_if_safe_cf_list(nir_builder *b, struct exec_list *cf_list)
616 {
617 bool progress = false;
618 foreach_list_typed(nir_cf_node, cf_node, node, cf_list) {
619 switch (cf_node->type) {
620 case nir_cf_node_block:
621 break;
622
623 case nir_cf_node_if: {
624 nir_if *nif = nir_cf_node_as_if(cf_node);
625 progress |= opt_if_safe_cf_list(b, &nif->then_list);
626 progress |= opt_if_safe_cf_list(b, &nif->else_list);
627 progress |= opt_if_evaluate_condition_use(b, nif);
628 break;
629 }
630
631 case nir_cf_node_loop: {
632 nir_loop *loop = nir_cf_node_as_loop(cf_node);
633 progress |= opt_if_safe_cf_list(b, &loop->body);
634 break;
635 }
636
637 case nir_cf_node_function:
638 unreachable("Invalid cf type");
639 }
640 }
641
642 return progress;
643 }
644
645 bool
646 nir_opt_if(nir_shader *shader)
647 {
648 bool progress = false;
649
650 nir_foreach_function(function, shader) {
651 if (function->impl == NULL)
652 continue;
653
654 nir_builder b;
655 nir_builder_init(&b, function->impl);
656
657 nir_metadata_require(function->impl, nir_metadata_block_index |
658 nir_metadata_dominance);
659 progress = opt_if_safe_cf_list(&b, &function->impl->body);
660 nir_metadata_preserve(function->impl, nir_metadata_block_index |
661 nir_metadata_dominance);
662
663 if (opt_if_cf_list(&b, &function->impl->body)) {
664 nir_metadata_preserve(function->impl, nir_metadata_none);
665
666 /* If that made progress, we're no longer really in SSA form. We
667 * need to convert registers back into SSA defs and clean up SSA defs
668 * that don't dominate their uses.
669 */
670 nir_lower_regs_to_ssa_impl(function->impl);
671
672 progress = true;
673 }
674 }
675
676 return progress;
677 }