intel/eu/gen12: Codegen control flow instructions correctly.
mesa.git: src/intel/compiler/brw_cfg.cpp
/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "brw_cfg.h"

/** @file brw_cfg.cpp
 *
 * Walks the shader instructions generated and creates a set of basic
 * blocks with successor/predecessor edges connecting them.
 */
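
/* For illustration only: a shader whose instruction stream contains
 *
 *    ... IF ... ELSE ... ENDIF ...
 *
 * (with instructions in both branches) is split by the constructor below
 * roughly into the following blocks and edges, using '->' for logical and
 * '~>' for physical edges as in cfg_t::dump():
 *
 *    B0: ... IF          -> B1 (condition true)
 *                        -> B2 (condition false, added at ELSE)
 *    B1: then-body ELSE  -> B3 (jump over the else)
 *                        ~> B2 (fall-through)
 *    B2: else-body       -> B3
 *    B3: ENDIF ...
 */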

static bblock_t *
pop_stack(exec_list *list)
{
   bblock_link *link = (bblock_link *)list->get_tail();
   bblock_t *block = link->block;
   link->link.remove();

   return block;
}

static exec_node *
link(void *mem_ctx, bblock_t *block, enum bblock_link_kind kind)
{
   bblock_link *l = new(mem_ctx) bblock_link(block, kind);
   return &l->link;
}

void
push_stack(exec_list *list, void *mem_ctx, bblock_t *block)
{
   /* The kind of the link is immaterial, but we need to provide one since
    * this is (ab)using the edge data structure in order to implement a stack.
    */
   list->push_tail(link(mem_ctx, block, bblock_link_logical));
}

bblock_t::bblock_t(cfg_t *cfg) :
   cfg(cfg), idom(NULL), start_ip(0), end_ip(0), num(0), cycle_count(0)
{
   instructions.make_empty();
   parents.make_empty();
   children.make_empty();
}

void
bblock_t::add_successor(void *mem_ctx, bblock_t *successor,
                        enum bblock_link_kind kind)
{
   successor->parents.push_tail(::link(mem_ctx, this, kind));
   children.push_tail(::link(mem_ctx, successor, kind));
}

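/* In the two queries below the "kind" argument behaves as an upper bound on
 * the kind of edge accepted: querying with bblock_link_physical matches both
 * logical and physical edges, while bblock_link_logical matches logical edges
 * only.  The "kind <= kind" comparison relies on bblock_link_logical being
 * declared before bblock_link_physical in enum bblock_link_kind (brw_cfg.h).
 */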
bool
bblock_t::is_predecessor_of(const bblock_t *block,
                            enum bblock_link_kind kind) const
{
   foreach_list_typed_safe (bblock_link, parent, link, &block->parents) {
      if (parent->block == this && parent->kind <= kind) {
         return true;
      }
   }

   return false;
}

bool
bblock_t::is_successor_of(const bblock_t *block,
                          enum bblock_link_kind kind) const
{
   foreach_list_typed_safe (bblock_link, child, link, &block->children) {
      if (child->block == this && child->kind <= kind) {
         return true;
      }
   }

   return false;
}

static bool
ends_block(const backend_instruction *inst)
{
   enum opcode op = inst->opcode;

   return op == BRW_OPCODE_IF ||
          op == BRW_OPCODE_ELSE ||
          op == BRW_OPCODE_CONTINUE ||
          op == BRW_OPCODE_BREAK ||
          op == BRW_OPCODE_DO ||
          op == BRW_OPCODE_WHILE;
}

static bool
starts_block(const backend_instruction *inst)
{
   enum opcode op = inst->opcode;

   return op == BRW_OPCODE_DO ||
          op == BRW_OPCODE_ENDIF;
}

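/* Two blocks may be merged only when they are laid out back to back and the
 * boundary between them was not forced by control flow: the first block must
 * not end in an IF, ELSE, CONTINUE, BREAK, DO or WHILE, and the second must
 * not start with a DO or ENDIF.
 */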
bool
bblock_t::can_combine_with(const bblock_t *that) const
{
   if ((const bblock_t *)this->link.next != that)
      return false;

   if (ends_block(this->end()) ||
       starts_block(that->start()))
      return false;

   return true;
}

void
bblock_t::combine_with(bblock_t *that)
{
   assert(this->can_combine_with(that));
   foreach_list_typed (bblock_link, link, link, &that->parents) {
      assert(link->block == this);
   }

   this->end_ip = that->end_ip;
   this->instructions.append_list(&that->instructions);

   this->cfg->remove_block(that);
}

void
bblock_t::dump(backend_shader *s) const
{
   int ip = this->start_ip;
   foreach_inst_in_block(backend_instruction, inst, this) {
      fprintf(stderr, "%5d: ", ip);
      s->dump_instruction(inst);
      ip++;
   }
}

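/* Build the CFG in a single pass over the instruction list, splitting blocks
 * at the structured control-flow instructions and recording logical and
 * physical successor/predecessor edges.  Note that the instructions are moved
 * out of the source list and into the per-block instruction lists.
 *
 * Illustrative usage only -- this is roughly what backend_shader::
 * calculate_cfg() does:
 *
 *    cfg_t *cfg = new(mem_ctx) cfg_t(&shader->instructions);
 *    foreach_block (block, cfg) {
 *       ...
 *    }
 */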
cfg_t::cfg_t(exec_list *instructions)
{
   mem_ctx = ralloc_context(NULL);
   block_list.make_empty();
   blocks = NULL;
   num_blocks = 0;
   idom_dirty = true;
   cycle_count = 0;

   bblock_t *cur = NULL;
   int ip = 0;

   bblock_t *entry = new_block();
   bblock_t *cur_if = NULL;    /**< BB ending with IF. */
   bblock_t *cur_else = NULL;  /**< BB ending with ELSE. */
   bblock_t *cur_endif = NULL; /**< BB starting with ENDIF. */
   bblock_t *cur_do = NULL;    /**< BB starting with DO. */
   bblock_t *cur_while = NULL; /**< BB immediately following WHILE. */
   exec_list if_stack, else_stack, do_stack, while_stack;
   bblock_t *next;

   set_next_block(&cur, entry, ip);

   foreach_in_list_safe(backend_instruction, inst, instructions) {
      /* set_next_block wants the post-incremented ip */
      ip++;

      inst->exec_node::remove();

      switch (inst->opcode) {
      case BRW_OPCODE_IF:
         cur->instructions.push_tail(inst);

         /* Push our information onto a stack so we can recover from
          * nested ifs.
          */
         push_stack(&if_stack, mem_ctx, cur_if);
         push_stack(&else_stack, mem_ctx, cur_else);

         cur_if = cur;
         cur_else = NULL;
         cur_endif = NULL;

         /* Set up our immediately following block, full of "then"
          * instructions.
          */
         next = new_block();
         cur_if->add_successor(mem_ctx, next, bblock_link_logical);

         set_next_block(&cur, next, ip);
         break;

      case BRW_OPCODE_ELSE:
         cur->instructions.push_tail(inst);

         cur_else = cur;

         next = new_block();
         assert(cur_if != NULL);
         cur_if->add_successor(mem_ctx, next, bblock_link_logical);
         cur_else->add_successor(mem_ctx, next, bblock_link_physical);

         set_next_block(&cur, next, ip);
         break;

      case BRW_OPCODE_ENDIF: {
         if (cur->instructions.is_empty()) {
            /* New block was just created; use it. */
            cur_endif = cur;
         } else {
            cur_endif = new_block();

            cur->add_successor(mem_ctx, cur_endif, bblock_link_logical);

            set_next_block(&cur, cur_endif, ip - 1);
         }

         cur->instructions.push_tail(inst);

         if (cur_else) {
            cur_else->add_successor(mem_ctx, cur_endif, bblock_link_logical);
         } else {
            assert(cur_if != NULL);
            cur_if->add_successor(mem_ctx, cur_endif, bblock_link_logical);
         }

         assert(cur_if->end()->opcode == BRW_OPCODE_IF);
         assert(!cur_else || cur_else->end()->opcode == BRW_OPCODE_ELSE);

         /* Pop the stack so we're in the previous if/else/endif */
         cur_if = pop_stack(&if_stack);
         cur_else = pop_stack(&else_stack);
         break;
      }
      case BRW_OPCODE_DO:
         /* Push our information onto a stack so we can recover from
          * nested loops.
          */
         push_stack(&do_stack, mem_ctx, cur_do);
         push_stack(&while_stack, mem_ctx, cur_while);

         /* Set up the block just after the while.  We don't know yet
          * exactly where it will start.
          */
         cur_while = new_block();

         if (cur->instructions.is_empty()) {
            /* New block was just created; use it. */
            cur_do = cur;
         } else {
            cur_do = new_block();

            cur->add_successor(mem_ctx, cur_do, bblock_link_logical);

            set_next_block(&cur, cur_do, ip - 1);
         }

         cur->instructions.push_tail(inst);

         /* Represent divergent execution of the loop as a pair of alternative
          * edges coming out of the DO instruction: For any physical iteration
          * of the loop a given logical thread can either start off enabled
          * (which is represented as the "next" successor), or disabled (if it
          * has reached a non-uniform exit of the loop during a previous
          * iteration, which is represented as the "cur_while" successor).
          *
          * The disabled edge will be taken by the logical thread anytime we
          * arrive at the DO instruction through a back-edge coming from a
          * conditional exit of the loop where divergent control flow started.
          *
          * This guarantees that there is a control-flow path from any
          * divergence point of the loop into the convergence point
          * (immediately past the WHILE instruction) such that it overlaps the
          * whole IP region of divergent control flow (potentially the whole
          * loop) *and* doesn't imply the execution of any instructions part
          * of the loop (since the corresponding execution mask bit will be
          * disabled for a diverging thread).
          *
          * This way we make sure that any variables that are live throughout
          * the region of divergence for an inactive logical thread are also
          * considered to interfere with any other variables assigned by
          * active logical threads within the same physical region of the
          * program, since otherwise we would risk cross-channel data
          * corruption.
          */
         next = new_block();
         cur->add_successor(mem_ctx, next, bblock_link_logical);
         cur->add_successor(mem_ctx, cur_while, bblock_link_physical);
         set_next_block(&cur, next, ip);
         break;

      case BRW_OPCODE_CONTINUE:
         cur->instructions.push_tail(inst);

         /* A conditional CONTINUE may start a region of divergent control
          * flow until the start of the next loop iteration (*not* until the
          * end of the loop, which is why the successor is not the top-level
          * divergence point at cur_do).  The live interval of any variable
          * extending through a CONTINUE edge is guaranteed to overlap the
          * whole region of divergent execution, because any variable live-out
          * at the CONTINUE instruction will also be live-in at the top of the
          * loop, and therefore also live-out at the bottom-most point of the
          * loop which is reachable from the top (since a control flow path
          * exists from a definition of the variable through this CONTINUE
          * instruction, the top of the loop, the (reachable) bottom of the
          * loop, the top of the loop again, into a use of the variable).
          */
         assert(cur_do != NULL);
         cur->add_successor(mem_ctx, cur_do->next(), bblock_link_logical);

         next = new_block();
         if (inst->predicate)
            cur->add_successor(mem_ctx, next, bblock_link_logical);
         else
            cur->add_successor(mem_ctx, next, bblock_link_physical);

         set_next_block(&cur, next, ip);
         break;

      case BRW_OPCODE_BREAK:
         cur->instructions.push_tail(inst);

         /* A conditional BREAK instruction may start a region of divergent
          * control flow until the end of the loop if the condition is
          * non-uniform, in which case the loop will execute additional
          * iterations with the present channel disabled.  We model this as a
          * control flow path from the divergence point to the convergence
          * point that overlaps the whole IP range of the loop and skips over
          * the execution of any other instructions part of the loop.
          *
          * See the DO case for additional explanation.
          */
         assert(cur_do != NULL);
         cur->add_successor(mem_ctx, cur_do, bblock_link_physical);
         cur->add_successor(mem_ctx, cur_while, bblock_link_logical);

         next = new_block();
         if (inst->predicate)
            cur->add_successor(mem_ctx, next, bblock_link_logical);

         set_next_block(&cur, next, ip);
         break;

      case BRW_OPCODE_WHILE:
         cur->instructions.push_tail(inst);

         assert(cur_do != NULL && cur_while != NULL);

         /* A conditional WHILE instruction may start a region of divergent
          * control flow until the end of the loop, just like the BREAK
          * instruction.  See the BREAK case for more details.  On the other
          * hand, an unconditional WHILE instruction is non-divergent (just
          * like an unconditional CONTINUE), and will necessarily lead to the
          * execution of an additional iteration of the loop for all enabled
          * channels, so we may skip over the divergence point at the top of
          * the loop to keep the CFG as unambiguous as possible.
          */
         if (inst->predicate) {
            cur->add_successor(mem_ctx, cur_do, bblock_link_logical);
         } else {
            cur->add_successor(mem_ctx, cur_do->next(), bblock_link_logical);
         }

         set_next_block(&cur, cur_while, ip);

         /* Pop the stack so we're in the previous loop */
         cur_do = pop_stack(&do_stack);
         cur_while = pop_stack(&while_stack);
         break;

      default:
         cur->instructions.push_tail(inst);
         break;
      }
   }

   cur->end_ip = ip - 1;

   make_block_array();
}

cfg_t::~cfg_t()
{
   ralloc_free(mem_ctx);
}

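/* Remove BLOCK from the CFG, connecting each of its predecessors directly to
 * each of its successors (without duplicating edges that already exist) and
 * renumbering the remaining blocks.
 */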
void
cfg_t::remove_block(bblock_t *block)
{
   foreach_list_typed_safe (bblock_link, predecessor, link, &block->parents) {
      /* Remove block from all of its predecessors' successor lists. */
      foreach_list_typed_safe (bblock_link, successor, link,
                               &predecessor->block->children) {
         if (block == successor->block) {
            successor->link.remove();
            ralloc_free(successor);
         }
      }

      /* Add removed-block's successors to its predecessors' successor lists. */
      foreach_list_typed (bblock_link, successor, link, &block->children) {
         if (!successor->block->is_successor_of(predecessor->block,
                                                successor->kind)) {
            predecessor->block->children.push_tail(link(mem_ctx,
                                                        successor->block,
                                                        successor->kind));
         }
      }
   }

   foreach_list_typed_safe (bblock_link, successor, link, &block->children) {
      /* Remove block from all of its children's parent lists. */
      foreach_list_typed_safe (bblock_link, predecessor, link,
                               &successor->block->parents) {
         if (block == predecessor->block) {
            predecessor->link.remove();
            ralloc_free(predecessor);
         }
      }

      /* Add removed-block's predecessors to its successors' predecessor lists. */
      foreach_list_typed (bblock_link, predecessor, link, &block->parents) {
         if (!predecessor->block->is_predecessor_of(successor->block,
                                                    predecessor->kind)) {
            successor->block->parents.push_tail(link(mem_ctx,
                                                     predecessor->block,
                                                     predecessor->kind));
         }
      }
   }

   block->link.remove();

   for (int b = block->num; b < this->num_blocks - 1; b++) {
      this->blocks[b] = this->blocks[b + 1];
      this->blocks[b]->num = b;
   }

   this->blocks[this->num_blocks - 1]->num = this->num_blocks - 2;
   this->num_blocks--;
   idom_dirty = true;
}

bblock_t *
cfg_t::new_block()
{
   bblock_t *block = new(mem_ctx) bblock_t(this);

   return block;
}

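/* Append BLOCK to the block list and make it the current block, closing out
 * the previous current block (if any) at IP - 1 and starting BLOCK at IP.
 */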
void
cfg_t::set_next_block(bblock_t **cur, bblock_t *block, int ip)
{
   if (*cur) {
      (*cur)->end_ip = ip - 1;
   }

   block->start_ip = ip;
   block->num = num_blocks++;
   block_list.push_tail(&block->link);
   *cur = block;
}

void
cfg_t::make_block_array()
{
   blocks = ralloc_array(mem_ctx, bblock_t *, num_blocks);

   int i = 0;
   foreach_block (block, this) {
      blocks[i++] = block;
   }
   assert(i == num_blocks);
}

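/* Print the CFG to stderr: for each block, its immediate dominator, its
 * predecessor and successor edges, and (if a shader is provided) its
 * instructions.  Logical edges are printed with '-' and physical edges
 * with '~'.
 */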
void
cfg_t::dump(backend_shader *s)
{
   if (idom_dirty)
      calculate_idom();

   foreach_block (block, this) {
      if (block->idom)
         fprintf(stderr, "START B%d IDOM(B%d)", block->num, block->idom->num);
      else
         fprintf(stderr, "START B%d IDOM(none)", block->num);

      foreach_list_typed(bblock_link, link, link, &block->parents) {
         fprintf(stderr, " <%cB%d",
                 link->kind == bblock_link_logical ? '-' : '~',
                 link->block->num);
      }
      fprintf(stderr, "\n");
      if (s != NULL)
         block->dump(s);
      fprintf(stderr, "END B%d", block->num);
      foreach_list_typed(bblock_link, link, link, &block->children) {
         fprintf(stderr, " %c>B%d",
                 link->kind == bblock_link_logical ? '-' : '~',
                 link->block->num);
      }
      fprintf(stderr, "\n");
   }
}

/* Calculates the immediate dominator of each block, according to "A Simple,
 * Fast Dominance Algorithm" by Keith D. Cooper, Timothy J. Harvey, and Ken
 * Kennedy.
 *
 * The authors claim that, for the control flow graph sizes normally
 * encountered (less than 1000 nodes), this algorithm is significantly faster
 * than others such as Lengauer-Tarjan.
 */
void
cfg_t::calculate_idom()
{
   foreach_block(block, this) {
      block->idom = NULL;
   }
   blocks[0]->idom = blocks[0];

   bool changed;
   do {
      changed = false;

      foreach_block(block, this) {
         if (block->num == 0)
            continue;

         bblock_t *new_idom = NULL;
         foreach_list_typed(bblock_link, parent, link, &block->parents) {
            if (parent->block->idom) {
               if (new_idom == NULL) {
                  new_idom = parent->block;
               } else if (parent->block->idom != NULL) {
                  new_idom = intersect(parent->block, new_idom);
               }
            }
         }

         if (block->idom != new_idom) {
            block->idom = new_idom;
            changed = true;
         }
      }
   } while (changed);

   idom_dirty = false;
}

bblock_t *
cfg_t::intersect(bblock_t *b1, bblock_t *b2)
{
   /* Note, the comparisons here are the opposite of what the paper says
    * because we index blocks from beginning -> end (i.e. reverse post-order)
    * instead of post-order like they assume.
    */
   while (b1->num != b2->num) {
      while (b1->num > b2->num)
         b1 = b1->idom;
      while (b2->num > b1->num)
         b2 = b2->idom;
   }
   assert(b1);
   return b1;
}

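/* Emit the CFG (and, below, the dominance tree) as a Graphviz digraph on
 * stdout; the output can be piped through e.g. "dot -Tsvg" to visualize it.
 */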
void
cfg_t::dump_cfg()
{
   printf("digraph CFG {\n");
   for (int b = 0; b < num_blocks; b++) {
      bblock_t *block = this->blocks[b];

      foreach_list_typed_safe (bblock_link, child, link, &block->children) {
         printf("\t%d -> %d\n", b, child->block->num);
      }
   }
   printf("}\n");
}

void
cfg_t::dump_domtree()
{
   printf("digraph DominanceTree {\n");
   foreach_block(block, this) {
      if (block->idom) {
         printf("\t%d -> %d\n", block->idom->num, block->num);
      }
   }
   printf("}\n");
}