b71c36ed68b5d5457a119ac8e3e7c4ac37691a04
2 * Copyright © 2012 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
29 #include "brw_shader.h"
33 * Walks the shader instructions generated and creates a set of basic
34 * blocks with successor/predecessor edges connecting them.
40 pop_stack(exec_list
*list
)
42 bblock_link
*link
= (bblock_link
*)list
->get_tail();
43 bblock_t
*block
= link
->block
;
50 link(void *mem_ctx
, bblock_t
*block
, enum bblock_link_kind kind
)
52 bblock_link
*l
= new(mem_ctx
) bblock_link(block
, kind
);
57 push_stack(exec_list
*list
, void *mem_ctx
, bblock_t
*block
)
59 /* The kind of the link is immaterial, but we need to provide one since
60 * this is (ab)using the edge data structure in order to implement a stack.
62 list
->push_tail(link(mem_ctx
, block
, bblock_link_logical
));
65 bblock_t::bblock_t(cfg_t
*cfg
) :
66 cfg(cfg
), idom(NULL
), start_ip(0), end_ip(0), num(0), cycle_count(0)
68 instructions
.make_empty();
70 children
.make_empty();
74 bblock_t::add_successor(void *mem_ctx
, bblock_t
*successor
,
75 enum bblock_link_kind kind
)
77 successor
->parents
.push_tail(::link(mem_ctx
, this, kind
));
78 children
.push_tail(::link(mem_ctx
, successor
, kind
));
82 bblock_t::is_predecessor_of(const bblock_t
*block
,
83 enum bblock_link_kind kind
) const
85 foreach_list_typed_safe (bblock_link
, parent
, link
, &block
->parents
) {
86 if (parent
->block
== this && parent
->kind
<= kind
) {
95 bblock_t::is_successor_of(const bblock_t
*block
,
96 enum bblock_link_kind kind
) const
98 foreach_list_typed_safe (bblock_link
, child
, link
, &block
->children
) {
99 if (child
->block
== this && child
->kind
<= kind
) {
108 ends_block(const backend_instruction
*inst
)
110 enum opcode op
= inst
->opcode
;
112 return op
== BRW_OPCODE_IF
||
113 op
== BRW_OPCODE_ELSE
||
114 op
== BRW_OPCODE_CONTINUE
||
115 op
== BRW_OPCODE_BREAK
||
116 op
== BRW_OPCODE_DO
||
117 op
== BRW_OPCODE_WHILE
;
121 starts_block(const backend_instruction
*inst
)
123 enum opcode op
= inst
->opcode
;
125 return op
== BRW_OPCODE_DO
||
126 op
== BRW_OPCODE_ENDIF
;
130 bblock_t::can_combine_with(const bblock_t
*that
) const
132 if ((const bblock_t
*)this->link
.next
!= that
)
135 if (ends_block(this->end()) ||
136 starts_block(that
->start()))
143 bblock_t::combine_with(bblock_t
*that
)
145 assert(this->can_combine_with(that
));
146 foreach_list_typed (bblock_link
, link
, link
, &that
->parents
) {
147 assert(link
->block
== this);
150 this->end_ip
= that
->end_ip
;
151 this->instructions
.append_list(&that
->instructions
);
153 this->cfg
->remove_block(that
);
157 bblock_t::dump(backend_shader
*s
) const
159 int ip
= this->start_ip
;
160 foreach_inst_in_block(backend_instruction
, inst
, this) {
161 fprintf(stderr
, "%5d: ", ip
);
162 s
->dump_instruction(inst
);
167 cfg_t::cfg_t(exec_list
*instructions
)
169 mem_ctx
= ralloc_context(NULL
);
170 block_list
.make_empty();
175 bblock_t
*cur
= NULL
;
178 bblock_t
*entry
= new_block();
179 bblock_t
*cur_if
= NULL
; /**< BB ending with IF. */
180 bblock_t
*cur_else
= NULL
; /**< BB ending with ELSE. */
181 bblock_t
*cur_endif
= NULL
; /**< BB starting with ENDIF. */
182 bblock_t
*cur_do
= NULL
; /**< BB starting with DO. */
183 bblock_t
*cur_while
= NULL
; /**< BB immediately following WHILE. */
184 exec_list if_stack
, else_stack
, do_stack
, while_stack
;
187 set_next_block(&cur
, entry
, ip
);
189 foreach_in_list_safe(backend_instruction
, inst
, instructions
) {
190 /* set_next_block wants the post-incremented ip */
193 inst
->exec_node::remove();
195 switch (inst
->opcode
) {
197 cur
->instructions
.push_tail(inst
);
199 /* Push our information onto a stack so we can recover from
202 push_stack(&if_stack
, mem_ctx
, cur_if
);
203 push_stack(&else_stack
, mem_ctx
, cur_else
);
209 /* Set up our immediately following block, full of "then"
213 cur_if
->add_successor(mem_ctx
, next
, bblock_link_logical
);
215 set_next_block(&cur
, next
, ip
);
218 case BRW_OPCODE_ELSE
:
219 cur
->instructions
.push_tail(inst
);
224 assert(cur_if
!= NULL
);
225 cur_if
->add_successor(mem_ctx
, next
, bblock_link_logical
);
226 cur_else
->add_successor(mem_ctx
, next
, bblock_link_physical
);
228 set_next_block(&cur
, next
, ip
);
231 case BRW_OPCODE_ENDIF
: {
232 if (cur
->instructions
.is_empty()) {
233 /* New block was just created; use it. */
236 cur_endif
= new_block();
238 cur
->add_successor(mem_ctx
, cur_endif
, bblock_link_logical
);
240 set_next_block(&cur
, cur_endif
, ip
- 1);
243 cur
->instructions
.push_tail(inst
);
246 cur_else
->add_successor(mem_ctx
, cur_endif
, bblock_link_logical
);
248 assert(cur_if
!= NULL
);
249 cur_if
->add_successor(mem_ctx
, cur_endif
, bblock_link_logical
);
252 assert(cur_if
->end()->opcode
== BRW_OPCODE_IF
);
253 assert(!cur_else
|| cur_else
->end()->opcode
== BRW_OPCODE_ELSE
);
255 /* Pop the stack so we're in the previous if/else/endif */
256 cur_if
= pop_stack(&if_stack
);
257 cur_else
= pop_stack(&else_stack
);
261 /* Push our information onto a stack so we can recover from
264 push_stack(&do_stack
, mem_ctx
, cur_do
);
265 push_stack(&while_stack
, mem_ctx
, cur_while
);
267 /* Set up the block just after the while. Don't know when exactly
268 * it will start, yet.
270 cur_while
= new_block();
272 if (cur
->instructions
.is_empty()) {
273 /* New block was just created; use it. */
276 cur_do
= new_block();
278 cur
->add_successor(mem_ctx
, cur_do
, bblock_link_logical
);
280 set_next_block(&cur
, cur_do
, ip
- 1);
283 cur
->instructions
.push_tail(inst
);
285 /* Represent divergent execution of the loop as a pair of alternative
286 * edges coming out of the DO instruction: For any physical iteration
287 * of the loop a given logical thread can either start off enabled
288 * (which is represented as the "next" successor), or disabled (if it
289 * has reached a non-uniform exit of the loop during a previous
290 * iteration, which is represented as the "cur_while" successor).
292 * The disabled edge will be taken by the logical thread anytime we
293 * arrive at the DO instruction through a back-edge coming from a
294 * conditional exit of the loop where divergent control flow started.
296 * This guarantees that there is a control-flow path from any
297 * divergence point of the loop into the convergence point
298 * (immediately past the WHILE instruction) such that it overlaps the
299 * whole IP region of divergent control flow (potentially the whole
300 * loop) *and* doesn't imply the execution of any instructions part
301 * of the loop (since the corresponding execution mask bit will be
302 * disabled for a diverging thread).
304 * This way we make sure that any variables that are live throughout
305 * the region of divergence for an inactive logical thread are also
306 * considered to interfere with any other variables assigned by
307 * active logical threads within the same physical region of the
308 * program, since otherwise we would risk cross-channel data
312 cur
->add_successor(mem_ctx
, next
, bblock_link_logical
);
313 cur
->add_successor(mem_ctx
, cur_while
, bblock_link_physical
);
314 set_next_block(&cur
, next
, ip
);
317 case BRW_OPCODE_CONTINUE
:
318 cur
->instructions
.push_tail(inst
);
320 /* A conditional CONTINUE may start a region of divergent control
321 * flow until the start of the next loop iteration (*not* until the
322 * end of the loop which is why the successor is not the top-level
323 * divergence point at cur_do). The live interval of any variable
324 * extending through a CONTINUE edge is guaranteed to overlap the
325 * whole region of divergent execution, because any variable live-out
326 * at the CONTINUE instruction will also be live-in at the top of the
327 * loop, and therefore also live-out at the bottom-most point of the
328 * loop which is reachable from the top (since a control flow path
329 * exists from a definition of the variable through this CONTINUE
330 * instruction, the top of the loop, the (reachable) bottom of the
331 * loop, the top of the loop again, into a use of the variable).
333 assert(cur_do
!= NULL
);
334 cur
->add_successor(mem_ctx
, cur_do
->next(), bblock_link_logical
);
338 cur
->add_successor(mem_ctx
, next
, bblock_link_logical
);
340 cur
->add_successor(mem_ctx
, next
, bblock_link_physical
);
342 set_next_block(&cur
, next
, ip
);
345 case BRW_OPCODE_BREAK
:
346 cur
->instructions
.push_tail(inst
);
348 /* A conditional BREAK instruction may start a region of divergent
349 * control flow until the end of the loop if the condition is
350 * non-uniform, in which case the loop will execute additional
351 * iterations with the present channel disabled. We model this as a
352 * control flow path from the divergence point to the convergence
353 * point that overlaps the whole IP range of the loop and skips over
354 * the execution of any other instructions part of the loop.
356 * See the DO case for additional explanation.
358 assert(cur_do
!= NULL
);
359 cur
->add_successor(mem_ctx
, cur_do
, bblock_link_physical
);
360 cur
->add_successor(mem_ctx
, cur_while
, bblock_link_logical
);
364 cur
->add_successor(mem_ctx
, next
, bblock_link_logical
);
366 set_next_block(&cur
, next
, ip
);
369 case BRW_OPCODE_WHILE
:
370 cur
->instructions
.push_tail(inst
);
372 assert(cur_do
!= NULL
&& cur_while
!= NULL
);
374 /* A conditional WHILE instruction may start a region of divergent
375 * control flow until the end of the loop, just like the BREAK
376 * instruction. See the BREAK case for more details. OTOH an
377 * unconditional WHILE instruction is non-divergent (just like an
378 * unconditional CONTINUE), and will necessarily lead to the
379 * execution of an additional iteration of the loop for all enabled
380 * channels, so we may skip over the divergence point at the top of
381 * the loop to keep the CFG as unambiguous as possible.
383 if (inst
->predicate
) {
384 cur
->add_successor(mem_ctx
, cur_do
, bblock_link_logical
);
386 cur
->add_successor(mem_ctx
, cur_do
->next(), bblock_link_logical
);
389 set_next_block(&cur
, cur_while
, ip
);
391 /* Pop the stack so we're in the previous loop */
392 cur_do
= pop_stack(&do_stack
);
393 cur_while
= pop_stack(&while_stack
);
397 cur
->instructions
.push_tail(inst
);
402 cur
->end_ip
= ip
- 1;
409 ralloc_free(mem_ctx
);
413 cfg_t::remove_block(bblock_t
*block
)
415 foreach_list_typed_safe (bblock_link
, predecessor
, link
, &block
->parents
) {
416 /* Remove block from all of its predecessors' successor lists. */
417 foreach_list_typed_safe (bblock_link
, successor
, link
,
418 &predecessor
->block
->children
) {
419 if (block
== successor
->block
) {
420 successor
->link
.remove();
421 ralloc_free(successor
);
425 /* Add removed-block's successors to its predecessors' successor lists. */
426 foreach_list_typed (bblock_link
, successor
, link
, &block
->children
) {
427 if (!successor
->block
->is_successor_of(predecessor
->block
,
429 predecessor
->block
->children
.push_tail(link(mem_ctx
,
436 foreach_list_typed_safe (bblock_link
, successor
, link
, &block
->children
) {
437 /* Remove block from all of its childrens' parents lists. */
438 foreach_list_typed_safe (bblock_link
, predecessor
, link
,
439 &successor
->block
->parents
) {
440 if (block
== predecessor
->block
) {
441 predecessor
->link
.remove();
442 ralloc_free(predecessor
);
446 /* Add removed-block's predecessors to its successors' predecessor lists. */
447 foreach_list_typed (bblock_link
, predecessor
, link
, &block
->parents
) {
448 if (!predecessor
->block
->is_predecessor_of(successor
->block
,
449 predecessor
->kind
)) {
450 successor
->block
->parents
.push_tail(link(mem_ctx
,
457 block
->link
.remove();
459 for (int b
= block
->num
; b
< this->num_blocks
- 1; b
++) {
460 this->blocks
[b
] = this->blocks
[b
+ 1];
461 this->blocks
[b
]->num
= b
;
464 this->blocks
[this->num_blocks
- 1]->num
= this->num_blocks
- 2;
471 bblock_t
*block
= new(mem_ctx
) bblock_t(this);
477 cfg_t::set_next_block(bblock_t
**cur
, bblock_t
*block
, int ip
)
480 (*cur
)->end_ip
= ip
- 1;
483 block
->start_ip
= ip
;
484 block
->num
= num_blocks
++;
485 block_list
.push_tail(&block
->link
);
490 cfg_t::make_block_array()
492 blocks
= ralloc_array(mem_ctx
, bblock_t
*, num_blocks
);
495 foreach_block (block
, this) {
498 assert(i
== num_blocks
);
502 cfg_t::dump(backend_shader
*s
)
504 const idom_tree
*idom
= (s
? &s
->idom_analysis
.require() : NULL
);
506 foreach_block (block
, this) {
508 fprintf(stderr
, "START B%d IDOM(B%d)", block
->num
, block
->idom
->num
);
510 fprintf(stderr
, "START B%d IDOM(none)", block
->num
);
512 foreach_list_typed(bblock_link
, link
, link
, &block
->parents
) {
513 fprintf(stderr
, " <%cB%d",
514 link
->kind
== bblock_link_logical
? '-' : '~',
517 fprintf(stderr
, "\n");
520 fprintf(stderr
, "END B%d", block
->num
);
521 foreach_list_typed(bblock_link
, link
, link
, &block
->children
) {
522 fprintf(stderr
, " %c>B%d",
523 link
->kind
== bblock_link_logical
? '-' : '~',
526 fprintf(stderr
, "\n");
/* Calculates the immediate dominator of each block, according to "A Simple,
 * Fast Dominance Algorithm" by Keith D. Cooper, Timothy J. Harvey, and Ken
 * Kennedy.
 *
 * The authors claim that for control flow graphs of sizes normally encountered
 * (less than 1000 nodes) that this algorithm is significantly faster than
 * others like Lengauer-Tarjan.
 */
538 idom_tree::idom_tree(const backend_shader
*s
)
540 foreach_block(block
, s
->cfg
) {
543 s
->cfg
->blocks
[0]->idom
= s
->cfg
->blocks
[0];
549 foreach_block(block
, s
->cfg
) {
553 bblock_t
*new_idom
= NULL
;
554 foreach_list_typed(bblock_link
, parent
, link
, &block
->parents
) {
555 if (parent
->block
->idom
) {
556 if (new_idom
== NULL
) {
557 new_idom
= parent
->block
;
558 } else if (parent
->block
->idom
!= NULL
) {
559 new_idom
= intersect(parent
->block
, new_idom
);
564 if (block
->idom
!= new_idom
) {
565 block
->idom
= new_idom
;
573 idom_tree::intersect(bblock_t
*b1
, bblock_t
*b2
) const
575 /* Note, the comparisons here are the opposite of what the paper says
576 * because we index blocks from beginning -> end (i.e. reverse post-order)
577 * instead of post-order like they assume.
579 while (b1
->num
!= b2
->num
) {
580 while (b1
->num
> b2
->num
)
582 while (b2
->num
> b1
->num
)
590 idom_tree::dump(const backend_shader
*s
) const
592 printf("digraph DominanceTree {\n");
593 foreach_block(block
, s
->cfg
) {
595 printf("\t%d -> %d\n", block
->idom
->num
, block
->num
);
604 printf("digraph CFG {\n");
605 for (int b
= 0; b
< num_blocks
; b
++) {
606 bblock_t
*block
= this->blocks
[b
];
608 foreach_list_typed_safe (bblock_link
, child
, link
, &block
->children
) {
609 printf("\t%d -> %d\n", b
, child
->block
->num
);