2 * Copyright © 2012 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
33 #include "brw_ir_analysis.h"
/**
 * A logical edge represents a potential control flow path of the original
 * scalar program, while a physical edge represents a control flow path that
 * may not have existed in the original program but was introduced during
 * vectorization in order to implement divergent control flow of different
 * shader invocations within the same SIMD thread.
 *
 * All logical edges in the CFG are considered to be physical edges but not
 * the other way around -- i.e. the logical CFG is a subset of the physical
 * CFG.
 */
51 enum bblock_link_kind
{
52 bblock_link_logical
= 0,
58 DECLARE_RALLOC_CXX_OPERATORS(bblock_link
)
60 bblock_link(bblock_t
*block
, enum bblock_link_kind kind
)
61 : block(block
), kind(kind
)
66 struct exec_node link
;
67 struct bblock_t
*block
;
69 /* Type of this CFG edge. Because bblock_link_logical also implies
70 * bblock_link_physical, the proper way to test for membership of edge 'l'
71 * in CFG kind 'k' is 'l.kind <= k'.
73 enum bblock_link_kind kind
;
76 struct backend_shader
;
81 DECLARE_RALLOC_CXX_OPERATORS(bblock_t
)
83 explicit bblock_t(cfg_t
*cfg
);
85 void add_successor(void *mem_ctx
, bblock_t
*successor
,
86 enum bblock_link_kind kind
);
87 bool is_predecessor_of(const bblock_t
*block
,
88 enum bblock_link_kind kind
) const;
89 bool is_successor_of(const bblock_t
*block
,
90 enum bblock_link_kind kind
) const;
91 bool can_combine_with(const bblock_t
*that
) const;
92 void combine_with(bblock_t
*that
);
95 backend_instruction
*start();
96 const backend_instruction
*start() const;
97 backend_instruction
*end();
98 const backend_instruction
*end() const;
101 const bblock_t
*next() const;
103 const bblock_t
*prev() const;
105 bool starts_with_control_flow() const;
106 bool ends_with_control_flow() const;
108 backend_instruction
*first_non_control_flow_inst();
109 backend_instruction
*last_non_control_flow_inst();
112 struct exec_node link
;
118 struct exec_list instructions
;
119 struct exec_list parents
;
120 struct exec_list children
;
123 unsigned cycle_count
;
126 static inline struct backend_instruction
*
127 bblock_start(struct bblock_t
*block
)
129 return (struct backend_instruction
*)exec_list_get_head(&block
->instructions
);
132 static inline const struct backend_instruction
*
133 bblock_start_const(const struct bblock_t
*block
)
135 return (const struct backend_instruction
*)exec_list_get_head_const(&block
->instructions
);
138 static inline struct backend_instruction
*
139 bblock_end(struct bblock_t
*block
)
141 return (struct backend_instruction
*)exec_list_get_tail(&block
->instructions
);
144 static inline const struct backend_instruction
*
145 bblock_end_const(const struct bblock_t
*block
)
147 return (const struct backend_instruction
*)exec_list_get_tail_const(&block
->instructions
);
150 static inline struct bblock_t
*
151 bblock_next(struct bblock_t
*block
)
153 if (exec_node_is_tail_sentinel(block
->link
.next
))
156 return (struct bblock_t
*)block
->link
.next
;
159 static inline const struct bblock_t
*
160 bblock_next_const(const struct bblock_t
*block
)
162 if (exec_node_is_tail_sentinel(block
->link
.next
))
165 return (const struct bblock_t
*)block
->link
.next
;
168 static inline struct bblock_t
*
169 bblock_prev(struct bblock_t
*block
)
171 if (exec_node_is_head_sentinel(block
->link
.prev
))
174 return (struct bblock_t
*)block
->link
.prev
;
177 static inline const struct bblock_t
*
178 bblock_prev_const(const struct bblock_t
*block
)
180 if (exec_node_is_head_sentinel(block
->link
.prev
))
183 return (const struct bblock_t
*)block
->link
.prev
;
187 bblock_starts_with_control_flow(const struct bblock_t
*block
)
189 enum opcode op
= bblock_start_const(block
)->opcode
;
190 return op
== BRW_OPCODE_DO
|| op
== BRW_OPCODE_ENDIF
;
194 bblock_ends_with_control_flow(const struct bblock_t
*block
)
196 enum opcode op
= bblock_end_const(block
)->opcode
;
197 return op
== BRW_OPCODE_IF
||
198 op
== BRW_OPCODE_ELSE
||
199 op
== BRW_OPCODE_WHILE
||
200 op
== BRW_OPCODE_BREAK
||
201 op
== BRW_OPCODE_CONTINUE
;
204 static inline struct backend_instruction
*
205 bblock_first_non_control_flow_inst(struct bblock_t
*block
)
207 struct backend_instruction
*inst
= bblock_start(block
);
208 if (bblock_starts_with_control_flow(block
))
210 inst
= (struct backend_instruction
*)inst
->next
;
212 inst
= (struct backend_instruction
*)inst
->link
.next
;
217 static inline struct backend_instruction
*
218 bblock_last_non_control_flow_inst(struct bblock_t
*block
)
220 struct backend_instruction
*inst
= bblock_end(block
);
221 if (bblock_ends_with_control_flow(block
))
223 inst
= (struct backend_instruction
*)inst
->prev
;
225 inst
= (struct backend_instruction
*)inst
->link
.prev
;
231 inline backend_instruction
*
234 return bblock_start(this);
237 inline const backend_instruction
*
238 bblock_t::start() const
240 return bblock_start_const(this);
243 inline backend_instruction
*
246 return bblock_end(this);
249 inline const backend_instruction
*
250 bblock_t::end() const
252 return bblock_end_const(this);
258 return bblock_next(this);
261 inline const bblock_t
*
262 bblock_t::next() const
264 return bblock_next_const(this);
270 return bblock_prev(this);
273 inline const bblock_t
*
274 bblock_t::prev() const
276 return bblock_prev_const(this);
280 bblock_t::starts_with_control_flow() const
282 return bblock_starts_with_control_flow(this);
286 bblock_t::ends_with_control_flow() const
288 return bblock_ends_with_control_flow(this);
291 inline backend_instruction
*
292 bblock_t::first_non_control_flow_inst()
294 return bblock_first_non_control_flow_inst(this);
297 inline backend_instruction
*
298 bblock_t::last_non_control_flow_inst()
300 return bblock_last_non_control_flow_inst(this);
306 DECLARE_RALLOC_CXX_OPERATORS(cfg_t
)
308 cfg_t(const backend_shader
*s
, exec_list
*instructions
);
311 void remove_block(bblock_t
*block
);
313 bblock_t
*first_block();
314 const bblock_t
*first_block() const;
315 bblock_t
*last_block();
316 const bblock_t
*last_block() const;
318 bblock_t
*new_block();
319 void set_next_block(bblock_t
**cur
, bblock_t
*block
, int ip
);
320 void make_block_array();
325 const struct backend_shader
*s
;
328 /** Ordered list (by ip) of basic blocks */
329 struct exec_list block_list
;
330 struct bblock_t
**blocks
;
333 unsigned cycle_count
;
336 static inline struct bblock_t
*
337 cfg_first_block(struct cfg_t
*cfg
)
339 return (struct bblock_t
*)exec_list_get_head(&cfg
->block_list
);
342 static inline const struct bblock_t
*
343 cfg_first_block_const(const struct cfg_t
*cfg
)
345 return (const struct bblock_t
*)exec_list_get_head_const(&cfg
->block_list
);
348 static inline struct bblock_t
*
349 cfg_last_block(struct cfg_t
*cfg
)
351 return (struct bblock_t
*)exec_list_get_tail(&cfg
->block_list
);
354 static inline const struct bblock_t
*
355 cfg_last_block_const(const struct cfg_t
*cfg
)
357 return (const struct bblock_t
*)exec_list_get_tail_const(&cfg
->block_list
);
364 return cfg_first_block(this);
367 const inline bblock_t
*
368 cfg_t::first_block() const
370 return cfg_first_block_const(this);
376 return cfg_last_block(this);
379 const inline bblock_t
*
380 cfg_t::last_block() const
382 return cfg_last_block_const(this);
/* Note that this is implemented with a double for loop -- break will
 * break from the inner loop only!
 */
#define foreach_block_and_inst(__block, __type, __inst, __cfg) \
   foreach_block (__block, __cfg) \
      foreach_inst_in_block (__type, __inst, __block)

/* Note that this is implemented with a double for loop -- break will
 * break from the inner loop only!
 */
#define foreach_block_and_inst_safe(__block, __type, __inst, __cfg) \
   foreach_block_safe (__block, __cfg) \
      foreach_inst_in_block_safe (__type, __inst, __block)

#define foreach_block(__block, __cfg) \
   foreach_list_typed (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_block_reverse(__block, __cfg) \
   foreach_list_typed_reverse (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_block_safe(__block, __cfg) \
   foreach_list_typed_safe (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_block_reverse_safe(__block, __cfg) \
   foreach_list_typed_reverse_safe (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_inst_in_block(__type, __inst, __block) \
   foreach_in_list(__type, __inst, &(__block)->instructions)

/* NOTE(review): the loop condition and step of this macro were lost in
 * extraction and reconstructed from the visible '__next' bookkeeping;
 * confirm against upstream. */
#define foreach_inst_in_block_safe(__type, __inst, __block) \
   for (__type *__inst = (__type *)__block->instructions.head_sentinel.next, \
               *__next = (__type *)__inst->next; \
        __next != NULL; \
        __inst = __next, \
        __next = (__type *)__next->next)

#define foreach_inst_in_block_reverse(__type, __inst, __block) \
   foreach_in_list_reverse(__type, __inst, &(__block)->instructions)

#define foreach_inst_in_block_reverse_safe(__type, __inst, __block) \
   foreach_in_list_reverse_safe(__type, __inst, &(__block)->instructions)

#define foreach_inst_in_block_starting_from(__type, __scan_inst, __inst) \
   for (__type *__scan_inst = (__type *)__inst->next; \
        !__scan_inst->is_tail_sentinel(); \
        __scan_inst = (__type *)__scan_inst->next)

#define foreach_inst_in_block_reverse_starting_from(__type, __scan_inst, __inst) \
   for (__type *__scan_inst = (__type *)__inst->prev; \
        !__scan_inst->is_head_sentinel(); \
        __scan_inst = (__type *)__scan_inst->prev)
441 * Immediate dominator tree analysis of a shader.
444 idom_tree(const backend_shader
*s
);
448 validate(const backend_shader
*) const
454 analysis_dependency_class
455 dependency_class() const
457 return DEPENDENCY_BLOCKS
;
461 parent(const bblock_t
*b
) const
463 assert(unsigned(b
->num
) < num_parents
);
464 return parents
[b
->num
];
468 parent(bblock_t
*b
) const
470 assert(unsigned(b
->num
) < num_parents
);
471 return parents
[b
->num
];
475 intersect(bblock_t
*b1
, bblock_t
*b2
) const;
481 unsigned num_parents
;
487 #endif /* BRW_CFG_H */