/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */
33 #include "brw_ir_analysis.h"
/**
 * A logical edge represents a potential control flow path of the original
 * scalar program, while a physical edge represents a control flow path that
 * may not have existed in the original program but was introduced during
 * vectorization in order to implement divergent control flow of different
 * shader invocations within the same SIMD thread.
 *
 * All logical edges in the CFG are considered to be physical edges but not
 * the other way around -- i.e. the logical CFG is a subset of the physical
 * CFG.
 */
/* Kind of a CFG edge.  The numeric ordering matters: logical edges are a
 * subset of physical edges, so membership of an edge 'l' in CFG kind 'k'
 * is tested with 'l.kind <= k' (see bblock_link::kind).
 */
enum bblock_link_kind
{
   bblock_link_logical = 0,
   bblock_link_physical
};
58 DECLARE_RALLOC_CXX_OPERATORS(bblock_link
)
60 bblock_link(bblock_t
*block
, enum bblock_link_kind kind
)
61 : block(block
), kind(kind
)
66 struct exec_node link
;
67 struct bblock_t
*block
;
69 /* Type of this CFG edge. Because bblock_link_logical also implies
70 * bblock_link_physical, the proper way to test for membership of edge 'l'
71 * in CFG kind 'k' is 'l.kind <= k'.
73 enum bblock_link_kind kind
;
76 struct backend_shader
;
81 DECLARE_RALLOC_CXX_OPERATORS(bblock_t
)
83 explicit bblock_t(cfg_t
*cfg
);
85 void add_successor(void *mem_ctx
, bblock_t
*successor
,
86 enum bblock_link_kind kind
);
87 bool is_predecessor_of(const bblock_t
*block
,
88 enum bblock_link_kind kind
) const;
89 bool is_successor_of(const bblock_t
*block
,
90 enum bblock_link_kind kind
) const;
91 bool can_combine_with(const bblock_t
*that
) const;
92 void combine_with(bblock_t
*that
);
95 backend_instruction
*start();
96 const backend_instruction
*start() const;
97 backend_instruction
*end();
98 const backend_instruction
*end() const;
101 const bblock_t
*next() const;
103 const bblock_t
*prev() const;
105 bool starts_with_control_flow() const;
106 bool ends_with_control_flow() const;
108 backend_instruction
*first_non_control_flow_inst();
109 backend_instruction
*last_non_control_flow_inst();
112 struct exec_node link
;
118 struct exec_list instructions
;
119 struct exec_list parents
;
120 struct exec_list children
;
124 static inline struct backend_instruction
*
125 bblock_start(struct bblock_t
*block
)
127 return (struct backend_instruction
*)exec_list_get_head(&block
->instructions
);
130 static inline const struct backend_instruction
*
131 bblock_start_const(const struct bblock_t
*block
)
133 return (const struct backend_instruction
*)exec_list_get_head_const(&block
->instructions
);
136 static inline struct backend_instruction
*
137 bblock_end(struct bblock_t
*block
)
139 return (struct backend_instruction
*)exec_list_get_tail(&block
->instructions
);
142 static inline const struct backend_instruction
*
143 bblock_end_const(const struct bblock_t
*block
)
145 return (const struct backend_instruction
*)exec_list_get_tail_const(&block
->instructions
);
148 static inline struct bblock_t
*
149 bblock_next(struct bblock_t
*block
)
151 if (exec_node_is_tail_sentinel(block
->link
.next
))
154 return (struct bblock_t
*)block
->link
.next
;
157 static inline const struct bblock_t
*
158 bblock_next_const(const struct bblock_t
*block
)
160 if (exec_node_is_tail_sentinel(block
->link
.next
))
163 return (const struct bblock_t
*)block
->link
.next
;
166 static inline struct bblock_t
*
167 bblock_prev(struct bblock_t
*block
)
169 if (exec_node_is_head_sentinel(block
->link
.prev
))
172 return (struct bblock_t
*)block
->link
.prev
;
175 static inline const struct bblock_t
*
176 bblock_prev_const(const struct bblock_t
*block
)
178 if (exec_node_is_head_sentinel(block
->link
.prev
))
181 return (const struct bblock_t
*)block
->link
.prev
;
185 bblock_starts_with_control_flow(const struct bblock_t
*block
)
187 enum opcode op
= bblock_start_const(block
)->opcode
;
188 return op
== BRW_OPCODE_DO
|| op
== BRW_OPCODE_ENDIF
;
192 bblock_ends_with_control_flow(const struct bblock_t
*block
)
194 enum opcode op
= bblock_end_const(block
)->opcode
;
195 return op
== BRW_OPCODE_IF
||
196 op
== BRW_OPCODE_ELSE
||
197 op
== BRW_OPCODE_WHILE
||
198 op
== BRW_OPCODE_BREAK
||
199 op
== BRW_OPCODE_CONTINUE
;
202 static inline struct backend_instruction
*
203 bblock_first_non_control_flow_inst(struct bblock_t
*block
)
205 struct backend_instruction
*inst
= bblock_start(block
);
206 if (bblock_starts_with_control_flow(block
))
208 inst
= (struct backend_instruction
*)inst
->next
;
210 inst
= (struct backend_instruction
*)inst
->link
.next
;
215 static inline struct backend_instruction
*
216 bblock_last_non_control_flow_inst(struct bblock_t
*block
)
218 struct backend_instruction
*inst
= bblock_end(block
);
219 if (bblock_ends_with_control_flow(block
))
221 inst
= (struct backend_instruction
*)inst
->prev
;
223 inst
= (struct backend_instruction
*)inst
->link
.prev
;
229 inline backend_instruction
*
232 return bblock_start(this);
235 inline const backend_instruction
*
236 bblock_t::start() const
238 return bblock_start_const(this);
241 inline backend_instruction
*
244 return bblock_end(this);
247 inline const backend_instruction
*
248 bblock_t::end() const
250 return bblock_end_const(this);
256 return bblock_next(this);
259 inline const bblock_t
*
260 bblock_t::next() const
262 return bblock_next_const(this);
268 return bblock_prev(this);
271 inline const bblock_t
*
272 bblock_t::prev() const
274 return bblock_prev_const(this);
278 bblock_t::starts_with_control_flow() const
280 return bblock_starts_with_control_flow(this);
284 bblock_t::ends_with_control_flow() const
286 return bblock_ends_with_control_flow(this);
289 inline backend_instruction
*
290 bblock_t::first_non_control_flow_inst()
292 return bblock_first_non_control_flow_inst(this);
295 inline backend_instruction
*
296 bblock_t::last_non_control_flow_inst()
298 return bblock_last_non_control_flow_inst(this);
304 DECLARE_RALLOC_CXX_OPERATORS(cfg_t
)
306 cfg_t(const backend_shader
*s
, exec_list
*instructions
);
309 void remove_block(bblock_t
*block
);
311 bblock_t
*first_block();
312 const bblock_t
*first_block() const;
313 bblock_t
*last_block();
314 const bblock_t
*last_block() const;
316 bblock_t
*new_block();
317 void set_next_block(bblock_t
**cur
, bblock_t
*block
, int ip
);
318 void make_block_array();
323 const struct backend_shader
*s
;
326 /** Ordered list (by ip) of basic blocks */
327 struct exec_list block_list
;
328 struct bblock_t
**blocks
;
332 static inline struct bblock_t
*
333 cfg_first_block(struct cfg_t
*cfg
)
335 return (struct bblock_t
*)exec_list_get_head(&cfg
->block_list
);
338 static inline const struct bblock_t
*
339 cfg_first_block_const(const struct cfg_t
*cfg
)
341 return (const struct bblock_t
*)exec_list_get_head_const(&cfg
->block_list
);
344 static inline struct bblock_t
*
345 cfg_last_block(struct cfg_t
*cfg
)
347 return (struct bblock_t
*)exec_list_get_tail(&cfg
->block_list
);
350 static inline const struct bblock_t
*
351 cfg_last_block_const(const struct cfg_t
*cfg
)
353 return (const struct bblock_t
*)exec_list_get_tail_const(&cfg
->block_list
);
360 return cfg_first_block(this);
363 const inline bblock_t
*
364 cfg_t::first_block() const
366 return cfg_first_block_const(this);
372 return cfg_last_block(this);
375 const inline bblock_t
*
376 cfg_t::last_block() const
378 return cfg_last_block_const(this);
/* Note that this is implemented with a double for loop -- break will
 * break from the inner loop only!
 */
#define foreach_block_and_inst(__block, __type, __inst, __cfg) \
   foreach_block (__block, __cfg)                               \
      foreach_inst_in_block (__type, __inst, __block)
/* Note that this is implemented with a double for loop -- break will
 * break from the inner loop only!
 */
#define foreach_block_and_inst_safe(__block, __type, __inst, __cfg) \
   foreach_block_safe (__block, __cfg)                               \
      foreach_inst_in_block_safe (__type, __inst, __block)
/* Iterate the basic blocks of a CFG, forwards or in reverse; the _safe
 * variants allow removal of the current block during iteration.
 */
#define foreach_block(__block, __cfg) \
   foreach_list_typed (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_block_reverse(__block, __cfg) \
   foreach_list_typed_reverse (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_block_safe(__block, __cfg) \
   foreach_list_typed_safe (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_block_reverse_safe(__block, __cfg) \
   foreach_list_typed_reverse_safe (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_inst_in_block(__type, __inst, __block) \
   foreach_in_list(__type, __inst, &(__block)->instructions)
/* Safe variant: caches the next pointer up front so the current instruction
 * may be removed while iterating.  NOTE(review): the loop condition and
 * increment lines were reconstructed -- confirm against upstream.
 */
#define foreach_inst_in_block_safe(__type, __inst, __block) \
   for (__type *__inst = (__type *)__block->instructions.head_sentinel.next, \
               *__next = (__type *)__inst->next;             \
        __next != NULL;                                      \
        __inst = __next,                                     \
        __next = (__type *)__next->next)
#define foreach_inst_in_block_reverse(__type, __inst, __block) \
   foreach_in_list_reverse(__type, __inst, &(__block)->instructions)

#define foreach_inst_in_block_reverse_safe(__type, __inst, __block) \
   foreach_in_list_reverse_safe(__type, __inst, &(__block)->instructions)

/* Iterate the instructions following (resp. preceding) __inst within its
 * block; __inst itself is not visited.
 */
#define foreach_inst_in_block_starting_from(__type, __scan_inst, __inst) \
   for (__type *__scan_inst = (__type *)__inst->next;   \
        !__scan_inst->is_tail_sentinel();               \
        __scan_inst = (__type *)__scan_inst->next)

#define foreach_inst_in_block_reverse_starting_from(__type, __scan_inst, __inst) \
   for (__type *__scan_inst = (__type *)__inst->prev;   \
        !__scan_inst->is_head_sentinel();               \
        __scan_inst = (__type *)__scan_inst->prev)
437 * Immediate dominator tree analysis of a shader.
440 idom_tree(const backend_shader
*s
);
444 validate(const backend_shader
*) const
450 analysis_dependency_class
451 dependency_class() const
453 return DEPENDENCY_BLOCKS
;
457 parent(const bblock_t
*b
) const
459 assert(unsigned(b
->num
) < num_parents
);
460 return parents
[b
->num
];
464 parent(bblock_t
*b
) const
466 assert(unsigned(b
->num
) < num_parents
);
467 return parents
[b
->num
];
471 intersect(bblock_t
*b1
, bblock_t
*b2
) const;
477 unsigned num_parents
;
483 #endif /* BRW_CFG_H */