/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */
#include <stddef.h>

#include "brw_shader.h"
/**
 * A logical edge represents a potential control flow path of the original
 * scalar program, while a physical edge represents a control flow path that
 * may not have existed in the original program but was introduced during
 * vectorization in order to implement divergent control flow of different
 * shader invocations within the same SIMD thread.
 *
 * All logical edges in the CFG are considered to be physical edges but not
 * the other way around -- I.e. the logical CFG is a subset of the physical
 * CFG.
 */
/* Kind of a CFG edge.  bblock_link_logical edges existed in the original
 * scalar program; bblock_link_physical additionally covers edges introduced
 * by vectorization.  The ordering matters: a logical edge is also a physical
 * edge, so membership of edge 'l' in CFG kind 'k' is tested as 'l.kind <= k'
 * (see the comment on struct bblock_link below).
 */
enum bblock_link_kind
{
   bblock_link_logical = 0,
   bblock_link_physical
};
55 DECLARE_RALLOC_CXX_OPERATORS(bblock_link
)
57 bblock_link(bblock_t
*block
, enum bblock_link_kind kind
)
58 : block(block
), kind(kind
)
63 struct exec_node link
;
64 struct bblock_t
*block
;
66 /* Type of this CFG edge. Because bblock_link_logical also implies
67 * bblock_link_physical, the proper way to test for membership of edge 'l'
68 * in CFG kind 'k' is 'l.kind <= k'.
70 enum bblock_link_kind kind
;
73 struct backend_instruction
;
77 DECLARE_RALLOC_CXX_OPERATORS(bblock_t
)
79 explicit bblock_t(cfg_t
*cfg
);
81 void add_successor(void *mem_ctx
, bblock_t
*successor
,
82 enum bblock_link_kind kind
);
83 bool is_predecessor_of(const bblock_t
*block
,
84 enum bblock_link_kind kind
) const;
85 bool is_successor_of(const bblock_t
*block
,
86 enum bblock_link_kind kind
) const;
87 bool can_combine_with(const bblock_t
*that
) const;
88 void combine_with(bblock_t
*that
);
89 void dump(backend_shader
*s
) const;
91 backend_instruction
*start();
92 const backend_instruction
*start() const;
93 backend_instruction
*end();
94 const backend_instruction
*end() const;
97 const bblock_t
*next() const;
99 const bblock_t
*prev() const;
101 bool starts_with_control_flow() const;
102 bool ends_with_control_flow() const;
104 backend_instruction
*first_non_control_flow_inst();
105 backend_instruction
*last_non_control_flow_inst();
108 struct exec_node link
;
110 struct bblock_t
*idom
;
115 struct exec_list instructions
;
116 struct exec_list parents
;
117 struct exec_list children
;
120 unsigned cycle_count
;
123 static inline struct backend_instruction
*
124 bblock_start(struct bblock_t
*block
)
126 return (struct backend_instruction
*)exec_list_get_head(&block
->instructions
);
129 static inline const struct backend_instruction
*
130 bblock_start_const(const struct bblock_t
*block
)
132 return (const struct backend_instruction
*)exec_list_get_head_const(&block
->instructions
);
135 static inline struct backend_instruction
*
136 bblock_end(struct bblock_t
*block
)
138 return (struct backend_instruction
*)exec_list_get_tail(&block
->instructions
);
141 static inline const struct backend_instruction
*
142 bblock_end_const(const struct bblock_t
*block
)
144 return (const struct backend_instruction
*)exec_list_get_tail_const(&block
->instructions
);
147 static inline struct bblock_t
*
148 bblock_next(struct bblock_t
*block
)
150 if (exec_node_is_tail_sentinel(block
->link
.next
))
153 return (struct bblock_t
*)block
->link
.next
;
156 static inline const struct bblock_t
*
157 bblock_next_const(const struct bblock_t
*block
)
159 if (exec_node_is_tail_sentinel(block
->link
.next
))
162 return (const struct bblock_t
*)block
->link
.next
;
165 static inline struct bblock_t
*
166 bblock_prev(struct bblock_t
*block
)
168 if (exec_node_is_head_sentinel(block
->link
.prev
))
171 return (struct bblock_t
*)block
->link
.prev
;
174 static inline const struct bblock_t
*
175 bblock_prev_const(const struct bblock_t
*block
)
177 if (exec_node_is_head_sentinel(block
->link
.prev
))
180 return (const struct bblock_t
*)block
->link
.prev
;
184 bblock_starts_with_control_flow(const struct bblock_t
*block
)
186 enum opcode op
= bblock_start_const(block
)->opcode
;
187 return op
== BRW_OPCODE_DO
|| op
== BRW_OPCODE_ENDIF
;
191 bblock_ends_with_control_flow(const struct bblock_t
*block
)
193 enum opcode op
= bblock_end_const(block
)->opcode
;
194 return op
== BRW_OPCODE_IF
||
195 op
== BRW_OPCODE_ELSE
||
196 op
== BRW_OPCODE_WHILE
||
197 op
== BRW_OPCODE_BREAK
||
198 op
== BRW_OPCODE_CONTINUE
;
201 static inline struct backend_instruction
*
202 bblock_first_non_control_flow_inst(struct bblock_t
*block
)
204 struct backend_instruction
*inst
= bblock_start(block
);
205 if (bblock_starts_with_control_flow(block
))
207 inst
= (struct backend_instruction
*)inst
->next
;
209 inst
= (struct backend_instruction
*)inst
->link
.next
;
214 static inline struct backend_instruction
*
215 bblock_last_non_control_flow_inst(struct bblock_t
*block
)
217 struct backend_instruction
*inst
= bblock_end(block
);
218 if (bblock_ends_with_control_flow(block
))
220 inst
= (struct backend_instruction
*)inst
->prev
;
222 inst
= (struct backend_instruction
*)inst
->link
.prev
;
228 inline backend_instruction
*
231 return bblock_start(this);
234 inline const backend_instruction
*
235 bblock_t::start() const
237 return bblock_start_const(this);
240 inline backend_instruction
*
243 return bblock_end(this);
246 inline const backend_instruction
*
247 bblock_t::end() const
249 return bblock_end_const(this);
255 return bblock_next(this);
258 inline const bblock_t
*
259 bblock_t::next() const
261 return bblock_next_const(this);
267 return bblock_prev(this);
270 inline const bblock_t
*
271 bblock_t::prev() const
273 return bblock_prev_const(this);
277 bblock_t::starts_with_control_flow() const
279 return bblock_starts_with_control_flow(this);
283 bblock_t::ends_with_control_flow() const
285 return bblock_ends_with_control_flow(this);
288 inline backend_instruction
*
289 bblock_t::first_non_control_flow_inst()
291 return bblock_first_non_control_flow_inst(this);
294 inline backend_instruction
*
295 bblock_t::last_non_control_flow_inst()
297 return bblock_last_non_control_flow_inst(this);
303 DECLARE_RALLOC_CXX_OPERATORS(cfg_t
)
305 cfg_t(exec_list
*instructions
);
308 void remove_block(bblock_t
*block
);
310 bblock_t
*new_block();
311 void set_next_block(bblock_t
**cur
, bblock_t
*block
, int ip
);
312 void make_block_array();
313 void calculate_idom();
314 static bblock_t
*intersect(bblock_t
*b1
, bblock_t
*b2
);
316 void dump(backend_shader
*s
);
322 /** Ordered list (by ip) of basic blocks */
323 struct exec_list block_list
;
324 struct bblock_t
**blocks
;
329 unsigned cycle_count
;
/* Note that this is implemented with a double for loop -- break will
 * break from the inner loop only!
 */
#define foreach_block_and_inst(__block, __type, __inst, __cfg) \
   foreach_block (__block, __cfg)                              \
      foreach_inst_in_block (__type, __inst, __block)
/* Note that this is implemented with a double for loop -- break will
 * break from the inner loop only!
 */
#define foreach_block_and_inst_safe(__block, __type, __inst, __cfg) \
   foreach_block_safe (__block, __cfg)                              \
      foreach_inst_in_block_safe (__type, __inst, __block)
/* Iterate the CFG's blocks in (reverse) ip order; the _safe variants allow
 * removal of the current block during iteration.
 */
#define foreach_block(__block, __cfg) \
   foreach_list_typed (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_block_reverse(__block, __cfg) \
   foreach_list_typed_reverse (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_block_safe(__block, __cfg) \
   foreach_list_typed_safe (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_block_reverse_safe(__block, __cfg) \
   foreach_list_typed_reverse_safe (bblock_t, __block, link, &(__cfg)->block_list)
/* Iterate the instructions of a single block in order. */
#define foreach_inst_in_block(__type, __inst, __block) \
   foreach_in_list(__type, __inst, &(__block)->instructions)
/* Iterate the instructions of a block while allowing removal of the current
 * instruction: the next pointer is read before the body runs, and the loop
 * stops once __next hits the NULL past the tail sentinel.
 */
#define foreach_inst_in_block_safe(__type, __inst, __block)                   \
   for (__type *__inst = (__type *)__block->instructions.head_sentinel.next, \
               *__next = (__type *)__inst->next;                              \
        __next != NULL;                                                       \
        __inst = __next,                                                      \
        __next = (__type *)__next->next)
/* Iterate the instructions of a block in reverse order; the _safe variant
 * allows removal of the current instruction.
 */
#define foreach_inst_in_block_reverse(__type, __inst, __block) \
   foreach_in_list_reverse(__type, __inst, &(__block)->instructions)

#define foreach_inst_in_block_reverse_safe(__type, __inst, __block) \
   foreach_in_list_reverse_safe(__type, __inst, &(__block)->instructions)
374 #define foreach_inst_in_block_starting_from(__type, __scan_inst, __inst) \
375 for (__type *__scan_inst = (__type *)__inst->next; \
376 !__scan_inst->is_tail_sentinel(); \
377 __scan_inst = (__type *)__scan_inst->next)
379 #define foreach_inst_in_block_reverse_starting_from(__type, __scan_inst, __inst) \
380 for (__type *__scan_inst = (__type *)__inst->prev; \
381 !__scan_inst->is_head_sentinel(); \
382 __scan_inst = (__type *)__scan_inst->prev)
384 #endif /* BRW_CFG_H */