intel/eu/gen12: Set SWSB annotations in hand-crafted assembly.
src/intel/compiler/brw_cfg.h
/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#ifndef BRW_CFG_H
#define BRW_CFG_H

#include "brw_shader.h"

struct bblock_t;
/**
 * CFG edge types.
 *
 * A logical edge represents a potential control flow path of the original
 * scalar program, while a physical edge represents a control flow path that
 * may not have existed in the original program but was introduced during
 * vectorization in order to implement divergent control flow of different
 * shader invocations within the same SIMD thread.
 *
 * Every logical edge in the CFG is also considered a physical edge, but not
 * the other way around -- i.e., the logical CFG is a subset of the physical
 * one.
 */
enum bblock_link_kind {
   bblock_link_logical = 0,
   bblock_link_physical
};

struct bblock_link {
#ifdef __cplusplus
   DECLARE_RALLOC_CXX_OPERATORS(bblock_link)

   bblock_link(bblock_t *block, enum bblock_link_kind kind)
      : block(block), kind(kind)
   {
   }
#endif

   struct exec_node link;
   struct bblock_t *block;

   /* Type of this CFG edge.  Because bblock_link_logical also implies
    * bblock_link_physical, the proper way to test for membership of edge
    * 'l' in CFG kind 'k' is 'l.kind <= k'.
    */
   enum bblock_link_kind kind;
};
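/* Usage sketch (not part of this header): walking only the *logical*
 * successors of a block 'block' with the 'l.kind <= k' membership test
 * described above.  The visit() callback is hypothetical.
 *
 *    foreach_list_typed (bblock_link, child, link, &block->children) {
 *       if (child->kind <= bblock_link_logical)
 *          visit(child->block);
 *    }
 */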
struct backend_instruction;

struct bblock_t {
#ifdef __cplusplus
   DECLARE_RALLOC_CXX_OPERATORS(bblock_t)

   explicit bblock_t(cfg_t *cfg);

   void add_successor(void *mem_ctx, bblock_t *successor,
                      enum bblock_link_kind kind);
   bool is_predecessor_of(const bblock_t *block,
                          enum bblock_link_kind kind) const;
   bool is_successor_of(const bblock_t *block,
                        enum bblock_link_kind kind) const;
   bool can_combine_with(const bblock_t *that) const;
   void combine_with(bblock_t *that);
   void dump(backend_shader *s) const;

   backend_instruction *start();
   const backend_instruction *start() const;
   backend_instruction *end();
   const backend_instruction *end() const;

   bblock_t *next();
   const bblock_t *next() const;
   bblock_t *prev();
   const bblock_t *prev() const;

   bool starts_with_control_flow() const;
   bool ends_with_control_flow() const;

   backend_instruction *first_non_control_flow_inst();
   backend_instruction *last_non_control_flow_inst();
#endif
   struct exec_node link;
   struct cfg_t *cfg;
   /** Immediate dominator of this block, computed by cfg_t::calculate_idom(). */
   struct bblock_t *idom;

   /** IPs (program-order instruction positions) of the first and last
    * instruction in the block.
    */
   int start_ip;
   int end_ip;

   struct exec_list instructions;
   /** Incoming CFG edges, as a list of bblock_link. */
   struct exec_list parents;
   /** Outgoing CFG edges, as a list of bblock_link. */
   struct exec_list children;
   /** Index of this block in cfg_t::blocks. */
   int num;

   /** Estimated execution time of this block, in cycles. */
   unsigned cycle_count;
};
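/* Sketch (hypothetical helper, not part of this header): because start_ip
 * and end_ip bracket the block's instructions in program order, the block
 * containing a given ip can be found with a linear scan:
 *
 *    static inline struct bblock_t *
 *    block_for_ip(struct cfg_t *cfg, int ip)
 *    {
 *       for (int i = 0; i < cfg->num_blocks; i++) {
 *          struct bblock_t *block = cfg->blocks[i];
 *          if (ip >= block->start_ip && ip <= block->end_ip)
 *             return block;
 *       }
 *       return NULL;
 *    }
 */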
static inline struct backend_instruction *
bblock_start(struct bblock_t *block)
{
   return (struct backend_instruction *)exec_list_get_head(&block->instructions);
}

static inline const struct backend_instruction *
bblock_start_const(const struct bblock_t *block)
{
   return (const struct backend_instruction *)exec_list_get_head_const(&block->instructions);
}

static inline struct backend_instruction *
bblock_end(struct bblock_t *block)
{
   return (struct backend_instruction *)exec_list_get_tail(&block->instructions);
}

static inline const struct backend_instruction *
bblock_end_const(const struct bblock_t *block)
{
   return (const struct backend_instruction *)exec_list_get_tail_const(&block->instructions);
}

static inline struct bblock_t *
bblock_next(struct bblock_t *block)
{
   if (exec_node_is_tail_sentinel(block->link.next))
      return NULL;

   return (struct bblock_t *)block->link.next;
}

static inline const struct bblock_t *
bblock_next_const(const struct bblock_t *block)
{
   if (exec_node_is_tail_sentinel(block->link.next))
      return NULL;

   return (const struct bblock_t *)block->link.next;
}

static inline struct bblock_t *
bblock_prev(struct bblock_t *block)
{
   if (exec_node_is_head_sentinel(block->link.prev))
      return NULL;

   return (struct bblock_t *)block->link.prev;
}

static inline const struct bblock_t *
bblock_prev_const(const struct bblock_t *block)
{
   if (exec_node_is_head_sentinel(block->link.prev))
      return NULL;

   return (const struct bblock_t *)block->link.prev;
}

static inline bool
bblock_starts_with_control_flow(const struct bblock_t *block)
{
   enum opcode op = bblock_start_const(block)->opcode;
   return op == BRW_OPCODE_DO || op == BRW_OPCODE_ENDIF;
}

static inline bool
bblock_ends_with_control_flow(const struct bblock_t *block)
{
   enum opcode op = bblock_end_const(block)->opcode;
   return op == BRW_OPCODE_IF ||
          op == BRW_OPCODE_ELSE ||
          op == BRW_OPCODE_WHILE ||
          op == BRW_OPCODE_BREAK ||
          op == BRW_OPCODE_CONTINUE;
}
static inline struct backend_instruction *
bblock_first_non_control_flow_inst(struct bblock_t *block)
{
   struct backend_instruction *inst = bblock_start(block);
   if (bblock_starts_with_control_flow(block))
#ifdef __cplusplus
      inst = (struct backend_instruction *)inst->next;
#else
      inst = (struct backend_instruction *)inst->link.next;
#endif
   return inst;
}

static inline struct backend_instruction *
bblock_last_non_control_flow_inst(struct bblock_t *block)
{
   struct backend_instruction *inst = bblock_end(block);
   if (bblock_ends_with_control_flow(block))
#ifdef __cplusplus
      inst = (struct backend_instruction *)inst->prev;
#else
      inst = (struct backend_instruction *)inst->link.prev;
#endif
   return inst;
}
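/* Usage sketch: passes that append code to a block must keep an
 * IF/ELSE/WHILE/BREAK/CONTINUE terminator last, so they insert after the
 * last non-control-flow instruction rather than after bblock_end().  The
 * emit_after() helper here is hypothetical:
 *
 *    backend_instruction *tail = bblock_last_non_control_flow_inst(block);
 *    emit_after(block, tail, new_inst);
 */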
#ifdef __cplusplus
inline backend_instruction *
bblock_t::start()
{
   return bblock_start(this);
}

inline const backend_instruction *
bblock_t::start() const
{
   return bblock_start_const(this);
}

inline backend_instruction *
bblock_t::end()
{
   return bblock_end(this);
}

inline const backend_instruction *
bblock_t::end() const
{
   return bblock_end_const(this);
}

inline bblock_t *
bblock_t::next()
{
   return bblock_next(this);
}

inline const bblock_t *
bblock_t::next() const
{
   return bblock_next_const(this);
}

inline bblock_t *
bblock_t::prev()
{
   return bblock_prev(this);
}

inline const bblock_t *
bblock_t::prev() const
{
   return bblock_prev_const(this);
}

inline bool
bblock_t::starts_with_control_flow() const
{
   return bblock_starts_with_control_flow(this);
}

inline bool
bblock_t::ends_with_control_flow() const
{
   return bblock_ends_with_control_flow(this);
}

inline backend_instruction *
bblock_t::first_non_control_flow_inst()
{
   return bblock_first_non_control_flow_inst(this);
}

inline backend_instruction *
bblock_t::last_non_control_flow_inst()
{
   return bblock_last_non_control_flow_inst(this);
}
#endif
struct cfg_t {
#ifdef __cplusplus
   DECLARE_RALLOC_CXX_OPERATORS(cfg_t)

   cfg_t(exec_list *instructions);
   ~cfg_t();

   void remove_block(bblock_t *block);

   bblock_t *new_block();
   void set_next_block(bblock_t **cur, bblock_t *block, int ip);
   void make_block_array();
   void calculate_idom();
   static bblock_t *intersect(bblock_t *b1, bblock_t *b2);

   void dump(backend_shader *s);
   void dump_cfg();
   void dump_domtree();
#endif
   void *mem_ctx;

   /** Ordered list (by ip) of basic blocks */
   struct exec_list block_list;
   /** Array of blocks indexed by bblock_t::num, built by make_block_array(). */
   struct bblock_t **blocks;
   int num_blocks;

   /** True if the idom information is stale and calculate_idom() must be
    * called again before it is used.
    */
   bool idom_dirty;

   unsigned cycle_count;
};
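/* Typical usage (a sketch; assumes a backend_shader 's' whose instruction
 * list has already been emitted, as in the scalar backend), using the
 * foreach_block macro defined below:
 *
 *    cfg_t *cfg = new(mem_ctx) cfg_t(&s->instructions);
 *    foreach_block (block, cfg) {
 *       printf("block %d: IPs [%d, %d]\n",
 *              block->num, block->start_ip, block->end_ip);
 *    }
 */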
/* Note that this is implemented with a double for loop -- break will
 * break from the inner loop only!
 */
#define foreach_block_and_inst(__block, __type, __inst, __cfg) \
   foreach_block (__block, __cfg)                              \
      foreach_inst_in_block (__type, __inst, __block)

/* Note that this is implemented with a double for loop -- break will
 * break from the inner loop only!
 */
#define foreach_block_and_inst_safe(__block, __type, __inst, __cfg) \
   foreach_block_safe (__block, __cfg)                              \
      foreach_inst_in_block_safe (__type, __inst, __block)
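/* Example (sketch, given a 'cfg_t *cfg'): visiting every instruction in the
 * program.  Per the caveat above, 'break' in the body only leaves the inner
 * per-block loop; to stop the whole walk, use a flag or goto:
 *
 *    foreach_block_and_inst (block, backend_instruction, inst, cfg) {
 *       if (inst->opcode == BRW_OPCODE_WHILE)
 *          goto done;
 *    }
 *    done: ;
 */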
#define foreach_block(__block, __cfg) \
   foreach_list_typed (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_block_reverse(__block, __cfg) \
   foreach_list_typed_reverse (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_block_safe(__block, __cfg) \
   foreach_list_typed_safe (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_block_reverse_safe(__block, __cfg) \
   foreach_list_typed_reverse_safe (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_inst_in_block(__type, __inst, __block) \
   foreach_in_list(__type, __inst, &(__block)->instructions)

#define foreach_inst_in_block_safe(__type, __inst, __block)                  \
   for (__type *__inst = (__type *)__block->instructions.head_sentinel.next, \
               *__next = (__type *)__inst->next;                             \
        __next != NULL;                                                      \
        __inst = __next,                                                     \
        __next = (__type *)__next->next)
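/* Example (sketch): the _safe variant caches the next pointer, so the
 * current instruction may be removed during iteration -- e.g. a trivial
 * dead-code sweep (is_dead() is a hypothetical predicate):
 *
 *    foreach_inst_in_block_safe (backend_instruction, inst, block) {
 *       if (is_dead(inst))
 *          inst->remove(block);
 *    }
 */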
#define foreach_inst_in_block_reverse(__type, __inst, __block) \
   foreach_in_list_reverse(__type, __inst, &(__block)->instructions)

#define foreach_inst_in_block_reverse_safe(__type, __inst, __block) \
   foreach_in_list_reverse_safe(__type, __inst, &(__block)->instructions)

#define foreach_inst_in_block_starting_from(__type, __scan_inst, __inst) \
   for (__type *__scan_inst = (__type *)__inst->next;                    \
        !__scan_inst->is_tail_sentinel();                                \
        __scan_inst = (__type *)__scan_inst->next)

#define foreach_inst_in_block_reverse_starting_from(__type, __scan_inst, __inst) \
   for (__type *__scan_inst = (__type *)__inst->prev;                            \
        !__scan_inst->is_head_sentinel();                                        \
        __scan_inst = (__type *)__scan_inst->prev)
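/* Example (sketch): scanning forward from 'inst' to the end of its block,
 * e.g. to check whether a register is read again before the block ends
 * (reads_reg() is a hypothetical predicate):
 *
 *    foreach_inst_in_block_starting_from (backend_instruction, scan, inst) {
 *       if (reads_reg(scan, reg))
 *          return true;
 *    }
 */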

#endif /* BRW_CFG_H */