/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#ifndef BRW_CFG_H
#define BRW_CFG_H

#include "brw_ir.h"
#ifdef __cplusplus
#include "brw_ir_analysis.h"
#endif

struct bblock_t;

/**
 * CFG edge types.
 *
 * A logical edge represents a potential control flow path of the original
 * scalar program, while a physical edge represents a control flow path that
 * may not have existed in the original program but was introduced during
 * vectorization in order to implement divergent control flow of different
 * shader invocations within the same SIMD thread.
 *
 * All logical edges in the CFG are considered to be physical edges but not
 * the other way around -- i.e. the logical CFG is a subset of the physical
 * one.
 */
enum bblock_link_kind {
   bblock_link_logical = 0,
   bblock_link_physical
};

struct bblock_link {
#ifdef __cplusplus
   DECLARE_RALLOC_CXX_OPERATORS(bblock_link)

   bblock_link(bblock_t *block, enum bblock_link_kind kind)
      : block(block), kind(kind)
   {
   }
#endif

   struct exec_node link;
   struct bblock_t *block;

   /* Type of this CFG edge.  Because bblock_link_logical also implies
    * bblock_link_physical, the proper way to test for membership of edge 'l'
    * in CFG kind 'k' is 'l.kind <= k'.
    */
   enum bblock_link_kind kind;
};
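
/* Minimal usage sketch of the membership test above: a pass that only cares
 * about the logical CFG can filter a block's outgoing edges like so ('block'
 * is assumed to be a bblock_t; the loop body is elided):
 *
 *    foreach_list_typed (bblock_link, child, link, &block->children) {
 *       if (child->kind <= bblock_link_logical) {
 *          // child->block is a successor in the original scalar program
 *       }
 *    }
 */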

struct backend_shader;
struct cfg_t;

struct bblock_t {
#ifdef __cplusplus
   DECLARE_RALLOC_CXX_OPERATORS(bblock_t)

   explicit bblock_t(cfg_t *cfg);

   void add_successor(void *mem_ctx, bblock_t *successor,
                      enum bblock_link_kind kind);
   bool is_predecessor_of(const bblock_t *block,
                          enum bblock_link_kind kind) const;
   bool is_successor_of(const bblock_t *block,
                        enum bblock_link_kind kind) const;
   bool can_combine_with(const bblock_t *that) const;
   void combine_with(bblock_t *that);
   void dump() const;

   backend_instruction *start();
   const backend_instruction *start() const;
   backend_instruction *end();
   const backend_instruction *end() const;

   bblock_t *next();
   const bblock_t *next() const;
   bblock_t *prev();
   const bblock_t *prev() const;

   bool starts_with_control_flow() const;
   bool ends_with_control_flow() const;

   backend_instruction *first_non_control_flow_inst();
   backend_instruction *last_non_control_flow_inst();
#endif

   struct exec_node link;
   struct cfg_t *cfg;

   /* Instruction indices (IPs) of the first and last instructions in the
    * block, i.e. the block's position within the program.
    */
   int start_ip;
   int end_ip;

   struct exec_list instructions;
   struct exec_list parents;   /**< List of bblock_link to predecessor blocks. */
   struct exec_list children;  /**< List of bblock_link to successor blocks. */
   int num;                    /**< Index of this block in cfg_t::blocks. */
};
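
/* Sketch of wiring up an edge during CFG construction ('cur' and 'next_block'
 * are assumed to be bblock_t pointers and 'mem_ctx' the CFG's ralloc
 * context):
 *
 *    cur->add_successor(mem_ctx, next_block, bblock_link_logical);
 */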

static inline struct backend_instruction *
bblock_start(struct bblock_t *block)
{
   return (struct backend_instruction *)exec_list_get_head(&block->instructions);
}

static inline const struct backend_instruction *
bblock_start_const(const struct bblock_t *block)
{
   return (const struct backend_instruction *)exec_list_get_head_const(&block->instructions);
}

static inline struct backend_instruction *
bblock_end(struct bblock_t *block)
{
   return (struct backend_instruction *)exec_list_get_tail(&block->instructions);
}

static inline const struct backend_instruction *
bblock_end_const(const struct bblock_t *block)
{
   return (const struct backend_instruction *)exec_list_get_tail_const(&block->instructions);
}

static inline struct bblock_t *
bblock_next(struct bblock_t *block)
{
   if (exec_node_is_tail_sentinel(block->link.next))
      return NULL;

   return (struct bblock_t *)block->link.next;
}

static inline const struct bblock_t *
bblock_next_const(const struct bblock_t *block)
{
   if (exec_node_is_tail_sentinel(block->link.next))
      return NULL;

   return (const struct bblock_t *)block->link.next;
}

static inline struct bblock_t *
bblock_prev(struct bblock_t *block)
{
   if (exec_node_is_head_sentinel(block->link.prev))
      return NULL;

   return (struct bblock_t *)block->link.prev;
}

static inline const struct bblock_t *
bblock_prev_const(const struct bblock_t *block)
{
   if (exec_node_is_head_sentinel(block->link.prev))
      return NULL;

   return (const struct bblock_t *)block->link.prev;
}

static inline bool
bblock_starts_with_control_flow(const struct bblock_t *block)
{
   enum opcode op = bblock_start_const(block)->opcode;
   return op == BRW_OPCODE_DO || op == BRW_OPCODE_ENDIF;
}

static inline bool
bblock_ends_with_control_flow(const struct bblock_t *block)
{
   enum opcode op = bblock_end_const(block)->opcode;
   return op == BRW_OPCODE_IF ||
          op == BRW_OPCODE_ELSE ||
          op == BRW_OPCODE_WHILE ||
          op == BRW_OPCODE_BREAK ||
          op == BRW_OPCODE_CONTINUE;
}

static inline struct backend_instruction *
bblock_first_non_control_flow_inst(struct bblock_t *block)
{
   struct backend_instruction *inst = bblock_start(block);
   if (bblock_starts_with_control_flow(block))
#ifdef __cplusplus
      inst = (struct backend_instruction *)inst->next;
#else
      inst = (struct backend_instruction *)inst->link.next;
#endif
   return inst;
}

static inline struct backend_instruction *
bblock_last_non_control_flow_inst(struct bblock_t *block)
{
   struct backend_instruction *inst = bblock_end(block);
   if (bblock_ends_with_control_flow(block))
#ifdef __cplusplus
      inst = (struct backend_instruction *)inst->prev;
#else
      inst = (struct backend_instruction *)inst->link.prev;
#endif
   return inst;
}

#ifdef __cplusplus
inline backend_instruction *
bblock_t::start()
{
   return bblock_start(this);
}

inline const backend_instruction *
bblock_t::start() const
{
   return bblock_start_const(this);
}

inline backend_instruction *
bblock_t::end()
{
   return bblock_end(this);
}

inline const backend_instruction *
bblock_t::end() const
{
   return bblock_end_const(this);
}

inline bblock_t *
bblock_t::next()
{
   return bblock_next(this);
}

inline const bblock_t *
bblock_t::next() const
{
   return bblock_next_const(this);
}

inline bblock_t *
bblock_t::prev()
{
   return bblock_prev(this);
}

inline const bblock_t *
bblock_t::prev() const
{
   return bblock_prev_const(this);
}

inline bool
bblock_t::starts_with_control_flow() const
{
   return bblock_starts_with_control_flow(this);
}

inline bool
bblock_t::ends_with_control_flow() const
{
   return bblock_ends_with_control_flow(this);
}

inline backend_instruction *
bblock_t::first_non_control_flow_inst()
{
   return bblock_first_non_control_flow_inst(this);
}

inline backend_instruction *
bblock_t::last_non_control_flow_inst()
{
   return bblock_last_non_control_flow_inst(this);
}
#endif

struct cfg_t {
#ifdef __cplusplus
   DECLARE_RALLOC_CXX_OPERATORS(cfg_t)

   cfg_t(const backend_shader *s, exec_list *instructions);
   ~cfg_t();

   void remove_block(bblock_t *block);

   bblock_t *first_block();
   const bblock_t *first_block() const;
   bblock_t *last_block();
   const bblock_t *last_block() const;

   bblock_t *new_block();
   void set_next_block(bblock_t **cur, bblock_t *block, int ip);
   void make_block_array();

   void dump();
   void dump_cfg();
#endif
   const struct backend_shader *s;
   void *mem_ctx;

   /** Ordered list (by ip) of basic blocks */
   struct exec_list block_list;

   /* Array of all blocks indexed by bblock_t::num, (re)built by
    * make_block_array().
    */
   struct bblock_t **blocks;
   int num_blocks;
};
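
/* Hypothetical sketch: once make_block_array() has been called, blocks can be
 * looked up by index in constant time:
 *
 *    cfg->make_block_array();
 *    bblock_t *b = cfg->blocks[i];   // block whose bblock_t::num == i
 */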

static inline struct bblock_t *
cfg_first_block(struct cfg_t *cfg)
{
   return (struct bblock_t *)exec_list_get_head(&cfg->block_list);
}

static inline const struct bblock_t *
cfg_first_block_const(const struct cfg_t *cfg)
{
   return (const struct bblock_t *)exec_list_get_head_const(&cfg->block_list);
}

static inline struct bblock_t *
cfg_last_block(struct cfg_t *cfg)
{
   return (struct bblock_t *)exec_list_get_tail(&cfg->block_list);
}

static inline const struct bblock_t *
cfg_last_block_const(const struct cfg_t *cfg)
{
   return (const struct bblock_t *)exec_list_get_tail_const(&cfg->block_list);
}

#ifdef __cplusplus
inline bblock_t *
cfg_t::first_block()
{
   return cfg_first_block(this);
}

inline const bblock_t *
cfg_t::first_block() const
{
   return cfg_first_block_const(this);
}

inline bblock_t *
cfg_t::last_block()
{
   return cfg_last_block(this);
}

inline const bblock_t *
cfg_t::last_block() const
{
   return cfg_last_block_const(this);
}
#endif

/* Note that this is implemented with a double for loop -- break will
 * break from the inner loop only!
 */
#define foreach_block_and_inst(__block, __type, __inst, __cfg) \
   foreach_block (__block, __cfg)                              \
      foreach_inst_in_block (__type, __inst, __block)

/* Note that this is implemented with a double for loop -- break will
 * break from the inner loop only!
 */
#define foreach_block_and_inst_safe(__block, __type, __inst, __cfg) \
   foreach_block_safe (__block, __cfg)                              \
      foreach_inst_in_block_safe (__type, __inst, __block)
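
/* Sketch of exiting the whole two-level traversal early (is_interesting() is
 * a hypothetical predicate); a plain break would only leave the inner
 * per-block loop:
 *
 *    foreach_block_and_inst(block, backend_instruction, inst, cfg) {
 *       if (is_interesting(inst))
 *          goto done;
 *    }
 *    done:
 *       ...
 */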

#define foreach_block(__block, __cfg) \
   foreach_list_typed (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_block_reverse(__block, __cfg) \
   foreach_list_typed_reverse (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_block_safe(__block, __cfg) \
   foreach_list_typed_safe (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_block_reverse_safe(__block, __cfg) \
   foreach_list_typed_reverse_safe (bblock_t, __block, link, &(__cfg)->block_list)

#define foreach_inst_in_block(__type, __inst, __block) \
   foreach_in_list(__type, __inst, &(__block)->instructions)

#define foreach_inst_in_block_safe(__type, __inst, __block)                  \
   for (__type *__inst = (__type *)__block->instructions.head_sentinel.next, \
               *__next = (__type *)__inst->next;                             \
        __next != NULL;                                                      \
        __inst = __next,                                                     \
        __next = (__type *)__next->next)

#define foreach_inst_in_block_reverse(__type, __inst, __block) \
   foreach_in_list_reverse(__type, __inst, &(__block)->instructions)

#define foreach_inst_in_block_reverse_safe(__type, __inst, __block) \
   foreach_in_list_reverse_safe(__type, __inst, &(__block)->instructions)

#define foreach_inst_in_block_starting_from(__type, __scan_inst, __inst) \
   for (__type *__scan_inst = (__type *)__inst->next;                    \
        !__scan_inst->is_tail_sentinel();                                \
        __scan_inst = (__type *)__scan_inst->next)

#define foreach_inst_in_block_reverse_starting_from(__type, __scan_inst, __inst) \
   for (__type *__scan_inst = (__type *)__inst->prev;                            \
        !__scan_inst->is_head_sentinel();                                        \
        __scan_inst = (__type *)__scan_inst->prev)

#ifdef __cplusplus
namespace brw {
   /**
    * Immediate dominator tree analysis of a shader.
    */
   struct idom_tree {
      idom_tree(const backend_shader *s);
      ~idom_tree();

      bool
      validate(const backend_shader *) const
      {
         /* FINISHME */
         return true;
      }

      analysis_dependency_class
      dependency_class() const
      {
         return DEPENDENCY_BLOCKS;
      }

      const bblock_t *
      parent(const bblock_t *b) const
      {
         assert(unsigned(b->num) < num_parents);
         return parents[b->num];
      }

      bblock_t *
      parent(bblock_t *b) const
      {
         assert(unsigned(b->num) < num_parents);
         return parents[b->num];
      }

      bblock_t *
      intersect(bblock_t *b1, bblock_t *b2) const;

      void
      dump() const;

   private:
      unsigned num_parents;
      bblock_t **parents;
   };
}
#endif
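
/* Hypothetical usage sketch: intersect() yields the nearest common dominator
 * of two blocks, e.g. a block where code needed on both paths could safely be
 * placed (obtaining the idom_tree from the analysis framework is elided):
 *
 *    const brw::idom_tree &idom = ...;
 *    bblock_t *dom = idom.intersect(block_a, block_b);
 */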

#endif /* BRW_CFG_H */