/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */
29 #include "brw_vec4_live_variables.h"
/** @file brw_vec4_live_variables.cpp
 *
 * Support for computing at the basic block level which variables
 * (virtual GRFs in our case) are live at entry and exit.
 *
 * See Muchnick's Advanced Compiler Design and Implementation, section
 * 14.1 (p444).
 */
43 * Sets up the use[] and def[] arrays.
45 * The basic-block-level live variable analysis needs to know which
46 * variables get used before they're completely defined, and which
47 * variables are completely defined before they're used.
49 * We independently track each channel of a vec4. This is because we need to
50 * be able to recognize a sequence like:
55 * MUL result.xy tmp.xy e.xy
58 * as having tmp live only across that sequence (assuming it's used nowhere
59 * else), because it's a common pattern. A more conservative approach that
60 * doesn't get tmp marked a deffed in this block will tend to result in
64 vec4_live_variables::setup_def_use()
68 foreach_block (block
, cfg
) {
69 assert(ip
== block
->start_ip
);
71 assert(cfg
->blocks
[block
->num
- 1]->end_ip
== ip
- 1);
73 foreach_inst_in_block(vec4_instruction
, inst
, block
) {
74 struct block_data
*bd
= &block_data
[block
->num
];
76 /* Set use[] for this instruction */
77 for (unsigned int i
= 0; i
< 3; i
++) {
78 if (inst
->src
[i
].file
== VGRF
) {
79 for (unsigned j
= 0; j
< DIV_ROUND_UP(inst
->size_read(i
), 16); j
++) {
80 for (int c
= 0; c
< 4; c
++) {
81 const unsigned v
= var_from_reg(alloc
, inst
->src
[i
], c
, j
);
82 if (!BITSET_TEST(bd
->def
, v
))
83 BITSET_SET(bd
->use
, v
);
88 for (unsigned c
= 0; c
< 4; c
++) {
89 if (inst
->reads_flag(c
) &&
90 !BITSET_TEST(bd
->flag_def
, c
)) {
91 BITSET_SET(bd
->flag_use
, c
);
95 /* Check for unconditional writes to whole registers. These
96 * are the things that screen off preceding definitions of a
97 * variable, and thus qualify for being in def[].
99 if (inst
->dst
.file
== VGRF
&&
100 (!inst
->predicate
|| inst
->opcode
== BRW_OPCODE_SEL
)) {
101 for (unsigned i
= 0; i
< DIV_ROUND_UP(inst
->size_written
, 16); i
++) {
102 for (int c
= 0; c
< 4; c
++) {
103 if (inst
->dst
.writemask
& (1 << c
)) {
104 const unsigned v
= var_from_reg(alloc
, inst
->dst
, c
, i
);
105 if (!BITSET_TEST(bd
->use
, v
))
106 BITSET_SET(bd
->def
, v
);
111 if (inst
->writes_flag()) {
112 for (unsigned c
= 0; c
< 4; c
++) {
113 if ((inst
->dst
.writemask
& (1 << c
)) &&
114 !BITSET_TEST(bd
->flag_use
, c
)) {
115 BITSET_SET(bd
->flag_def
, c
);
126 * The algorithm incrementally sets bits in liveout and livein,
127 * propagating it through control flow. It will eventually terminate
128 * because it only ever adds bits, and stops when no bits are added in
132 vec4_live_variables::compute_live_variables()
139 foreach_block_reverse (block
, cfg
) {
140 struct block_data
*bd
= &block_data
[block
->num
];
143 foreach_list_typed(bblock_link
, child_link
, link
, &block
->children
) {
144 struct block_data
*child_bd
= &block_data
[child_link
->block
->num
];
146 for (int i
= 0; i
< bitset_words
; i
++) {
147 BITSET_WORD new_liveout
= (child_bd
->livein
[i
] &
150 bd
->liveout
[i
] |= new_liveout
;
154 BITSET_WORD new_liveout
= (child_bd
->flag_livein
[0] &
155 ~bd
->flag_liveout
[0]);
157 bd
->flag_liveout
[0] |= new_liveout
;
163 for (int i
= 0; i
< bitset_words
; i
++) {
164 BITSET_WORD new_livein
= (bd
->use
[i
] |
167 if (new_livein
& ~bd
->livein
[i
]) {
168 bd
->livein
[i
] |= new_livein
;
172 BITSET_WORD new_livein
= (bd
->flag_use
[0] |
173 (bd
->flag_liveout
[0] &
175 if (new_livein
& ~bd
->flag_livein
[0]) {
176 bd
->flag_livein
[0] |= new_livein
;
183 vec4_live_variables::vec4_live_variables(const simple_allocator
&alloc
,
185 : alloc(alloc
), cfg(cfg
)
187 mem_ctx
= ralloc_context(NULL
);
189 num_vars
= alloc
.total_size
* 8;
190 block_data
= rzalloc_array(mem_ctx
, struct block_data
, cfg
->num_blocks
);
192 bitset_words
= BITSET_WORDS(num_vars
);
193 for (int i
= 0; i
< cfg
->num_blocks
; i
++) {
194 block_data
[i
].def
= rzalloc_array(mem_ctx
, BITSET_WORD
, bitset_words
);
195 block_data
[i
].use
= rzalloc_array(mem_ctx
, BITSET_WORD
, bitset_words
);
196 block_data
[i
].livein
= rzalloc_array(mem_ctx
, BITSET_WORD
, bitset_words
);
197 block_data
[i
].liveout
= rzalloc_array(mem_ctx
, BITSET_WORD
, bitset_words
);
199 block_data
[i
].flag_def
[0] = 0;
200 block_data
[i
].flag_use
[0] = 0;
201 block_data
[i
].flag_livein
[0] = 0;
202 block_data
[i
].flag_liveout
[0] = 0;
206 compute_live_variables();
209 vec4_live_variables::~vec4_live_variables()
211 ralloc_free(mem_ctx
);
214 #define MAX_INSTRUCTION (1 << 30)
217 * Computes a conservative start/end of the live intervals for each virtual GRF.
219 * We could expose per-channel live intervals to the consumer based on the
220 * information we computed in vec4_live_variables, except that our only
221 * current user is virtual_grf_interferes(). So we instead union the
222 * per-channel ranges into a per-vgrf range for virtual_grf_start[] and
225 * We could potentially have virtual_grf_interferes() do the test per-channel,
226 * which would let some interesting register allocation occur (particularly on
227 * code-generated GLSL sequences from the Cg compiler which does register
228 * allocation at the GLSL level and thus reuses components of the variable
229 * with distinct lifetimes). But right now the complexity of doing so doesn't
230 * seem worth it, since having virtual_grf_interferes() be cheap is important
231 * for register allocation performance.
234 vec4_visitor::calculate_live_intervals()
236 if (this->live_intervals
)
239 int *start
= ralloc_array(mem_ctx
, int, this->alloc
.total_size
* 8);
240 int *end
= ralloc_array(mem_ctx
, int, this->alloc
.total_size
* 8);
241 ralloc_free(this->virtual_grf_start
);
242 ralloc_free(this->virtual_grf_end
);
243 this->virtual_grf_start
= start
;
244 this->virtual_grf_end
= end
;
246 for (unsigned i
= 0; i
< this->alloc
.total_size
* 8; i
++) {
247 start
[i
] = MAX_INSTRUCTION
;
251 /* Start by setting up the intervals with no knowledge of control
255 foreach_block_and_inst(block
, vec4_instruction
, inst
, cfg
) {
256 for (unsigned int i
= 0; i
< 3; i
++) {
257 if (inst
->src
[i
].file
== VGRF
) {
258 for (unsigned j
= 0; j
< DIV_ROUND_UP(inst
->size_read(i
), 16); j
++) {
259 for (int c
= 0; c
< 4; c
++) {
260 const unsigned v
= var_from_reg(alloc
, inst
->src
[i
], c
, j
);
261 start
[v
] = MIN2(start
[v
], ip
);
268 if (inst
->dst
.file
== VGRF
) {
269 for (unsigned i
= 0; i
< DIV_ROUND_UP(inst
->size_written
, 16); i
++) {
270 for (int c
= 0; c
< 4; c
++) {
271 if (inst
->dst
.writemask
& (1 << c
)) {
272 const unsigned v
= var_from_reg(alloc
, inst
->dst
, c
, i
);
273 start
[v
] = MIN2(start
[v
], ip
);
283 /* Now, extend those intervals using our analysis of control flow.
285 * The control flow-aware analysis was done at a channel level, while at
286 * this point we're distilling it down to vgrfs.
288 this->live_intervals
= new(mem_ctx
) vec4_live_variables(alloc
, cfg
);
290 foreach_block (block
, cfg
) {
291 struct block_data
*bd
= &live_intervals
->block_data
[block
->num
];
293 for (int i
= 0; i
< live_intervals
->num_vars
; i
++) {
294 if (BITSET_TEST(bd
->livein
, i
)) {
295 start
[i
] = MIN2(start
[i
], block
->start_ip
);
296 end
[i
] = MAX2(end
[i
], block
->start_ip
);
299 if (BITSET_TEST(bd
->liveout
, i
)) {
300 start
[i
] = MIN2(start
[i
], block
->end_ip
);
301 end
[i
] = MAX2(end
[i
], block
->end_ip
);
308 vec4_visitor::invalidate_live_intervals()
310 ralloc_free(live_intervals
);
311 live_intervals
= NULL
;
315 vec4_visitor::var_range_start(unsigned v
, unsigned n
) const
319 for (unsigned i
= 0; i
< n
; i
++)
320 start
= MIN2(start
, virtual_grf_start
[v
+ i
]);
326 vec4_visitor::var_range_end(unsigned v
, unsigned n
) const
330 for (unsigned i
= 0; i
< n
; i
++)
331 end
= MAX2(end
, virtual_grf_end
[v
+ i
]);
337 vec4_visitor::virtual_grf_interferes(int a
, int b
)
339 return !((var_range_end(8 * alloc
.offsets
[a
], 8 * alloc
.sizes
[a
]) <=
340 var_range_start(8 * alloc
.offsets
[b
], 8 * alloc
.sizes
[b
])) ||
341 (var_range_end(8 * alloc
.offsets
[b
], 8 * alloc
.sizes
[b
]) <=
342 var_range_start(8 * alloc
.offsets
[a
], 8 * alloc
.sizes
[a
])));