/* i965/vec4: use byte_offset() instead of offset() */
/* mesa.git: src/mesa/drivers/dri/i965/brw_vec4_live_variables.cpp */
1 /*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28 #include "brw_cfg.h"
29 #include "brw_vec4_live_variables.h"
30
31 using namespace brw;
32
33 /** @file brw_vec4_live_variables.cpp
34 *
35 * Support for computing at the basic block level which variables
36 * (virtual GRFs in our case) are live at entry and exit.
37 *
38 * See Muchnick's Advanced Compiler Design and Implementation, section
39 * 14.1 (p444).
40 */
41
42 /**
43 * Sets up the use[] and def[] arrays.
44 *
45 * The basic-block-level live variable analysis needs to know which
46 * variables get used before they're completely defined, and which
47 * variables are completely defined before they're used.
48 *
49 * We independently track each channel of a vec4. This is because we need to
50 * be able to recognize a sequence like:
51 *
52 * ...
53 * DP4 tmp.x a b;
54 * DP4 tmp.y c d;
55 * MUL result.xy tmp.xy e.xy
56 * ...
57 *
58 * as having tmp live only across that sequence (assuming it's used nowhere
59 * else), because it's a common pattern. A more conservative approach that
60 * doesn't get tmp marked a deffed in this block will tend to result in
61 * spilling.
62 */
63 void
64 vec4_live_variables::setup_def_use()
65 {
66 int ip = 0;
67
68 foreach_block (block, cfg) {
69 assert(ip == block->start_ip);
70 if (block->num > 0)
71 assert(cfg->blocks[block->num - 1]->end_ip == ip - 1);
72
73 foreach_inst_in_block(vec4_instruction, inst, block) {
74 struct block_data *bd = &block_data[block->num];
75
76 /* Set use[] for this instruction */
77 for (unsigned int i = 0; i < 3; i++) {
78 if (inst->src[i].file == VGRF) {
79 for (unsigned j = 0; j < regs_read(inst, i); j++) {
80 for (int c = 0; c < 4; c++) {
81 const unsigned v =
82 var_from_reg(alloc,
83 byte_offset(inst->src[i], j * REG_SIZE),
84 c);
85 if (!BITSET_TEST(bd->def, v))
86 BITSET_SET(bd->use, v);
87 }
88 }
89 }
90 }
91 for (unsigned c = 0; c < 4; c++) {
92 if (inst->reads_flag(c) &&
93 !BITSET_TEST(bd->flag_def, c)) {
94 BITSET_SET(bd->flag_use, c);
95 }
96 }
97
98 /* Check for unconditional writes to whole registers. These
99 * are the things that screen off preceding definitions of a
100 * variable, and thus qualify for being in def[].
101 */
102 if (inst->dst.file == VGRF &&
103 (!inst->predicate || inst->opcode == BRW_OPCODE_SEL)) {
104 for (unsigned i = 0; i < regs_written(inst); i++) {
105 for (int c = 0; c < 4; c++) {
106 if (inst->dst.writemask & (1 << c)) {
107 const unsigned v =
108 var_from_reg(alloc,
109 byte_offset(inst->dst, i * REG_SIZE), c);
110 if (!BITSET_TEST(bd->use, v))
111 BITSET_SET(bd->def, v);
112 }
113 }
114 }
115 }
116 if (inst->writes_flag()) {
117 for (unsigned c = 0; c < 4; c++) {
118 if ((inst->dst.writemask & (1 << c)) &&
119 !BITSET_TEST(bd->flag_use, c)) {
120 BITSET_SET(bd->flag_def, c);
121 }
122 }
123 }
124
125 ip++;
126 }
127 }
128 }
129
130 /**
131 * The algorithm incrementally sets bits in liveout and livein,
132 * propagating it through control flow. It will eventually terminate
133 * because it only ever adds bits, and stops when no bits are added in
134 * a pass.
135 */
136 void
137 vec4_live_variables::compute_live_variables()
138 {
139 bool cont = true;
140
141 while (cont) {
142 cont = false;
143
144 foreach_block_reverse (block, cfg) {
145 struct block_data *bd = &block_data[block->num];
146
147 /* Update liveout */
148 foreach_list_typed(bblock_link, child_link, link, &block->children) {
149 struct block_data *child_bd = &block_data[child_link->block->num];
150
151 for (int i = 0; i < bitset_words; i++) {
152 BITSET_WORD new_liveout = (child_bd->livein[i] &
153 ~bd->liveout[i]);
154 if (new_liveout) {
155 bd->liveout[i] |= new_liveout;
156 cont = true;
157 }
158 }
159 BITSET_WORD new_liveout = (child_bd->flag_livein[0] &
160 ~bd->flag_liveout[0]);
161 if (new_liveout) {
162 bd->flag_liveout[0] |= new_liveout;
163 cont = true;
164 }
165 }
166
167 /* Update livein */
168 for (int i = 0; i < bitset_words; i++) {
169 BITSET_WORD new_livein = (bd->use[i] |
170 (bd->liveout[i] &
171 ~bd->def[i]));
172 if (new_livein & ~bd->livein[i]) {
173 bd->livein[i] |= new_livein;
174 cont = true;
175 }
176 }
177 BITSET_WORD new_livein = (bd->flag_use[0] |
178 (bd->flag_liveout[0] &
179 ~bd->flag_def[0]));
180 if (new_livein & ~bd->flag_livein[0]) {
181 bd->flag_livein[0] |= new_livein;
182 cont = true;
183 }
184 }
185 }
186 }
187
188 vec4_live_variables::vec4_live_variables(const simple_allocator &alloc,
189 cfg_t *cfg)
190 : alloc(alloc), cfg(cfg)
191 {
192 mem_ctx = ralloc_context(NULL);
193
194 num_vars = alloc.total_size * 4;
195 block_data = rzalloc_array(mem_ctx, struct block_data, cfg->num_blocks);
196
197 bitset_words = BITSET_WORDS(num_vars);
198 for (int i = 0; i < cfg->num_blocks; i++) {
199 block_data[i].def = rzalloc_array(mem_ctx, BITSET_WORD, bitset_words);
200 block_data[i].use = rzalloc_array(mem_ctx, BITSET_WORD, bitset_words);
201 block_data[i].livein = rzalloc_array(mem_ctx, BITSET_WORD, bitset_words);
202 block_data[i].liveout = rzalloc_array(mem_ctx, BITSET_WORD, bitset_words);
203
204 block_data[i].flag_def[0] = 0;
205 block_data[i].flag_use[0] = 0;
206 block_data[i].flag_livein[0] = 0;
207 block_data[i].flag_liveout[0] = 0;
208 }
209
210 setup_def_use();
211 compute_live_variables();
212 }
213
/* Freeing the ralloc context releases block_data and every bitset in one
 * shot, since the constructor allocated them all out of mem_ctx.
 */
vec4_live_variables::~vec4_live_variables()
{
   ralloc_free(mem_ctx);
}
218
219 #define MAX_INSTRUCTION (1 << 30)
220
221 /**
222 * Computes a conservative start/end of the live intervals for each virtual GRF.
223 *
224 * We could expose per-channel live intervals to the consumer based on the
225 * information we computed in vec4_live_variables, except that our only
226 * current user is virtual_grf_interferes(). So we instead union the
227 * per-channel ranges into a per-vgrf range for virtual_grf_start[] and
228 * virtual_grf_end[].
229 *
230 * We could potentially have virtual_grf_interferes() do the test per-channel,
231 * which would let some interesting register allocation occur (particularly on
232 * code-generated GLSL sequences from the Cg compiler which does register
233 * allocation at the GLSL level and thus reuses components of the variable
234 * with distinct lifetimes). But right now the complexity of doing so doesn't
235 * seem worth it, since having virtual_grf_interferes() be cheap is important
236 * for register allocation performance.
237 */
238 void
239 vec4_visitor::calculate_live_intervals()
240 {
241 if (this->live_intervals)
242 return;
243
244 int *start = ralloc_array(mem_ctx, int, this->alloc.total_size * 4);
245 int *end = ralloc_array(mem_ctx, int, this->alloc.total_size * 4);
246 ralloc_free(this->virtual_grf_start);
247 ralloc_free(this->virtual_grf_end);
248 this->virtual_grf_start = start;
249 this->virtual_grf_end = end;
250
251 for (unsigned i = 0; i < this->alloc.total_size * 4; i++) {
252 start[i] = MAX_INSTRUCTION;
253 end[i] = -1;
254 }
255
256 /* Start by setting up the intervals with no knowledge of control
257 * flow.
258 */
259 int ip = 0;
260 foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
261 for (unsigned int i = 0; i < 3; i++) {
262 if (inst->src[i].file == VGRF) {
263 for (unsigned j = 0; j < regs_read(inst, i); j++) {
264 for (int c = 0; c < 4; c++) {
265 const unsigned v =
266 var_from_reg(alloc,
267 byte_offset(inst->src[i], j * REG_SIZE), c);
268 start[v] = MIN2(start[v], ip);
269 end[v] = ip;
270 }
271 }
272 }
273 }
274
275 if (inst->dst.file == VGRF) {
276 for (unsigned i = 0; i < regs_written(inst); i++) {
277 for (int c = 0; c < 4; c++) {
278 if (inst->dst.writemask & (1 << c)) {
279 const unsigned v =
280 var_from_reg(alloc,
281 byte_offset(inst->dst, i * REG_SIZE), c);
282 start[v] = MIN2(start[v], ip);
283 end[v] = ip;
284 }
285 }
286 }
287 }
288
289 ip++;
290 }
291
292 /* Now, extend those intervals using our analysis of control flow.
293 *
294 * The control flow-aware analysis was done at a channel level, while at
295 * this point we're distilling it down to vgrfs.
296 */
297 this->live_intervals = new(mem_ctx) vec4_live_variables(alloc, cfg);
298
299 foreach_block (block, cfg) {
300 struct block_data *bd = &live_intervals->block_data[block->num];
301
302 for (int i = 0; i < live_intervals->num_vars; i++) {
303 if (BITSET_TEST(bd->livein, i)) {
304 start[i] = MIN2(start[i], block->start_ip);
305 end[i] = MAX2(end[i], block->start_ip);
306 }
307
308 if (BITSET_TEST(bd->liveout, i)) {
309 start[i] = MIN2(start[i], block->end_ip);
310 end[i] = MAX2(end[i], block->end_ip);
311 }
312 }
313 }
314 }
315
/* Drops the cached analysis; the next calculate_live_intervals() call will
 * recompute it from scratch (that function early-outs while live_intervals
 * is non-NULL).
 */
void
vec4_visitor::invalidate_live_intervals()
{
   ralloc_free(live_intervals);
   live_intervals = NULL;
}
322
323 int
324 vec4_visitor::var_range_start(unsigned v, unsigned n) const
325 {
326 int start = INT_MAX;
327
328 for (unsigned i = 0; i < n; i++)
329 start = MIN2(start, virtual_grf_start[v + i]);
330
331 return start;
332 }
333
334 int
335 vec4_visitor::var_range_end(unsigned v, unsigned n) const
336 {
337 int end = INT_MIN;
338
339 for (unsigned i = 0; i < n; i++)
340 end = MAX2(end, virtual_grf_end[v + i]);
341
342 return end;
343 }
344
345 bool
346 vec4_visitor::virtual_grf_interferes(int a, int b)
347 {
348 return !((var_range_end(4 * alloc.offsets[a], 4 * alloc.sizes[a]) <=
349 var_range_start(4 * alloc.offsets[b], 4 * alloc.sizes[b])) ||
350 (var_range_end(4 * alloc.offsets[b], 4 * alloc.sizes[b]) <=
351 var_range_start(4 * alloc.offsets[a], 4 * alloc.sizes[a])));
352 }