/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */
29 #include "brw_vec4_live_variables.h"
33 #define MAX_INSTRUCTION (1 << 30)
/** @file brw_vec4_live_variables.cpp
 *
 * Support for computing at the basic block level which variables
 * (virtual GRFs in our case) are live at entry and exit.
 *
 * See Muchnick's Advanced Compiler Design and Implementation, section
 * 14.1 (p444).
 */
45 * Sets up the use/def arrays and block-local approximation of the live ranges.
47 * The basic-block-level live variable analysis needs to know which
48 * variables get used before they're completely defined, and which
49 * variables are completely defined before they're used.
51 * We independently track each channel of a vec4. This is because we need to
52 * be able to recognize a sequence like:
57 * MUL result.xy tmp.xy e.xy
60 * as having tmp live only across that sequence (assuming it's used nowhere
61 * else), because it's a common pattern. A more conservative approach that
62 * doesn't get tmp marked a deffed in this block will tend to result in
66 vec4_live_variables::setup_def_use()
70 foreach_block (block
, cfg
) {
71 assert(ip
== block
->start_ip
);
73 assert(cfg
->blocks
[block
->num
- 1]->end_ip
== ip
- 1);
75 foreach_inst_in_block(vec4_instruction
, inst
, block
) {
76 struct block_data
*bd
= &block_data
[block
->num
];
78 /* Set up the instruction uses. */
79 for (unsigned int i
= 0; i
< 3; i
++) {
80 if (inst
->src
[i
].file
== VGRF
) {
81 for (unsigned j
= 0; j
< DIV_ROUND_UP(inst
->size_read(i
), 16); j
++) {
82 for (int c
= 0; c
< 4; c
++) {
83 const unsigned v
= var_from_reg(alloc
, inst
->src
[i
], c
, j
);
85 start
[v
] = MIN2(start
[v
], ip
);
88 if (!BITSET_TEST(bd
->def
, v
))
89 BITSET_SET(bd
->use
, v
);
94 for (unsigned c
= 0; c
< 4; c
++) {
95 if (inst
->reads_flag(c
) &&
96 !BITSET_TEST(bd
->flag_def
, c
)) {
97 BITSET_SET(bd
->flag_use
, c
);
101 /* Set up the instruction defs. */
102 if (inst
->dst
.file
== VGRF
) {
103 for (unsigned i
= 0; i
< DIV_ROUND_UP(inst
->size_written
, 16); i
++) {
104 for (int c
= 0; c
< 4; c
++) {
105 if (inst
->dst
.writemask
& (1 << c
)) {
106 const unsigned v
= var_from_reg(alloc
, inst
->dst
, c
, i
);
108 start
[v
] = MIN2(start
[v
], ip
);
111 /* Check for unconditional register writes, these are the
112 * things that screen off preceding definitions of a
113 * variable, and thus qualify for being in def[].
115 if ((!inst
->predicate
|| inst
->opcode
== BRW_OPCODE_SEL
) &&
116 !BITSET_TEST(bd
->use
, v
))
117 BITSET_SET(bd
->def
, v
);
122 if (inst
->writes_flag()) {
123 for (unsigned c
= 0; c
< 4; c
++) {
124 if ((inst
->dst
.writemask
& (1 << c
)) &&
125 !BITSET_TEST(bd
->flag_use
, c
)) {
126 BITSET_SET(bd
->flag_def
, c
);
137 * The algorithm incrementally sets bits in liveout and livein,
138 * propagating it through control flow. It will eventually terminate
139 * because it only ever adds bits, and stops when no bits are added in
143 vec4_live_variables::compute_live_variables()
150 foreach_block_reverse (block
, cfg
) {
151 struct block_data
*bd
= &block_data
[block
->num
];
154 foreach_list_typed(bblock_link
, child_link
, link
, &block
->children
) {
155 struct block_data
*child_bd
= &block_data
[child_link
->block
->num
];
157 for (int i
= 0; i
< bitset_words
; i
++) {
158 BITSET_WORD new_liveout
= (child_bd
->livein
[i
] &
161 bd
->liveout
[i
] |= new_liveout
;
165 BITSET_WORD new_liveout
= (child_bd
->flag_livein
[0] &
166 ~bd
->flag_liveout
[0]);
168 bd
->flag_liveout
[0] |= new_liveout
;
174 for (int i
= 0; i
< bitset_words
; i
++) {
175 BITSET_WORD new_livein
= (bd
->use
[i
] |
178 if (new_livein
& ~bd
->livein
[i
]) {
179 bd
->livein
[i
] |= new_livein
;
183 BITSET_WORD new_livein
= (bd
->flag_use
[0] |
184 (bd
->flag_liveout
[0] &
186 if (new_livein
& ~bd
->flag_livein
[0]) {
187 bd
->flag_livein
[0] |= new_livein
;
195 * Extend the start/end ranges for each variable to account for the
196 * new information calculated from control flow.
199 vec4_live_variables::compute_start_end()
201 foreach_block (block
, cfg
) {
202 const struct block_data
&bd
= block_data
[block
->num
];
204 for (int i
= 0; i
< num_vars
; i
++) {
205 if (BITSET_TEST(bd
.livein
, i
)) {
206 start
[i
] = MIN2(start
[i
], block
->start_ip
);
207 end
[i
] = MAX2(end
[i
], block
->start_ip
);
210 if (BITSET_TEST(bd
.liveout
, i
)) {
211 start
[i
] = MIN2(start
[i
], block
->end_ip
);
212 end
[i
] = MAX2(end
[i
], block
->end_ip
);
218 vec4_live_variables::vec4_live_variables(const backend_shader
*s
)
219 : alloc(s
->alloc
), cfg(s
->cfg
)
221 mem_ctx
= ralloc_context(NULL
);
223 num_vars
= alloc
.total_size
* 8;
224 start
= ralloc_array(mem_ctx
, int, num_vars
);
225 end
= ralloc_array(mem_ctx
, int, num_vars
);
227 for (int i
= 0; i
< num_vars
; i
++) {
228 start
[i
] = MAX_INSTRUCTION
;
232 block_data
= rzalloc_array(mem_ctx
, struct block_data
, cfg
->num_blocks
);
234 bitset_words
= BITSET_WORDS(num_vars
);
235 for (int i
= 0; i
< cfg
->num_blocks
; i
++) {
236 block_data
[i
].def
= rzalloc_array(mem_ctx
, BITSET_WORD
, bitset_words
);
237 block_data
[i
].use
= rzalloc_array(mem_ctx
, BITSET_WORD
, bitset_words
);
238 block_data
[i
].livein
= rzalloc_array(mem_ctx
, BITSET_WORD
, bitset_words
);
239 block_data
[i
].liveout
= rzalloc_array(mem_ctx
, BITSET_WORD
, bitset_words
);
241 block_data
[i
].flag_def
[0] = 0;
242 block_data
[i
].flag_use
[0] = 0;
243 block_data
[i
].flag_livein
[0] = 0;
244 block_data
[i
].flag_liveout
[0] = 0;
248 compute_live_variables();
252 vec4_live_variables::~vec4_live_variables()
254 ralloc_free(mem_ctx
);
258 check_register_live_range(const vec4_live_variables
*live
, int ip
,
259 unsigned var
, unsigned n
)
261 for (unsigned j
= 0; j
< n
; j
+= 4) {
262 if (var
+ j
>= unsigned(live
->num_vars
) ||
263 live
->start
[var
+ j
] > ip
|| live
->end
[var
+ j
] < ip
)
271 vec4_live_variables::validate(const backend_shader
*s
) const
275 foreach_block_and_inst(block
, vec4_instruction
, inst
, s
->cfg
) {
276 for (unsigned c
= 0; c
< 4; c
++) {
277 if (inst
->dst
.writemask
& (1 << c
)) {
278 for (unsigned i
= 0; i
< 3; i
++) {
279 if (inst
->src
[i
].file
== VGRF
&&
280 !check_register_live_range(this, ip
,
281 var_from_reg(alloc
, inst
->src
[i
], c
),
286 if (inst
->dst
.file
== VGRF
&&
287 !check_register_live_range(this, ip
,
288 var_from_reg(alloc
, inst
->dst
, c
),
301 vec4_live_variables::var_range_start(unsigned v
, unsigned n
) const
305 for (unsigned i
= 0; i
< n
; i
++)
306 ip
= MIN2(ip
, start
[v
+ i
]);
312 vec4_live_variables::var_range_end(unsigned v
, unsigned n
) const
316 for (unsigned i
= 0; i
< n
; i
++)
317 ip
= MAX2(ip
, end
[v
+ i
]);
323 vec4_live_variables::vgrfs_interfere(int a
, int b
) const
325 return !((var_range_end(8 * alloc
.offsets
[a
], 8 * alloc
.sizes
[a
]) <=
326 var_range_start(8 * alloc
.offsets
[b
], 8 * alloc
.sizes
[b
])) ||
327 (var_range_end(8 * alloc
.offsets
[b
], 8 * alloc
.sizes
[b
]) <=
328 var_range_start(8 * alloc
.offsets
[a
], 8 * alloc
.sizes
[a
])));