i965/vec4: Move c->last_scratch into vec4_visitor.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_vec4_reg_allocate.cpp
/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "main/macros.h"
#include "util/register_allocate.h"
#include "brw_vec4.h"
#include "brw_vs.h"
#include "brw_cfg.h"

using namespace brw;

namespace brw {

static void
assign(unsigned int *reg_hw_locations, backend_reg *reg)
{
   if (reg->file == GRF) {
      reg->reg = reg_hw_locations[reg->reg] + reg->reg_offset;
      reg->reg_offset = 0;
   }
}

bool
vec4_visitor::reg_allocate_trivial()
{
   unsigned int hw_reg_mapping[this->alloc.count];
   bool virtual_grf_used[this->alloc.count];
   int next;

   /* Calculate which virtual GRFs are actually in use after whatever
    * optimization passes have occurred.
    */
   for (unsigned i = 0; i < this->alloc.count; i++) {
      virtual_grf_used[i] = false;
   }

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == GRF)
         virtual_grf_used[inst->dst.reg] = true;

      for (unsigned i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF)
            virtual_grf_used[inst->src[i].reg] = true;
      }
   }

   hw_reg_mapping[0] = this->first_non_payload_grf;
   next = hw_reg_mapping[0] + this->alloc.sizes[0];
   for (unsigned i = 1; i < this->alloc.count; i++) {
      if (virtual_grf_used[i]) {
         hw_reg_mapping[i] = next;
         next += this->alloc.sizes[i];
      }
   }
   prog_data->total_grf = next;
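
   /* Worked example (illustrative, not from the original source): with
    * first_non_payload_grf == 2 and three live VGRFs of sizes {1, 2, 1},
    * the loop above produces hw_reg_mapping == {2, 3, 5} and total_grf
    * ends up as 6.
    */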

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      assign(hw_reg_mapping, &inst->dst);
      assign(hw_reg_mapping, &inst->src[0]);
      assign(hw_reg_mapping, &inst->src[1]);
      assign(hw_reg_mapping, &inst->src[2]);
   }

   if (prog_data->total_grf > max_grf) {
      fail("Ran out of regs on trivial allocator (%d/%d)\n",
           prog_data->total_grf, max_grf);
      return false;
   }

   return true;
}

extern "C" void
brw_vec4_alloc_reg_set(struct brw_compiler *compiler)
{
   int base_reg_count =
      compiler->devinfo->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF;

   /* After running split_virtual_grfs(), almost all VGRFs will be of size 1.
    * SEND-from-GRF sources cannot be split, so we also need classes for each
    * potential message length.
    */
   const int class_count = MAX_VGRF_SIZE;
   int class_sizes[MAX_VGRF_SIZE];

   for (int i = 0; i < class_count; i++)
      class_sizes[i] = i + 1;

   /* Compute the total number of registers across all classes. */
   int ra_reg_count = 0;
   for (int i = 0; i < class_count; i++) {
      ra_reg_count += base_reg_count - (class_sizes[i] - 1);
   }
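
   /* Illustration (hypothetical numbers, not from the original source): a
    * size-n class gets one RA register for each GRF at which an n-register
    * run can start, i.e. base_reg_count - (n - 1) of them.  With
    * base_reg_count == 8 and three classes of sizes {1, 2, 3}, that is
    * 8 + 7 + 6 == 21 RA registers in total.
    */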

   ralloc_free(compiler->vec4_reg_set.ra_reg_to_grf);
   compiler->vec4_reg_set.ra_reg_to_grf = ralloc_array(compiler, uint8_t, ra_reg_count);
   ralloc_free(compiler->vec4_reg_set.regs);
   compiler->vec4_reg_set.regs = ra_alloc_reg_set(compiler, ra_reg_count);
   if (compiler->devinfo->gen >= 6)
      ra_set_allocate_round_robin(compiler->vec4_reg_set.regs);
   ralloc_free(compiler->vec4_reg_set.classes);
   compiler->vec4_reg_set.classes = ralloc_array(compiler, int, class_count);

   /* Now, add the registers to their classes, and add the conflicts
    * between them and the base GRF registers (and also each other).
    */
   int reg = 0;
   unsigned *q_values[MAX_VGRF_SIZE];
   for (int i = 0; i < class_count; i++) {
      int class_reg_count = base_reg_count - (class_sizes[i] - 1);
      compiler->vec4_reg_set.classes[i] = ra_alloc_reg_class(compiler->vec4_reg_set.regs);

      q_values[i] = new unsigned[MAX_VGRF_SIZE];

      for (int j = 0; j < class_reg_count; j++) {
         ra_class_add_reg(compiler->vec4_reg_set.regs, compiler->vec4_reg_set.classes[i], reg);

         compiler->vec4_reg_set.ra_reg_to_grf[reg] = j;

         for (int base_reg = j;
              base_reg < j + class_sizes[i];
              base_reg++) {
            ra_add_transitive_reg_conflict(compiler->vec4_reg_set.regs, base_reg, reg);
         }

         reg++;
      }
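
      /* Illustration (not from the original source): the RA register for a
       * size-2 placement starting at GRF j conflicts with base registers j
       * and j + 1, and the transitive conflict also makes it conflict with
       * every previously added placement that touches either of those GRFs.
       */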

      for (int j = 0; j < class_count; j++) {
         /* Calculate the q values manually because the algorithm used by
          * ra_set_finalize() to do it has higher complexity affecting the
          * start-up time of some applications. q(i, j) is just the maximum
          * number of registers from class i a register from class j can
          * conflict with.
          */
         q_values[i][j] = class_sizes[i] + class_sizes[j] - 1;
      }
   }
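
   /* Worked q-value example (illustrative): a size-3 placement covering
    * GRFs k..k+2 overlaps a size-2 placement starting at any of GRFs
    * k-1..k+2, so q == 3 + 2 - 1 == 4.
    */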
   assert(reg == ra_reg_count);

   ra_set_finalize(compiler->vec4_reg_set.regs, q_values);

   for (int i = 0; i < MAX_VGRF_SIZE; i++)
      delete[] q_values[i];
}

void
vec4_visitor::setup_payload_interference(struct ra_graph *g,
                                         int first_payload_node,
                                         int reg_node_count)
{
   int payload_node_count = this->first_non_payload_grf;

   for (int i = 0; i < payload_node_count; i++) {
      /* Mark each payload reg node as being allocated to its physical register.
       *
       * The alternative would be to have per-physical register classes, which
       * would just be silly.
       */
      ra_set_node_reg(g, first_payload_node + i, i);

      /* For now, just mark each payload node as interfering with every other
       * node to be allocated.
       */
      for (int j = 0; j < reg_node_count; j++) {
         ra_add_node_interference(g, first_payload_node + i, j);
      }
   }
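
   /* Illustration (not from the original source): with payload_node_count
    * == 2, the two payload nodes are pinned to RA registers 0 and 1 (which
    * map 1:1 to GRFs 0 and 1) and conflict with every allocatable node, so
    * the allocator can never place a VGRF on top of the payload.
    */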
}

bool
vec4_visitor::reg_allocate()
{
   unsigned int hw_reg_mapping[alloc.count];
   int payload_reg_count = this->first_non_payload_grf;

   /* Using the trivial allocator can be useful in debugging undefined
    * register access as a result of broken optimization passes.
    */
   if (0)
      return reg_allocate_trivial();

   calculate_live_intervals();

   int node_count = alloc.count;
   int first_payload_node = node_count;
   node_count += payload_reg_count;
   struct ra_graph *g =
      ra_alloc_interference_graph(compiler->vec4_reg_set.regs, node_count);

   for (unsigned i = 0; i < alloc.count; i++) {
      int size = this->alloc.sizes[i];
      assert(size >= 1 && size <= MAX_VGRF_SIZE);
      ra_set_node_class(g, i, compiler->vec4_reg_set.classes[size - 1]);

      for (unsigned j = 0; j < i; j++) {
         if (virtual_grf_interferes(i, j)) {
            ra_add_node_interference(g, i, j);
         }
      }
   }

   setup_payload_interference(g, first_payload_node, node_count);

   if (!ra_allocate(g)) {
      /* Failed to allocate registers. Spill a reg, and the caller will
       * loop back into here to try again.
       */
      int reg = choose_spill_reg(g);
      if (this->no_spills) {
         fail("Failure to register allocate. Reduce number of live "
              "values to avoid this.");
      } else if (reg == -1) {
         fail("no register to spill\n");
      } else {
         spill_reg(reg);
      }
      ralloc_free(g);
      return false;
   }
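
   /* A minimal sketch of the retry loop this contract implies in the
    * caller (hypothetical shape, not code from this file):
    *
    *    while (!reg_allocate()) {
    *       if (failed)
    *          break;   // fail() was called; stop instead of respinning
    *    }
    */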

   /* Get the chosen virtual registers for each node, and map virtual
    * regs in the register classes back down to real hardware reg
    * numbers.
    */
   prog_data->total_grf = payload_reg_count;
   for (unsigned i = 0; i < alloc.count; i++) {
      int reg = ra_get_node_reg(g, i);

      hw_reg_mapping[i] = compiler->vec4_reg_set.ra_reg_to_grf[reg];
      prog_data->total_grf = MAX2(prog_data->total_grf,
                                  hw_reg_mapping[i] + alloc.sizes[i]);
   }

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      assign(hw_reg_mapping, &inst->dst);
      assign(hw_reg_mapping, &inst->src[0]);
      assign(hw_reg_mapping, &inst->src[1]);
      assign(hw_reg_mapping, &inst->src[2]);
   }

   ralloc_free(g);

   return true;
}

void
vec4_visitor::evaluate_spill_costs(float *spill_costs, bool *no_spill)
{
   float loop_scale = 1.0;

   for (unsigned i = 0; i < this->alloc.count; i++) {
      spill_costs[i] = 0.0;
      no_spill[i] = alloc.sizes[i] != 1;
   }

   /* Calculate costs for spilling nodes. Call it a cost of 1 per
    * spill/unspill we'll have to do, and guess that the insides of
    * loops run 10 times.
    */
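   /* Worked example (illustrative): a VGRF read once outside any loop and
    * once inside a single loop accumulates a spill cost of 1 + 10 == 11; a
    * use inside a doubly nested loop contributes 100.
    */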
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF) {
            spill_costs[inst->src[i].reg] += loop_scale;
            if (inst->src[i].reladdr)
               no_spill[inst->src[i].reg] = true;
         }
      }

      if (inst->dst.file == GRF) {
         spill_costs[inst->dst.reg] += loop_scale;
         if (inst->dst.reladdr)
            no_spill[inst->dst.reg] = true;
      }

      switch (inst->opcode) {

      case BRW_OPCODE_DO:
         loop_scale *= 10;
         break;

      case BRW_OPCODE_WHILE:
         loop_scale /= 10;
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_READ:
      case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
         for (int i = 0; i < 3; i++) {
            if (inst->src[i].file == GRF)
               no_spill[inst->src[i].reg] = true;
         }
         if (inst->dst.file == GRF)
            no_spill[inst->dst.reg] = true;
         break;

      default:
         break;
      }
   }
}

int
vec4_visitor::choose_spill_reg(struct ra_graph *g)
{
   float spill_costs[this->alloc.count];
   bool no_spill[this->alloc.count];

   evaluate_spill_costs(spill_costs, no_spill);

   for (unsigned i = 0; i < this->alloc.count; i++) {
      if (!no_spill[i])
         ra_set_node_spill_cost(g, i, spill_costs[i]);
   }

   return ra_get_best_spill_node(g);
}

void
vec4_visitor::spill_reg(int spill_reg_nr)
{
   assert(alloc.sizes[spill_reg_nr] == 1);
   unsigned int spill_offset = last_scratch++;

   /* Generate spill/unspill instructions for the objects being spilled. */
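   /* Sketch of the rewrite performed below (illustrative instruction
    * stream, not from the original source), assuming vgrf5 was chosen:
    *
    *    add vgrf5, vgrf3, vgrf4     ->   add vgrf9, vgrf3, vgrf4
    *                                     scratch_write vgrf9, spill_offset
    *    mul vgrf6, vgrf5, vgrf2     ->   scratch_read vgrf10, spill_offset
    *                                     mul vgrf6, vgrf10, vgrf2
    */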
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF && inst->src[i].reg == spill_reg_nr) {
            src_reg spill_reg = inst->src[i];
            inst->src[i].reg = alloc.allocate(1);
            dst_reg temp = dst_reg(inst->src[i]);

            emit_scratch_read(block, inst, temp, spill_reg, spill_offset);
         }
      }

      if (inst->dst.file == GRF && inst->dst.reg == spill_reg_nr) {
         emit_scratch_write(block, inst, spill_offset);
      }
   }

   invalidate_live_intervals();
}

} /* namespace brw */