i965: Only consider fixed_hw_reg in equals() if file is HW_REG/IMM.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_vec4_reg_allocate.cpp
/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "main/macros.h"
#include "util/register_allocate.h"
#include "brw_vec4.h"
#include "brw_vs.h"
#include "brw_cfg.h"

using namespace brw;

namespace brw {

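/* Point @reg, if it is a virtual GRF, at the hardware register chosen for it
 * in @reg_hw_locations, folding reg_offset into the base register number.
 */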
static void
assign(unsigned int *reg_hw_locations, backend_reg *reg)
{
   if (reg->file == GRF) {
      reg->reg = reg_hw_locations[reg->reg] + reg->reg_offset;
      reg->reg_offset = 0;
   }
}

bool
vec4_visitor::reg_allocate_trivial()
{
   unsigned int hw_reg_mapping[this->alloc.count];
   bool virtual_grf_used[this->alloc.count];
   int next;

   /* Calculate which virtual GRFs are actually in use after whatever
    * optimization passes have occurred.
    */
   for (unsigned i = 0; i < this->alloc.count; i++) {
      virtual_grf_used[i] = false;
   }

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == GRF)
         virtual_grf_used[inst->dst.reg] = true;

      for (unsigned i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF)
            virtual_grf_used[inst->src[i].reg] = true;
      }
   }

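   /* Pack the used VGRFs one after another, starting right after the
    * payload registers.
    */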
   hw_reg_mapping[0] = this->first_non_payload_grf;
   next = hw_reg_mapping[0] + this->alloc.sizes[0];
   for (unsigned i = 1; i < this->alloc.count; i++) {
      if (virtual_grf_used[i]) {
         hw_reg_mapping[i] = next;
         next += this->alloc.sizes[i];
      }
   }
   prog_data->total_grf = next;

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      assign(hw_reg_mapping, &inst->dst);
      assign(hw_reg_mapping, &inst->src[0]);
      assign(hw_reg_mapping, &inst->src[1]);
      assign(hw_reg_mapping, &inst->src[2]);
   }

   if (prog_data->total_grf > max_grf) {
      fail("Ran out of regs on trivial allocator (%d/%d)\n",
           prog_data->total_grf, max_grf);
      return false;
   }

   return true;
}

extern "C" void
brw_vec4_alloc_reg_set(struct brw_compiler *compiler)
{
   int base_reg_count =
      compiler->devinfo->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF;

   /* After running split_virtual_grfs(), almost all VGRFs will be of size 1.
    * SEND-from-GRF sources cannot be split, so we also need classes for each
    * potential message length.
    */
   const int class_count = MAX_VGRF_SIZE;
   int class_sizes[MAX_VGRF_SIZE];

   for (int i = 0; i < class_count; i++)
      class_sizes[i] = i + 1;

   /* Compute the total number of registers across all classes. */
   int ra_reg_count = 0;
   for (int i = 0; i < class_count; i++) {
      ra_reg_count += base_reg_count - (class_sizes[i] - 1);
   }
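   /* A class of size s has one register for each possible starting GRF, i.e.
    * base_reg_count - (s - 1) of them.  For illustration (these numbers are
    * only an example): with base_reg_count = 128 and class sizes 1 through 4,
    * ra_reg_count would be 128 + 127 + 126 + 125.
    */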

   ralloc_free(compiler->vec4_reg_set.ra_reg_to_grf);
   compiler->vec4_reg_set.ra_reg_to_grf = ralloc_array(compiler, uint8_t, ra_reg_count);
   ralloc_free(compiler->vec4_reg_set.regs);
   compiler->vec4_reg_set.regs = ra_alloc_reg_set(compiler, ra_reg_count, false);
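   /* Round-robin selection is only enabled on Gen6+, presumably to avoid
    * immediately reusing just-written registers, which tends to help
    * instruction scheduling.
    */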
   if (compiler->devinfo->gen >= 6)
      ra_set_allocate_round_robin(compiler->vec4_reg_set.regs);
   ralloc_free(compiler->vec4_reg_set.classes);
   compiler->vec4_reg_set.classes = ralloc_array(compiler, int, class_count);
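   /* classes[i] holds the register class for VGRFs of size i + 1. */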

   /* Now, add the registers to their classes, and add the conflicts
    * between them and the base GRF registers (and also each other).
    */
   int reg = 0;
   unsigned *q_values[MAX_VGRF_SIZE];
   for (int i = 0; i < class_count; i++) {
      int class_reg_count = base_reg_count - (class_sizes[i] - 1);
      compiler->vec4_reg_set.classes[i] = ra_alloc_reg_class(compiler->vec4_reg_set.regs);

      q_values[i] = new unsigned[MAX_VGRF_SIZE];

      for (int j = 0; j < class_reg_count; j++) {
         ra_class_add_reg(compiler->vec4_reg_set.regs, compiler->vec4_reg_set.classes[i], reg);

         compiler->vec4_reg_set.ra_reg_to_grf[reg] = j;

         for (int base_reg = j;
              base_reg < j + class_sizes[i];
              base_reg++) {
            ra_add_reg_conflict(compiler->vec4_reg_set.regs, base_reg, reg);
         }

         reg++;
      }

      for (int j = 0; j < class_count; j++) {
         /* Calculate the q values manually, since the algorithm that
          * ra_set_finalize() would otherwise use is expensive enough to
          * noticeably affect the start-up time of some applications.
          * q(i, j) is simply the maximum number of registers from class i
          * that a single register from class j can conflict with.
          */
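         /* For example, a size-3 register overlaps at most 2 + 3 - 1 = 4
          * distinct size-2 registers, so q is 4 for that pair of classes.
          */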
         q_values[i][j] = class_sizes[i] + class_sizes[j] - 1;
      }
   }
   assert(reg == ra_reg_count);

   for (int reg = 0; reg < base_reg_count; reg++)
      ra_make_reg_conflicts_transitive(compiler->vec4_reg_set.regs, reg);

   ra_set_finalize(compiler->vec4_reg_set.regs, q_values);

   for (int i = 0; i < MAX_VGRF_SIZE; i++)
      delete[] q_values[i];
}

void
vec4_visitor::setup_payload_interference(struct ra_graph *g,
                                         int first_payload_node,
                                         int reg_node_count)
{
   int payload_node_count = this->first_non_payload_grf;

   for (int i = 0; i < payload_node_count; i++) {
      /* Mark each payload reg node as being allocated to its physical
       * register.
       *
       * The alternative would be to have per-physical register classes,
       * which would just be silly.
       */
      ra_set_node_reg(g, first_payload_node + i, i);

      /* For now, just mark each payload node as interfering with every
       * other node to be allocated.
       */
      for (int j = 0; j < reg_node_count; j++) {
         ra_add_node_interference(g, first_payload_node + i, j);
      }
   }
}

bool
vec4_visitor::reg_allocate()
{
   unsigned int hw_reg_mapping[alloc.count];
   int payload_reg_count = this->first_non_payload_grf;

   /* Using the trivial allocator can be useful in debugging undefined
    * register access as a result of broken optimization passes.
    */
   if (0)
      return reg_allocate_trivial();

   calculate_live_intervals();

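   /* Node numbering: nodes [0, alloc.count) are the virtual GRFs, and the
    * payload_reg_count nodes starting at first_payload_node stand for the
    * fixed payload registers.
    */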
   int node_count = alloc.count;
   int first_payload_node = node_count;
   node_count += payload_reg_count;
   struct ra_graph *g =
      ra_alloc_interference_graph(compiler->vec4_reg_set.regs, node_count);

   for (unsigned i = 0; i < alloc.count; i++) {
      int size = this->alloc.sizes[i];
      assert(size >= 1 && size <= MAX_VGRF_SIZE);
      ra_set_node_class(g, i, compiler->vec4_reg_set.classes[size - 1]);

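      /* Add interference with every lower-numbered VGRF whose live range
       * overlaps this one.
       */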
      for (unsigned j = 0; j < i; j++) {
         if (virtual_grf_interferes(i, j)) {
            ra_add_node_interference(g, i, j);
         }
      }
   }

   setup_payload_interference(g, first_payload_node, node_count);

   if (!ra_allocate(g)) {
      /* Failed to allocate registers. Spill a reg, and the caller will
       * loop back into here to try again.
       */
      int reg = choose_spill_reg(g);
      if (this->no_spills) {
         fail("Failure to register allocate. Reduce number of live "
              "values to avoid this.");
      } else if (reg == -1) {
         fail("no register to spill\n");
      } else {
         spill_reg(reg);
      }
      ralloc_free(g);
      return false;
   }

   /* Get the chosen virtual registers for each node, and map virtual
    * regs in the register classes back down to real hardware reg
    * numbers.
    */
   prog_data->total_grf = payload_reg_count;
   for (unsigned i = 0; i < alloc.count; i++) {
      int reg = ra_get_node_reg(g, i);

      hw_reg_mapping[i] = compiler->vec4_reg_set.ra_reg_to_grf[reg];
      prog_data->total_grf = MAX2(prog_data->total_grf,
                                  hw_reg_mapping[i] + alloc.sizes[i]);
   }

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      assign(hw_reg_mapping, &inst->dst);
      assign(hw_reg_mapping, &inst->src[0]);
      assign(hw_reg_mapping, &inst->src[1]);
      assign(hw_reg_mapping, &inst->src[2]);
   }

   ralloc_free(g);

   return true;
}

void
vec4_visitor::evaluate_spill_costs(float *spill_costs, bool *no_spill)
{
   float loop_scale = 1.0;

   for (unsigned i = 0; i < this->alloc.count; i++) {
      spill_costs[i] = 0.0;
      /* Only size-1 VGRFs can be spilled; see the assert in spill_reg(). */
      no_spill[i] = alloc.sizes[i] != 1;
   }

   /* Calculate costs for spilling nodes. Call it a cost of 1 per
    * spill/unspill we'll have to do, and guess that the insides of
    * loops run 10 times.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF) {
            spill_costs[inst->src[i].reg] += loop_scale;
            if (inst->src[i].reladdr)
               no_spill[inst->src[i].reg] = true;
         }
      }

      if (inst->dst.file == GRF) {
         spill_costs[inst->dst.reg] += loop_scale;
         if (inst->dst.reladdr)
            no_spill[inst->dst.reg] = true;
      }

      switch (inst->opcode) {

      case BRW_OPCODE_DO:
         loop_scale *= 10;
         break;

      case BRW_OPCODE_WHILE:
         loop_scale /= 10;
         break;

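      /* Registers touched by a scratch read or write (including those
       * introduced by a previous round of spilling) must not themselves be
       * spilled, or we could end up generating spill code recursively.
       */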
      case SHADER_OPCODE_GEN4_SCRATCH_READ:
      case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
         for (int i = 0; i < 3; i++) {
            if (inst->src[i].file == GRF)
               no_spill[inst->src[i].reg] = true;
         }
         if (inst->dst.file == GRF)
            no_spill[inst->dst.reg] = true;
         break;

      default:
         break;
      }
   }
}

int
vec4_visitor::choose_spill_reg(struct ra_graph *g)
{
   float spill_costs[this->alloc.count];
   bool no_spill[this->alloc.count];

   evaluate_spill_costs(spill_costs, no_spill);

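   /* Nodes that never get a spill cost set (the no_spill ones) are skipped
    * when ra_get_best_spill_node() picks its candidate.
    */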
   for (unsigned i = 0; i < this->alloc.count; i++) {
      if (!no_spill[i])
         ra_set_node_spill_cost(g, i, spill_costs[i]);
   }

   return ra_get_best_spill_node(g);
}

void
vec4_visitor::spill_reg(int spill_reg_nr)
{
   assert(alloc.sizes[spill_reg_nr] == 1);
   unsigned int spill_offset = last_scratch++;

   /* Generate spill/unspill instructions for the objects being spilled. */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF && inst->src[i].reg == spill_reg_nr) {
            /* Read the value back from scratch into a fresh temporary for
             * each use, so every unspilled value has a minimal live range.
             */
            src_reg spill_reg = inst->src[i];
            inst->src[i].reg = alloc.allocate(1);
            dst_reg temp = dst_reg(inst->src[i]);

            emit_scratch_read(block, inst, temp, spill_reg, spill_offset);
         }
      }

      if (inst->dst.file == GRF && inst->dst.reg == spill_reg_nr) {
         emit_scratch_write(block, inst, spill_offset);
      }
   }

   invalidate_live_intervals();
}

} /* namespace brw */