/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
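
/**
 * @file brw_vec4_reg_allocate.cpp
 *
 * Register allocation for the vec4 backend: a trivial allocator kept around
 * for debugging, a graph-coloring allocator built on util/register_allocate,
 * and the spill machinery used when coloring fails.
 */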

#include "main/macros.h"
#include "util/register_allocate.h"
#include "brw_vec4.h"
#include "brw_cfg.h"

using namespace brw;

namespace brw {
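
/* Rewrite a VGRF reference in place to the hardware GRF chosen for it,
 * folding the register offset into the base register number.
 */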
static void
assign(unsigned int *reg_hw_locations, backend_reg *reg)
{
   if (reg->file == VGRF) {
      reg->nr = reg_hw_locations[reg->nr] + reg->reg_offset;
      reg->reg_offset = 0;
   }
}

bool
vec4_visitor::reg_allocate_trivial()
{
   unsigned int hw_reg_mapping[this->alloc.count];
   bool virtual_grf_used[this->alloc.count];
   int next;

   /* Calculate which virtual GRFs are actually in use after whatever
    * optimization passes have occurred.
    */
   for (unsigned i = 0; i < this->alloc.count; i++) {
      virtual_grf_used[i] = false;
   }

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == VGRF)
         virtual_grf_used[inst->dst.nr] = true;

      for (unsigned i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF)
            virtual_grf_used[inst->src[i].nr] = true;
      }
   }

   hw_reg_mapping[0] = this->first_non_payload_grf;
   next = hw_reg_mapping[0] + this->alloc.sizes[0];
   for (unsigned i = 1; i < this->alloc.count; i++) {
      if (virtual_grf_used[i]) {
         hw_reg_mapping[i] = next;
         next += this->alloc.sizes[i];
      }
   }
   prog_data->total_grf = next;

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      assign(hw_reg_mapping, &inst->dst);
      assign(hw_reg_mapping, &inst->src[0]);
      assign(hw_reg_mapping, &inst->src[1]);
      assign(hw_reg_mapping, &inst->src[2]);
   }

   if (prog_data->total_grf > max_grf) {
      fail("Ran out of regs on trivial allocator (%d/%d)\n",
           prog_data->total_grf, max_grf);
      return false;
   }

   return true;
}

extern "C" void
brw_vec4_alloc_reg_set(struct brw_compiler *compiler)
{
   int base_reg_count =
      compiler->devinfo->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF;
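
   /* Gen7+ has no architected MRF file; message payloads are built in the
    * top GRFs instead, which is why the allocatable range above is capped at
    * GEN7_MRF_HACK_START rather than at BRW_MAX_GRF.
    */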

   /* After running split_virtual_grfs(), almost all VGRFs will be of size 1.
    * SEND-from-GRF sources cannot be split, so we also need classes for each
    * potential message length.
    */
   const int class_count = MAX_VGRF_SIZE;
   int class_sizes[MAX_VGRF_SIZE];

   for (int i = 0; i < class_count; i++)
      class_sizes[i] = i + 1;

   /* Compute the total number of registers across all classes. */
   int ra_reg_count = 0;
   for (int i = 0; i < class_count; i++) {
      ra_reg_count += base_reg_count - (class_sizes[i] - 1);
   }
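
   /* Each class of size s contributes base_reg_count - (s - 1) ra registers,
    * one per legal base GRF: an s-wide allocation cannot start within the
    * last s - 1 GRFs.
    */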

   ralloc_free(compiler->vec4_reg_set.ra_reg_to_grf);
   compiler->vec4_reg_set.ra_reg_to_grf =
      ralloc_array(compiler, uint8_t, ra_reg_count);
   ralloc_free(compiler->vec4_reg_set.regs);
   compiler->vec4_reg_set.regs = ra_alloc_reg_set(compiler, ra_reg_count, false);
   if (compiler->devinfo->gen >= 6)
      ra_set_allocate_round_robin(compiler->vec4_reg_set.regs);
   ralloc_free(compiler->vec4_reg_set.classes);
   compiler->vec4_reg_set.classes = ralloc_array(compiler, int, class_count);

   /* Now, add the registers to their classes, and add the conflicts
    * between them and the base GRF registers (and also each other).
    */
   int reg = 0;
   unsigned *q_values[MAX_VGRF_SIZE];
   for (int i = 0; i < class_count; i++) {
      int class_reg_count = base_reg_count - (class_sizes[i] - 1);
      compiler->vec4_reg_set.classes[i] =
         ra_alloc_reg_class(compiler->vec4_reg_set.regs);

      q_values[i] = new unsigned[MAX_VGRF_SIZE];

      for (int j = 0; j < class_reg_count; j++) {
         ra_class_add_reg(compiler->vec4_reg_set.regs,
                          compiler->vec4_reg_set.classes[i], reg);

         compiler->vec4_reg_set.ra_reg_to_grf[reg] = j;

         for (int base_reg = j;
              base_reg < j + class_sizes[i];
              base_reg++) {
            ra_add_reg_conflict(compiler->vec4_reg_set.regs, base_reg, reg);
         }

         reg++;
      }

      for (int j = 0; j < class_count; j++) {
         /* Calculate the q values manually because the algorithm used by
          * ra_set_finalize() to do it has higher complexity affecting the
          * start-up time of some applications. q(i, j) is just the maximum
          * number of registers from class i a register from class j can
          * conflict with.
          */
         q_values[i][j] = class_sizes[i] + class_sizes[j] - 1;
      }
   }
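
   /* As an example of the q formula: for class sizes 2 and 3, q = 2 + 3 - 1
    * = 4, since a 3-wide register starting at GRF s overlaps the 2-wide
    * registers starting at s - 1, s, s + 1 and s + 2.
    */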

   assert(reg == ra_reg_count);

   for (int reg = 0; reg < base_reg_count; reg++)
      ra_make_reg_conflicts_transitive(compiler->vec4_reg_set.regs, reg);

   ra_set_finalize(compiler->vec4_reg_set.regs, q_values);

   for (int i = 0; i < MAX_VGRF_SIZE; i++)
      delete[] q_values[i];
}

void
vec4_visitor::setup_payload_interference(struct ra_graph *g,
                                         int first_payload_node,
                                         int reg_node_count)
{
   int payload_node_count = this->first_non_payload_grf;

   for (int i = 0; i < payload_node_count; i++) {
      /* Mark each payload reg node as being allocated to its physical
       * register.
       *
       * The alternative would be to have per-physical register classes,
       * which would just be silly.
       */
      ra_set_node_reg(g, first_payload_node + i, i);

      /* For now, just mark each payload node as interfering with every other
       * node to be allocated.
       */
      for (int j = 0; j < reg_node_count; j++) {
         ra_add_node_interference(g, first_payload_node + i, j);
      }
   }
}

bool
vec4_visitor::reg_allocate()
{
   unsigned int hw_reg_mapping[alloc.count];
   int payload_reg_count = this->first_non_payload_grf;

   /* Using the trivial allocator can be useful in debugging undefined
    * register access as a result of broken optimization passes.
    */
   if (0)
      return reg_allocate_trivial();

   calculate_live_intervals();

   int node_count = alloc.count;
   int first_payload_node = node_count;
   node_count += payload_reg_count;
   struct ra_graph *g =
      ra_alloc_interference_graph(compiler->vec4_reg_set.regs, node_count);

   for (unsigned i = 0; i < alloc.count; i++) {
      int size = this->alloc.sizes[i];
      assert(size >= 1 && size <= MAX_VGRF_SIZE);
      ra_set_node_class(g, i, compiler->vec4_reg_set.classes[size - 1]);
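
      /* Interference is symmetric, so recording each live-range conflict
       * once, against the lower-numbered node, is enough.
       */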
      for (unsigned j = 0; j < i; j++) {
         if (virtual_grf_interferes(i, j)) {
            ra_add_node_interference(g, i, j);
         }
      }
   }

   setup_payload_interference(g, first_payload_node, node_count);

   if (!ra_allocate(g)) {
      /* Failed to allocate registers. Spill a reg, and the caller will
       * loop back into here to try again.
       */
      int reg = choose_spill_reg(g);
      if (this->no_spills) {
         fail("Failure to register allocate. Reduce number of live "
              "values to avoid this.");
      } else if (reg == -1) {
         fail("no register to spill\n");
      } else {
         spill_reg(reg);
      }
      ralloc_free(g);
      return false;
   }

   /* Get the chosen virtual registers for each node, and map virtual
    * regs in the register classes back down to real hardware reg
    * numbers.
    */
   prog_data->total_grf = payload_reg_count;
   for (unsigned i = 0; i < alloc.count; i++) {
      int reg = ra_get_node_reg(g, i);

      hw_reg_mapping[i] = compiler->vec4_reg_set.ra_reg_to_grf[reg];
      prog_data->total_grf = MAX2(prog_data->total_grf,
                                  hw_reg_mapping[i] + alloc.sizes[i]);
   }

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      assign(hw_reg_mapping, &inst->dst);
      assign(hw_reg_mapping, &inst->src[0]);
      assign(hw_reg_mapping, &inst->src[1]);
      assign(hw_reg_mapping, &inst->src[2]);
   }

   ralloc_free(g);

   return true;
}

/**
 * When we decide to spill a register, instead of blindly spilling every use,
 * save unspills when the spill register is used (read) in consecutive
 * instructions. This can potentially save a bunch of unspills that would
 * have very little impact on register allocation anyway.
 *
 * Notice that we need to account for this behavior when spilling a register
 * and when evaluating spilling costs. This function is designed so it can
 * be called from both places and avoid repeating the logic.
 *
 *  - When we call this function from spill_reg(), we pass in scratch_reg the
 *    actual unspill/spill register that we want to reuse in the current
 *    instruction.
 *
 *  - When we call this from evaluate_spill_costs(), we pass the register for
 *    which we are evaluating spilling costs.
 *
 * In either case, we check if the previous instructions read scratch_reg until
 * we find one that writes to it with a compatible mask or does not read/write
 * scratch_reg at all.
 */
static bool
can_use_scratch_for_source(const vec4_instruction *inst, unsigned i,
                           unsigned scratch_reg)
{
   assert(inst->src[i].file == VGRF);
   bool prev_inst_read_scratch_reg = false;

   /* See if any previous source in the same instruction reads scratch_reg. */
   for (unsigned n = 0; n < i; n++) {
      if (inst->src[n].file == VGRF && inst->src[n].nr == scratch_reg)
         prev_inst_read_scratch_reg = true;
   }

   /* Now check if previous instructions read/write scratch_reg. */
   for (vec4_instruction *prev_inst = (vec4_instruction *) inst->prev;
        !prev_inst->is_head_sentinel();
        prev_inst = (vec4_instruction *) prev_inst->prev) {

      /* If the previous instruction writes to scratch_reg then we can reuse
       * it if the write is not conditional and the channels we write are
       * compatible with our read mask.
       */
      if (prev_inst->dst.file == VGRF && prev_inst->dst.nr == scratch_reg) {
         return (!prev_inst->predicate || prev_inst->opcode == BRW_OPCODE_SEL) &&
                (brw_mask_for_swizzle(inst->src[i].swizzle) &
                 ~prev_inst->dst.writemask) == 0;
      }

      /* Skip scratch read/writes so that instructions generated by spilling
       * other registers (that won't read/write scratch_reg) do not stop us
       * from reusing scratch_reg for this instruction.
       */
      if (prev_inst->opcode == SHADER_OPCODE_GEN4_SCRATCH_WRITE ||
          prev_inst->opcode == SHADER_OPCODE_GEN4_SCRATCH_READ)
         continue;

      /* If the previous instruction does not write to scratch_reg, then
       * check if it reads it.
       */
      int n;
      for (n = 0; n < 3; n++) {
         if (prev_inst->src[n].file == VGRF &&
             prev_inst->src[n].nr == scratch_reg) {
            prev_inst_read_scratch_reg = true;
            break;
         }
      }

      if (n == 3) {
         /* The previous instruction does not read scratch_reg. At this point,
          * if no previous instruction has read scratch_reg it means that we
          * will need to unspill it here and we can't reuse it (so we return
          * false). Otherwise, if we found at least one consecutive instruction
          * that read scratch_reg, then we know that we got here from
          * evaluate_spill_costs (since for the spill_reg path any block of
          * consecutive instructions using scratch_reg must start with a write
          * to that register, so we would've exited the loop in the check for
          * the write that we have at the start of this loop), and in that case
          * it means that we found the point at which the scratch_reg would be
          * unspilled. Since we always unspill a full vec4, it means that we
          * have all the channels available and we can just return true to
          * signal that we can reuse the register in the current instruction.
          */
         return prev_inst_read_scratch_reg;
      }
   }

   return prev_inst_read_scratch_reg;
}

void
vec4_visitor::evaluate_spill_costs(float *spill_costs, bool *no_spill)
{
   float loop_scale = 1.0;

   for (unsigned i = 0; i < this->alloc.count; i++) {
      spill_costs[i] = 0.0;
      no_spill[i] = alloc.sizes[i] != 1;
   }
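
   /* Only size-1 VGRFs are spillable: spill_reg() moves exactly one vec4
    * per scratch slot (note the assert there), so anything larger is marked
    * no_spill up front.
    */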

   /* Calculate costs for spilling nodes. Call it a cost of 1 per
    * spill/unspill we'll have to do, and guess that the insides of
    * loops run 10 times.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF) {
            /* We will only unspill src[i] if it wasn't unspilled for the
             * previous instruction, in which case we'll just reuse the
             * scratch reg for this instruction.
             */
            if (!can_use_scratch_for_source(inst, i, inst->src[i].nr)) {
               spill_costs[inst->src[i].nr] += loop_scale;
               if (inst->src[i].reladdr)
                  no_spill[inst->src[i].nr] = true;
            }
         }
      }

      if (inst->dst.file == VGRF) {
         spill_costs[inst->dst.nr] += loop_scale;
         if (inst->dst.reladdr)
            no_spill[inst->dst.nr] = true;
      }

      switch (inst->opcode) {

      case BRW_OPCODE_DO:
         loop_scale *= 10;
         break;

      case BRW_OPCODE_WHILE:
         loop_scale /= 10;
         break;
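
      /* The DO/WHILE scaling above nests, so an access two loops deep is
       * costed at 10 * 10 = 100 times one outside any loop.
       */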
      case SHADER_OPCODE_GEN4_SCRATCH_READ:
      case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
         for (int i = 0; i < 3; i++) {
            if (inst->src[i].file == VGRF)
               no_spill[inst->src[i].nr] = true;
         }
         if (inst->dst.file == VGRF)
            no_spill[inst->dst.nr] = true;
         break;

      default:
         break;
      }
   }
}

int
vec4_visitor::choose_spill_reg(struct ra_graph *g)
{
   float spill_costs[this->alloc.count];
   bool no_spill[this->alloc.count];

   evaluate_spill_costs(spill_costs, no_spill);

   for (unsigned i = 0; i < this->alloc.count; i++) {
      if (!no_spill[i])
         ra_set_node_spill_cost(g, i, spill_costs[i]);
   }

   return ra_get_best_spill_node(g);
}

void
vec4_visitor::spill_reg(int spill_reg_nr)
{
   assert(alloc.sizes[spill_reg_nr] == 1);
   unsigned int spill_offset = last_scratch++;

   /* Generate spill/unspill instructions for the objects being spilled. */
   int scratch_reg = -1;
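
   /* scratch_reg caches the VGRF holding the most recently unspilled or
    * written copy of the spilled value, so consecutive reads can reuse it
    * instead of emitting another scratch read.
    */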

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF && inst->src[i].nr == spill_reg_nr) {
            if (scratch_reg == -1 ||
                !can_use_scratch_for_source(inst, i, scratch_reg)) {
               /* We need to unspill anyway so make sure we read the full
                * vec4 in any case. This way, the cached register can be
                * reused for consecutive instructions that read different
                * channels of the same vec4.
                */
               scratch_reg = alloc.allocate(1);
               src_reg temp = inst->src[i];
               temp.nr = scratch_reg;
               temp.swizzle = BRW_SWIZZLE_XYZW;
               emit_scratch_read(block, inst,
                                 dst_reg(temp), inst->src[i], spill_offset);
            }
            assert(scratch_reg != -1);
            inst->src[i].nr = scratch_reg;
         }
      }

      if (inst->dst.file == VGRF && inst->dst.nr == spill_reg_nr) {
         emit_scratch_write(block, inst, spill_offset);
         scratch_reg = inst->dst.nr;
      }
   }
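
   /* Spilling introduced new VGRFs and scratch messages, so any previously
    * computed liveness information is stale and must be recomputed.
    */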
   invalidate_live_intervals();
}

} /* namespace brw */