/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/register_allocate.h"
#include "brw_vec4.h"
#include "brw_cfg.h"

using namespace brw;

namespace brw {

static void
assign(unsigned int *reg_hw_locations, backend_reg *reg)
{
   if (reg->file == VGRF) {
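      /* Fold the byte offset into the hardware register number.  For
       * example, with REG_SIZE == 32, a reference to VGRF 5 at byte offset
       * 40, with VGRF 5 mapped to hardware GRF 20, becomes GRF 21 at a
       * residual offset of 8 bytes.
       */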
      reg->nr = reg_hw_locations[reg->nr] + reg->offset / REG_SIZE;
      reg->offset %= REG_SIZE;
   }
}

bool
vec4_visitor::reg_allocate_trivial()
{
   unsigned int hw_reg_mapping[this->alloc.count];
   bool virtual_grf_used[this->alloc.count];
   int next;

   /* Calculate which virtual GRFs are actually in use after whatever
    * optimization passes have occurred.
    */
   for (unsigned i = 0; i < this->alloc.count; i++) {
      virtual_grf_used[i] = false;
   }

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == VGRF)
         virtual_grf_used[inst->dst.nr] = true;

      for (unsigned i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF)
            virtual_grf_used[inst->src[i].nr] = true;
      }
   }
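
   /* Pack the virtual GRFs that are still in use one after another,
    * starting right after the shader payload.
    */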
   hw_reg_mapping[0] = this->first_non_payload_grf;
   next = hw_reg_mapping[0] + this->alloc.sizes[0];
   for (unsigned i = 1; i < this->alloc.count; i++) {
      if (virtual_grf_used[i]) {
         hw_reg_mapping[i] = next;
         next += this->alloc.sizes[i];
      }
   }
   prog_data->total_grf = next;

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      assign(hw_reg_mapping, &inst->dst);
      assign(hw_reg_mapping, &inst->src[0]);
      assign(hw_reg_mapping, &inst->src[1]);
      assign(hw_reg_mapping, &inst->src[2]);
   }

   if (prog_data->total_grf > max_grf) {
      fail("Ran out of regs on trivial allocator (%d/%d)\n",
           prog_data->total_grf, max_grf);
      return false;
   }

   return true;
}

extern "C" void
brw_vec4_alloc_reg_set(struct brw_compiler *compiler)
{
   int base_reg_count =
      compiler->devinfo->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF;

   /* After running split_virtual_grfs(), almost all VGRFs will be of size 1.
    * SEND-from-GRF sources cannot be split, so we also need classes for each
    * potential message length.
    */
   const int class_count = MAX_VGRF_SIZE;
   int class_sizes[MAX_VGRF_SIZE];

   for (int i = 0; i < class_count; i++)
      class_sizes[i] = i + 1;

   /* Compute the total number of registers across all classes. */
   int ra_reg_count = 0;
   for (int i = 0; i < class_count; i++) {
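      /* A register of size s can start at any GRF that leaves room for s
       * contiguous GRFs, so each class contributes base_reg_count - (s - 1)
       * registers to the set.
       */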
      ra_reg_count += base_reg_count - (class_sizes[i] - 1);
   }

   ralloc_free(compiler->vec4_reg_set.ra_reg_to_grf);
   compiler->vec4_reg_set.ra_reg_to_grf =
      ralloc_array(compiler, uint8_t, ra_reg_count);
   ralloc_free(compiler->vec4_reg_set.regs);
   compiler->vec4_reg_set.regs = ra_alloc_reg_set(compiler, ra_reg_count, false);
   if (compiler->devinfo->gen >= 6)
      ra_set_allocate_round_robin(compiler->vec4_reg_set.regs);
   ralloc_free(compiler->vec4_reg_set.classes);
   compiler->vec4_reg_set.classes = ralloc_array(compiler, int, class_count);

   /* Now, add the registers to their classes, and add the conflicts
    * between them and the base GRF registers (and also each other).
    */
   int reg = 0;
   unsigned *q_values[MAX_VGRF_SIZE];
   for (int i = 0; i < class_count; i++) {
      int class_reg_count = base_reg_count - (class_sizes[i] - 1);
      compiler->vec4_reg_set.classes[i] =
         ra_alloc_reg_class(compiler->vec4_reg_set.regs);

      q_values[i] = new unsigned[MAX_VGRF_SIZE];

      for (int j = 0; j < class_reg_count; j++) {
         ra_class_add_reg(compiler->vec4_reg_set.regs,
                          compiler->vec4_reg_set.classes[i], reg);

         compiler->vec4_reg_set.ra_reg_to_grf[reg] = j;

         for (int base_reg = j;
              base_reg < j + class_sizes[i];
              base_reg++) {
            ra_add_reg_conflict(compiler->vec4_reg_set.regs, base_reg, reg);
         }

         reg++;
      }

      for (int j = 0; j < class_count; j++) {
         /* Calculate the q values manually because the algorithm used by
          * ra_set_finalize() to do it has higher complexity affecting the
          * start-up time of some applications.  q(i, j) is just the maximum
          * number of registers from class i a register from class j can
          * conflict with.
          */
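         /* For example, for classes of sizes 2 and 3, q = 2 + 3 - 1 = 4:
          * a size-2 register overlapping a given size-3 register can start
          * at any of four base GRFs.
          */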
         q_values[i][j] = class_sizes[i] + class_sizes[j] - 1;
      }
   }
   assert(reg == ra_reg_count);

   for (int reg = 0; reg < base_reg_count; reg++)
      ra_make_reg_conflicts_transitive(compiler->vec4_reg_set.regs, reg);

   ra_set_finalize(compiler->vec4_reg_set.regs, q_values);

   for (int i = 0; i < MAX_VGRF_SIZE; i++)
      delete[] q_values[i];
}

void
vec4_visitor::setup_payload_interference(struct ra_graph *g,
                                         int first_payload_node,
                                         int reg_node_count)
{
   int payload_node_count = this->first_non_payload_grf;

   for (int i = 0; i < payload_node_count; i++) {
      /* Mark each payload reg node as being allocated to its physical
       * register.
       *
       * The alternative would be to have per-physical register classes,
       * which would just be silly.
       */
      ra_set_node_reg(g, first_payload_node + i, i);

      /* For now, just mark each payload node as interfering with every other
       * node to be allocated.
       */
      for (int j = 0; j < reg_node_count; j++) {
         ra_add_node_interference(g, first_payload_node + i, j);
      }
   }
}

bool
vec4_visitor::reg_allocate()
{
   unsigned int hw_reg_mapping[alloc.count];
   int payload_reg_count = this->first_non_payload_grf;

   /* Using the trivial allocator can be useful in debugging undefined
    * register access as a result of broken optimization passes.
    */
   if (0)
      return reg_allocate_trivial();

   const vec4_live_variables &live = live_analysis.require();
   int node_count = alloc.count;
   int first_payload_node = node_count;
   node_count += payload_reg_count;
   struct ra_graph *g =
      ra_alloc_interference_graph(compiler->vec4_reg_set.regs, node_count);

   for (unsigned i = 0; i < alloc.count; i++) {
      int size = this->alloc.sizes[i];
      assert(size >= 1 && size <= MAX_VGRF_SIZE);
      ra_set_node_class(g, i, compiler->vec4_reg_set.classes[size - 1]);

      for (unsigned j = 0; j < i; j++) {
         if (live.vgrfs_interfere(i, j)) {
            ra_add_node_interference(g, i, j);
         }
      }
   }

   /* Certain instructions can't safely use the same register for their
    * sources and destination.  Add interference.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == VGRF && inst->has_source_and_destination_hazard()) {
         for (unsigned i = 0; i < 3; i++) {
            if (inst->src[i].file == VGRF) {
               ra_add_node_interference(g, inst->dst.nr, inst->src[i].nr);
            }
         }
      }
   }

   setup_payload_interference(g, first_payload_node, node_count);

   if (!ra_allocate(g)) {
      /* Failed to allocate registers.  Spill a reg, and the caller will
       * loop back into here to try again.
       */
      int reg = choose_spill_reg(g);
      if (this->no_spills) {
         fail("Failure to register allocate.  Reduce number of live "
              "values to avoid this.");
      } else if (reg == -1) {
         fail("no register to spill\n");
      } else {
         spill_reg(reg);
      }
      ralloc_free(g);
      return false;
   }

   /* Get the chosen virtual registers for each node, and map virtual
    * regs in the register classes back down to real hardware reg
    * numbers.
    */
   prog_data->total_grf = payload_reg_count;
   for (unsigned i = 0; i < alloc.count; i++) {
      int reg = ra_get_node_reg(g, i);

      hw_reg_mapping[i] = compiler->vec4_reg_set.ra_reg_to_grf[reg];
      prog_data->total_grf = MAX2(prog_data->total_grf,
                                  hw_reg_mapping[i] + alloc.sizes[i]);
   }

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      assign(hw_reg_mapping, &inst->dst);
      assign(hw_reg_mapping, &inst->src[0]);
      assign(hw_reg_mapping, &inst->src[1]);
      assign(hw_reg_mapping, &inst->src[2]);
   }

   ralloc_free(g);

   return true;
}

/**
 * When we decide to spill a register, instead of blindly spilling every use,
 * save unspills when the spill register is used (read) in consecutive
 * instructions.  This can potentially save a bunch of unspills that would
 * have very little impact in register allocation anyway.
 *
 * Notice that we need to account for this behavior when spilling a register
 * and when evaluating spilling costs.  This function is designed so it can
 * be called from both places and avoid repeating the logic.
 *
 *  - When we call this function from spill_reg(), we pass in scratch_reg the
 *    actual unspill/spill register that we want to reuse in the current
 *    instruction.
 *
 *  - When we call this from evaluate_spill_costs(), we pass the register for
 *    which we are evaluating spilling costs.
 *
 * In either case, we check if the previous instructions read scratch_reg until
 * we find one that writes to it with a compatible mask or does not read/write
 * scratch_reg at all.
 */
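/* For example, if a spilled vec4 is read by several consecutive
 * instructions, only the first read needs an unspill; later reads can keep
 * using the scratch register as long as nothing in between writes it with
 * an incompatible mask.
 */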
static bool
can_use_scratch_for_source(const vec4_instruction *inst, unsigned i,
                           unsigned scratch_reg)
{
   assert(inst->src[i].file == VGRF);
   bool prev_inst_read_scratch_reg = false;

   /* See if any previous source in the same instruction reads scratch_reg */
   for (unsigned n = 0; n < i; n++) {
      if (inst->src[n].file == VGRF && inst->src[n].nr == scratch_reg)
         prev_inst_read_scratch_reg = true;
   }

   /* Now check if previous instructions read/write scratch_reg */
   for (vec4_instruction *prev_inst = (vec4_instruction *) inst->prev;
        !prev_inst->is_head_sentinel();
        prev_inst = (vec4_instruction *) prev_inst->prev) {

      /* If the previous instruction writes to scratch_reg then we can reuse
       * it if the write is not conditional and the channels we write are
       * compatible with our read mask.
       */
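      /* e.g., an unpredicated write with writemask .xy satisfies a source
       * read swizzled .xxyy, but not one whose swizzle also reads .z.
       */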
      if (prev_inst->dst.file == VGRF && prev_inst->dst.nr == scratch_reg) {
         return (!prev_inst->predicate || prev_inst->opcode == BRW_OPCODE_SEL) &&
                (brw_mask_for_swizzle(inst->src[i].swizzle) &
                 ~prev_inst->dst.writemask) == 0;
      }

      /* Skip scratch read/writes so that instructions generated by spilling
       * other registers (that won't read/write scratch_reg) do not stop us
       * from reusing scratch_reg for this instruction.
       */
      if (prev_inst->opcode == SHADER_OPCODE_GEN4_SCRATCH_WRITE ||
          prev_inst->opcode == SHADER_OPCODE_GEN4_SCRATCH_READ)
         continue;

      /* If the previous instruction does not write to scratch_reg, then
       * check if it reads it.
       */
      int n;
      for (n = 0; n < 3; n++) {
         if (prev_inst->src[n].file == VGRF &&
             prev_inst->src[n].nr == scratch_reg) {
            prev_inst_read_scratch_reg = true;
            break;
         }
      }

      if (n == 3) {
         /* The previous instruction does not read scratch_reg.  At this
          * point, if no previous instruction has read scratch_reg it means
          * that we will need to unspill it here and we can't reuse it (so we
          * return false).  Otherwise, if we found at least one consecutive
          * instruction that read scratch_reg, then we know that we got here
          * from evaluate_spill_costs (since for the spill_reg path any block
          * of consecutive instructions using scratch_reg must start with a
          * write to that register, so we would've exited the loop in the
          * check for the write that we have at the start of this loop), and
          * in that case it means that we found the point at which the
          * scratch_reg would be unspilled.  Since we always unspill a full
          * vec4, it means that we have all the channels available and we can
          * just return true to signal that we can reuse the register in the
          * current instruction.
          */
         return prev_inst_read_scratch_reg;
      }
   }

   return prev_inst_read_scratch_reg;
}

static float
spill_cost_for_type(enum brw_reg_type type)
{
   /* Spilling of a 64-bit register involves emitting 2 32-bit scratch
    * messages plus the 64b/32b shuffling code.
    */
   return type_sz(type) == 8 ? 2.25f : 1.0f;
}

void
vec4_visitor::evaluate_spill_costs(float *spill_costs, bool *no_spill)
{
   float loop_scale = 1.0;

   unsigned *reg_type_size = (unsigned *)
      ralloc_size(NULL, this->alloc.count * sizeof(unsigned));

   for (unsigned i = 0; i < this->alloc.count; i++) {
      spill_costs[i] = 0.0;
      no_spill[i] = alloc.sizes[i] != 1 && alloc.sizes[i] != 2;
      reg_type_size[i] = 0;
   }

   /* Calculate costs for spilling nodes.  Call it a cost of 1 per
    * spill/unspill we'll have to do, and guess that the insides of
    * loops run 10 times.
    */
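   /* Under that guess, a use nested two loops deep costs 100x a use outside
    * any loop (loop_scale is multiplied by 10 at each DO and divided by 10
    * at each WHILE below).
    */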
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF && !no_spill[inst->src[i].nr]) {
            /* We will only unspill src[i] if it wasn't unspilled for the
             * previous instruction, in which case we'll just reuse the
             * scratch reg for this instruction.
             */
            if (!can_use_scratch_for_source(inst, i, inst->src[i].nr)) {
               spill_costs[inst->src[i].nr] +=
                  loop_scale * spill_cost_for_type(inst->src[i].type);
               if (inst->src[i].reladdr ||
                   inst->src[i].offset >= REG_SIZE)
                  no_spill[inst->src[i].nr] = true;

               /* We don't support unspills of partial DF reads.
                *
                * Our 64-bit unspills are implemented with two 32-bit scratch
                * messages, each one reading data for both SIMD4x2 threads
                * that we then need to shuffle into correct 64-bit data.
                * Ensure that we are reading data for both threads.
                */
               if (type_sz(inst->src[i].type) == 8 && inst->exec_size != 8)
                  no_spill[inst->src[i].nr] = true;
            }

            /* We can't spill registers that mix 32-bit and 64-bit access
             * (that contain 64-bit data that is operated on via 32-bit
             * instructions).
             */
            unsigned type_size = type_sz(inst->src[i].type);
            if (reg_type_size[inst->src[i].nr] == 0)
               reg_type_size[inst->src[i].nr] = type_size;
            else if (reg_type_size[inst->src[i].nr] != type_size)
               no_spill[inst->src[i].nr] = true;
         }
      }

      if (inst->dst.file == VGRF && !no_spill[inst->dst.nr]) {
         spill_costs[inst->dst.nr] +=
            loop_scale * spill_cost_for_type(inst->dst.type);
         if (inst->dst.reladdr || inst->dst.offset >= REG_SIZE)
            no_spill[inst->dst.nr] = true;

         /* We don't support spills of partial DF writes.
          *
          * Our 64-bit spills are implemented with two 32-bit scratch
          * messages, each one writing data for both SIMD4x2 threads.
          * Ensure that we are writing data for both threads.
          */
         if (type_sz(inst->dst.type) == 8 && inst->exec_size != 8)
            no_spill[inst->dst.nr] = true;

         /* We can't spill registers that mix 32-bit and 64-bit access
          * (that contain 64-bit data that is operated on via 32-bit
          * instructions).
          */
         unsigned type_size = type_sz(inst->dst.type);
         if (reg_type_size[inst->dst.nr] == 0)
            reg_type_size[inst->dst.nr] = type_size;
         else if (reg_type_size[inst->dst.nr] != type_size)
            no_spill[inst->dst.nr] = true;
      }

      switch (inst->opcode) {

      case BRW_OPCODE_DO:
         loop_scale *= 10;
         break;

      case BRW_OPCODE_WHILE:
         loop_scale /= 10;
         break;
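
      /* Registers touched by scratch reads/writes were introduced by an
       * earlier spill; spilling them again would just generate spill code
       * for the spill code itself.
       */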
      case SHADER_OPCODE_GEN4_SCRATCH_READ:
      case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
         for (int i = 0; i < 3; i++) {
            if (inst->src[i].file == VGRF)
               no_spill[inst->src[i].nr] = true;
         }
         if (inst->dst.file == VGRF)
            no_spill[inst->dst.nr] = true;
         break;

      default:
         break;
      }
   }

   ralloc_free(reg_type_size);
}

int
vec4_visitor::choose_spill_reg(struct ra_graph *g)
{
   float spill_costs[this->alloc.count];
   bool no_spill[this->alloc.count];

   evaluate_spill_costs(spill_costs, no_spill);

   for (unsigned i = 0; i < this->alloc.count; i++) {
      if (!no_spill[i])
         ra_set_node_spill_cost(g, i, spill_costs[i]);
   }
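
   /* Virtual GRFs flagged no_spill never get a cost assigned, so
    * ra_get_best_spill_node() will not choose them.
    */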
   return ra_get_best_spill_node(g);
}

void
vec4_visitor::spill_reg(unsigned spill_reg_nr)
{
   assert(alloc.sizes[spill_reg_nr] == 1 || alloc.sizes[spill_reg_nr] == 2);
   unsigned spill_offset = last_scratch;
   last_scratch += alloc.sizes[spill_reg_nr];

   /* Generate spill/unspill instructions for the objects being spilled. */
   unsigned scratch_reg = ~0u;
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (unsigned i = 0; i < 3; i++) {
         if (inst->src[i].file == VGRF && inst->src[i].nr == spill_reg_nr) {
            if (scratch_reg == ~0u ||
                !can_use_scratch_for_source(inst, i, scratch_reg)) {
               /* We need to unspill anyway so make sure we read the full
                * vec4 in any case.  This way, the cached register can be
                * reused for consecutive instructions that read different
                * channels of the same vec4.
                */
               scratch_reg = alloc.allocate(alloc.sizes[spill_reg_nr]);
               src_reg temp = inst->src[i];
               temp.nr = scratch_reg;
               temp.offset = 0;
               temp.swizzle = BRW_SWIZZLE_XYZW;
               emit_scratch_read(block, inst,
                                 dst_reg(temp), inst->src[i], spill_offset);
               temp.offset = inst->src[i].offset;
            }
            assert(scratch_reg != ~0u);
            inst->src[i].nr = scratch_reg;
         }
      }

      if (inst->dst.file == VGRF && inst->dst.nr == spill_reg_nr) {
         emit_scratch_write(block, inst, spill_offset);
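         /* emit_scratch_write() redirects inst->dst to a fresh temporary
          * holding the value just spilled, so inst->dst.nr now names the
          * current scratch copy that later reads may reuse.
          */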
         scratch_reg = inst->dst.nr;
      }
   }

   invalidate_analysis(DEPENDENCY_INSTRUCTIONS | DEPENDENCY_VARIABLES);
}

} /* namespace brw */