/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "main/macros.h"
#include "util/register_allocate.h"
#include "brw_vec4.h"
#include "brw_cfg.h"

namespace brw {
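
/* Rewrite one register reference from virtual to hardware numbering:
 * reg_hw_locations[] holds the base hardware GRF assigned to each virtual
 * GRF, and reg_offset selects a GRF within a multi-register allocation.
 */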
static void
assign(unsigned int *reg_hw_locations, backend_reg *reg)
{
   if (reg->file == GRF) {
      reg->reg = reg_hw_locations[reg->reg] + reg->reg_offset;
   }
}

bool
vec4_visitor::reg_allocate_trivial()
{
   unsigned int hw_reg_mapping[this->alloc.count];
   bool virtual_grf_used[this->alloc.count];
   int next;

   /* Calculate which virtual GRFs are actually in use after whatever
    * optimization passes have occurred.
    */
   for (unsigned i = 0; i < this->alloc.count; i++) {
      virtual_grf_used[i] = false;
   }

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      if (inst->dst.file == GRF)
         virtual_grf_used[inst->dst.reg] = true;

      for (unsigned i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF)
            virtual_grf_used[inst->src[i].reg] = true;
      }
   }

   hw_reg_mapping[0] = this->first_non_payload_grf;
   next = hw_reg_mapping[0] + this->alloc.sizes[0];
   for (unsigned i = 1; i < this->alloc.count; i++) {
      if (virtual_grf_used[i]) {
         hw_reg_mapping[i] = next;
         next += this->alloc.sizes[i];
      }
   }
   prog_data->total_grf = next;

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      assign(hw_reg_mapping, &inst->dst);
      assign(hw_reg_mapping, &inst->src[0]);
      assign(hw_reg_mapping, &inst->src[1]);
      assign(hw_reg_mapping, &inst->src[2]);
   }

   if (prog_data->total_grf > max_grf) {
      fail("Ran out of regs on trivial allocator (%d/%d)\n",
           prog_data->total_grf, max_grf);
      return false;
   }

   return true;
}
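
/* Set up the register classes shared by all vec4 compiles.  One ra class
 * exists per possible VGRF size; each ra register in the size-n class
 * represents a run of n contiguous hardware GRFs starting at a particular
 * base GRF, so allocating one node allocates the whole run.
 */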
extern "C" void
brw_vec4_alloc_reg_set(struct brw_compiler *compiler)
{
   int base_reg_count =
      compiler->devinfo->gen >= 7 ? GEN7_MRF_HACK_START : BRW_MAX_GRF;

   /* After running split_virtual_grfs(), almost all VGRFs will be of size 1.
    * SEND-from-GRF sources cannot be split, so we also need classes for each
    * potential message length.
    */
   const int class_count = MAX_VGRF_SIZE;
   int class_sizes[MAX_VGRF_SIZE];

   for (int i = 0; i < class_count; i++)
      class_sizes[i] = i + 1;

   /* Compute the total number of registers across all classes. */
   int ra_reg_count = 0;
   for (int i = 0; i < class_count; i++) {
      ra_reg_count += base_reg_count - (class_sizes[i] - 1);
   }
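   /* For illustration only: if base_reg_count were 128 and MAX_VGRF_SIZE
    * were 4, this would give 128 + 127 + 126 + 125 = 506 ra registers,
    * since a size-n register has base_reg_count - (n - 1) possible bases.
    */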

   ralloc_free(compiler->vec4_reg_set.ra_reg_to_grf);
   compiler->vec4_reg_set.ra_reg_to_grf =
      ralloc_array(compiler, uint8_t, ra_reg_count);
   ralloc_free(compiler->vec4_reg_set.regs);
   compiler->vec4_reg_set.regs = ra_alloc_reg_set(compiler, ra_reg_count);
   if (compiler->devinfo->gen >= 6)
      ra_set_allocate_round_robin(compiler->vec4_reg_set.regs);
   ralloc_free(compiler->vec4_reg_set.classes);
   compiler->vec4_reg_set.classes = ralloc_array(compiler, int, class_count);

   /* Now, add the registers to their classes, and add the conflicts
    * between them and the base GRF registers (and also each other).
    */
   int reg = 0;
   unsigned *q_values[MAX_VGRF_SIZE];
   for (int i = 0; i < class_count; i++) {
      int class_reg_count = base_reg_count - (class_sizes[i] - 1);
      compiler->vec4_reg_set.classes[i] =
         ra_alloc_reg_class(compiler->vec4_reg_set.regs);

      q_values[i] = new unsigned[MAX_VGRF_SIZE];

      for (int j = 0; j < class_reg_count; j++) {
         ra_class_add_reg(compiler->vec4_reg_set.regs,
                          compiler->vec4_reg_set.classes[i], reg);

         compiler->vec4_reg_set.ra_reg_to_grf[reg] = j;

         for (int base_reg = j;
              base_reg < j + class_sizes[i];
              base_reg++) {
            ra_add_transitive_reg_conflict(compiler->vec4_reg_set.regs,
                                           base_reg, reg);
         }

         reg++;
      }

      for (int j = 0; j < class_count; j++) {
         /* Calculate the q values manually because the algorithm used by
          * ra_set_finalize() to do it has higher complexity affecting the
          * start-up time of some applications.  q(i, j) is just the maximum
          * number of registers from class i a register from class j can
          * conflict with.
          */
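         /* For example, a register from a size-2 class covering base GRFs
          * {g, g+1} can conflict with size-3 registers based at g-2, g-1,
          * g or g+1: 3 + 2 - 1 = 4 of them.
          */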
         q_values[i][j] = class_sizes[i] + class_sizes[j] - 1;
      }
   }
   assert(reg == ra_reg_count);

   ra_set_finalize(compiler->vec4_reg_set.regs, q_values);

   for (int i = 0; i < MAX_VGRF_SIZE; i++)
      delete[] q_values[i];
}

void
vec4_visitor::setup_payload_interference(struct ra_graph *g,
                                         int first_payload_node,
                                         int reg_node_count)
{
   int payload_node_count = this->first_non_payload_grf;

   for (int i = 0; i < payload_node_count; i++) {
      /* Mark each payload reg node as being allocated to its physical
       * register.
       *
       * The alternative would be to have per-physical register classes,
       * which would just be silly.
       */
      ra_set_node_reg(g, first_payload_node + i, i);

      /* For now, just mark each payload node as interfering with every other
       * node to be allocated.
       */
      for (int j = 0; j < reg_node_count; j++) {
         ra_add_node_interference(g, first_payload_node + i, j);
      }
   }
}

bool
vec4_visitor::reg_allocate()
{
   unsigned int hw_reg_mapping[alloc.count];
   int payload_reg_count = this->first_non_payload_grf;

   /* Using the trivial allocator can be useful in debugging undefined
    * register access as a result of broken optimization passes.
    */
   if (0)
      return reg_allocate_trivial();

   calculate_live_intervals();

   int node_count = alloc.count;
   int first_payload_node = node_count;
   node_count += payload_reg_count;
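
   /* Node layout: nodes [0, alloc.count) are the virtual GRFs, and nodes
    * [first_payload_node, node_count) stand in for the fixed payload
    * registers at the bottom of the hardware register file.
    */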
   struct ra_graph *g =
      ra_alloc_interference_graph(compiler->vec4_reg_set.regs, node_count);

   for (unsigned i = 0; i < alloc.count; i++) {
      int size = this->alloc.sizes[i];
      assert(size >= 1 && size <= MAX_VGRF_SIZE);
      ra_set_node_class(g, i, compiler->vec4_reg_set.classes[size - 1]);

      for (unsigned j = 0; j < i; j++) {
         if (virtual_grf_interferes(i, j)) {
            ra_add_node_interference(g, i, j);
         }
      }
   }

   setup_payload_interference(g, first_payload_node, node_count);

   if (!ra_allocate(g)) {
      /* Failed to allocate registers.  Spill a reg, and the caller will
       * loop back into here to try again.
       */
      int reg = choose_spill_reg(g);
      if (this->no_spills) {
         fail("Failure to register allocate.  Reduce number of live "
              "values to avoid this.");
      } else if (reg == -1) {
         fail("no register to spill\n");
      } else {
         spill_reg(reg);
      }
      ralloc_free(g);
      return false;
   }

   /* Get the chosen virtual registers for each node, and map virtual
    * regs in the register classes back down to real hardware reg
    * numbers.
    */
   prog_data->total_grf = payload_reg_count;
   for (unsigned i = 0; i < alloc.count; i++) {
      int reg = ra_get_node_reg(g, i);

      hw_reg_mapping[i] = compiler->vec4_reg_set.ra_reg_to_grf[reg];
      prog_data->total_grf = MAX2(prog_data->total_grf,
                                  hw_reg_mapping[i] + alloc.sizes[i]);
   }

   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      assign(hw_reg_mapping, &inst->dst);
      assign(hw_reg_mapping, &inst->src[0]);
      assign(hw_reg_mapping, &inst->src[1]);
      assign(hw_reg_mapping, &inst->src[2]);
   }

   ralloc_free(g);

   return true;
}

void
vec4_visitor::evaluate_spill_costs(float *spill_costs, bool *no_spill)
{
   float loop_scale = 1.0;

   for (unsigned i = 0; i < this->alloc.count; i++) {
      spill_costs[i] = 0.0;
      no_spill[i] = alloc.sizes[i] != 1;
   }
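
   /* Only size-1 VGRFs are spill candidates; spill_reg() below asserts the
    * same invariant before generating scratch accesses.
    */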

   /* Calculate costs for spilling nodes.  Call it a cost of 1 per
    * spill/unspill we'll have to do, and guess that the insides of
    * loops run 10 times.
    */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF) {
            spill_costs[inst->src[i].reg] += loop_scale;
            if (inst->src[i].reladdr)
               no_spill[inst->src[i].reg] = true;
         }
      }

      if (inst->dst.file == GRF) {
         spill_costs[inst->dst.reg] += loop_scale;
         if (inst->dst.reladdr)
            no_spill[inst->dst.reg] = true;
      }
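
      /* Track loop nesting: DO scales subsequent costs up by 10 and WHILE
       * scales them back down, so an access two loops deep costs 100.
       * Scratch reads/writes must never have their operands spilled again.
       */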
      switch (inst->opcode) {

      case BRW_OPCODE_DO:
         loop_scale *= 10;
         break;

      case BRW_OPCODE_WHILE:
         loop_scale /= 10;
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_READ:
      case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
         for (int i = 0; i < 3; i++) {
            if (inst->src[i].file == GRF)
               no_spill[inst->src[i].reg] = true;
         }
         if (inst->dst.file == GRF)
            no_spill[inst->dst.reg] = true;
         break;

      default:
         break;
      }
   }
}

int
vec4_visitor::choose_spill_reg(struct ra_graph *g)
{
   float spill_costs[this->alloc.count];
   bool no_spill[this->alloc.count];

   evaluate_spill_costs(spill_costs, no_spill);

   for (unsigned i = 0; i < this->alloc.count; i++) {
      if (!no_spill[i])
         ra_set_node_spill_cost(g, i, spill_costs[i]);
   }

   return ra_get_best_spill_node(g);
}

void
vec4_visitor::spill_reg(int spill_reg_nr)
{
   assert(alloc.sizes[spill_reg_nr] == 1);
   unsigned int spill_offset = last_scratch++;

   /* Generate spill/unspill instructions for the objects being spilled. */
   foreach_block_and_inst(block, vec4_instruction, inst, cfg) {
      for (unsigned int i = 0; i < 3; i++) {
         if (inst->src[i].file == GRF && inst->src[i].reg == spill_reg_nr) {
            src_reg spill_reg = inst->src[i];
            inst->src[i].reg = alloc.allocate(1);
            dst_reg temp = dst_reg(inst->src[i]);
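
            /* The source now reads a fresh size-1 VGRF, so the unspilled
             * value is live only from the scratch read to this instruction,
             * keeping register pressure low across spills.
             */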
            emit_scratch_read(block, inst, temp, spill_reg, spill_offset);
         }
      }

      if (inst->dst.file == GRF && inst->dst.reg == spill_reg_nr) {
         emit_scratch_write(block, inst, spill_offset);
      }
   }

   invalidate_live_intervals();
}

} /* namespace brw */