/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
24 #include "util/ralloc.h"
25 #include "util/register_allocate.h"
26 #include "vc4_context.h"
/* Shorthand struct qpu_reg initializer naming a physical register file
 * (A or B) and its index: expands to { QPU_MUX_<file>, index }.
 */
#define QPU_R(file, index) { QPU_MUX_##file, index }
/* Table of registers exposed to the register allocator, indexed by RA
 * register number (see vc4_alloc_reg_set()): accumulator entries starting
 * at ACC_INDEX, then the A-file and B-file registers interleaved starting
 * at AB_INDEX (even entries are the A file).
 * NOTE(review): the table's entries were dropped from this extraction —
 * restore them from the upstream file; do not rebuild by hand.
 */
static const struct qpu_reg vc4_regs[] = {
/* Index in vc4_regs[] of the first A/B-file entry; it follows the five
 * accumulator entries (r0-r3 plus r4) that begin at ACC_INDEX.
 */
#define AB_INDEX (ACC_INDEX + 5)
107 vc4_alloc_reg_set(struct vc4_context
*vc4
)
109 assert(vc4_regs
[AB_INDEX
].addr
== 0);
110 assert(vc4_regs
[AB_INDEX
+ 1].addr
== 0);
111 STATIC_ASSERT(ARRAY_SIZE(vc4_regs
) == AB_INDEX
+ 64);
116 vc4
->regs
= ra_alloc_reg_set(vc4
, ARRAY_SIZE(vc4_regs
), true);
118 vc4
->reg_class_any
= ra_alloc_reg_class(vc4
->regs
);
119 vc4
->reg_class_a_or_b_or_acc
= ra_alloc_reg_class(vc4
->regs
);
120 vc4
->reg_class_r4_or_a
= ra_alloc_reg_class(vc4
->regs
);
121 vc4
->reg_class_a
= ra_alloc_reg_class(vc4
->regs
);
122 vc4
->reg_class_r0_r3
= ra_alloc_reg_class(vc4
->regs
);
125 for (uint32_t i
= ACC_INDEX
; i
< ACC_INDEX
+ 4; i
++) {
126 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_r0_r3
, i
);
127 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_a_or_b_or_acc
, i
);
130 /* R4 gets a special class because it can't be written as a general
131 * purpose register. (it's TMU_NOSWAP as a write address).
133 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_r4_or_a
, ACC_INDEX
+ 4);
136 for (uint32_t i
= AB_INDEX
; i
< AB_INDEX
+ 64; i
++) {
137 /* Reserve ra31/rb31 for spilling fixup_raddr_conflict() in
140 if (vc4_regs
[i
].addr
== 31)
143 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_any
, i
);
144 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_a_or_b_or_acc
, i
);
147 if (((i
- AB_INDEX
) & 1) == 0) {
148 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_a
, i
);
149 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_r4_or_a
, i
);
153 ra_set_finalize(vc4
->regs
, NULL
);
/* Pairs a QFILE_TEMP index with its allocation priority (its live-range
 * length, temp_end - temp_start).
 */
struct node_to_temp_map {
        uint32_t temp;
        uint32_t priority;
};

/* qsort() comparator ordering temps by ascending priority (live-range
 * length).  NOTE(review): the unsigned difference narrows to int, relying
 * on the usual two's-complement conversion for large differences.
 */
static int
node_to_temp_priority(const void *in_a, const void *in_b)
{
        const struct node_to_temp_map *a = in_a;
        const struct node_to_temp_map *b = in_b;

        return a->priority - b->priority;
}
/* Per-temp bitmask of the register files a temp may still be allocated in.
 * Temps start with all bits set; instructions incrementally clear the bits
 * of files they can't use.  NOTE(review): (1 << 3) is unused here — this
 * matches the visible defines but confirm against upstream.
 */
#define CLASS_BIT_A (1 << 0)
#define CLASS_BIT_B (1 << 1)
#define CLASS_BIT_R4 (1 << 2)
#define CLASS_BIT_R0_R3 (1 << 4)
176 * Returns a mapping from QFILE_TEMP indices to struct qpu_regs.
178 * The return value should be freed by the caller.
181 vc4_register_allocate(struct vc4_context
*vc4
, struct vc4_compile
*c
)
183 struct node_to_temp_map map
[c
->num_temps
];
184 uint32_t temp_to_node
[c
->num_temps
];
185 uint8_t class_bits
[c
->num_temps
];
186 struct qpu_reg
*temp_registers
= calloc(c
->num_temps
,
187 sizeof(*temp_registers
));
189 /* If things aren't ever written (undefined values), just read from
192 for (uint32_t i
= 0; i
< c
->num_temps
; i
++)
193 temp_registers
[i
] = qpu_rn(0);
195 vc4_alloc_reg_set(vc4
);
197 struct ra_graph
*g
= ra_alloc_interference_graph(vc4
->regs
,
200 /* Compute the live ranges so we can figure out interference. */
201 qir_calculate_live_intervals(c
);
203 for (uint32_t i
= 0; i
< c
->num_temps
; i
++) {
205 map
[i
].priority
= c
->temp_end
[i
] - c
->temp_start
[i
];
207 qsort(map
, c
->num_temps
, sizeof(map
[0]), node_to_temp_priority
);
208 for (uint32_t i
= 0; i
< c
->num_temps
; i
++) {
209 temp_to_node
[map
[i
].temp
] = i
;
212 /* Figure out our register classes and preallocated registers. We
213 * start with any temp being able to be in any file, then instructions
214 * incrementally remove bits that the temp definitely can't be in.
217 CLASS_BIT_A
| CLASS_BIT_B
| CLASS_BIT_R4
| CLASS_BIT_R0_R3
,
221 qir_for_each_inst_inorder(inst
, c
) {
222 if (qir_writes_r4(inst
)) {
223 /* This instruction writes r4 (and optionally moves
224 * its result to a temp), so nothing else can be
225 * stored in r4 across it.
227 for (int i
= 0; i
< c
->num_temps
; i
++) {
228 if (c
->temp_start
[i
] < ip
&& c
->temp_end
[i
] > ip
)
229 class_bits
[i
] &= ~CLASS_BIT_R4
;
232 /* R4 can't be written as a general purpose
233 * register. (it's TMU_NOSWAP as a write address).
235 if (inst
->dst
.file
== QFILE_TEMP
)
236 class_bits
[inst
->dst
.index
] &= ~CLASS_BIT_R4
;
241 ra_set_node_reg(g
, temp_to_node
[inst
->dst
.index
],
242 AB_INDEX
+ QPU_R_FRAG_PAYLOAD_ZW
* 2 + 1);
246 ra_set_node_reg(g
, temp_to_node
[inst
->dst
.index
],
247 AB_INDEX
+ QPU_R_FRAG_PAYLOAD_ZW
* 2);
251 assert(inst
->src
[0].file
== QFILE_TEMP
);
252 class_bits
[inst
->src
[0].index
] &= CLASS_BIT_R0_R3
;
259 if (inst
->dst
.pack
&& !qir_is_mul(inst
)) {
260 /* The non-MUL pack flags require an A-file dst
263 class_bits
[inst
->dst
.index
] &= CLASS_BIT_A
;
266 /* Apply restrictions for src unpacks. The integer unpacks
267 * can only be done from regfile A, while float unpacks can be
270 for (int i
= 0; i
< qir_get_op_nsrc(inst
->op
); i
++) {
271 if (inst
->src
[i
].file
== QFILE_TEMP
&&
273 if (qir_is_float_input(inst
)) {
274 class_bits
[inst
->src
[i
].index
] &=
275 CLASS_BIT_A
| CLASS_BIT_R4
;
277 class_bits
[inst
->src
[i
].index
] &=
286 for (uint32_t i
= 0; i
< c
->num_temps
; i
++) {
287 int node
= temp_to_node
[i
];
289 switch (class_bits
[i
]) {
290 case CLASS_BIT_A
| CLASS_BIT_B
| CLASS_BIT_R4
| CLASS_BIT_R0_R3
:
291 ra_set_node_class(g
, node
, vc4
->reg_class_any
);
293 case CLASS_BIT_A
| CLASS_BIT_B
| CLASS_BIT_R0_R3
:
294 ra_set_node_class(g
, node
, vc4
->reg_class_a_or_b_or_acc
);
296 case CLASS_BIT_A
| CLASS_BIT_R4
:
297 ra_set_node_class(g
, node
, vc4
->reg_class_r4_or_a
);
300 ra_set_node_class(g
, node
, vc4
->reg_class_a
);
302 case CLASS_BIT_R0_R3
:
303 ra_set_node_class(g
, node
, vc4
->reg_class_r0_r3
);
306 fprintf(stderr
, "temp %d: bad class bits: 0x%x\n",
313 for (uint32_t i
= 0; i
< c
->num_temps
; i
++) {
314 for (uint32_t j
= i
+ 1; j
< c
->num_temps
; j
++) {
315 if (!(c
->temp_start
[i
] >= c
->temp_end
[j
] ||
316 c
->temp_start
[j
] >= c
->temp_end
[i
])) {
317 ra_add_node_interference(g
,
324 bool ok
= ra_allocate(g
);
326 fprintf(stderr
, "Failed to register allocate:\n");
332 for (uint32_t i
= 0; i
< c
->num_temps
; i
++) {
333 temp_registers
[i
] = vc4_regs
[ra_get_node_reg(g
, temp_to_node
[i
])];
335 /* If the value's never used, just write to the NOP register
336 * for clarity in debug output.
338 if (c
->temp_start
[i
] == c
->temp_end
[i
])
339 temp_registers
[i
] = qpu_ra(QPU_W_NOP
);
344 return temp_registers
;