/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
24 #include "util/ralloc.h"
25 #include "util/register_allocate.h"
26 #include "vc4_context.h"
/* Build a struct qpu_reg initializer: the mux selects the register file,
 * index selects the register within that file.
 */
30 #define QPU_R(file, index) { QPU_MUX_##file, index }
/* Table of all allocatable QPU registers, indexed by allocator node.
 * NOTE(review): the table body (original lines 33-103) is not visible in
 * this extraction; code is left byte-for-byte as found.
 */
32 static const struct qpu_reg vc4_regs
[] = {
/* Index of the first A/B regfile entry in vc4_regs; the five entries at
 * ACC_INDEX are presumably the accumulators r0-r4 — TODO confirm against
 * the missing table body.
 */
104 #define AB_INDEX (ACC_INDEX + 5)
/* NOTE(review): this chunk is a lossy extraction — original source line
 * numbers are fused into the text and several interior lines (the opening
 * brace, the early-out guard at original lines 112-115, `continue`s and
 * closing braces) are not visible.  Code is left byte-for-byte as found;
 * only comments are added.
 *
 * Builds vc4->regs, the register set shared by all compiles, and carves it
 * into the classes the allocator chooses from: reg_class_any,
 * reg_class_a_or_b_or_acc, reg_class_r4_or_a, reg_class_a and
 * reg_class_r0_r3.
 */
107 vc4_alloc_reg_set(struct vc4_context
*vc4
)
/* The A/B-file span must start at raddr 0 and the table must hold exactly
 * AB_INDEX + 64 entries, or the AB_INDEX arithmetic below is wrong.
 */
109 assert(vc4_regs
[AB_INDEX
].addr
== 0);
110 assert(vc4_regs
[AB_INDEX
+ 1].addr
== 0);
111 STATIC_ASSERT(ARRAY_SIZE(vc4_regs
) == AB_INDEX
+ 64);
/* Allocate the ra register set sized to the whole vc4_regs table. */
116 vc4
->regs
= ra_alloc_reg_set(vc4
, ARRAY_SIZE(vc4_regs
), true);
118 vc4
->reg_class_any
= ra_alloc_reg_class(vc4
->regs
);
119 vc4
->reg_class_a_or_b_or_acc
= ra_alloc_reg_class(vc4
->regs
);
120 vc4
->reg_class_r4_or_a
= ra_alloc_reg_class(vc4
->regs
);
121 vc4
->reg_class_a
= ra_alloc_reg_class(vc4
->regs
);
122 vc4
->reg_class_r0_r3
= ra_alloc_reg_class(vc4
->regs
);
/* Walk every register in the table and add it to each class it may
 * legally serve.
 */
123 for (uint32_t i
= 0; i
< ARRAY_SIZE(vc4_regs
); i
++) {
124 /* Reserve ra31/rb31 for spilling fixup_raddr_conflict() in
/* raddr-31 entries are kept out of every class (loop continues —
 * `continue` line not visible).
 */
127 if (vc4_regs
[i
].addr
== 31)
130 /* R4 can't be written as a general purpose register. (it's
131 * TMU_NOSWAP as a write address).
/* r4 therefore joins only the r4_or_a and "any" classes. */
133 if (vc4_regs
[i
].mux
== QPU_MUX_R4
) {
134 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_r4_or_a
, i
);
135 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_any
, i
);
/* Accumulators r0-r3 additionally form their own class. */
139 if (vc4_regs
[i
].mux
<= QPU_MUX_R3
)
140 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_r0_r3
, i
);
142 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_any
, i
);
143 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_a_or_b_or_acc
, i
);
/* Every second table entry starting at AB_INDEX populates the A-file-only
 * and r4_or_a classes — presumably A/B entries are interleaved with the
 * even offsets being the A file; TODO confirm against the (not visible)
 * vc4_regs table.
 */
146 for (uint32_t i
= AB_INDEX
; i
< AB_INDEX
+ 64; i
+= 2) {
147 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_a
, i
);
148 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_r4_or_a
, i
);
/* Freeze the set so interference graphs can be built from it. */
151 ra_set_finalize(vc4
->regs
, NULL
);
/* Pairing of a QIR temp index with its allocation priority (the length of
 * its live range).  An array of these is qsort()ed so temps are assigned
 * register-allocator nodes in priority order.
 *
 * NOTE(review): the struct body was not visible in this extraction; the
 * fields are reconstructed from their uses below (map[i].temp is assigned
 * a loop index, map[i].priority is assigned temp_end[i] - temp_start[i]).
 */
struct node_to_temp_map {
        uint32_t temp;          /* QFILE_TEMP index */
        uint32_t priority;      /* live-range length */
};

/* qsort() comparator ordering map entries by ascending priority.
 *
 * Fixed: the original returned "a->priority - b->priority".  priority is
 * unsigned, so that subtraction wraps for large differences and its
 * conversion to int is implementation-defined, which can invert the
 * ordering.  An explicit three-way comparison is correct for the full
 * uint32_t range and returns only -1/0/1.
 */
static int
node_to_temp_priority(const void *in_a, const void *in_b)
{
        const struct node_to_temp_map *a = in_a;
        const struct node_to_temp_map *b = in_b;

        return (a->priority > b->priority) - (a->priority < b->priority);
}
/* Per-temp bitmask of the register files a temp may still be assigned to;
 * instruction constraints clear bits as the program is scanned.  Bit 3 is
 * skipped — the reason is not visible in this chunk.
 */
168 #define CLASS_BIT_A (1 << 0)
169 #define CLASS_BIT_B_OR_ACC (1 << 1)
170 #define CLASS_BIT_R4 (1 << 2)
171 #define CLASS_BIT_R0_R3 (1 << 4)
/* NOTE(review): lossy extraction — original source line numbers are fused
 * into the text and many interior lines (the return type, braces, some
 * conditions and call arguments) are not visible.  Code is left
 * byte-for-byte as found; only comments are added.
 *
 * Performs register allocation for a compiled QIR program: builds an
 * interference graph over the temps, constrains each temp's register class
 * from the instructions that touch it, runs the allocator, and returns a
 * heap-allocated array mapping QFILE_TEMP indices to struct qpu_regs.
 */
174 * Returns a mapping from QFILE_TEMP indices to struct qpu_regs.
176 * The return value should be freed by the caller.
179 vc4_register_allocate(struct vc4_context
*vc4
, struct vc4_compile
*c
)
/* Per-temp scratch arrays — VLAs sized by c->num_temps. */
181 struct node_to_temp_map map
[c
->num_temps
];
182 uint32_t temp_to_node
[c
->num_temps
];
183 uint8_t class_bits
[c
->num_temps
];
/* The returned mapping; calloc zero-initializes it.  Ownership transfers
 * to the caller, which must free() it.
 */
184 struct qpu_reg
*temp_registers
= calloc(c
->num_temps
,
185 sizeof(*temp_registers
));
187 /* If things aren't ever written (undefined values), just read from
/* Default every temp to accumulator r0. */
190 for (uint32_t i
= 0; i
< c
->num_temps
; i
++)
191 temp_registers
[i
] = qpu_rn(0);
/* Make sure the shared register set and classes exist before building
 * the graph.
 */
193 vc4_alloc_reg_set(vc4
);
/* Interference graph over vc4->regs; the node-count argument (original
 * lines 196-197) is not visible here.
 */
195 struct ra_graph
*g
= ra_alloc_interference_graph(vc4
->regs
,
198 /* Compute the live ranges so we can figure out interference. */
199 qir_calculate_live_intervals(c
);
/* A temp's priority is the length of its live range.  The companion
 * map[i].temp assignment (original line 202) is not visible in this
 * chunk.
 */
201 for (uint32_t i
= 0; i
< c
->num_temps
; i
++) {
203 map
[i
].priority
= c
->temp_end
[i
] - c
->temp_start
[i
];
/* Sort temps by priority, then derive the temp -> graph-node mapping
 * from the sorted order.
 */
205 qsort(map
, c
->num_temps
, sizeof(map
[0]), node_to_temp_priority
);
206 for (uint32_t i
= 0; i
< c
->num_temps
; i
++) {
207 temp_to_node
[map
[i
].temp
] = i
;
210 /* Figure out our register classes and preallocated registers. We
211 * start with any temp being able to be in any file, then instructions
212 * incrementally remove bits that the temp definitely can't be in.
/* (The class_bits initialization to this full mask — original lines
 * 213-218 — is only partially visible.)
 */
215 CLASS_BIT_A
| CLASS_BIT_B_OR_ACC
| CLASS_BIT_R4
,
/* Scan instructions in order, pruning class bits.  "ip" below is
 * presumably an instruction counter advanced by this loop — its
 * declaration and increment are not visible; TODO confirm.
 */
219 qir_for_each_inst_inorder(inst
, c
) {
220 if (qir_writes_r4(inst
)) {
221 /* This instruction writes r4 (and optionally moves
222 * its result to a temp), so nothing else can be
223 * stored in r4 across it.
/* Any temp live across this instruction loses eligibility for r4. */
225 for (int i
= 0; i
< c
->num_temps
; i
++) {
226 if (c
->temp_start
[i
] < ip
&& c
->temp_end
[i
] > ip
)
227 class_bits
[i
] &= ~CLASS_BIT_R4
;
230 /* R4 can't be written as a general purpose
231 * register. (it's TMU_NOSWAP as a write address).
233 if (inst
->dst
.file
== QFILE_TEMP
)
234 class_bits
[inst
->dst
.index
] &= ~CLASS_BIT_R4
;
/* Preallocate the fragment payload Z/W A/B registers for their
 * producing instructions (the selecting conditions — original lines
 * ~235-243 — are not visible here).
 */
239 ra_set_node_reg(g
, temp_to_node
[inst
->dst
.index
],
240 AB_INDEX
+ QPU_R_FRAG_PAYLOAD_ZW
* 2 + 1);
244 ra_set_node_reg(g
, temp_to_node
[inst
->dst
.index
],
245 AB_INDEX
+ QPU_R_FRAG_PAYLOAD_ZW
* 2);
/* This operand must be a temp; clear its r0-r3 class bit (the guarding
 * condition for this case is not visible).
 */
249 assert(inst
->src
[0].file
== QFILE_TEMP
);
250 class_bits
[inst
->src
[0].index
] &= ~CLASS_BIT_R0_R3
;
257 if (inst
->dst
.pack
&& !qir_is_mul(inst
)) {
258 /* The non-MUL pack flags require an A-file dst
/* Restrict the destination to the A file only. */
261 class_bits
[inst
->dst
.index
] &= CLASS_BIT_A
;
264 /* Apply restrictions for src unpacks. The integer unpacks
265 * can only be done from regfile A, while float unpacks can be
/* (The continuation of the comment above and the second half of the
 * condition at original line 270 are not visible.)
 */
268 for (int i
= 0; i
< qir_get_op_nsrc(inst
->op
); i
++) {
269 if (inst
->src
[i
].file
== QFILE_TEMP
&&
271 if (qir_is_float_input(inst
)) {
272 class_bits
[inst
->src
[i
].index
] &=
273 CLASS_BIT_A
| CLASS_BIT_R4
;
275 class_bits
[inst
->src
[i
].index
] &=
/* Translate each temp's remaining class bits into a concrete allocator
 * class.  The break statements between cases are not visible, but the
 * structure implies no fall-through; the default case reports the bad
 * bit combination (and presumably aborts — not visible).
 */
284 for (uint32_t i
= 0; i
< c
->num_temps
; i
++) {
285 int node
= temp_to_node
[i
];
287 switch (class_bits
[i
]) {
288 case CLASS_BIT_A
| CLASS_BIT_B_OR_ACC
| CLASS_BIT_R4
:
289 ra_set_node_class(g
, node
, vc4
->reg_class_any
);
291 case CLASS_BIT_A
| CLASS_BIT_B_OR_ACC
:
292 ra_set_node_class(g
, node
, vc4
->reg_class_a_or_b_or_acc
);
294 case CLASS_BIT_A
| CLASS_BIT_R4
:
295 ra_set_node_class(g
, node
, vc4
->reg_class_r4_or_a
);
298 ra_set_node_class(g
, node
, vc4
->reg_class_a
);
300 case CLASS_BIT_R0_R3
:
301 ra_set_node_class(g
, node
, vc4
->reg_class_r0_r3
);
304 fprintf(stderr
, "temp %d: bad class bits: 0x%x\n",
/* O(n^2) pairwise pass: two temps interfere iff their live ranges
 * overlap.  The arguments to ra_add_node_interference (original lines
 * 316-317) are not visible.
 */
311 for (uint32_t i
= 0; i
< c
->num_temps
; i
++) {
312 for (uint32_t j
= i
+ 1; j
< c
->num_temps
; j
++) {
313 if (!(c
->temp_start
[i
] >= c
->temp_end
[j
] ||
314 c
->temp_start
[j
] >= c
->temp_end
[i
])) {
315 ra_add_node_interference(g
,
/* Run the allocator; on failure print a diagnostic (the remainder of the
 * failure path — original lines 325-327 — is not visible).
 */
322 bool ok
= ra_allocate(g
);
324 fprintf(stderr
, "Failed to register allocate:\n");
/* Translate allocated nodes back into concrete qpu_regs. */
329 for (uint32_t i
= 0; i
< c
->num_temps
; i
++) {
330 temp_registers
[i
] = vc4_regs
[ra_get_node_reg(g
, temp_to_node
[i
])];
332 /* If the value's never used, just write to the NOP register
333 * for clarity in debug output.
335 if (c
->temp_start
[i
] == c
->temp_end
[i
])
336 temp_registers
[i
] = qpu_ra(QPU_W_NOP
);
/* Caller owns and must free() the returned array. */
341 return temp_registers
;