/*
2 * Copyright © 2014 Broadcom
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
24 #include "util/ralloc.h"
25 #include "util/register_allocate.h"
26 #include "vc4_context.h"
/* Shorthand for a struct qpu_reg initializer: the QPU mux for FILE plus
 * the raddr INDEX within that file.
 */
30 #define QPU_R(file, index) { QPU_MUX_##file, index }
/* Table of registers handed to the register allocator; indices into this
 * table are the ra register numbers used throughout this file.
 * NOTE(review): the initializer body is missing from this extraction;
 * the STATIC_ASSERT in vc4_alloc_reg_set() implies AB_INDEX + 64 entries.
 */
32 static const struct qpu_reg vc4_regs
[] = {
/* First regfile A/B entry in the table: the A/B block starts 5 slots
 * after ACC_INDEX (presumably accumulator entries r0-r4 — confirm).
 */
104 #define AB_INDEX (ACC_INDEX + 5)
/*
 * Builds the ra register set and register classes used by
 * vc4_register_allocate(), storing them on the vc4 context.
 *
 * NOTE(review): this extraction is garbled -- the original file's line
 * numbers are fused into the text and several lines (braces, some call
 * arguments) are missing.  Comments below annotate only what is visible.
 */
107 vc4_alloc_reg_set(struct vc4_context
*vc4
)
/* The A/B portion of vc4_regs must start at raddr 0, laid out in
 * (a, b) pairs: the entries at AB_INDEX and AB_INDEX + 1 both have
 * .addr == 0.
 */
109 assert(vc4_regs
[AB_INDEX
].addr
== 0);
110 assert(vc4_regs
[AB_INDEX
+ 1].addr
== 0);
/* 64 A/B entries follow the accumulator entries. */
111 STATIC_ASSERT(ARRAY_SIZE(vc4_regs
) == AB_INDEX
+ 64);
/* One ra register per vc4_regs entry. */
116 vc4
->regs
= ra_alloc_reg_set(vc4
, ARRAY_SIZE(vc4_regs
), true);
118 /* The physical regfiles split us into two classes, with [0] being the
119 * whole space and [1] being the bottom half (for threaded fragment
 * shaders).  NOTE(review): the tail of this comment is missing from
 * the extraction.
 */
122 for (int i
= 0; i
< 2; i
++) {
123 vc4
->reg_class_any
[i
] = ra_alloc_reg_class(vc4
->regs
);
124 vc4
->reg_class_a_or_b
[i
] = ra_alloc_reg_class(vc4
->regs
);
125 vc4
->reg_class_a_or_b_or_acc
[i
] = ra_alloc_reg_class(vc4
->regs
);
126 vc4
->reg_class_r4_or_a
[i
] = ra_alloc_reg_class(vc4
->regs
);
127 vc4
->reg_class_a
[i
] = ra_alloc_reg_class(vc4
->regs
);
/* Class for temps that must land in accumulators r0-r3; a single
 * class, not split per threading mode like the ones above.
 */
129 vc4
->reg_class_r0_r3
= ra_alloc_reg_class(vc4
->regs
);
/* r0-r3 join the accumulator class and both a_or_b_or_acc classes. */
132 for (uint32_t i
= ACC_INDEX
; i
< ACC_INDEX
+ 4; i
++) {
133 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_r0_r3
, i
);
134 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_a_or_b_or_acc
[0], i
);
135 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_a_or_b_or_acc
[1], i
);
138 /* R4 gets a special class because it can't be written as a general
139 * purpose register. (it's TMU_NOSWAP as a write address).
 */
141 for (int i
= 0; i
< 2; i
++) {
/* NOTE(review): the register-index argument of these two calls sits on
 * lines missing from this extraction (presumably ACC_INDEX + 4, i.e.
 * r4) -- confirm against upstream.
 */
142 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_r4_or_a
[i
],
144 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_any
[i
],
/* Populate the regfile A/B classes from the 64 A/B table entries. */
149 for (uint32_t i
= AB_INDEX
; i
< AB_INDEX
+ 64; i
++) {
150 /* Reserve ra14/rb14 for spilling fixup_raddr_conflict() in
 * ... -- NOTE(review): comment tail missing from extraction.
 */
/* NOTE(review): the statement that skips addr 14 (presumably a
 * 'continue') is not visible here.
 */
153 if (vc4_regs
[i
].addr
== 14)
156 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_any
[0], i
);
157 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_a_or_b
[0], i
);
158 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_a_or_b_or_acc
[0], i
);
/* Only the bottom half of each regfile (addr < 16) joins the [1]
 * (threaded) classes.
 */
160 if (vc4_regs
[i
].addr
< 16) {
161 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_any
[1], i
);
162 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_a_or_b
[1], i
);
163 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_a_or_b_or_acc
[1], i
);
/* Even table offsets are regfile A entries, matching the (a, b)
 * interleaving asserted at the top of this function.
 */
168 if (((i
- AB_INDEX
) & 1) == 0) {
169 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_a
[0], i
);
170 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_r4_or_a
[0], i
);
172 if (vc4_regs
[i
].addr
< 16) {
173 ra_class_add_reg(vc4
->regs
,
174 vc4
->reg_class_a
[1], i
);
175 ra_class_add_reg(vc4
->regs
,
176 vc4
->reg_class_r4_or_a
[1], i
);
/* Freeze the register set; no callback data passed to finalize. */
181 ra_set_finalize(vc4
->regs
, NULL
);
/* Pairs a QIR temp index with a sort priority for ra node ordering.
 * NOTE(review): member declarations are missing from this extraction;
 * 'temp' and 'priority' members are referenced by the code below.
 */
184 struct node_to_temp_map
{
/* qsort() comparator: orders node_to_temp_map entries by ascending
 * priority (priority is the live-range length, assigned by the
 * caller as temp_end - temp_start).
 */
190 node_to_temp_priority(const void *in_a
, const void *in_b
)
192 const struct node_to_temp_map
*a
= in_a
;
193 const struct node_to_temp_map
*b
= in_b
;
/* Subtraction is safe here as long as priorities stay well inside
 * the int range (live-range lengths do).
 */
195 return a
->priority
- b
->priority
;
/* Bitmask of register files a temp is still allowed to live in.  Temps
 * start with all bits set and instructions strip bits for files the
 * temp definitely can't use (see vc4_register_allocate()).
 */
198 #define CLASS_BIT_A (1 << 0)
199 #define CLASS_BIT_B (1 << 1)
200 #define CLASS_BIT_R4 (1 << 2)
/* NOTE(review): bit 3 is skipped (R0_R3 is 1 << 4, not 1 << 3) --
 * appears deliberate, but confirm against upstream.
 */
201 #define CLASS_BIT_R0_R3 (1 << 4)
/* NOTE(review): this extraction is garbled -- the original file's line
 * numbers are fused into the text and many lines (braces, case labels,
 * call arguments, return paths) are missing.  Annotations below cover
 * only what is visible.
 */
204 * Returns a mapping from QFILE_TEMP indices to struct qpu_regs.
206 * The return value should be freed by the caller.
 */
209 vc4_register_allocate(struct vc4_context
*vc4
, struct vc4_compile
*c
)
/* Scratch arrays are VLAs sized by the temp count; stack usage grows
 * with shader size.
 */
211 struct node_to_temp_map map
[c
->num_temps
];
212 uint32_t temp_to_node
[c
->num_temps
];
213 uint8_t class_bits
[c
->num_temps
];
/* Output table, one qpu_reg per temp; ownership transfers to the
 * caller.  NOTE(review): the calloc() result is not NULL-checked
 * anywhere visible.
 */
214 struct qpu_reg
*temp_registers
= calloc(c
->num_temps
,
215 sizeof(*temp_registers
));
217 /* If things aren't ever written (undefined values), just read from
 * ... -- NOTE(review): comment tail missing; visibly, every temp
 * defaults to qpu_rn(0) before allocation runs.
 */
220 for (uint32_t i
= 0; i
< c
->num_temps
; i
++)
221 temp_registers
[i
] = qpu_rn(0);
/* Build (or reuse) the shared register set/classes on the context. */
223 vc4_alloc_reg_set(vc4
);
/* NOTE(review): the node-count argument of this call is on a missing
 * line (presumably c->num_temps).
 */
225 struct ra_graph
*g
= ra_alloc_interference_graph(vc4
->regs
,
228 /* Compute the live ranges so we can figure out interference. */
229 qir_calculate_live_intervals(c
);
/* Priority = live-range length.  NOTE(review): the assignment of
 * map[i].temp is on a missing line.
 */
231 for (uint32_t i
= 0; i
< c
->num_temps
; i
++) {
233 map
[i
].priority
= c
->temp_end
[i
] - c
->temp_start
[i
];
/* Order ra nodes by ascending priority, then record which node each
 * temp landed on.
 */
235 qsort(map
, c
->num_temps
, sizeof(map
[0]), node_to_temp_priority
);
236 for (uint32_t i
= 0; i
< c
->num_temps
; i
++) {
237 temp_to_node
[map
[i
].temp
] = i
;
240 /* Figure out our register classes and preallocated registers. We
241 * start with any temp being able to be in any file, then instructions
242 * incrementally remove bits that the temp definitely can't be in.
 */
/* NOTE(review): the statement initializing class_bits[] (likely a
 * memset or loop) is partially missing; these are the "all files
 * allowed" starting bits.
 */
245 CLASS_BIT_A
| CLASS_BIT_B
| CLASS_BIT_R4
| CLASS_BIT_R0_R3
,
/* Walk instructions in order, stripping class bits and pinning payload
 * registers as constraints are discovered.  NOTE(review): the
 * declaration/increment of 'ip' is not visible in this extraction.
 */
249 qir_for_each_inst_inorder(inst
, c
) {
250 if (qir_writes_r4(inst
)) {
251 /* This instruction writes r4 (and optionally moves
252 * its result to a temp), so nothing else can be
253 * stored in r4 across it.
 */
255 for (int i
= 0; i
< c
->num_temps
; i
++) {
256 if (c
->temp_start
[i
] < ip
&& c
->temp_end
[i
] > ip
)
257 class_bits
[i
] &= ~CLASS_BIT_R4
;
260 /* If we're doing a conditional write of something
261 * writing R4 (math, tex results), then make sure that
262 * we store in a temp so that we actually
263 * conditionally move the result.
 */
265 if (inst
->cond
!= QPU_COND_ALWAYS
)
266 class_bits
[inst
->dst
.index
] &= ~CLASS_BIT_R4
;
268 /* R4 can't be written as a general purpose
269 * register. (it's TMU_NOSWAP as a write address).
 */
271 if (inst
->dst
.file
== QFILE_TEMP
)
272 class_bits
[inst
->dst
.index
] &= ~CLASS_BIT_R4
;
/* Pin fragment payload Z/W inputs to fixed A/B payload registers:
 * one destination gets the odd (b) table entry, the other the even
 * (a) one.  NOTE(review): the enclosing case labels are missing.
 */
277 ra_set_node_reg(g
, temp_to_node
[inst
->dst
.index
],
278 AB_INDEX
+ QPU_R_FRAG_PAYLOAD_ZW
* 2 + 1);
282 ra_set_node_reg(g
, temp_to_node
[inst
->dst
.index
],
283 AB_INDEX
+ QPU_R_FRAG_PAYLOAD_ZW
* 2);
/* NOTE(review): the enclosing condition is missing -- visibly, this
 * source operand is restricted to accumulators r0-r3.
 */
287 assert(inst
->src
[0].file
== QFILE_TEMP
);
288 class_bits
[inst
->src
[0].index
] &= CLASS_BIT_R0_R3
;
292 /* All accumulators are invalidated across a thread
 * ... -- NOTE(review): comment tail missing (presumably "switch").
 */
295 for (int i
= 0; i
< c
->num_temps
; i
++) {
296 if (c
->temp_start
[i
] < ip
&& c
->temp_end
[i
] > ip
)
/* NOTE(review): the rest of the stripped mask (beyond R0_R3) is on a
 * missing line.
 */
297 class_bits
[i
] &= ~(CLASS_BIT_R0_R3
|
306 if (inst
->dst
.pack
&& !qir_is_mul(inst
)) {
307 /* The non-MUL pack flags require an A-file dst
 * ... -- NOTE(review): comment tail missing.
 */
310 class_bits
[inst
->dst
.index
] &= CLASS_BIT_A
;
313 /* Apply restrictions for src unpacks. The integer unpacks
314 * can only be done from regfile A, while float unpacks can be
 * ... -- NOTE(review): comment tail missing (float unpacks visibly
 * keep A and R4).
 */
317 for (int i
= 0; i
< qir_get_nsrc(inst
); i
++) {
318 if (inst
->src
[i
].file
== QFILE_TEMP
&&
320 if (qir_is_float_input(inst
)) {
321 class_bits
[inst
->src
[i
].index
] &=
322 CLASS_BIT_A
| CLASS_BIT_R4
;
/* NOTE(review): the integer-unpack mask is on a missing line
 * (regfile A only, per the comment above).
 */
324 class_bits
[inst
->src
[i
].index
] &=
/* Map each temp's surviving class bits onto an ra register class;
 * index [c->fs_threaded] selects the threaded (bottom-half) variant
 * of each class.
 */
333 for (uint32_t i
= 0; i
< c
->num_temps
; i
++) {
334 int node
= temp_to_node
[i
];
336 switch (class_bits
[i
]) {
337 case CLASS_BIT_A
| CLASS_BIT_B
| CLASS_BIT_R4
| CLASS_BIT_R0_R3
:
338 ra_set_node_class(g
, node
,
339 vc4
->reg_class_any
[c
->fs_threaded
]);
341 case CLASS_BIT_A
| CLASS_BIT_B
:
342 ra_set_node_class(g
, node
,
343 vc4
->reg_class_a_or_b
[c
->fs_threaded
]);
345 case CLASS_BIT_A
| CLASS_BIT_B
| CLASS_BIT_R0_R3
:
346 ra_set_node_class(g
, node
,
347 vc4
->reg_class_a_or_b_or_acc
[c
->fs_threaded
]);
349 case CLASS_BIT_A
| CLASS_BIT_R4
:
350 ra_set_node_class(g
, node
,
351 vc4
->reg_class_r4_or_a
[c
->fs_threaded
]);
/* NOTE(review): the case label for this arm (A-file only, presumably)
 * is on a missing line.
 */
354 ra_set_node_class(g
, node
,
355 vc4
->reg_class_a
[c
->fs_threaded
]);
357 case CLASS_BIT_R0_R3
:
358 ra_set_node_class(g
, node
, vc4
->reg_class_r0_r3
);
362 /* DDX/DDY used across thread switched might get us
 * ... -- NOTE(review): comment tail missing.
 */
/* Fallback for unhandled class bits: in threaded mode, give up on
 * this allocation (free and return -- surrounding lines missing);
 * otherwise report the bad bits below.
 */
365 if (c
->fs_threaded
) {
367 free(temp_registers
);
371 fprintf(stderr
, "temp %d: bad class bits: 0x%x\n",
/* Add an interference edge between every pair of temps whose live
 * ranges overlap.  NOTE(review): the node arguments of
 * ra_add_node_interference() are on missing lines.
 */
378 for (uint32_t i
= 0; i
< c
->num_temps
; i
++) {
379 for (uint32_t j
= i
+ 1; j
< c
->num_temps
; j
++) {
380 if (!(c
->temp_start
[i
] >= c
->temp_end
[j
] ||
381 c
->temp_start
[j
] >= c
->temp_end
[i
])) {
382 ra_add_node_interference(g
,
/* Run the allocator.  On failure a non-threaded shader reports the
 * error and temp_registers is freed; the return path lines are
 * missing from this extraction.
 */
389 bool ok
= ra_allocate(g
);
391 if (!c
->fs_threaded
) {
392 fprintf(stderr
, "Failed to register allocate:\n");
397 free(temp_registers
);
/* Translate the ra result back into one qpu_reg per temp. */
401 for (uint32_t i
= 0; i
< c
->num_temps
; i
++) {
402 temp_registers
[i
] = vc4_regs
[ra_get_node_reg(g
, temp_to_node
[i
])];
404 /* If the value's never used, just write to the NOP register
405 * for clarity in debug output.
 */
407 if (c
->temp_start
[i
] == c
->temp_end
[i
])
408 temp_registers
[i
] = qpu_ra(QPU_W_NOP
);
/* Ownership of temp_registers transfers to the caller (see header
 * comment).
 */
413 return temp_registers
;