Merge remote-tracking branch 'mesa-public/master' into vulkan
[mesa.git] / src / gallium / drivers / vc4 / vc4_register_allocate.c
1 /*
2 * Copyright © 2014 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "util/ralloc.h"
25 #include "util/register_allocate.h"
26 #include "vc4_context.h"
27 #include "vc4_qir.h"
28 #include "vc4_qpu.h"
29
/* Builds a struct qpu_reg initializer naming regfile A or B at the given
 * address.
 */
#define QPU_R(file, index) { QPU_MUX_##file, index }

/* Every physical register the allocator may hand out, in register-set index
 * order: the five accumulators (r0-r4) first, then regfiles A and B
 * interleaved (ra0, rb0, ra1, rb1, ...).  The interleaving puts the A/B pair
 * for one address at consecutive indices, which vc4_register_allocate()
 * relies on when pinning payload values to
 * AB_INDEX + QPU_R_FRAG_PAYLOAD_ZW * 2 (A) and + 1 (B).
 */
static const struct qpu_reg vc4_regs[] = {
        { QPU_MUX_R0, 0},
        { QPU_MUX_R1, 0},
        { QPU_MUX_R2, 0},
        { QPU_MUX_R3, 0},
        { QPU_MUX_R4, 0},
        QPU_R(A, 0),
        QPU_R(B, 0),
        QPU_R(A, 1),
        QPU_R(B, 1),
        QPU_R(A, 2),
        QPU_R(B, 2),
        QPU_R(A, 3),
        QPU_R(B, 3),
        QPU_R(A, 4),
        QPU_R(B, 4),
        QPU_R(A, 5),
        QPU_R(B, 5),
        QPU_R(A, 6),
        QPU_R(B, 6),
        QPU_R(A, 7),
        QPU_R(B, 7),
        QPU_R(A, 8),
        QPU_R(B, 8),
        QPU_R(A, 9),
        QPU_R(B, 9),
        QPU_R(A, 10),
        QPU_R(B, 10),
        QPU_R(A, 11),
        QPU_R(B, 11),
        QPU_R(A, 12),
        QPU_R(B, 12),
        QPU_R(A, 13),
        QPU_R(B, 13),
        QPU_R(A, 14),
        QPU_R(B, 14),
        QPU_R(A, 15),
        QPU_R(B, 15),
        QPU_R(A, 16),
        QPU_R(B, 16),
        QPU_R(A, 17),
        QPU_R(B, 17),
        QPU_R(A, 18),
        QPU_R(B, 18),
        QPU_R(A, 19),
        QPU_R(B, 19),
        QPU_R(A, 20),
        QPU_R(B, 20),
        QPU_R(A, 21),
        QPU_R(B, 21),
        QPU_R(A, 22),
        QPU_R(B, 22),
        QPU_R(A, 23),
        QPU_R(B, 23),
        QPU_R(A, 24),
        QPU_R(B, 24),
        QPU_R(A, 25),
        QPU_R(B, 25),
        QPU_R(A, 26),
        QPU_R(B, 26),
        QPU_R(A, 27),
        QPU_R(B, 27),
        QPU_R(A, 28),
        QPU_R(B, 28),
        QPU_R(A, 29),
        QPU_R(B, 29),
        QPU_R(A, 30),
        QPU_R(B, 30),
        QPU_R(A, 31),
        QPU_R(B, 31),
};
/* Index of the first accumulator (r0) in vc4_regs[]. */
#define ACC_INDEX 0
/* Index of the first A/B regfile entry (ra0) in vc4_regs[]. */
#define AB_INDEX (ACC_INDEX + 5)
105
106 static void
107 vc4_alloc_reg_set(struct vc4_context *vc4)
108 {
109 assert(vc4_regs[AB_INDEX].addr == 0);
110 assert(vc4_regs[AB_INDEX + 1].addr == 0);
111 STATIC_ASSERT(ARRAY_SIZE(vc4_regs) == AB_INDEX + 64);
112
113 if (vc4->regs)
114 return;
115
116 vc4->regs = ra_alloc_reg_set(vc4, ARRAY_SIZE(vc4_regs));
117
118 vc4->reg_class_any = ra_alloc_reg_class(vc4->regs);
119 vc4->reg_class_r4_or_a = ra_alloc_reg_class(vc4->regs);
120 vc4->reg_class_a = ra_alloc_reg_class(vc4->regs);
121 for (uint32_t i = 0; i < ARRAY_SIZE(vc4_regs); i++) {
122 /* Reserve ra31/rb31 for spilling fixup_raddr_conflict() in
123 * vc4_qpu_emit.c
124 */
125 if (vc4_regs[i].addr == 31)
126 continue;
127
128 /* R4 can't be written as a general purpose register. (it's
129 * TMU_NOSWAP as a write address).
130 */
131 if (vc4_regs[i].mux == QPU_MUX_R4) {
132 ra_class_add_reg(vc4->regs, vc4->reg_class_r4_or_a, i);
133 continue;
134 }
135
136 ra_class_add_reg(vc4->regs, vc4->reg_class_any, i);
137 }
138
139 for (uint32_t i = AB_INDEX; i < AB_INDEX + 64; i += 2) {
140 ra_class_add_reg(vc4->regs, vc4->reg_class_a, i);
141 ra_class_add_reg(vc4->regs, vc4->reg_class_r4_or_a, i);
142 }
143
144 ra_set_finalize(vc4->regs, NULL);
145 }
146
/* Pairing of a temp index with its allocation priority, used to order the
 * allocator's node numbering in vc4_register_allocate().
 */
struct node_to_temp_map {
        uint32_t temp;     /* QFILE_TEMP index */
        uint32_t priority; /* live-range length: use[temp] - def[temp] */
};

/*
 * qsort() comparator ordering entries by ascending priority.
 *
 * Compares explicitly instead of returning "a->priority - b->priority":
 * that subtraction happens in unsigned arithmetic, and converting a
 * difference larger than INT_MAX back to int is implementation-defined,
 * which could produce the wrong sign for very long live ranges.  qsort()
 * only needs the sign, so return -1/0/1.
 */
static int
node_to_temp_priority(const void *in_a, const void *in_b)
{
        const struct node_to_temp_map *a = in_a;
        const struct node_to_temp_map *b = in_b;

        if (a->priority < b->priority)
                return -1;
        if (a->priority > b->priority)
                return 1;
        return 0;
}
160
/* Per-temp bitmask of the register files a temp may still be assigned to.
 * Every temp starts with all bits set; instructions clear bits as they
 * impose constraints, and the surviving combination picks one of the
 * classes built in vc4_alloc_reg_set().
 */
#define CLASS_BIT_A (1 << 0)        /* regfile A */
#define CLASS_BIT_B_OR_ACC (1 << 1) /* regfile B or accumulators r0-r3 */
#define CLASS_BIT_R4 (1 << 2)       /* the r4 accumulator */
164
165 /**
166 * Returns a mapping from QFILE_TEMP indices to struct qpu_regs.
167 *
168 * The return value should be freed by the caller.
169 */
170 struct qpu_reg *
171 vc4_register_allocate(struct vc4_context *vc4, struct vc4_compile *c)
172 {
173 struct node_to_temp_map map[c->num_temps];
174 uint32_t temp_to_node[c->num_temps];
175 uint32_t def[c->num_temps];
176 uint32_t use[c->num_temps];
177 uint8_t class_bits[c->num_temps];
178 struct qpu_reg *temp_registers = calloc(c->num_temps,
179 sizeof(*temp_registers));
180 memset(def, 0, sizeof(def));
181 memset(use, 0, sizeof(use));
182
183 /* If things aren't ever written (undefined values), just read from
184 * r0.
185 */
186 for (uint32_t i = 0; i < c->num_temps; i++)
187 temp_registers[i] = qpu_rn(0);
188
189 vc4_alloc_reg_set(vc4);
190
191 struct ra_graph *g = ra_alloc_interference_graph(vc4->regs,
192 c->num_temps);
193
194 /* Compute the live ranges so we can figure out interference.
195 */
196 uint32_t ip = 0;
197 list_for_each_entry(struct qinst, inst, &c->instructions, link) {
198 if (inst->dst.file == QFILE_TEMP) {
199 def[inst->dst.index] = ip;
200 use[inst->dst.index] = ip;
201 }
202
203 for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
204 if (inst->src[i].file == QFILE_TEMP)
205 use[inst->src[i].index] = ip;
206 }
207
208 switch (inst->op) {
209 case QOP_FRAG_Z:
210 case QOP_FRAG_W:
211 /* The payload registers have values implicitly loaded
212 * at the start of the program.
213 */
214 def[inst->dst.index] = 0;
215 break;
216 default:
217 break;
218 }
219
220 ip++;
221 }
222
223 for (uint32_t i = 0; i < c->num_temps; i++) {
224 map[i].temp = i;
225 map[i].priority = use[i] - def[i];
226 }
227 qsort(map, c->num_temps, sizeof(map[0]), node_to_temp_priority);
228 for (uint32_t i = 0; i < c->num_temps; i++) {
229 temp_to_node[map[i].temp] = i;
230 }
231
232 /* Figure out our register classes and preallocated registers. We
233 * start with any temp being able to be in any file, then instructions
234 * incrementally remove bits that the temp definitely can't be in.
235 */
236 memset(class_bits,
237 CLASS_BIT_A | CLASS_BIT_B_OR_ACC | CLASS_BIT_R4,
238 sizeof(class_bits));
239
240 ip = 0;
241 list_for_each_entry(struct qinst, inst, &c->instructions, link) {
242 if (qir_writes_r4(inst)) {
243 /* This instruction writes r4 (and optionally moves
244 * its result to a temp), so nothing else can be
245 * stored in r4 across it.
246 */
247 for (int i = 0; i < c->num_temps; i++) {
248 if (def[i] < ip && use[i] > ip)
249 class_bits[i] &= ~CLASS_BIT_R4;
250 }
251 } else {
252 /* R4 can't be written as a general purpose
253 * register. (it's TMU_NOSWAP as a write address).
254 */
255 if (inst->dst.file == QFILE_TEMP)
256 class_bits[inst->dst.index] &= ~CLASS_BIT_R4;
257 }
258
259 switch (inst->op) {
260 case QOP_FRAG_Z:
261 ra_set_node_reg(g, temp_to_node[inst->dst.index],
262 AB_INDEX + QPU_R_FRAG_PAYLOAD_ZW * 2 + 1);
263 break;
264
265 case QOP_FRAG_W:
266 ra_set_node_reg(g, temp_to_node[inst->dst.index],
267 AB_INDEX + QPU_R_FRAG_PAYLOAD_ZW * 2);
268 break;
269
270 case QOP_PACK_SCALED:
271 /* The pack flags require an A-file dst register. */
272 class_bits[inst->dst.index] &= CLASS_BIT_A;
273 break;
274
275 default:
276 break;
277 }
278
279 if (qir_src_needs_a_file(inst)) {
280 class_bits[inst->src[0].index] &= CLASS_BIT_A;
281 }
282 ip++;
283 }
284
285 for (uint32_t i = 0; i < c->num_temps; i++) {
286 int node = temp_to_node[i];
287
288 switch (class_bits[i]) {
289 case CLASS_BIT_A | CLASS_BIT_B_OR_ACC | CLASS_BIT_R4:
290 case CLASS_BIT_A | CLASS_BIT_B_OR_ACC:
291 ra_set_node_class(g, node, vc4->reg_class_any);
292 break;
293 case CLASS_BIT_A | CLASS_BIT_R4:
294 ra_set_node_class(g, node, vc4->reg_class_r4_or_a);
295 break;
296 case CLASS_BIT_A:
297 ra_set_node_class(g, node, vc4->reg_class_a);
298 break;
299 default:
300 fprintf(stderr, "temp %d: bad class bits: 0x%x\n",
301 i, class_bits[i]);
302 abort();
303 break;
304 }
305 }
306
307 for (uint32_t i = 0; i < c->num_temps; i++) {
308 for (uint32_t j = i + 1; j < c->num_temps; j++) {
309 if (!(def[i] >= use[j] || def[j] >= use[i])) {
310 ra_add_node_interference(g,
311 temp_to_node[i],
312 temp_to_node[j]);
313 }
314 }
315 }
316
317 bool ok = ra_allocate(g);
318 if (!ok) {
319 fprintf(stderr, "Failed to register allocate:\n");
320 qir_dump(c);
321 abort();
322 }
323
324 for (uint32_t i = 0; i < c->num_temps; i++) {
325 temp_registers[i] = vc4_regs[ra_get_node_reg(g, temp_to_node[i])];
326
327 /* If the value's never used, just write to the NOP register
328 * for clarity in debug output.
329 */
330 if (def[i] == use[i])
331 temp_registers[i] = qpu_ra(QPU_W_NOP);
332 }
333
334 ralloc_free(g);
335
336 return temp_registers;
337 }