v3d: Fix temporary leaks of temp_registers and when spilling.
[mesa.git] / src / broadcom / compiler / vir_register_allocate.c
/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/ralloc.h"
#include "util/register_allocate.h"
#include "common/v3d_device_info.h"
#include "v3d_compiler.h"

#define QPU_R(i) { .magic = false, .index = i }

#define ACC_INDEX     0
#define ACC_COUNT     5
#define PHYS_INDEX    (ACC_INDEX + ACC_COUNT)
#define PHYS_COUNT    64

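/* Returns true if "inst" is the last LDTMU of its TMU operation, i.e. no
 * other LDTMU appears between it and the next TMU setup (or the end of the
 * block).
 */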
static bool
is_last_ldtmu(struct qinst *inst, struct qblock *block)
{
        list_for_each_entry_from(struct qinst, scan_inst, inst,
                                 &block->instructions, link) {
                /* Skip the LDTMU we were asked about and scan forward from
                 * there.
                 */
                if (scan_inst == inst)
                        continue;

                if (scan_inst->qpu.sig.ldtmu)
                        return false;
                if (v3d_qpu_writes_tmu(&scan_inst->qpu))
                        return true;
        }

        return true;
}

static bool
vir_is_mov_uniform(struct v3d_compile *c, int temp)
{
        struct qinst *def = c->defs[temp];

        return (def &&
                vir_is_raw_mov(def) &&
                def->src[0].file == QFILE_UNIF);
}

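/* Walks the program accumulating a spill cost for each temp and picks the
 * best spill candidate.  Uniform MOVs are cheap, since they can just be
 * rematerialized at their uses; anything else has to go through a TMU
 * store/load, so it gets scaled up by tmu_scale.  Temps used where we can't
 * insert TMU operations or thread switches are marked unspillable.
 */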
static int
v3d_choose_spill_node(struct v3d_compile *c, struct ra_graph *g,
                      uint32_t *temp_to_node)
{
        const float tmu_scale = 5;
        float block_scale = 1.0;
        float spill_costs[c->num_temps];
        bool in_tmu_operation = false;
        bool started_last_seg = false;

        for (unsigned i = 0; i < c->num_temps; i++)
                spill_costs[i] = 0.0;

        /* XXX: Scale the cost up when inside of a loop. */
        vir_for_each_block(block, c) {
                vir_for_each_inst(inst, block) {
                        /* We can't insert a new TMU operation while currently
                         * in a TMU operation, and we can't insert new thread
                         * switches after starting output writes.
                         */
                        bool no_spilling =
                                (in_tmu_operation ||
                                 (c->threads > 1 && started_last_seg));

                        for (int i = 0; i < vir_get_nsrc(inst); i++) {
                                if (inst->src[i].file != QFILE_TEMP)
                                        continue;

                                int temp = inst->src[i].index;
                                if (vir_is_mov_uniform(c, temp)) {
                                        spill_costs[temp] += block_scale;
                                } else if (!no_spilling) {
                                        spill_costs[temp] += (block_scale *
                                                              tmu_scale);
                                } else {
                                        BITSET_CLEAR(c->spillable, temp);
                                }
                        }

                        if (inst->dst.file == QFILE_TEMP) {
                                int temp = inst->dst.index;

                                if (vir_is_mov_uniform(c, temp)) {
                                        /* We just rematerialize the uniform
                                         * later.
                                         */
                                } else if (!no_spilling) {
                                        spill_costs[temp] += (block_scale *
                                                              tmu_scale);
                                } else {
                                        BITSET_CLEAR(c->spillable, temp);
                                }
                        }

                        /* Refuse to spill a ldvary's dst, because that means
                         * that ldvary's r5 would end up being used across a
                         * thrsw.
                         */
                        if (inst->qpu.sig.ldvary) {
                                assert(inst->dst.file == QFILE_TEMP);
                                BITSET_CLEAR(c->spillable, inst->dst.index);
                        }

                        if (inst->is_last_thrsw)
                                started_last_seg = true;

                        if (v3d_qpu_writes_vpm(&inst->qpu) ||
                            v3d_qpu_uses_tlb(&inst->qpu))
                                started_last_seg = true;

                        /* Track when we're in between a TMU setup and the
                         * final LDTMU or TMUWT from that TMU setup.  We can't
                         * spill/fill any temps during that time, because that
                         * involves inserting a new TMU setup/LDTMU sequence.
                         */
                        if (inst->qpu.sig.ldtmu &&
                            is_last_ldtmu(inst, block))
                                in_tmu_operation = false;

                        if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                            inst->qpu.alu.add.op == V3D_QPU_A_TMUWT)
                                in_tmu_operation = false;

                        if (v3d_qpu_writes_tmu(&inst->qpu))
                                in_tmu_operation = true;
                }
        }

        for (unsigned i = 0; i < c->num_temps; i++) {
                int node = temp_to_node[i];

                if (BITSET_TEST(c->spillable, i))
                        ra_set_node_spill_cost(g, node, spill_costs[i]);
        }

        return ra_get_best_spill_node(g);
}

/* The spill offset for this thread takes a bit of setup, so do it once at
 * program start.
 */
static void
v3d_setup_spill_base(struct v3d_compile *c)
{
        c->cursor = vir_before_block(vir_entry_block(c));

        int start_num_temps = c->num_temps;

        /* Each thread wants to be in a separate region of the scratch space
         * so that the QPUs aren't fighting over cache lines.  We have the
         * driver keep a single global spill BO rather than
         * per-spilling-program BOs, so we need a uniform from the driver for
         * what the per-thread scale is.
         */
        struct qreg thread_offset =
                vir_UMUL(c,
                         vir_TIDX(c),
                         vir_uniform(c, QUNIFORM_SPILL_SIZE_PER_THREAD, 0));

        /* Each channel in a reg is 4 bytes, so scale them up by that. */
        struct qreg element_offset = vir_SHL(c, vir_EIDX(c),
                                             vir_uniform_ui(c, 2));

        c->spill_base = vir_ADD(c,
                                vir_ADD(c, thread_offset, element_offset),
                                vir_uniform(c, QUNIFORM_SPILL_OFFSET, 0));

        /* Make sure that we don't spill the spilling setup instructions. */
        for (int i = start_num_temps; i < c->num_temps; i++)
                BITSET_CLEAR(c->spillable, i);
}

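/* Emits the TMU address setup for a spill or fill: spill_base plus the
 * per-spill byte offset is written to the TMUA magic register, which kicks
 * off the TMU general memory access.
 */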
static void
v3d_emit_spill_tmua(struct v3d_compile *c, uint32_t spill_offset)
{
        vir_ADD_dest(c, vir_reg(QFILE_MAGIC,
                                V3D_QPU_WADDR_TMUA),
                     c->spill_base,
                     vir_uniform_ui(c, spill_offset));
}

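/* Rewrites every def and use of spill_temp.  Uniform MOVs are simply
 * rematerialized with a fresh MOV of the uniform at each use.  Other temps
 * get stored to the scratch buffer through the TMU after each def and loaded
 * back before each use, inserting the thread switches the TMU accesses
 * require.
 */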
static void
v3d_spill_reg(struct v3d_compile *c, int spill_temp)
{
        bool is_uniform = vir_is_mov_uniform(c, spill_temp);

        uint32_t spill_offset = 0;

        if (!is_uniform) {
                spill_offset = c->spill_size;
                c->spill_size += 16 * sizeof(uint32_t);

                if (spill_offset == 0)
                        v3d_setup_spill_base(c);
        }

        struct qinst *last_thrsw = c->last_thrsw;
        assert(!last_thrsw || last_thrsw->is_last_thrsw);

        int start_num_temps = c->num_temps;

        struct qreg uniform_src = c->undef;
        if (is_uniform)
                uniform_src = c->defs[spill_temp]->src[0];

        vir_for_each_inst_inorder_safe(inst, c) {
                for (int i = 0; i < vir_get_nsrc(inst); i++) {
                        if (inst->src[i].file != QFILE_TEMP ||
                            inst->src[i].index != spill_temp) {
                                continue;
                        }

                        c->cursor = vir_before_inst(inst);

                        if (is_uniform) {
                                inst->src[i] = vir_MOV(c, uniform_src);
                        } else {
                                v3d_emit_spill_tmua(c, spill_offset);
                                vir_emit_thrsw(c);
                                inst->src[i] = vir_LDTMU(c);
                                c->fills++;
                        }
                }

                if (inst->dst.file == QFILE_TEMP &&
                    inst->dst.index == spill_temp) {
                        if (is_uniform) {
                                c->cursor.link = NULL;
                                vir_remove_instruction(c, inst);
                        } else {
                                c->cursor = vir_after_inst(inst);

                                inst->dst.index = c->num_temps++;
                                vir_MOV_dest(c, vir_reg(QFILE_MAGIC,
                                                        V3D_QPU_WADDR_TMUD),
                                             inst->dst);
                                v3d_emit_spill_tmua(c, spill_offset);
                                vir_emit_thrsw(c);
                                vir_TMUWT(c);
                                c->spills++;
                        }
                }

                /* If we didn't have a last-thrsw inserted by nir_to_vir and
                 * we've been inserting thrsws, then insert a new last_thrsw
                 * right before we start the vpm/tlb sequence for the last
                 * thread segment.
                 */
                if (!is_uniform && !last_thrsw && c->last_thrsw &&
                    (v3d_qpu_writes_vpm(&inst->qpu) ||
                     v3d_qpu_uses_tlb(&inst->qpu))) {
                        c->cursor = vir_before_inst(inst);
                        vir_emit_thrsw(c);

                        last_thrsw = c->last_thrsw;
                        last_thrsw->is_last_thrsw = true;
                }
        }

        /* Make sure c->last_thrsw is the actual last thrsw, not just one we
         * inserted in our most recent unspill.
         */
        if (last_thrsw)
                c->last_thrsw = last_thrsw;

        /* Don't allow spilling of our spilling instructions.  There's no way
         * they can help get things colored.
         */
        for (int i = start_num_temps; i < c->num_temps; i++)
                BITSET_CLEAR(c->spillable, i);
}

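/* Round-robin state for v3d_ra_select_callback(), so that consecutive
 * allocations don't all land on the same register.
 */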
struct v3d_ra_select_callback_data {
        uint32_t next_acc;
        uint32_t next_phys;
};

static unsigned int
v3d_ra_select_callback(struct ra_graph *g, BITSET_WORD *regs, void *data)
{
        struct v3d_ra_select_callback_data *v3d_ra = data;

        /* Choose an accumulator if possible (I think it's lower power than
         * phys regs), but round-robin through them to give post-RA
         * instruction selection more options.
         */
        for (int i = 0; i < ACC_COUNT; i++) {
                int acc_off = (v3d_ra->next_acc + i) % ACC_COUNT;
                int acc = ACC_INDEX + acc_off;

                if (BITSET_TEST(regs, acc)) {
                        v3d_ra->next_acc = acc_off + 1;
                        return acc;
                }
        }

        for (int i = 0; i < PHYS_COUNT; i++) {
                int phys_off = (v3d_ra->next_phys + i) % PHYS_COUNT;
                int phys = PHYS_INDEX + phys_off;

                if (BITSET_TEST(regs, phys)) {
                        v3d_ra->next_phys = phys_off + 1;
                        return phys;
                }
        }

        unreachable("RA must pass us at least one possible reg.");
}

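/* Builds the compiler-wide register set shared by all compiles: ACC_COUNT
 * accumulators followed by PHYS_COUNT physical registers, with a "phys only"
 * and a "phys or acc" class for each supported thread count (more threads
 * means each thread gets a smaller slice of the phys file).
 */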
bool
vir_init_reg_sets(struct v3d_compiler *compiler)
{
        /* Allocate up to 3 regfile classes, for the ways the physical
         * register file can be divided up for fragment shader threading.
         */
        int max_thread_index = (compiler->devinfo->ver >= 40 ? 2 : 3);

        compiler->regs = ra_alloc_reg_set(compiler, PHYS_INDEX + PHYS_COUNT,
                                          true);
        if (!compiler->regs)
                return false;

        for (int threads = 0; threads < max_thread_index; threads++) {
                compiler->reg_class_phys_or_acc[threads] =
                        ra_alloc_reg_class(compiler->regs);
                compiler->reg_class_phys[threads] =
                        ra_alloc_reg_class(compiler->regs);

                for (int i = PHYS_INDEX;
                     i < PHYS_INDEX + (PHYS_COUNT >> threads); i++) {
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_phys_or_acc[threads], i);
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_phys[threads], i);
                }

                for (int i = ACC_INDEX + 0; i < ACC_INDEX + ACC_COUNT; i++) {
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_phys_or_acc[threads], i);
                }
        }

        ra_set_finalize(compiler->regs, NULL);

        return true;
}

struct node_to_temp_map {
        uint32_t temp;
        uint32_t priority;
};

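/* qsort comparator ordering temps by ascending live-range length (the
 * "priority" computed below), so that node numbers end up grouped by how
 * long their temps stay live.
 */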
static int
node_to_temp_priority(const void *in_a, const void *in_b)
{
        const struct node_to_temp_map *a = in_a;
        const struct node_to_temp_map *b = in_b;

        return a->priority - b->priority;
}

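/* Bits tracking which register files a temp may still be allocated in.  Each
 * temp starts with all of them set, and instructions that constrain a temp
 * (SFU and LDVPM destinations, values live across a thrsw) clear the bits it
 * can no longer use.
 */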
#define CLASS_BIT_PHYS                  (1 << 0)
#define CLASS_BIT_R0_R2                 (1 << 1)
#define CLASS_BIT_R3                    (1 << 2)
#define CLASS_BIT_R4                    (1 << 3)

/**
 * Returns a mapping from QFILE_TEMP indices to struct qpu_regs.
 *
 * The return value should be freed by the caller.
 */
struct qpu_reg *
v3d_register_allocate(struct v3d_compile *c, bool *spilled)
{
        struct node_to_temp_map map[c->num_temps];
        uint32_t temp_to_node[c->num_temps];
        uint8_t class_bits[c->num_temps];
        int acc_nodes[ACC_COUNT];
        struct v3d_ra_select_callback_data callback_data = {
                .next_acc = 0,
                /* Start at RF3, to try to keep the TLB writes from using
                 * RF0-2.
                 */
                .next_phys = 3,
        };

        *spilled = false;

        vir_calculate_live_intervals(c);

        /* Convert 1, 2, 4 threads to 0, 1, 2 index.
         *
         * V3D 4.x has double the physical register space, so 64 physical regs
         * are available at both 1x and 2x threading, and 4x has 32.
         */
        int thread_index = ffs(c->threads) - 1;
        if (c->devinfo->ver >= 40) {
                if (thread_index >= 1)
                        thread_index--;
        }

        struct ra_graph *g = ra_alloc_interference_graph(c->compiler->regs,
                                                         c->num_temps +
                                                         ARRAY_SIZE(acc_nodes));
        ra_set_select_reg_callback(g, v3d_ra_select_callback, &callback_data);

        /* Make some fixed nodes for the accumulators, which we will need to
         * interfere with when ops have implied r3/r4 writes or for the thread
         * switches.  We could represent these as classes for the nodes to
         * live in, but the classes take up a lot of memory to set up, so we
         * don't want to make too many.
         */
        for (int i = 0; i < ARRAY_SIZE(acc_nodes); i++) {
                acc_nodes[i] = c->num_temps + i;
                ra_set_node_reg(g, acc_nodes[i], ACC_INDEX + i);
        }

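        /* Build the temp <-> node mapping: temps are sorted by live-range
         * length, node i corresponds to map[i].temp, and temp_to_node[] is
         * the inverse of that ordering.
         */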
        for (uint32_t i = 0; i < c->num_temps; i++) {
                map[i].temp = i;
                map[i].priority = c->temp_end[i] - c->temp_start[i];
        }
        qsort(map, c->num_temps, sizeof(map[0]), node_to_temp_priority);
        for (uint32_t i = 0; i < c->num_temps; i++) {
                temp_to_node[map[i].temp] = i;
        }

        /* Figure out our register classes and preallocated registers.  We
         * start with any temp being able to be in any file, then instructions
         * incrementally remove bits that the temp definitely can't be in.
         */
        memset(class_bits,
               CLASS_BIT_PHYS | CLASS_BIT_R0_R2 | CLASS_BIT_R3 | CLASS_BIT_R4,
               sizeof(class_bits));

        int ip = 0;
        vir_for_each_inst_inorder(inst, c) {
                /* If the instruction writes r3/r4 (and optionally moves its
                 * result to a temp), nothing else can be stored in r3/r4
                 * across it.
                 */
                if (vir_writes_r3(c->devinfo, inst)) {
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip &&
                                    c->temp_end[i] > ip) {
                                        ra_add_node_interference(g,
                                                                 temp_to_node[i],
                                                                 acc_nodes[3]);
                                }
                        }
                }
                if (vir_writes_r4(c->devinfo, inst)) {
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip &&
                                    c->temp_end[i] > ip) {
                                        ra_add_node_interference(g,
                                                                 temp_to_node[i],
                                                                 acc_nodes[4]);
                                }
                        }
                }

                if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU) {
                        switch (inst->qpu.alu.add.op) {
                        case V3D_QPU_A_LDVPMV_IN:
                        case V3D_QPU_A_LDVPMV_OUT:
                        case V3D_QPU_A_LDVPMD_IN:
                        case V3D_QPU_A_LDVPMD_OUT:
                        case V3D_QPU_A_LDVPMP:
                        case V3D_QPU_A_LDVPMG_IN:
                        case V3D_QPU_A_LDVPMG_OUT:
                                /* LDVPMs only store to temps (the MA flag
                                 * decides whether the LDVPM is in or out)
                                 */
                                assert(inst->dst.file == QFILE_TEMP);
                                class_bits[inst->dst.index] &= CLASS_BIT_PHYS;
                                break;

                        case V3D_QPU_A_RECIP:
                        case V3D_QPU_A_RSQRT:
                        case V3D_QPU_A_EXP:
                        case V3D_QPU_A_LOG:
                        case V3D_QPU_A_SIN:
                        case V3D_QPU_A_RSQRT2:
                                /* The SFU instructions write directly to the
                                 * phys regfile.
                                 */
                                assert(inst->dst.file == QFILE_TEMP);
                                class_bits[inst->dst.index] &= CLASS_BIT_PHYS;
                                break;

                        default:
                                break;
                        }
                }

                if (inst->src[0].file == QFILE_REG) {
                        switch (inst->src[0].index) {
                        case 0:
                        case 1:
                        case 2:
                        case 3:
                                /* Payload setup instructions: Force allocate
                                 * the dst to the given register (so the MOV
                                 * will disappear).
                                 */
                                assert(inst->qpu.alu.mul.op == V3D_QPU_M_MOV);
                                assert(inst->dst.file == QFILE_TEMP);
                                ra_set_node_reg(g,
                                                temp_to_node[inst->dst.index],
                                                PHYS_INDEX +
                                                inst->src[0].index);
                                break;
                        }
                }

                if (inst->qpu.sig.thrsw) {
                        /* All accumulators are invalidated across a thread
                         * switch.
                         */
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip && c->temp_end[i] > ip)
                                        class_bits[i] &= CLASS_BIT_PHYS;
                        }
                }

                ip++;
        }

        for (uint32_t i = 0; i < c->num_temps; i++) {
                if (class_bits[i] == CLASS_BIT_PHYS) {
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_phys[thread_index]);
                } else {
                        assert(class_bits[i] == (CLASS_BIT_PHYS |
                                                 CLASS_BIT_R0_R2 |
                                                 CLASS_BIT_R3 |
                                                 CLASS_BIT_R4));
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_phys_or_acc[thread_index]);
                }
        }

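        /* Add an interference edge between any two temps whose live ranges
         * overlap.
         */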
        for (uint32_t i = 0; i < c->num_temps; i++) {
                for (uint32_t j = i + 1; j < c->num_temps; j++) {
                        if (!(c->temp_start[i] >= c->temp_end[j] ||
                              c->temp_start[j] >= c->temp_end[i])) {
                                ra_add_node_interference(g,
                                                         temp_to_node[i],
                                                         temp_to_node[j]);
                        }
                }
        }

        /* Debug code to force a bit of register spilling, for running across
         * conformance tests to make sure that spilling works.
         */
        int force_register_spills = 0;
        if (c->spill_size < 16 * sizeof(uint32_t) * force_register_spills) {
                int node = v3d_choose_spill_node(c, g, temp_to_node);
                if (node != -1) {
                        v3d_spill_reg(c, map[node].temp);
                        ralloc_free(g);
                        *spilled = true;
                        return NULL;
                }
        }

        bool ok = ra_allocate(g);
        if (!ok) {
                /* If we can't reduce threading any further, try to spill. */
                if (thread_index == 0) {
                        int node = v3d_choose_spill_node(c, g, temp_to_node);

                        if (node != -1) {
                                v3d_spill_reg(c, map[node].temp);

                                /* Ask the outer loop to call back in. */
                                *spilled = true;
                        }
                }

                ralloc_free(g);
                return NULL;
        }

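        /* Translate the RA results into the qpu_reg mapping: accumulators
         * become magic accesses of r0-r4, and everything else is an index
         * into the physical register file.
         */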
        struct qpu_reg *temp_registers = calloc(c->num_temps,
                                                sizeof(*temp_registers));

        for (uint32_t i = 0; i < c->num_temps; i++) {
                int ra_reg = ra_get_node_reg(g, temp_to_node[i]);
                if (ra_reg < PHYS_INDEX) {
                        temp_registers[i].magic = true;
                        temp_registers[i].index = (V3D_QPU_WADDR_R0 +
                                                   ra_reg - ACC_INDEX);
                } else {
                        temp_registers[i].magic = false;
                        temp_registers[i].index = ra_reg - PHYS_INDEX;
                }

                /* If the value's never used, just write to the NOP register
                 * for clarity in debug output.
                 */
                if (c->temp_start[i] == c->temp_end[i]) {
                        temp_registers[i].magic = true;
                        temp_registers[i].index = V3D_QPU_WADDR_NOP;
                }
        }

        ralloc_free(g);

        if (V3D_DEBUG & V3D_DEBUG_SHADERDB) {
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d spills\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id,
                        c->spills);

                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d fills\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id,
                        c->fills);
        }

        return temp_registers;
}