v3d: Use ldunif instructions for uniforms.
[mesa.git] / src / broadcom / compiler / vir_register_allocate.c
1 /*
2 * Copyright © 2014 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "util/ralloc.h"
25 #include "util/register_allocate.h"
26 #include "common/v3d_device_info.h"
27 #include "v3d_compiler.h"
28
29 #define QPU_R(i) { .magic = false, .index = i }
30
31 #define ACC_INDEX 0
32 #define ACC_COUNT 6
33 #define PHYS_INDEX (ACC_INDEX + ACC_COUNT)
34 #define PHYS_COUNT 64
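/* With the values above, allocator register numbers 0..5 stand for the
 * accumulators r0..r5 and numbers 6..69 stand for the physical regfile
 * rf0..rf63; the conversion back to QPU registers at the end of
 * v3d_register_allocate() relies on exactly this layout.
 */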
35
36 static bool
37 is_last_ldtmu(struct qinst *inst, struct qblock *block)
38 {
39 list_for_each_entry_from(struct qinst, scan_inst, inst,
40 &block->instructions, link) {
41 if (scan_inst != inst && scan_inst->qpu.sig.ldtmu)
42 return false;
43 if (v3d_qpu_writes_tmu(&scan_inst->qpu))
44 return true;
45 }
46
47 return true;
48 }
49
50 static bool
51 vir_is_mov_uniform(struct v3d_compile *c, int temp)
52 {
53 struct qinst *def = c->defs[temp];
54
55 return def && def->qpu.sig.ldunif;
56 }
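/* A temp whose def is a bare ldunif is cheap to "spill": rather than
 * storing it to scratch, v3d_spill_reg() below just re-emits the uniform
 * load next to each use, which is why v3d_choose_spill_node() gives such
 * temps a much lower spill cost.
 */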
57
58 static int
59 v3d_choose_spill_node(struct v3d_compile *c, struct ra_graph *g,
60 uint32_t *temp_to_node)
61 {
62 const float tmu_scale = 5;
63 float block_scale = 1.0;
64 float spill_costs[c->num_temps];
65 bool in_tmu_operation = false;
66 bool started_last_seg = false;
67
68 for (unsigned i = 0; i < c->num_temps; i++)
69 spill_costs[i] = 0.0;
70
71 /* XXX: Scale the cost up when inside of a loop. */
72 vir_for_each_block(block, c) {
73 vir_for_each_inst(inst, block) {
74 /* We can't insert a new TMU operation while currently
75 * in a TMU operation, and we can't insert new thread
76 * switches after starting output writes.
77 */
78 bool no_spilling =
79 (in_tmu_operation ||
80 (c->threads > 1 && started_last_seg));
81
82 for (int i = 0; i < vir_get_nsrc(inst); i++) {
83 if (inst->src[i].file != QFILE_TEMP)
84 continue;
85
86 int temp = inst->src[i].index;
87 if (vir_is_mov_uniform(c, temp)) {
88 spill_costs[temp] += block_scale;
89 } else if (!no_spilling) {
90 spill_costs[temp] += (block_scale *
91 tmu_scale);
92 } else {
93 BITSET_CLEAR(c->spillable, temp);
94 }
95 }
96
97 if (inst->dst.file == QFILE_TEMP) {
98 int temp = inst->dst.index;
99
100 if (vir_is_mov_uniform(c, temp)) {
101 /* We just rematerialize the uniform
102 * later.
103 */
104 } else if (!no_spilling) {
105 spill_costs[temp] += (block_scale *
106 tmu_scale);
107 } else {
108 BITSET_CLEAR(c->spillable, temp);
109 }
110 }
111
112 /* Refuse to spill a ldvary's dst, because that means
113 * that ldvary's r5 would end up being used across a
114 * thrsw.
115 */
116 if (inst->qpu.sig.ldvary) {
117 assert(inst->dst.file == QFILE_TEMP);
118 BITSET_CLEAR(c->spillable, inst->dst.index);
119 }
120
121 if (inst->is_last_thrsw)
122 started_last_seg = true;
123
124 if (v3d_qpu_writes_vpm(&inst->qpu) ||
125 v3d_qpu_uses_tlb(&inst->qpu))
126 started_last_seg = true;
127
128 /* Track when we're in between a TMU setup and the
129 * final LDTMU or TMUWT from that TMU setup. We can't
130 * spill/fill any temps during that time, because that
131 * involves inserting a new TMU setup/LDTMU sequence.
132 */
133 if (inst->qpu.sig.ldtmu &&
134 is_last_ldtmu(inst, block))
135 in_tmu_operation = false;
136
137 if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
138 inst->qpu.alu.add.op == V3D_QPU_A_TMUWT)
139 in_tmu_operation = false;
140
141 if (v3d_qpu_writes_tmu(&inst->qpu))
142 in_tmu_operation = true;
143 }
144 }
145
146 for (unsigned i = 0; i < c->num_temps; i++) {
147 int node = temp_to_node[i];
148
149 if (BITSET_TEST(c->spillable, i))
150 ra_set_node_spill_cost(g, node, spill_costs[i]);
151 }
152
153 return ra_get_best_spill_node(g);
154 }
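/* A rough sketch of the cost model implied by the loop above (not a
 * statement about ra_get_best_spill_node()'s internals): each use of a
 * rematerializable uniform adds block_scale (1.0), while each spillable
 * use or def of any other temp adds block_scale * tmu_scale (5.0).  So a
 * uniform referenced four times costs 4.0 against 20.0 for an ordinary
 * temp referenced four times, and uniform temps tend to get picked for
 * "spilling" (rematerialization) well before anything that would need a
 * real TMU store/load round trip.
 */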
155
156 /* The spill offset for this thread takes a bit of setup, so do it once at
157 * program start.
158 */
159 static void
160 v3d_setup_spill_base(struct v3d_compile *c)
161 {
162 c->cursor = vir_before_block(vir_entry_block(c));
163
164 int start_num_temps = c->num_temps;
165
166 /* Each thread wants to be in a separate region of the scratch space
167 * so that the QPUs aren't fighting over cache lines. We have the
168 * driver keep a single global spill BO rather than
169 * per-spilling-program BOs, so we need a uniform from the driver for
170 * what the per-thread scale is.
171 */
172 struct qreg thread_offset =
173 vir_UMUL(c,
174 vir_TIDX(c),
175 vir_uniform(c, QUNIFORM_SPILL_SIZE_PER_THREAD, 0));
176
177 /* Each channel in a reg is 4 bytes, so scale them up by that. */
178 struct qreg element_offset = vir_SHL(c, vir_EIDX(c),
179 vir_uniform_ui(c, 2));
180
181 c->spill_base = vir_ADD(c,
182 vir_ADD(c, thread_offset, element_offset),
183 vir_uniform(c, QUNIFORM_SPILL_OFFSET, 0));
184
185 /* Make sure that we don't spill the spilling setup instructions. */
186 for (int i = start_num_temps; i < c->num_temps; i++)
187 BITSET_CLEAR(c->spillable, i);
188 }
189
190 static void
191 v3d_emit_spill_tmua(struct v3d_compile *c, uint32_t spill_offset)
192 {
193 vir_ADD_dest(c, vir_reg(QFILE_MAGIC,
194 V3D_QPU_WADDR_TMUA),
195 c->spill_base,
196 vir_uniform_ui(c, spill_offset));
197 }
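/* A sketch of the address math the two helpers above set up (the actual
 * uniform values come from the driver, so this only shows the shape of
 * the computation):
 *
 *     spill_base = SPILL_OFFSET
 *                + tidx * SPILL_SIZE_PER_THREAD
 *                + eidx * 4
 *     tmua_addr  = spill_base + spill_offset
 *
 * Each spilled temp gets 16 * 4 = 64 bytes of scratch, one 32-bit slot
 * per channel, and spill_offset selects which 64-byte slot belongs to
 * this particular temp.
 */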
198
199 static void
200 v3d_spill_reg(struct v3d_compile *c, int spill_temp)
201 {
202 bool is_uniform = vir_is_mov_uniform(c, spill_temp);
203
204 uint32_t spill_offset = 0;
205
206 if (!is_uniform) {
207 spill_offset = c->spill_size;
208 c->spill_size += 16 * sizeof(uint32_t);
209
210 if (spill_offset == 0)
211 v3d_setup_spill_base(c);
212 }
213
214 struct qinst *last_thrsw = c->last_thrsw;
215 assert(!last_thrsw || last_thrsw->is_last_thrsw);
216
217 int start_num_temps = c->num_temps;
218
219 int uniform_index = ~0;
220 if (is_uniform) {
221 struct qinst *orig_unif = c->defs[spill_temp];
222 uniform_index = orig_unif->uniform;
223 }
224
225 vir_for_each_inst_inorder_safe(inst, c) {
226 for (int i = 0; i < vir_get_nsrc(inst); i++) {
227 if (inst->src[i].file != QFILE_TEMP ||
228 inst->src[i].index != spill_temp) {
229 continue;
230 }
231
232 c->cursor = vir_before_inst(inst);
233
234 if (is_uniform) {
235 struct qreg unif =
236 vir_uniform(c,
237 c->uniform_contents[uniform_index],
238 c->uniform_data[uniform_index]);
239 inst->src[i] = unif;
240 } else {
241 v3d_emit_spill_tmua(c, spill_offset);
242 vir_emit_thrsw(c);
243 inst->src[i] = vir_LDTMU(c);
244 c->fills++;
245 }
246 }
247
248 if (inst->dst.file == QFILE_TEMP &&
249 inst->dst.index == spill_temp) {
250 if (is_uniform) {
251 c->cursor.link = NULL;
252 vir_remove_instruction(c, inst);
253 } else {
254 c->cursor = vir_after_inst(inst);
255
256 inst->dst.index = c->num_temps++;
257 vir_MOV_dest(c, vir_reg(QFILE_MAGIC,
258 V3D_QPU_WADDR_TMUD),
259 inst->dst);
260 v3d_emit_spill_tmua(c, spill_offset);
261 vir_emit_thrsw(c);
262 vir_TMUWT(c);
263 c->spills++;
264 }
265 }
266
267 /* If we didn't have a last-thrsw inserted by nir_to_vir and
268 * we've been inserting thrsws, then insert a new last_thrsw
269 * right before we start the vpm/tlb sequence for the last
270 * thread segment.
271 */
272 if (!is_uniform && !last_thrsw && c->last_thrsw &&
273 (v3d_qpu_writes_vpm(&inst->qpu) ||
274 v3d_qpu_uses_tlb(&inst->qpu))) {
275 c->cursor = vir_before_inst(inst);
276 vir_emit_thrsw(c);
277
278 last_thrsw = c->last_thrsw;
279 last_thrsw->is_last_thrsw = true;
280 }
281 }
282
283 /* Make sure c->last_thrsw is the actual last thrsw, not just one we
284 * inserted in our most recent unspill.
285 */
286 if (last_thrsw)
287 c->last_thrsw = last_thrsw;
288
289 /* Don't allow spilling of our spilling instructions. There's no way
290 * they can help get things colored.
291 */
292 for (int i = start_num_temps; i < c->num_temps; i++)
293 BITSET_CLEAR(c->spillable, i);
294 }
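/* A rough before/after picture of the two rewrites above (VIR-flavored
 * pseudocode, not actual compiler output).  For a rematerializable
 * uniform:
 *
 *     t1 = ldunif u                        t9 = ldunif u
 *     ...                        ==>       add t2, t9, t0
 *     add t2, t1, t0
 *
 * with the original defining ldunif removed.  For any other temp, each
 * def grows a TMUD write plus a TMUA store address, thrsw and TMUWT, and
 * each use becomes a TMUA load address, thrsw and LDTMU into a fresh
 * temp.
 */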
295
296 struct v3d_ra_select_callback_data {
297 uint32_t next_acc;
298 uint32_t next_phys;
299 };
300
301 static unsigned int
302 v3d_ra_select_callback(struct ra_graph *g, BITSET_WORD *regs, void *data)
303 {
304 struct v3d_ra_select_callback_data *v3d_ra = data;
305 int r5 = ACC_INDEX + 5;
306
307 /* Choose r5 for our ldunifs if possible (nobody else can load to that
308 * reg, and it keeps the QPU cond field free from being occupied by
309 * ldunifrf).
310 */
311 if (BITSET_TEST(regs, r5))
312 return r5;
313
314 /* Choose an accumulator if possible (I think it's lower power than
315 * phys regs), but round-robin through them to give post-RA
316 * instruction selection more options.
317 */
318 for (int i = 0; i < ACC_COUNT; i++) {
319 int acc_off = (v3d_ra->next_acc + i) % ACC_COUNT;
320 int acc = ACC_INDEX + acc_off;
321
322 if (BITSET_TEST(regs, acc)) {
323 v3d_ra->next_acc = acc_off + 1;
324 return acc;
325 }
326 }
327
328 for (int i = 0; i < PHYS_COUNT; i++) {
329 int phys_off = (v3d_ra->next_phys + i) % PHYS_COUNT;
330 int phys = PHYS_INDEX + phys_off;
331
332 if (BITSET_TEST(regs, phys)) {
333 v3d_ra->next_phys = phys_off + 1;
334 return phys;
335 }
336 }
337
338 unreachable("RA must pass us at least one possible reg.");
339 }
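/* The callback's preference order is therefore: r5 when available (so a
 * plain ldunif can be used instead of ldunifrf), then the accumulators
 * in round-robin order, then the physical regfile in round-robin order
 * starting at next_phys (initialized to 3 by v3d_register_allocate() so
 * that rf0-rf2 stay free for TLB writes when possible).
 */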
340
341 bool
342 vir_init_reg_sets(struct v3d_compiler *compiler)
343 {
344 /* Allocate up to 3 regfile classes, for the ways the physical
345 * register file can be divided up for fragment shader threading.
346 */
347 int max_thread_index = (compiler->devinfo->ver >= 40 ? 2 : 3);
348
349 compiler->regs = ra_alloc_reg_set(compiler, PHYS_INDEX + PHYS_COUNT,
350 true);
351 if (!compiler->regs)
352 return false;
353
354 for (int threads = 0; threads < max_thread_index; threads++) {
355 compiler->reg_class_any[threads] =
356 ra_alloc_reg_class(compiler->regs);
357 compiler->reg_class_r5[threads] =
358 ra_alloc_reg_class(compiler->regs);
359 compiler->reg_class_phys_or_acc[threads] =
360 ra_alloc_reg_class(compiler->regs);
361 compiler->reg_class_phys[threads] =
362 ra_alloc_reg_class(compiler->regs);
363
364 for (int i = PHYS_INDEX;
365 i < PHYS_INDEX + (PHYS_COUNT >> threads); i++) {
366 ra_class_add_reg(compiler->regs,
367 compiler->reg_class_phys_or_acc[threads], i);
368 ra_class_add_reg(compiler->regs,
369 compiler->reg_class_phys[threads], i);
370 ra_class_add_reg(compiler->regs,
371 compiler->reg_class_any[threads], i);
372 }
373
374 for (int i = ACC_INDEX + 0; i < ACC_INDEX + ACC_COUNT - 1; i++) {
375 ra_class_add_reg(compiler->regs,
376 compiler->reg_class_phys_or_acc[threads], i);
377 ra_class_add_reg(compiler->regs,
378 compiler->reg_class_any[threads], i);
379 }
380 /* r5 can only store a single 32-bit value, so not much can
381 * use it.
382 */
383 ra_class_add_reg(compiler->regs,
384 compiler->reg_class_r5[threads],
385 ACC_INDEX + 5);
386 ra_class_add_reg(compiler->regs,
387 compiler->reg_class_any[threads],
388 ACC_INDEX + 5);
389 }
390
391 ra_set_finalize(compiler->regs, NULL);
392
393 return true;
394 }
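/* Register counts that fall out of the loops above: class index 0 gets
 * rf0-rf63, index 1 gets rf0-rf31, and index 2 (V3D 3.x only) gets
 * rf0-rf15.  Every index also gets the accumulators r0-r4 in
 * phys_or_acc/any and r5 in r5/any.  How a shader's thread count maps to
 * a class index is decided in v3d_register_allocate().
 */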
395
396 struct node_to_temp_map {
397 uint32_t temp;
398 uint32_t priority;
399 };
400
401 static int
402 node_to_temp_priority(const void *in_a, const void *in_b)
403 {
404 const struct node_to_temp_map *a = in_a;
405 const struct node_to_temp_map *b = in_b;
406
407 return a->priority - b->priority;
408 }
409
410 #define CLASS_BIT_PHYS (1 << 0)
411 #define CLASS_BIT_ACC (1 << 1)
412 #define CLASS_BIT_R5 (1 << 4)
413 #define CLASS_BITS_ANY (CLASS_BIT_PHYS | \
414 CLASS_BIT_ACC | \
415 CLASS_BIT_R5)
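/* class_bits[] below starts at CLASS_BITS_ANY for every temp, and
 * instructions only ever clear bits from it.  The final value picks the
 * RA class: CLASS_BIT_PHYS alone selects reg_class_phys, CLASS_BIT_PHYS |
 * CLASS_BIT_ACC selects reg_class_phys_or_acc, CLASS_BIT_R5 alone selects
 * reg_class_r5, and an untouched CLASS_BITS_ANY selects reg_class_any.
 */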
416
417 /**
418 * Returns a mapping from QFILE_TEMP indices to struct qpu_regs.
419 *
420 * The return value should be freed by the caller.
421 */
422 struct qpu_reg *
423 v3d_register_allocate(struct v3d_compile *c, bool *spilled)
424 {
425 struct node_to_temp_map map[c->num_temps];
426 uint32_t temp_to_node[c->num_temps];
427 uint8_t class_bits[c->num_temps];
428 int acc_nodes[ACC_COUNT];
429 struct v3d_ra_select_callback_data callback_data = {
430 .next_acc = 0,
431 /* Start at RF3, to try to keep the TLB writes from using
432 * RF0-2.
433 */
434 .next_phys = 3,
435 };
436
437 *spilled = false;
438
439 vir_calculate_live_intervals(c);
440
441 /* Convert 1, 2, 4 threads to 0, 1, 2 index.
442 *
443 * V3D 4.x has double the physical register space, so 64 physical regs
444 * are available at both 1x and 2x threading, and 4x has 32.
445 */
446 int thread_index = ffs(c->threads) - 1;
447 if (c->devinfo->ver >= 40) {
448 if (thread_index >= 1)
449 thread_index--;
450 }
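/* Worked example of the mapping above: ffs() returns 1, 2, 3 for 1, 2, 4
 * threads, so thread_index starts out as 0, 1, 2.  On V3D 4.x the extra
 * decrement folds 1x and 2x threading together, so 1 or 2 threads use
 * the 64-register classes (index 0) and 4 threads use the 32-register
 * classes (index 1).
 */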
451
452 struct ra_graph *g = ra_alloc_interference_graph(c->compiler->regs,
453 c->num_temps +
454 ARRAY_SIZE(acc_nodes));
455 ra_set_select_reg_callback(g, v3d_ra_select_callback, &callback_data);
456
457 /* Make some fixed nodes for the accumulators, which we will need to
458 * interfere with when ops have implied r3/r4 writes or for the thread
459 * switches. We could represent these as classes for the nodes to
460 * live in, but the classes take up a lot of memory to set up, so we
461 * don't want to make too many.
462 */
463 for (int i = 0; i < ARRAY_SIZE(acc_nodes); i++) {
464 acc_nodes[i] = c->num_temps + i;
465 ra_set_node_reg(g, acc_nodes[i], ACC_INDEX + i);
466 }
467
468 for (uint32_t i = 0; i < c->num_temps; i++) {
469 map[i].temp = i;
470 map[i].priority = c->temp_end[i] - c->temp_start[i];
471 }
472 qsort(map, c->num_temps, sizeof(map[0]), node_to_temp_priority);
473 for (uint32_t i = 0; i < c->num_temps; i++) {
474 temp_to_node[map[i].temp] = i;
475 }
476
477 /* Figure out our register classes and preallocated registers. We
478 * start with any temp being able to be in any file, then instructions
479 * incrementally remove bits that the temp definitely can't be in.
480 */
481 memset(class_bits, CLASS_BITS_ANY, sizeof(class_bits));
482
483 int ip = 0;
484 vir_for_each_inst_inorder(inst, c) {
485 /* If the instruction writes r3/r4 (and optionally moves its
486 * result to a temp), nothing else can be stored in r3/r4 across
487 * it.
488 */
489 if (vir_writes_r3(c->devinfo, inst)) {
490 for (int i = 0; i < c->num_temps; i++) {
491 if (c->temp_start[i] < ip &&
492 c->temp_end[i] > ip) {
493 ra_add_node_interference(g,
494 temp_to_node[i],
495 acc_nodes[3]);
496 }
497 }
498 }
499 if (vir_writes_r4(c->devinfo, inst)) {
500 for (int i = 0; i < c->num_temps; i++) {
501 if (c->temp_start[i] < ip &&
502 c->temp_end[i] > ip) {
503 ra_add_node_interference(g,
504 temp_to_node[i],
505 acc_nodes[4]);
506 }
507 }
508 }
509
510 if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU) {
511 switch (inst->qpu.alu.add.op) {
512 case V3D_QPU_A_LDVPMV_IN:
513 case V3D_QPU_A_LDVPMV_OUT:
514 case V3D_QPU_A_LDVPMD_IN:
515 case V3D_QPU_A_LDVPMD_OUT:
516 case V3D_QPU_A_LDVPMP:
517 case V3D_QPU_A_LDVPMG_IN:
518 case V3D_QPU_A_LDVPMG_OUT:
519 /* LDVPMs only store to temps (the MA flag
520 * decides whether the LDVPM is in or out)
521 */
522 assert(inst->dst.file == QFILE_TEMP);
523 class_bits[inst->dst.index] &= CLASS_BIT_PHYS;
524 break;
525
526 case V3D_QPU_A_RECIP:
527 case V3D_QPU_A_RSQRT:
528 case V3D_QPU_A_EXP:
529 case V3D_QPU_A_LOG:
530 case V3D_QPU_A_SIN:
531 case V3D_QPU_A_RSQRT2:
532 /* The SFU instructions write directly to the
533 * phys regfile.
534 */
535 assert(inst->dst.file == QFILE_TEMP);
536 class_bits[inst->dst.index] &= CLASS_BIT_PHYS;
537 break;
538
539 default:
540 break;
541 }
542 }
543
544 if (inst->src[0].file == QFILE_REG) {
545 switch (inst->src[0].index) {
546 case 0:
547 case 1:
548 case 2:
549 case 3:
550 /* Payload setup instructions: Force allocate
551 * the dst to the given register (so the MOV
552 * will disappear).
553 */
554 assert(inst->qpu.alu.mul.op == V3D_QPU_M_MOV);
555 assert(inst->dst.file == QFILE_TEMP);
556 ra_set_node_reg(g,
557 temp_to_node[inst->dst.index],
558 PHYS_INDEX +
559 inst->src[0].index);
560 break;
561 }
562 }
563
564 if (inst->dst.file == QFILE_TEMP) {
565 /* Only a ldunif gets to write to R5, which only has a
566 * single 32-bit channel of storage.
567 */
568 if (!inst->qpu.sig.ldunif) {
569 class_bits[inst->dst.index] &= ~CLASS_BIT_R5;
570 } else {
571 /* Until V3D 4.x, we could only load a uniform
572 * to r5, so we'll need to spill if uniform
573 * loads interfere with each other.
574 */
575 if (c->devinfo->ver < 40) {
576 class_bits[inst->dst.index] &=
577 CLASS_BIT_R5;
578 }
579 }
580 }
581
582 if (inst->qpu.sig.thrsw) {
583 /* All accumulators are invalidated across a thread
584 * switch.
585 */
586 for (int i = 0; i < c->num_temps; i++) {
587 if (c->temp_start[i] < ip && c->temp_end[i] > ip)
588 class_bits[i] &= CLASS_BIT_PHYS;
589 }
590 }
591
592 ip++;
593 }
594
595 for (uint32_t i = 0; i < c->num_temps; i++) {
596 if (class_bits[i] == CLASS_BIT_PHYS) {
597 ra_set_node_class(g, temp_to_node[i],
598 c->compiler->reg_class_phys[thread_index]);
599 } else if (class_bits[i] == (CLASS_BIT_R5)) {
600 ra_set_node_class(g, temp_to_node[i],
601 c->compiler->reg_class_r5[thread_index]);
602 } else if (class_bits[i] == (CLASS_BIT_PHYS | CLASS_BIT_ACC)) {
603 ra_set_node_class(g, temp_to_node[i],
604 c->compiler->reg_class_phys_or_acc[thread_index]);
605 } else {
606 assert(class_bits[i] == CLASS_BITS_ANY);
607 ra_set_node_class(g, temp_to_node[i],
608 c->compiler->reg_class_any[thread_index]);
609 }
610 }
611
612 for (uint32_t i = 0; i < c->num_temps; i++) {
613 for (uint32_t j = i + 1; j < c->num_temps; j++) {
614 if (!(c->temp_start[i] >= c->temp_end[j] ||
615 c->temp_start[j] >= c->temp_end[i])) {
616 ra_add_node_interference(g,
617 temp_to_node[i],
618 temp_to_node[j]);
619 }
620 }
621 }
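/* The negated test above is the usual live-range overlap check: two
 * temps interfere exactly when temp_start[i] < temp_end[j] &&
 * temp_start[j] < temp_end[i].
 */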
622
623 /* Debug code to force a bit of register spilling, for running across
624 * conformance tests to make sure that spilling works.
625 */
626 int force_register_spills = 0;
627 if (c->spill_size < 16 * sizeof(uint32_t) * force_register_spills) {
628 int node = v3d_choose_spill_node(c, g, temp_to_node);
629 if (node != -1) {
630 v3d_spill_reg(c, map[node].temp);
631 ralloc_free(g);
632 *spilled = true;
633 return NULL;
634 }
635 }
636
637 bool ok = ra_allocate(g);
638 if (!ok) {
639 int node = v3d_choose_spill_node(c, g, temp_to_node);
640
641 /* Don't emit spills using the TMU until we've dropped the thread
642 * count first.
643 */
644 if (node != -1 &&
645 (vir_is_mov_uniform(c, map[node].temp) ||
646 thread_index == 0)) {
647 v3d_spill_reg(c, map[node].temp);
648
649 /* Ask the outer loop to call back in. */
650 *spilled = true;
651 }
652
653 ralloc_free(g);
654 return NULL;
655 }
656
657 struct qpu_reg *temp_registers = calloc(c->num_temps,
658 sizeof(*temp_registers));
659
660 for (uint32_t i = 0; i < c->num_temps; i++) {
661 int ra_reg = ra_get_node_reg(g, temp_to_node[i]);
662 if (ra_reg < PHYS_INDEX) {
663 temp_registers[i].magic = true;
664 temp_registers[i].index = (V3D_QPU_WADDR_R0 +
665 ra_reg - ACC_INDEX);
666 } else {
667 temp_registers[i].magic = false;
668 temp_registers[i].index = ra_reg - PHYS_INDEX;
669 }
670
671 /* If the value's never used, just write to the NOP register
672 * for clarity in debug output.
673 */
674 if (c->temp_start[i] == c->temp_end[i]) {
675 temp_registers[i].magic = true;
676 temp_registers[i].index = V3D_QPU_WADDR_NOP;
677 }
678 }
679
680 ralloc_free(g);
681
682 if (V3D_DEBUG & V3D_DEBUG_SHADERDB) {
683 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d spills\n",
684 vir_get_stage_name(c),
685 c->program_id, c->variant_id,
686 c->spills);
687
688 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d fills\n",
689 vir_get_stage_name(c),
690 c->program_id, c->variant_id,
691 c->fills);
692 }
693
694 return temp_registers;
695 }