util/ra: spiff out select_reg_callback
[mesa.git] / src / broadcom / compiler / vir_register_allocate.c
1 /*
2 * Copyright © 2014 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "util/ralloc.h"
25 #include "util/register_allocate.h"
26 #include "common/v3d_device_info.h"
27 #include "v3d_compiler.h"
28
29 #define QPU_R(i) { .magic = false, .index = i }
30
31 #define ACC_INDEX 0
32 #define ACC_COUNT 6
33 #define PHYS_INDEX (ACC_INDEX + ACC_COUNT)
34 #define PHYS_COUNT 64
35
36 static inline bool
37 qinst_writes_tmu(struct qinst *inst)
38 {
39 return (inst->dst.file == QFILE_MAGIC &&
40 v3d_qpu_magic_waddr_is_tmu(inst->dst.index));
41 }
42
/* Returns whether "inst" (an ldtmu) is the last ldtmu of its TMU sequence:
 * scanning forward in the block, no further ldtmu appears before the next
 * TMU setup write (or the end of the block).
 */
static bool
is_last_ldtmu(struct qinst *inst, struct qblock *block)
{
        list_for_each_entry_from(struct qinst, scan_inst, inst->link.next,
                                 &block->instructions, link) {
                /* Another result still pending from this sequence. */
                if (scan_inst->qpu.sig.ldtmu)
                        return false;
                /* A new TMU setup starts; the old sequence is done. */
                if (qinst_writes_tmu(scan_inst))
                        return true;
        }

        /* Reached the end of the block with no more TMU activity. */
        return true;
}
56
57 static bool
58 vir_is_mov_uniform(struct v3d_compile *c, int temp)
59 {
60 struct qinst *def = c->defs[temp];
61
62 return def && def->qpu.sig.ldunif;
63 }
64
/* Scores each spillable temp by estimated spill cost and returns the best
 * spill candidate node from the RA graph (or -1 if none).
 *
 * Uniform movs are cheap to "spill" (they are rematerialized at each use),
 * so they get a low cost; other temps need real TMU stores/fills and are
 * scaled up by tmu_scale.  Temps used at points where no spill code can be
 * inserted are removed from c->spillable entirely.
 */
static int
v3d_choose_spill_node(struct v3d_compile *c, struct ra_graph *g,
                      uint32_t *temp_to_node)
{
        const float tmu_scale = 5;
        float block_scale = 1.0;
        float spill_costs[c->num_temps];
        bool in_tmu_operation = false;
        bool started_last_seg = false;

        for (unsigned i = 0; i < c->num_temps; i++)
                spill_costs[i] = 0.0;

        /* XXX: Scale the cost up when inside of a loop. */
        vir_for_each_block(block, c) {
                vir_for_each_inst(inst, block) {
                        /* We can't insert a new TMU operation while currently
                         * in a TMU operation, and we can't insert new thread
                         * switches after starting output writes.
                         */
                        bool no_spilling =
                                (in_tmu_operation ||
                                 (c->threads > 1 && started_last_seg));

                        /* Source uses: each use adds to the temp's cost. */
                        for (int i = 0; i < vir_get_nsrc(inst); i++) {
                                if (inst->src[i].file != QFILE_TEMP)
                                        continue;

                                int temp = inst->src[i].index;
                                if (vir_is_mov_uniform(c, temp)) {
                                        spill_costs[temp] += block_scale;
                                } else if (!no_spilling) {
                                        spill_costs[temp] += (block_scale *
                                                              tmu_scale);
                                } else {
                                        BITSET_CLEAR(c->spillable, temp);
                                }
                        }

                        /* Destination def: same rules as uses. */
                        if (inst->dst.file == QFILE_TEMP) {
                                int temp = inst->dst.index;

                                if (vir_is_mov_uniform(c, temp)) {
                                        /* We just rematerialize the uniform
                                         * later.
                                         */
                                } else if (!no_spilling) {
                                        spill_costs[temp] += (block_scale *
                                                              tmu_scale);
                                } else {
                                        BITSET_CLEAR(c->spillable, temp);
                                }
                        }

                        /* Refuse to spill a ldvary's dst, because that means
                         * that ldvary's r5 would end up being used across a
                         * thrsw.
                         */
                        if (inst->qpu.sig.ldvary) {
                                assert(inst->dst.file == QFILE_TEMP);
                                BITSET_CLEAR(c->spillable, inst->dst.index);
                        }

                        if (inst->is_last_thrsw)
                                started_last_seg = true;

                        /* VPM/TLB writes mark the start of the output
                         * sequence of the last thread segment.
                         */
                        if (v3d_qpu_writes_vpm(&inst->qpu) ||
                            v3d_qpu_uses_tlb(&inst->qpu))
                                started_last_seg = true;

                        /* Track when we're in between a TMU setup and the
                         * final LDTMU or TMUWT from that TMU setup.  We can't
                         * spill/fill any temps during that time, because that
                         * involves inserting a new TMU setup/LDTMU sequence.
                         */
                        if (inst->qpu.sig.ldtmu &&
                            is_last_ldtmu(inst, block))
                                in_tmu_operation = false;

                        if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                            inst->qpu.alu.add.op == V3D_QPU_A_TMUWT)
                                in_tmu_operation = false;

                        if (qinst_writes_tmu(inst))
                                in_tmu_operation = true;
                }
        }

        /* Hand the accumulated costs to RA; temps cleared from c->spillable
         * above get no cost set and won't be chosen.
         */
        for (unsigned i = 0; i < c->num_temps; i++) {
                int node = temp_to_node[i];

                if (BITSET_TEST(c->spillable, i))
                        ra_set_node_spill_cost(g, node, spill_costs[i]);
        }

        return ra_get_best_spill_node(g);
}
162
/* The spill offset for this thread takes a bit of setup, so do it once at
 * program start.
 */
void
v3d_setup_spill_base(struct v3d_compile *c)
{
        /* Emit the setup code at the very top of the entry block. */
        c->cursor = vir_before_block(vir_entry_block(c));

        int start_num_temps = c->num_temps;

        /* Each thread wants to be in a separate region of the scratch space
         * so that the QPUs aren't fighting over cache lines.  We have the
         * driver keep a single global spill BO rather than
         * per-spilling-program BOs, so we need a uniform from the driver for
         * what the per-thread scale is.
         */
        struct qreg thread_offset =
                vir_UMUL(c,
                         vir_TIDX(c),
                         vir_uniform(c, QUNIFORM_SPILL_SIZE_PER_THREAD, 0));

        /* Each channel in a reg is 4 bytes, so scale them up by that. */
        struct qreg element_offset = vir_SHL(c, vir_EIDX(c),
                                             vir_uniform_ui(c, 2));

        c->spill_base = vir_ADD(c,
                                vir_ADD(c, thread_offset, element_offset),
                                vir_uniform(c, QUNIFORM_SPILL_OFFSET, 0));

        /* Make sure that we don't spill the spilling setup instructions. */
        for (int i = start_num_temps; i < c->num_temps; i++)
                BITSET_CLEAR(c->spillable, i);

        /* Restore the cursor to the end of the current block. */
        c->cursor = vir_after_block(c->cur_block);
}
198
/* Emits the TMUA address write (spill_base + spill_offset) that targets the
 * scratch slot for a following TMU spill store or fill load.
 */
static void
v3d_emit_spill_tmua(struct v3d_compile *c, uint32_t spill_offset)
{
        vir_ADD_dest(c, vir_reg(QFILE_MAGIC,
                                V3D_QPU_WADDR_TMUA),
                     c->spill_base,
                     vir_uniform_ui(c, spill_offset));
}
207
208 static void
209 v3d_spill_reg(struct v3d_compile *c, int spill_temp)
210 {
211 bool is_uniform = vir_is_mov_uniform(c, spill_temp);
212
213 uint32_t spill_offset = 0;
214
215 if (!is_uniform) {
216 uint32_t spill_offset = c->spill_size;
217 c->spill_size += V3D_CHANNELS * sizeof(uint32_t);
218
219 if (spill_offset == 0)
220 v3d_setup_spill_base(c);
221 }
222
223 struct qinst *last_thrsw = c->last_thrsw;
224 assert(!last_thrsw || last_thrsw->is_last_thrsw);
225
226 int start_num_temps = c->num_temps;
227
228 int uniform_index = ~0;
229 if (is_uniform) {
230 struct qinst *orig_unif = c->defs[spill_temp];
231 uniform_index = orig_unif->uniform;
232 }
233
234 vir_for_each_inst_inorder_safe(inst, c) {
235 for (int i = 0; i < vir_get_nsrc(inst); i++) {
236 if (inst->src[i].file != QFILE_TEMP ||
237 inst->src[i].index != spill_temp) {
238 continue;
239 }
240
241 c->cursor = vir_before_inst(inst);
242
243 if (is_uniform) {
244 struct qreg unif =
245 vir_uniform(c,
246 c->uniform_contents[uniform_index],
247 c->uniform_data[uniform_index]);
248 inst->src[i] = unif;
249 } else {
250 v3d_emit_spill_tmua(c, spill_offset);
251 vir_emit_thrsw(c);
252 inst->src[i] = vir_LDTMU(c);
253 c->fills++;
254 }
255 }
256
257 if (inst->dst.file == QFILE_TEMP &&
258 inst->dst.index == spill_temp) {
259 if (is_uniform) {
260 c->cursor.link = NULL;
261 vir_remove_instruction(c, inst);
262 } else {
263 c->cursor = vir_after_inst(inst);
264
265 inst->dst.index = c->num_temps++;
266 vir_MOV_dest(c, vir_reg(QFILE_MAGIC,
267 V3D_QPU_WADDR_TMUD),
268 inst->dst);
269 v3d_emit_spill_tmua(c, spill_offset);
270 vir_emit_thrsw(c);
271 vir_TMUWT(c);
272 c->spills++;
273 c->tmu_dirty_rcl = true;
274 }
275 }
276
277 /* If we didn't have a last-thrsw inserted by nir_to_vir and
278 * we've been inserting thrsws, then insert a new last_thrsw
279 * right before we start the vpm/tlb sequence for the last
280 * thread segment.
281 */
282 if (!is_uniform && !last_thrsw && c->last_thrsw &&
283 (v3d_qpu_writes_vpm(&inst->qpu) ||
284 v3d_qpu_uses_tlb(&inst->qpu))) {
285 c->cursor = vir_before_inst(inst);
286 vir_emit_thrsw(c);
287
288 last_thrsw = c->last_thrsw;
289 last_thrsw->is_last_thrsw = true;
290 }
291 }
292
293 /* Make sure c->last_thrsw is the actual last thrsw, not just one we
294 * inserted in our most recent unspill.
295 */
296 if (last_thrsw)
297 c->last_thrsw = last_thrsw;
298
299 /* Don't allow spilling of our spilling instructions. There's no way
300 * they can help get things colored.
301 */
302 for (int i = start_num_temps; i < c->num_temps; i++)
303 BITSET_CLEAR(c->spillable, i);
304 }
305
/* Round-robin state for v3d_ra_select_callback(): the next accumulator and
 * physical register offsets to try first on the next selection.
 */
struct v3d_ra_select_callback_data {
        uint32_t next_acc;
        uint32_t next_phys;
};
310
311 static unsigned int
312 v3d_ra_select_callback(unsigned int n, BITSET_WORD *regs, void *data)
313 {
314 struct v3d_ra_select_callback_data *v3d_ra = data;
315 int r5 = ACC_INDEX + 5;
316
317 /* Choose r5 for our ldunifs if possible (nobody else can load to that
318 * reg, and it keeps the QPU cond field free from being occupied by
319 * ldunifrf).
320 */
321 if (BITSET_TEST(regs, r5))
322 return r5;
323
324 /* Choose an accumulator if possible (I think it's lower power than
325 * phys regs), but round-robin through them to give post-RA
326 * instruction selection more options.
327 */
328 for (int i = 0; i < ACC_COUNT; i++) {
329 int acc_off = (v3d_ra->next_acc + i) % ACC_COUNT;
330 int acc = ACC_INDEX + acc_off;
331
332 if (BITSET_TEST(regs, acc)) {
333 v3d_ra->next_acc = acc_off + 1;
334 return acc;
335 }
336 }
337
338 for (int i = 0; i < PHYS_COUNT; i++) {
339 int phys_off = (v3d_ra->next_phys + i) % PHYS_COUNT;
340 int phys = PHYS_INDEX + phys_off;
341
342 if (BITSET_TEST(regs, phys)) {
343 v3d_ra->next_phys = phys_off + 1;
344 return phys;
345 }
346 }
347
348 unreachable("RA must pass us at least one possible reg.");
349 }
350
/* Builds the compiler-lifetime RA register set and per-threading-level
 * register classes.  Returns false on allocation failure.
 */
bool
vir_init_reg_sets(struct v3d_compiler *compiler)
{
        /* Allocate up to 3 regfile classes, for the ways the physical
         * register file can be divided up for fragment shader threading.
         */
        int max_thread_index = (compiler->devinfo->ver >= 40 ? 2 : 3);

        compiler->regs = ra_alloc_reg_set(compiler, PHYS_INDEX + PHYS_COUNT,
                                          true);
        if (!compiler->regs)
                return false;

        for (int threads = 0; threads < max_thread_index; threads++) {
                compiler->reg_class_any[threads] =
                        ra_alloc_reg_class(compiler->regs);
                compiler->reg_class_r5[threads] =
                        ra_alloc_reg_class(compiler->regs);
                compiler->reg_class_phys_or_acc[threads] =
                        ra_alloc_reg_class(compiler->regs);
                compiler->reg_class_phys[threads] =
                        ra_alloc_reg_class(compiler->regs);

                /* Higher threading halves the usable phys regfile
                 * (PHYS_COUNT >> threads).
                 */
                for (int i = PHYS_INDEX;
                     i < PHYS_INDEX + (PHYS_COUNT >> threads); i++) {
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_phys_or_acc[threads], i);
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_phys[threads], i);
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_any[threads], i);
                }

                /* Accumulators r0..r4; r5 is handled separately below. */
                for (int i = ACC_INDEX + 0; i < ACC_INDEX + ACC_COUNT - 1; i++) {
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_phys_or_acc[threads], i);
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_any[threads], i);
                }
                /* r5 can only store a single 32-bit value, so not much can
                 * use it.
                 */
                ra_class_add_reg(compiler->regs,
                                 compiler->reg_class_r5[threads],
                                 ACC_INDEX + 5);
                ra_class_add_reg(compiler->regs,
                                 compiler->reg_class_any[threads],
                                 ACC_INDEX + 5);
        }

        ra_set_finalize(compiler->regs, NULL);

        return true;
}
405
/* Pairs a temp with a priority (its live-range length); temps are sorted by
 * priority before being assigned RA node numbers.
 */
struct node_to_temp_map {
        uint32_t temp;
        uint32_t priority;
};
410
411 static int
412 node_to_temp_priority(const void *in_a, const void *in_b)
413 {
414 const struct node_to_temp_map *a = in_a;
415 const struct node_to_temp_map *b = in_b;
416
417 return a->priority - b->priority;
418 }
419
420 #define CLASS_BIT_PHYS (1 << 0)
421 #define CLASS_BIT_ACC (1 << 1)
422 #define CLASS_BIT_R5 (1 << 4)
423 #define CLASS_BITS_ANY (CLASS_BIT_PHYS | \
424 CLASS_BIT_ACC | \
425 CLASS_BIT_R5)
426
/**
 * Returns a mapping from QFILE_TEMP indices to struct qpu_regs.
 *
 * The return value should be freed by the caller.  On failure, returns NULL
 * and sets *spilled when a temp was spilled and the caller should retry.
 */
struct qpu_reg *
v3d_register_allocate(struct v3d_compile *c, bool *spilled)
{
        struct node_to_temp_map map[c->num_temps];
        uint32_t temp_to_node[c->num_temps];
        uint8_t class_bits[c->num_temps];
        int acc_nodes[ACC_COUNT];
        struct v3d_ra_select_callback_data callback_data = {
                .next_acc = 0,
                /* Start at RF3, to try to keep the TLB writes from using
                 * RF0-2.
                 */
                .next_phys = 3,
        };

        *spilled = false;

        vir_calculate_live_intervals(c);

        /* Convert 1, 2, 4 threads to 0, 1, 2 index.
         *
         * V3D 4.x has double the physical register space, so 64 physical regs
         * are available at both 1x and 2x threading, and 4x has 32.
         */
        int thread_index = ffs(c->threads) - 1;
        if (c->devinfo->ver >= 40) {
                if (thread_index >= 1)
                        thread_index--;
        }

        struct ra_graph *g = ra_alloc_interference_graph(c->compiler->regs,
                                                         c->num_temps +
                                                         ARRAY_SIZE(acc_nodes));
        ra_set_select_reg_callback(g, v3d_ra_select_callback, &callback_data);

        /* Make some fixed nodes for the accumulators, which we will need to
         * interfere with when ops have implied r3/r4 writes or for the thread
         * switches.  We could represent these as classes for the nodes to
         * live in, but the classes take up a lot of memory to set up, so we
         * don't want to make too many.
         */
        for (int i = 0; i < ARRAY_SIZE(acc_nodes); i++) {
                acc_nodes[i] = c->num_temps + i;
                ra_set_node_reg(g, acc_nodes[i], ACC_INDEX + i);
        }

        /* Sort temps by live-range length so node numbering follows
         * priority, then record each temp's node.
         */
        for (uint32_t i = 0; i < c->num_temps; i++) {
                map[i].temp = i;
                map[i].priority = c->temp_end[i] - c->temp_start[i];
        }
        qsort(map, c->num_temps, sizeof(map[0]), node_to_temp_priority);
        for (uint32_t i = 0; i < c->num_temps; i++) {
                temp_to_node[map[i].temp] = i;
        }

        /* Figure out our register classes and preallocated registers.  We
         * start with any temp being able to be in any file, then instructions
         * incrementally remove bits that the temp definitely can't be in.
         */
        memset(class_bits, CLASS_BITS_ANY, sizeof(class_bits));

        int ip = 0;
        vir_for_each_inst_inorder(inst, c) {
                /* If the instruction writes r3/r4 (and optionally moves its
                 * result to a temp), nothing else can be stored in r3/r4 across
                 * it.
                 */
                if (vir_writes_r3(c->devinfo, inst)) {
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip &&
                                    c->temp_end[i] > ip) {
                                        ra_add_node_interference(g,
                                                                 temp_to_node[i],
                                                                 acc_nodes[3]);
                                }
                        }
                }
                if (vir_writes_r4(c->devinfo, inst)) {
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip &&
                                    c->temp_end[i] > ip) {
                                        ra_add_node_interference(g,
                                                                 temp_to_node[i],
                                                                 acc_nodes[4]);
                                }
                        }
                }

                if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU) {
                        switch (inst->qpu.alu.add.op) {
                        case V3D_QPU_A_LDVPMV_IN:
                        case V3D_QPU_A_LDVPMV_OUT:
                        case V3D_QPU_A_LDVPMD_IN:
                        case V3D_QPU_A_LDVPMD_OUT:
                        case V3D_QPU_A_LDVPMP:
                        case V3D_QPU_A_LDVPMG_IN:
                        case V3D_QPU_A_LDVPMG_OUT:
                                /* LDVPMs only store to temps (the MA flag
                                 * decides whether the LDVPM is in or out)
                                 */
                                assert(inst->dst.file == QFILE_TEMP);
                                class_bits[inst->dst.index] &= CLASS_BIT_PHYS;
                                break;

                        case V3D_QPU_A_RECIP:
                        case V3D_QPU_A_RSQRT:
                        case V3D_QPU_A_EXP:
                        case V3D_QPU_A_LOG:
                        case V3D_QPU_A_SIN:
                        case V3D_QPU_A_RSQRT2:
                                /* The SFU instructions write directly to the
                                 * phys regfile.
                                 */
                                assert(inst->dst.file == QFILE_TEMP);
                                class_bits[inst->dst.index] &= CLASS_BIT_PHYS;
                                break;

                        default:
                                break;
                        }
                }

                if (inst->src[0].file == QFILE_REG) {
                        switch (inst->src[0].index) {
                        case 0:
                        case 1:
                        case 2:
                        case 3:
                                /* Payload setup instructions: Force allocate
                                 * the dst to the given register (so the MOV
                                 * will disappear).
                                 */
                                assert(inst->qpu.alu.mul.op == V3D_QPU_M_MOV);
                                assert(inst->dst.file == QFILE_TEMP);
                                ra_set_node_reg(g,
                                                temp_to_node[inst->dst.index],
                                                PHYS_INDEX +
                                                inst->src[0].index);
                                break;
                        }
                }

                if (inst->dst.file == QFILE_TEMP) {
                        /* Only a ldunif gets to write to R5, which only has a
                         * single 32-bit channel of storage.
                         */
                        if (!inst->qpu.sig.ldunif) {
                                class_bits[inst->dst.index] &= ~CLASS_BIT_R5;
                        } else {
                                /* Until V3D 4.x, we could only load a uniform
                                 * to r5, so we'll need to spill if uniform
                                 * loads interfere with each other.
                                 */
                                if (c->devinfo->ver < 40) {
                                        class_bits[inst->dst.index] &=
                                                CLASS_BIT_R5;
                                }
                        }
                }

                if (inst->qpu.sig.thrsw) {
                        /* All accumulators are invalidated across a thread
                         * switch.
                         */
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip && c->temp_end[i] > ip)
                                        class_bits[i] &= CLASS_BIT_PHYS;
                        }
                }

                ip++;
        }

        /* Translate the per-temp class bits into the prebuilt RA classes. */
        for (uint32_t i = 0; i < c->num_temps; i++) {
                if (class_bits[i] == CLASS_BIT_PHYS) {
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_phys[thread_index]);
                } else if (class_bits[i] == (CLASS_BIT_R5)) {
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_r5[thread_index]);
                } else if (class_bits[i] == (CLASS_BIT_PHYS | CLASS_BIT_ACC)) {
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_phys_or_acc[thread_index]);
                } else {
                        assert(class_bits[i] == CLASS_BITS_ANY);
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_any[thread_index]);
                }
        }

        /* Add interference for every pair of temps with overlapping live
         * ranges.
         */
        for (uint32_t i = 0; i < c->num_temps; i++) {
                for (uint32_t j = i + 1; j < c->num_temps; j++) {
                        if (!(c->temp_start[i] >= c->temp_end[j] ||
                              c->temp_start[j] >= c->temp_end[i])) {
                                ra_add_node_interference(g,
                                                         temp_to_node[i],
                                                         temp_to_node[j]);
                        }
                }
        }

        /* Debug code to force a bit of register spilling, for running across
         * conformance tests to make sure that spilling works.
         */
        int force_register_spills = 0;
        if (c->spill_size <
            V3D_CHANNELS * sizeof(uint32_t) * force_register_spills) {
                int node = v3d_choose_spill_node(c, g, temp_to_node);
                if (node != -1) {
                        v3d_spill_reg(c, map[node].temp);
                        ralloc_free(g);
                        *spilled = true;
                        return NULL;
                }
        }

        bool ok = ra_allocate(g);
        if (!ok) {
                int node = v3d_choose_spill_node(c, g, temp_to_node);

                /* Don't emit spills using the TMU until we've dropped thread
                 * count first.
                 */
                if (node != -1 &&
                    (vir_is_mov_uniform(c, map[node].temp) ||
                     thread_index == 0)) {
                        v3d_spill_reg(c, map[node].temp);

                        /* Ask the outer loop to call back in. */
                        *spilled = true;
                }

                ralloc_free(g);
                return NULL;
        }

        struct qpu_reg *temp_registers = calloc(c->num_temps,
                                                sizeof(*temp_registers));

        /* Translate RA's register indices back to QPU register encodings. */
        for (uint32_t i = 0; i < c->num_temps; i++) {
                int ra_reg = ra_get_node_reg(g, temp_to_node[i]);
                if (ra_reg < PHYS_INDEX) {
                        temp_registers[i].magic = true;
                        temp_registers[i].index = (V3D_QPU_WADDR_R0 +
                                                   ra_reg - ACC_INDEX);
                } else {
                        temp_registers[i].magic = false;
                        temp_registers[i].index = ra_reg - PHYS_INDEX;
                }

                /* If the value's never used, just write to the NOP register
                 * for clarity in debug output.
                 */
                if (c->temp_start[i] == c->temp_end[i]) {
                        temp_registers[i].magic = true;
                        temp_registers[i].index = V3D_QPU_WADDR_NOP;
                }
        }

        ralloc_free(g);

        return temp_registers;
}