/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/ralloc.h"
#include "util/register_allocate.h"
#include "common/v3d_device_info.h"
#include "v3d_compiler.h"

#define QPU_R(i) { .magic = false, .index = i }

#define ACC_INDEX     0
#define ACC_COUNT     6
#define PHYS_INDEX    (ACC_INDEX + ACC_COUNT)
#define PHYS_COUNT    64

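/* Scans forward from a ldtmu to decide whether it is the last read of its
 * TMU operation: another ldtmu before any new TMU setup means more reads
 * are still outstanding.
 */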
static bool
is_last_ldtmu(struct qinst *inst, struct qblock *block)
{
        list_for_each_entry_from(struct qinst, scan_inst, inst->link.next,
                                 &block->instructions, link) {
                if (scan_inst->qpu.sig.ldtmu)
                        return false;
                if (v3d_qpu_writes_tmu(&scan_inst->qpu))
                        return true;
        }

        return true;
}

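/* A temp defined by a bare ldunif can be rematerialized from the uniform
 * stream at each use instead of being spilled to scratch memory.
 */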
static bool
vir_is_mov_uniform(struct v3d_compile *c, int temp)
{
        struct qinst *def = c->defs[temp];

        return def && def->qpu.sig.ldunif;
}

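/* Estimates a spill cost for every temp and hands the costs to the register
 * allocator: uniform temps are cheap (they rematerialize as a ldunif), other
 * temps pay the TMU round trip, and temps live where no TMU traffic can be
 * inserted are marked unspillable.
 */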
static int
v3d_choose_spill_node(struct v3d_compile *c, struct ra_graph *g,
                      uint32_t *temp_to_node)
{
        const float tmu_scale = 5;
        float block_scale = 1.0;
        float spill_costs[c->num_temps];
        bool in_tmu_operation = false;
        bool started_last_seg = false;

        for (unsigned i = 0; i < c->num_temps; i++)
                spill_costs[i] = 0.0;

        /* XXX: Scale the cost up when inside of a loop. */
        vir_for_each_block(block, c) {
                vir_for_each_inst(inst, block) {
                        /* We can't insert a new TMU operation while currently
                         * in a TMU operation, and we can't insert new thread
                         * switches after starting output writes.
                         */
                        bool no_spilling =
                                (in_tmu_operation ||
                                 (c->threads > 1 && started_last_seg));

                        for (int i = 0; i < vir_get_nsrc(inst); i++) {
                                if (inst->src[i].file != QFILE_TEMP)
                                        continue;

                                int temp = inst->src[i].index;
                                if (vir_is_mov_uniform(c, temp)) {
                                        spill_costs[temp] += block_scale;
                                } else if (!no_spilling) {
                                        spill_costs[temp] += (block_scale *
                                                              tmu_scale);
                                } else {
                                        BITSET_CLEAR(c->spillable, temp);
                                }
                        }

                        if (inst->dst.file == QFILE_TEMP) {
                                int temp = inst->dst.index;

                                if (vir_is_mov_uniform(c, temp)) {
                                        /* We just rematerialize the uniform
                                         * later.
                                         */
                                } else if (!no_spilling) {
                                        spill_costs[temp] += (block_scale *
                                                              tmu_scale);
                                } else {
                                        BITSET_CLEAR(c->spillable, temp);
                                }
                        }

                        /* Refuse to spill a ldvary's dst, because that means
                         * that ldvary's r5 would end up being used across a
                         * thrsw.
                         */
                        if (inst->qpu.sig.ldvary) {
                                assert(inst->dst.file == QFILE_TEMP);
                                BITSET_CLEAR(c->spillable, inst->dst.index);
                        }

                        if (inst->is_last_thrsw)
                                started_last_seg = true;

                        if (v3d_qpu_writes_vpm(&inst->qpu) ||
                            v3d_qpu_uses_tlb(&inst->qpu))
                                started_last_seg = true;

                        /* Track when we're in between a TMU setup and the
                         * final LDTMU or TMUWT from that TMU setup.  We can't
                         * spill/fill any temps during that time, because that
                         * involves inserting a new TMU setup/LDTMU sequence.
                         */
                        if (inst->qpu.sig.ldtmu &&
                            is_last_ldtmu(inst, block))
                                in_tmu_operation = false;

                        if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                            inst->qpu.alu.add.op == V3D_QPU_A_TMUWT)
                                in_tmu_operation = false;

                        if (v3d_qpu_writes_tmu(&inst->qpu))
                                in_tmu_operation = true;
                }
        }

        for (unsigned i = 0; i < c->num_temps; i++) {
                int node = temp_to_node[i];

                if (BITSET_TEST(c->spillable, i))
                        ra_set_node_spill_cost(g, node, spill_costs[i]);
        }

        return ra_get_best_spill_node(g);
}

/* The spill offset for this thread takes a bit of setup, so do it once at
 * program start.
 */
static void
v3d_setup_spill_base(struct v3d_compile *c)
{
        c->cursor = vir_before_block(vir_entry_block(c));

        int start_num_temps = c->num_temps;

        /* Each thread wants to be in a separate region of the scratch space
         * so that the QPUs aren't fighting over cache lines.  We have the
         * driver keep a single global spill BO rather than
         * per-spilling-program BOs, so we need a uniform from the driver for
         * what the per-thread scale is.
         */
        struct qreg thread_offset =
                vir_UMUL(c,
                         vir_TIDX(c),
                         vir_uniform(c, QUNIFORM_SPILL_SIZE_PER_THREAD, 0));

        /* Each channel in a reg is 4 bytes, so scale them up by that. */
        struct qreg element_offset = vir_SHL(c, vir_EIDX(c),
                                             vir_uniform_ui(c, 2));

        c->spill_base = vir_ADD(c,
                                vir_ADD(c, thread_offset, element_offset),
                                vir_uniform(c, QUNIFORM_SPILL_OFFSET, 0));

        /* Make sure that we don't spill the spilling setup instructions. */
        for (int i = start_num_temps; i < c->num_temps; i++)
                BITSET_CLEAR(c->spillable, i);

        c->cursor = vir_after_block(c->cur_block);
}

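/* Writes the scratch address for a spill slot (spill_base plus the slot's
 * byte offset) to the TMUA magic register, kicking off a TMU general
 * memory access.
 */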
static void
v3d_emit_spill_tmua(struct v3d_compile *c, uint32_t spill_offset)
{
        vir_ADD_dest(c, vir_reg(QFILE_MAGIC,
                                V3D_QPU_WADDR_TMUA),
                     c->spill_base,
                     vir_uniform_ui(c, spill_offset));
}

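/* Rewrites every def and use of spill_temp: uniforms are rematerialized by
 * re-emitting the ldunif at each use, while anything else gets a TMU store
 * at its def and a TMU load at each use, through the scratch slot reserved
 * below.
 */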
static void
v3d_spill_reg(struct v3d_compile *c, int spill_temp)
{
        bool is_uniform = vir_is_mov_uniform(c, spill_temp);

        uint32_t spill_offset = 0;

        if (!is_uniform) {
                spill_offset = c->spill_size;
                c->spill_size += V3D_CHANNELS * sizeof(uint32_t);

                if (spill_offset == 0)
                        v3d_setup_spill_base(c);
        }

        struct qinst *last_thrsw = c->last_thrsw;
        assert(!last_thrsw || last_thrsw->is_last_thrsw);

        int start_num_temps = c->num_temps;

        int uniform_index = ~0;
        if (is_uniform) {
                struct qinst *orig_unif = c->defs[spill_temp];
                uniform_index = orig_unif->uniform;
        }

        vir_for_each_inst_inorder_safe(inst, c) {
                for (int i = 0; i < vir_get_nsrc(inst); i++) {
                        if (inst->src[i].file != QFILE_TEMP ||
                            inst->src[i].index != spill_temp) {
                                continue;
                        }

                        c->cursor = vir_before_inst(inst);

                        if (is_uniform) {
                                inst->src[i] =
                                        vir_uniform(c,
                                                    c->uniform_contents[uniform_index],
                                                    c->uniform_data[uniform_index]);
                        } else {
                                v3d_emit_spill_tmua(c, spill_offset);
                                vir_emit_thrsw(c);
                                inst->src[i] = vir_LDTMU(c);
                        }
                }

                if (inst->dst.file == QFILE_TEMP &&
                    inst->dst.index == spill_temp) {
                        if (is_uniform) {
                                c->cursor.link = NULL;
                                vir_remove_instruction(c, inst);
                        } else {
                                c->cursor = vir_after_inst(inst);

                                inst->dst.index = c->num_temps++;
                                vir_MOV_dest(c, vir_reg(QFILE_MAGIC,
                                                        V3D_QPU_WADDR_TMUD),
                                             inst->dst);
                                v3d_emit_spill_tmua(c, spill_offset);
                                vir_emit_thrsw(c);
                                vir_TMUWT(c);
                        }
                }

                /* If we didn't have a last-thrsw inserted by nir_to_vir and
                 * we've been inserting thrsws, then insert a new last_thrsw
                 * right before we start the vpm/tlb sequence for the last
                 * thread segment.
                 */
                if (!is_uniform && !last_thrsw && c->last_thrsw &&
                    (v3d_qpu_writes_vpm(&inst->qpu) ||
                     v3d_qpu_uses_tlb(&inst->qpu))) {
                        c->cursor = vir_before_inst(inst);
                        vir_emit_thrsw(c);

                        last_thrsw = c->last_thrsw;
                        last_thrsw->is_last_thrsw = true;
                }
        }

        /* Make sure c->last_thrsw is the actual last thrsw, not just one we
         * inserted in our most recent unspill.
         */
        if (last_thrsw)
                c->last_thrsw = last_thrsw;

        /* Don't allow spilling of our spilling instructions.  There's no way
         * they can help get things colored.
         */
        for (int i = start_num_temps; i < c->num_temps; i++)
                BITSET_CLEAR(c->spillable, i);
}

struct v3d_ra_select_callback_data {
        uint32_t next_acc;
        uint32_t next_phys;
};

static unsigned int
v3d_ra_select_callback(struct ra_graph *g, BITSET_WORD *regs, void *data)
{
        struct v3d_ra_select_callback_data *v3d_ra = data;
        int r5 = ACC_INDEX + 5;

        /* Choose r5 for our ldunifs if possible (nobody else can load to that
         * reg, and it keeps the QPU cond field free from being occupied by
         * ldunifrf).
         */
        if (BITSET_TEST(regs, r5))
                return r5;

        /* Choose an accumulator if possible (I think it's lower power than
         * phys regs), but round-robin through them to give post-RA
         * instruction selection more options.
         */
        for (int i = 0; i < ACC_COUNT; i++) {
                int acc_off = (v3d_ra->next_acc + i) % ACC_COUNT;
                int acc = ACC_INDEX + acc_off;

                if (BITSET_TEST(regs, acc)) {
                        v3d_ra->next_acc = acc_off + 1;
                        return acc;
                }
        }

        for (int i = 0; i < PHYS_COUNT; i++) {
                int phys_off = (v3d_ra->next_phys + i) % PHYS_COUNT;
                int phys = PHYS_INDEX + phys_off;

                if (BITSET_TEST(regs, phys)) {
                        v3d_ra->next_phys = phys_off + 1;
                        return phys;
                }
        }

        unreachable("RA must pass us at least one possible reg.");
}

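/* Builds the per-thread-count register classes.  The phys file shrinks as
 * threading goes up: thread index t may only use PHYS_COUNT >> t registers,
 * i.e. 64 at 1x, 32 at 2x, and 16 at 4x on V3D 3.x.
 */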
bool
vir_init_reg_sets(struct v3d_compiler *compiler)
{
        /* Allocate up to 3 regfile classes, for the ways the physical
         * register file can be divided up for fragment shader threading.
         */
        int max_thread_index = (compiler->devinfo->ver >= 40 ? 2 : 3);

        compiler->regs = ra_alloc_reg_set(compiler, PHYS_INDEX + PHYS_COUNT,
                                          true);
        if (!compiler->regs)
                return false;

        for (int threads = 0; threads < max_thread_index; threads++) {
                compiler->reg_class_any[threads] =
                        ra_alloc_reg_class(compiler->regs);
                compiler->reg_class_r5[threads] =
                        ra_alloc_reg_class(compiler->regs);
                compiler->reg_class_phys_or_acc[threads] =
                        ra_alloc_reg_class(compiler->regs);
                compiler->reg_class_phys[threads] =
                        ra_alloc_reg_class(compiler->regs);

                for (int i = PHYS_INDEX;
                     i < PHYS_INDEX + (PHYS_COUNT >> threads); i++) {
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_phys_or_acc[threads], i);
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_phys[threads], i);
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_any[threads], i);
                }

                for (int i = ACC_INDEX + 0; i < ACC_INDEX + ACC_COUNT - 1; i++) {
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_phys_or_acc[threads], i);
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_any[threads], i);
                }

                /* r5 can only store a single 32-bit value, so not much can
                 * use it.
                 */
                ra_class_add_reg(compiler->regs,
                                 compiler->reg_class_r5[threads],
                                 ACC_INDEX + 5);
                ra_class_add_reg(compiler->regs,
                                 compiler->reg_class_any[threads],
                                 ACC_INDEX + 5);
        }

        ra_set_finalize(compiler->regs, NULL);

        return true;
}

struct node_to_temp_map {
        uint32_t temp;
        uint32_t priority;
};

static int
node_to_temp_priority(const void *in_a, const void *in_b)
{
        const struct node_to_temp_map *a = in_a;
        const struct node_to_temp_map *b = in_b;

        return a->priority - b->priority;
}

#define CLASS_BIT_PHYS  (1 << 0)
#define CLASS_BIT_ACC   (1 << 1)
#define CLASS_BIT_R5    (1 << 4)
#define CLASS_BITS_ANY  (CLASS_BIT_PHYS | \
                         CLASS_BIT_ACC | \
                         CLASS_BIT_R5)

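/* Each temp starts with CLASS_BITS_ANY and instructions narrow it down:
 * e.g. an SFU dst ends up as just CLASS_BIT_PHYS, while a non-ldunif write
 * clears CLASS_BIT_R5, leaving PHYS | ACC.
 */
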
/**
 * Returns a mapping from QFILE_TEMP indices to struct qpu_regs.
 *
 * The return value should be freed by the caller.
 */
struct qpu_reg *
v3d_register_allocate(struct v3d_compile *c, bool *spilled)
{
        struct node_to_temp_map map[c->num_temps];
        uint32_t temp_to_node[c->num_temps];
        uint8_t class_bits[c->num_temps];
        int acc_nodes[ACC_COUNT];
        struct v3d_ra_select_callback_data callback_data = {
                .next_acc = 0,
                /* Start at RF3, to try to keep the TLB writes from using
                 * RF0-2.
                 */
                .next_phys = 3,
        };

        *spilled = false;

        vir_calculate_live_intervals(c);

        /* Convert 1, 2, 4 threads to 0, 1, 2 index.
         *
         * V3D 4.x has double the physical register space, so 64 physical regs
         * are available at both 1x and 2x threading, and 4x has 32.
         */
        int thread_index = ffs(c->threads) - 1;
        if (c->devinfo->ver >= 40) {
                if (thread_index >= 1)
                        thread_index--;
        }
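
        /* So a c->threads of 1/2/4 becomes a thread_index of 0/1/2 on
         * V3D 3.x and 0/0/1 on V3D 4.x, indexing the register classes
         * built by vir_init_reg_sets().
         */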

        struct ra_graph *g = ra_alloc_interference_graph(c->compiler->regs,
                                                         c->num_temps +
                                                         ARRAY_SIZE(acc_nodes));
        ra_set_select_reg_callback(g, v3d_ra_select_callback, &callback_data);

        /* Make some fixed nodes for the accumulators, which we will need to
         * interfere with when ops have implied r3/r4 writes or for the thread
         * switches.  We could represent these as classes for the nodes to
         * live in, but the classes take up a lot of memory to set up, so we
         * don't want to make too many.
         */
        for (int i = 0; i < ARRAY_SIZE(acc_nodes); i++) {
                acc_nodes[i] = c->num_temps + i;
                ra_set_node_reg(g, acc_nodes[i], ACC_INDEX + i);
        }

        for (uint32_t i = 0; i < c->num_temps; i++) {
                map[i].temp = i;
                map[i].priority = c->temp_end[i] - c->temp_start[i];
        }
        qsort(map, c->num_temps, sizeof(map[0]), node_to_temp_priority);
        for (uint32_t i = 0; i < c->num_temps; i++) {
                temp_to_node[map[i].temp] = i;
        }
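
        /* After the sort, node i holds map[i].temp, so a node returned by
         * v3d_choose_spill_node() is translated back to a temp via
         * map[node].temp.
         */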

        /* Figure out our register classes and preallocated registers.  We
         * start with any temp being able to be in any file, then instructions
         * incrementally remove bits that the temp definitely can't be in.
         */
        memset(class_bits, CLASS_BITS_ANY, sizeof(class_bits));

        int ip = 0;
        vir_for_each_inst_inorder(inst, c) {
                /* If the instruction writes r3/r4 (and optionally moves its
                 * result to a temp), nothing else can be stored in r3/r4
                 * across it.
                 */
                if (vir_writes_r3(c->devinfo, inst)) {
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip &&
                                    c->temp_end[i] > ip) {
                                        ra_add_node_interference(g,
                                                                 temp_to_node[i],
                                                                 acc_nodes[3]);
                                }
                        }
                }

                if (vir_writes_r4(c->devinfo, inst)) {
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip &&
                                    c->temp_end[i] > ip) {
                                        ra_add_node_interference(g,
                                                                 temp_to_node[i],
                                                                 acc_nodes[4]);
                                }
                        }
                }

                if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU) {
                        switch (inst->qpu.alu.add.op) {
                        case V3D_QPU_A_LDVPMV_IN:
                        case V3D_QPU_A_LDVPMV_OUT:
                        case V3D_QPU_A_LDVPMD_IN:
                        case V3D_QPU_A_LDVPMD_OUT:
                        case V3D_QPU_A_LDVPMP:
                        case V3D_QPU_A_LDVPMG_IN:
                        case V3D_QPU_A_LDVPMG_OUT:
                                /* LDVPMs only store to temps (the MA flag
                                 * decides whether the LDVPM is in or out)
                                 */
                                assert(inst->dst.file == QFILE_TEMP);
                                class_bits[inst->dst.index] &= CLASS_BIT_PHYS;
                                break;

                        case V3D_QPU_A_RECIP:
                        case V3D_QPU_A_RSQRT:
                        case V3D_QPU_A_EXP:
                        case V3D_QPU_A_LOG:
                        case V3D_QPU_A_SIN:
                        case V3D_QPU_A_RSQRT2:
                                /* The SFU instructions write directly to the
                                 * phys regfile.
                                 */
                                assert(inst->dst.file == QFILE_TEMP);
                                class_bits[inst->dst.index] &= CLASS_BIT_PHYS;
                                break;

                        default:
                                break;
                        }
                }

                if (inst->src[0].file == QFILE_REG) {
                        switch (inst->src[0].index) {
                        case 0:
                        case 1:
                        case 2:
                        case 3:
                                /* Payload setup instructions: Force allocate
                                 * the dst to the given register (so the MOV
                                 * will disappear).
                                 */
                                assert(inst->qpu.alu.mul.op == V3D_QPU_M_MOV);
                                assert(inst->dst.file == QFILE_TEMP);
                                ra_set_node_reg(g,
                                                temp_to_node[inst->dst.index],
                                                PHYS_INDEX +
                                                inst->src[0].index);
                                break;
                        }
                }

                if (inst->dst.file == QFILE_TEMP) {
                        /* Only a ldunif gets to write to R5, which only has a
                         * single 32-bit channel of storage.
                         */
                        if (!inst->qpu.sig.ldunif) {
                                class_bits[inst->dst.index] &= ~CLASS_BIT_R5;
                        } else {
                                /* Until V3D 4.x, we could only load a uniform
                                 * to r5, so we'll need to spill if uniform
                                 * loads interfere with each other.
                                 */
                                if (c->devinfo->ver < 40) {
                                        class_bits[inst->dst.index] &=
                                                CLASS_BIT_R5;
                                }
                        }
                }

                if (inst->qpu.sig.thrsw) {
                        /* All accumulators are invalidated across a thread
                         * switch.
                         */
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip && c->temp_end[i] > ip)
                                        class_bits[i] &= CLASS_BIT_PHYS;
                        }
                }

                ip++;
        }
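
        /* Only four bit combinations should survive the narrowing above:
         * PHYS, R5, PHYS | ACC, and the untouched CLASS_BITS_ANY; the assert
         * in the fallback case below depends on that.
         */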
        for (uint32_t i = 0; i < c->num_temps; i++) {
                if (class_bits[i] == CLASS_BIT_PHYS) {
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_phys[thread_index]);
                } else if (class_bits[i] == (CLASS_BIT_R5)) {
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_r5[thread_index]);
                } else if (class_bits[i] == (CLASS_BIT_PHYS | CLASS_BIT_ACC)) {
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_phys_or_acc[thread_index]);
                } else {
                        assert(class_bits[i] == CLASS_BITS_ANY);
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_any[thread_index]);
                }
        }
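
        /* Two live ranges interfere unless one ends before the other starts,
         * i.e. unless temp_start[i] >= temp_end[j] or vice versa.
         */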
        for (uint32_t i = 0; i < c->num_temps; i++) {
                for (uint32_t j = i + 1; j < c->num_temps; j++) {
                        if (!(c->temp_start[i] >= c->temp_end[j] ||
                              c->temp_start[j] >= c->temp_end[i])) {
                                ra_add_node_interference(g,
                                                         temp_to_node[i],
                                                         temp_to_node[j]);
                        }
                }
        }

        /* Debug code to force a bit of register spilling, for running across
         * conformance tests to make sure that spilling works.
         */
        int force_register_spills = 0;
        if (c->spill_size <
            V3D_CHANNELS * sizeof(uint32_t) * force_register_spills) {
                int node = v3d_choose_spill_node(c, g, temp_to_node);

                if (node != -1) {
                        v3d_spill_reg(c, map[node].temp);
                        ralloc_free(g);
                        *spilled = true;
                        return NULL;
                }
        }

        bool ok = ra_allocate(g);
        if (!ok) {
                int node = v3d_choose_spill_node(c, g, temp_to_node);

                /* Don't emit spills using the TMU until we've dropped thread
                 * count first.
                 */
                if (node != -1 &&
                    (vir_is_mov_uniform(c, map[node].temp) ||
                     thread_index == 0)) {
                        v3d_spill_reg(c, map[node].temp);

                        /* Ask the outer loop to call back in. */
                        *spilled = true;
                }

                ralloc_free(g);
                return NULL;
        }

        struct qpu_reg *temp_registers = calloc(c->num_temps,
                                                sizeof(*temp_registers));

        for (uint32_t i = 0; i < c->num_temps; i++) {
                int ra_reg = ra_get_node_reg(g, temp_to_node[i]);
                if (ra_reg < PHYS_INDEX) {
                        temp_registers[i].magic = true;
                        temp_registers[i].index = (V3D_QPU_WADDR_R0 +
                                                   ra_reg - ACC_INDEX);
                } else {
                        temp_registers[i].magic = false;
                        temp_registers[i].index = ra_reg - PHYS_INDEX;
                }

                /* If the value's never used, just write to the NOP register
                 * for clarity in debug output.
                 */
                if (c->temp_start[i] == c->temp_end[i]) {
                        temp_registers[i].magic = true;
                        temp_registers[i].index = V3D_QPU_WADDR_NOP;
                }
        }

        ralloc_free(g);

        return temp_registers;
}