/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/ralloc.h"
#include "util/register_allocate.h"
#include "common/v3d_device_info.h"
#include "v3d_compiler.h"

#define QPU_R(i) { .magic = false, .index = i }

#define ACC_INDEX     0
#define ACC_COUNT     6
#define PHYS_INDEX    (ACC_INDEX + ACC_COUNT)
#define PHYS_COUNT    64
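
/* Note on the allocator's register numbering used below: nodes ACC_INDEX
 * through ACC_INDEX + ACC_COUNT - 1 are the accumulators (r0-r5), and the
 * PHYS_COUNT physical registers (rf0 and up) follow at PHYS_INDEX.
 */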

static bool
qinst_writes_tmu(struct qinst *inst)
{
        return (inst->dst.file == QFILE_MAGIC &&
                v3d_qpu_magic_waddr_is_tmu(inst->dst.index));
}
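
/* Returns whether inst is the last LDTMU of its TMU sequence: scanning
 * forward, hitting another LDTMU first means more reads are outstanding,
 * while hitting a new TMU write (or the end of the block) means the
 * sequence is done.
 */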
static bool
is_last_ldtmu(struct qinst *inst, struct qblock *block)
{
        list_for_each_entry_from(struct qinst, scan_inst, inst->link.next,
                                 &block->instructions, link) {
                if (scan_inst->qpu.sig.ldtmu)
                        return false;
                if (qinst_writes_tmu(scan_inst))
                        return true;
        }

        return true;
}

static bool
vir_is_mov_uniform(struct v3d_compile *c, int temp)
{
        struct qinst *def = c->defs[temp];

        return def && def->qpu.sig.ldunif;
}
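
/* Estimates a spill cost for every spillable temp so the RA core can pick
 * the cheapest node to spill.  Uniform movs just rematerialize on unspill,
 * so they cost one unit per use; anything that needs a TMU spill/fill is
 * scaled up by tmu_scale.
 */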
static int
v3d_choose_spill_node(struct v3d_compile *c, struct ra_graph *g,
                      uint32_t *temp_to_node)
{
        const float tmu_scale = 5;
        float block_scale = 1.0;
        float spill_costs[c->num_temps];
        bool in_tmu_operation = false;
        bool started_last_seg = false;

        for (unsigned i = 0; i < c->num_temps; i++)
                spill_costs[i] = 0.0;

        /* XXX: Scale the cost up when inside of a loop. */
        vir_for_each_block(block, c) {
                vir_for_each_inst(inst, block) {
                        /* We can't insert a new TMU operation while currently
                         * in a TMU operation, and we can't insert new thread
                         * switches after starting output writes.
                         */
                        bool no_spilling =
                                (in_tmu_operation ||
                                 (c->threads > 1 && started_last_seg));

                        for (int i = 0; i < vir_get_nsrc(inst); i++) {
                                if (inst->src[i].file != QFILE_TEMP)
                                        continue;

                                int temp = inst->src[i].index;
                                if (vir_is_mov_uniform(c, temp)) {
                                        spill_costs[temp] += block_scale;
                                } else if (!no_spilling) {
                                        spill_costs[temp] += (block_scale *
                                                              tmu_scale);
                                } else {
                                        BITSET_CLEAR(c->spillable, temp);
                                }
                        }

                        if (inst->dst.file == QFILE_TEMP) {
                                int temp = inst->dst.index;

                                if (vir_is_mov_uniform(c, temp)) {
                                        /* We just rematerialize the uniform
                                         * later.
                                         */
                                } else if (!no_spilling) {
                                        spill_costs[temp] += (block_scale *
                                                              tmu_scale);
                                } else {
                                        BITSET_CLEAR(c->spillable, temp);
                                }
                        }

                        /* Refuse to spill a ldvary's dst, because that means
                         * that ldvary's r5 would end up being used across a
                         * thrsw.
                         */
                        if (inst->qpu.sig.ldvary) {
                                assert(inst->dst.file == QFILE_TEMP);
                                BITSET_CLEAR(c->spillable, inst->dst.index);
                        }

                        if (inst->is_last_thrsw)
                                started_last_seg = true;

                        if (v3d_qpu_writes_vpm(&inst->qpu) ||
                            v3d_qpu_uses_tlb(&inst->qpu))
                                started_last_seg = true;

                        /* Track when we're in between a TMU setup and the
                         * final LDTMU or TMUWT from that TMU setup.  We can't
                         * spill/fill any temps during that time, because that
                         * involves inserting a new TMU setup/LDTMU sequence.
                         */
                        if (inst->qpu.sig.ldtmu &&
                            is_last_ldtmu(inst, block))
                                in_tmu_operation = false;

                        if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                            inst->qpu.alu.add.op == V3D_QPU_A_TMUWT)
                                in_tmu_operation = false;

                        if (qinst_writes_tmu(inst))
                                in_tmu_operation = true;
                }
        }

        for (unsigned i = 0; i < c->num_temps; i++) {
                int node = temp_to_node[i];

                if (BITSET_TEST(c->spillable, i))
                        ra_set_node_spill_cost(g, node, spill_costs[i]);
        }

        return ra_get_best_spill_node(g);
}

/* The spill offset for this thread takes a bit of setup, so do it once at
 * program start.
 */
static void
v3d_setup_spill_base(struct v3d_compile *c)
{
        c->cursor = vir_before_block(vir_entry_block(c));

        int start_num_temps = c->num_temps;

        /* Each thread wants to be in a separate region of the scratch space
         * so that the QPUs aren't fighting over cache lines.  We have the
         * driver keep a single global spill BO rather than
         * per-spilling-program BOs, so we need a uniform from the driver for
         * what the per-thread scale is.
         */
        struct qreg thread_offset =
                vir_UMUL(c,
                         vir_TIDX(c),
                         vir_uniform(c, QUNIFORM_SPILL_SIZE_PER_THREAD, 0));

        /* Each channel in a reg is 4 bytes, so scale them up by that. */
        struct qreg element_offset = vir_SHL(c, vir_EIDX(c),
                                             vir_uniform_ui(c, 2));

        c->spill_base = vir_ADD(c,
                                vir_ADD(c, thread_offset, element_offset),
                                vir_uniform(c, QUNIFORM_SPILL_OFFSET, 0));
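
        /* i.e. with per-thread scratch size S, channel e of thread t ends up
         * addressing t * S + e * 4 + the driver's base offset into the
         * spill BO.
         */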

        /* Make sure that we don't spill the spilling setup instructions. */
        for (int i = start_num_temps; i < c->num_temps; i++)
                BITSET_CLEAR(c->spillable, i);

        c->cursor = vir_after_block(c->cur_block);
}

static void
v3d_emit_spill_tmua(struct v3d_compile *c, uint32_t spill_offset)
{
        vir_ADD_dest(c, vir_reg(QFILE_MAGIC,
                                V3D_QPU_WADDR_TMUA),
                     c->spill_base,
                     vir_uniform_ui(c, spill_offset));
}
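
/* Rewrites every def and use of spill_temp.  A uniform mov is rematerialized
 * with a fresh uniform load at each use; any other temp round-trips through
 * scratch via the TMU, at the cost of a thread switch per spill/fill.
 */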
static void
v3d_spill_reg(struct v3d_compile *c, int spill_temp)
{
        bool is_uniform = vir_is_mov_uniform(c, spill_temp);

        uint32_t spill_offset = 0;

        if (!is_uniform) {
                spill_offset = c->spill_size;
                c->spill_size += V3D_CHANNELS * sizeof(uint32_t);

                if (spill_offset == 0)
                        v3d_setup_spill_base(c);
        }

        struct qinst *last_thrsw = c->last_thrsw;
        assert(!last_thrsw || last_thrsw->is_last_thrsw);

        int start_num_temps = c->num_temps;

        int uniform_index = ~0;
        if (is_uniform) {
                struct qinst *orig_unif = c->defs[spill_temp];
                uniform_index = orig_unif->uniform;
        }

        vir_for_each_inst_inorder_safe(inst, c) {
                for (int i = 0; i < vir_get_nsrc(inst); i++) {
                        if (inst->src[i].file != QFILE_TEMP ||
                            inst->src[i].index != spill_temp) {
                                continue;
                        }

                        c->cursor = vir_before_inst(inst);

                        if (is_uniform) {
                                struct qreg unif =
                                        vir_uniform(c,
                                                    c->uniform_contents[uniform_index],
                                                    c->uniform_data[uniform_index]);
                                inst->src[i] = unif;
                        } else {
                                v3d_emit_spill_tmua(c, spill_offset);
                                vir_emit_thrsw(c);
                                inst->src[i] = vir_LDTMU(c);
                        }
                }

                /* XXX: nop the store if it was the last use */
                if (inst->dst.file == QFILE_TEMP &&
                    inst->dst.index == spill_temp) {
                        if (is_uniform) {
                                c->cursor.link = NULL;
                                vir_remove_instruction(c, inst);
                        } else {
                                c->cursor = vir_after_inst(inst);

                                inst->dst.index = c->num_temps++;
                                vir_MOV_dest(c, vir_reg(QFILE_MAGIC,
                                                        V3D_QPU_WADDR_TMUD),
                                             inst->dst);
                                v3d_emit_spill_tmua(c, spill_offset);
                                vir_emit_thrsw(c);
                                vir_TMUWT(c);
                                c->tmu_dirty_rcl = true;
                        }
                }

                /* If we didn't have a last-thrsw inserted by nir_to_vir and
                 * we've been inserting thrsws, then insert a new last_thrsw
                 * right before we start the vpm/tlb sequence for the last
                 * block.
                 */
                if (!is_uniform && !last_thrsw && c->last_thrsw &&
                    (v3d_qpu_writes_vpm(&inst->qpu) ||
                     v3d_qpu_uses_tlb(&inst->qpu))) {
                        c->cursor = vir_before_inst(inst);
                        vir_emit_thrsw(c);

                        last_thrsw = c->last_thrsw;
                        last_thrsw->is_last_thrsw = true;
                }
        }

        /* Make sure c->last_thrsw is the actual last thrsw, not just one we
         * inserted in our most recent unspill.
         */
        if (last_thrsw)
                c->last_thrsw = last_thrsw;

        /* Don't allow spilling of our spilling instructions.  There's no way
         * they can help get things colored.
         */
        for (int i = start_num_temps; i < c->num_temps; i++)
                BITSET_CLEAR(c->spillable, i);
}
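
/* Round-robin state for v3d_ra_select_callback(), so consecutive
 * allocations don't all pile onto the same accumulator or phys reg.
 */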
struct v3d_ra_select_callback_data {
        uint32_t next_acc;
        uint32_t next_phys;
};

static unsigned int
v3d_ra_select_callback(unsigned int n, BITSET_WORD *regs, void *data)
{
        struct v3d_ra_select_callback_data *v3d_ra = data;
        int r5 = ACC_INDEX + 5;

        /* Choose r5 for our ldunifs if possible (nobody else can load to that
         * reg, and it keeps the QPU cond field free from being occupied by
         * ldunifrf).
         */
        if (BITSET_TEST(regs, r5))
                return r5;

        /* Choose an accumulator if possible (I think it's lower power than
         * phys regs), but round-robin through them to give post-RA
         * instruction selection more options.
         */
        for (int i = 0; i < ACC_COUNT; i++) {
                int acc_off = (v3d_ra->next_acc + i) % ACC_COUNT;
                int acc = ACC_INDEX + acc_off;

                if (BITSET_TEST(regs, acc)) {
                        v3d_ra->next_acc = acc_off + 1;
                        return acc;
                }
        }

        for (int i = 0; i < PHYS_COUNT; i++) {
                int phys_off = (v3d_ra->next_phys + i) % PHYS_COUNT;
                int phys = PHYS_INDEX + phys_off;

                if (BITSET_TEST(regs, phys)) {
                        v3d_ra->next_phys = phys_off + 1;
                        return phys;
                }
        }

        unreachable("RA must pass us at least one possible reg.");
}
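
/* The register classes are built once per supported thread count: at higher
 * threading each QPU gets a smaller slice of the phys file (PHYS_COUNT >>
 * threads), so each thread count needs its own copy of every class.
 */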
bool
vir_init_reg_sets(struct v3d_compiler *compiler)
{
        /* Allocate up to 3 regfile classes, for the ways the physical
         * register file can be divided up for fragment shader threading.
         */
        int max_thread_index = (compiler->devinfo->ver >= 40 ? 2 : 3);

        compiler->regs = ra_alloc_reg_set(compiler, PHYS_INDEX + PHYS_COUNT,
                                          true);
        if (!compiler->regs)
                return false;

        for (int threads = 0; threads < max_thread_index; threads++) {
                compiler->reg_class_any[threads] =
                        ra_alloc_reg_class(compiler->regs);
                compiler->reg_class_r5[threads] =
                        ra_alloc_reg_class(compiler->regs);
                compiler->reg_class_phys_or_acc[threads] =
                        ra_alloc_reg_class(compiler->regs);
                compiler->reg_class_phys[threads] =
                        ra_alloc_reg_class(compiler->regs);

                for (int i = PHYS_INDEX;
                     i < PHYS_INDEX + (PHYS_COUNT >> threads); i++) {
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_phys_or_acc[threads], i);
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_phys[threads], i);
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_any[threads], i);
                }

                for (int i = ACC_INDEX + 0; i < ACC_INDEX + ACC_COUNT - 1; i++) {
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_phys_or_acc[threads], i);
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_any[threads], i);
                }
                /* r5 can only store a single 32-bit value, so not much can
                 * use it.
                 */
                ra_class_add_reg(compiler->regs,
                                 compiler->reg_class_r5[threads],
                                 ACC_INDEX + 5);
                ra_class_add_reg(compiler->regs,
                                 compiler->reg_class_any[threads],
                                 ACC_INDEX + 5);
        }

        ra_set_finalize(compiler->regs, NULL);

        return true;
}

struct node_to_temp_map {
        uint32_t temp;
        uint32_t priority;
};

static int
node_to_temp_priority(const void *in_a, const void *in_b)
{
        const struct node_to_temp_map *a = in_a;
        const struct node_to_temp_map *b = in_b;

        return a->priority - b->priority;
}

#define CLASS_BIT_PHYS          (1 << 0)
#define CLASS_BIT_ACC           (1 << 1)
#define CLASS_BIT_R5            (1 << 4)
#define CLASS_BITS_ANY          (CLASS_BIT_PHYS | \
                                 CLASS_BIT_ACC | \
                                 CLASS_BIT_R5)

/**
 * Returns a mapping from QFILE_TEMP indices to struct qpu_regs.
 *
 * The return value should be freed by the caller.
 */
struct qpu_reg *
v3d_register_allocate(struct v3d_compile *c, bool *spilled)
{
        struct node_to_temp_map map[c->num_temps];
        uint32_t temp_to_node[c->num_temps];
        uint8_t class_bits[c->num_temps];
        int acc_nodes[ACC_COUNT];
        struct v3d_ra_select_callback_data callback_data = {
                .next_acc = 0,
                /* Start at RF3, to try to keep the TLB writes from using
                 * RF0-2.
                 */
                .next_phys = 3,
        };

        *spilled = false;

        vir_calculate_live_intervals(c);

        /* Convert 1, 2, 4 threads to 0, 1, 2 index.
         *
         * V3D 4.x has double the physical register space, so 64 physical regs
         * are available at both 1x and 2x threading, and 4x has 32.
         */
        int thread_index = ffs(c->threads) - 1;
        if (c->devinfo->ver >= 40) {
                if (thread_index >= 1)
                        thread_index--;
        }
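
        /* e.g. pre-4.x, 1/2/4 threads yield thread_index 0/1/2 and thus
         * 64/32/16 phys regs per QPU; on 4.x the index shifts down one step
         * because the regfile is twice as large.
         */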

        struct ra_graph *g = ra_alloc_interference_graph(c->compiler->regs,
                                                         c->num_temps +
                                                         ARRAY_SIZE(acc_nodes));
        ra_set_select_reg_callback(g, v3d_ra_select_callback, &callback_data);

        /* Make some fixed nodes for the accumulators, which we will need to
         * interfere with when ops have implied r3/r4 writes or for the thread
         * switches.  We could represent these as classes for the nodes to
         * live in, but the classes take up a lot of memory to set up, so we
         * don't want to make too many.
         */
        for (int i = 0; i < ARRAY_SIZE(acc_nodes); i++) {
                acc_nodes[i] = c->num_temps + i;
                ra_set_node_reg(g, acc_nodes[i], ACC_INDEX + i);
        }

        for (uint32_t i = 0; i < c->num_temps; i++) {
                map[i].temp = i;
                map[i].priority = c->temp_end[i] - c->temp_start[i];
        }
        qsort(map, c->num_temps, sizeof(map[0]), node_to_temp_priority);
        for (uint32_t i = 0; i < c->num_temps; i++) {
                temp_to_node[map[i].temp] = i;
        }
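
        /* map[] is now sorted by ascending live-range length, so node i
         * corresponds to the temp with the i-th shortest range, and
         * temp_to_node[] holds the inverse mapping.
         */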

        /* Figure out our register classes and preallocated registers.  We
         * start with any temp being able to be in any file, then instructions
         * incrementally remove bits that the temp definitely can't be in.
         */
        memset(class_bits, CLASS_BITS_ANY, sizeof(class_bits));

        int ip = 0;
        vir_for_each_inst_inorder(inst, c) {
                /* If the instruction writes r3/r4 (and optionally moves its
                 * result to a temp), nothing else can be stored in r3/r4 across
                 * it.
                 */
                if (vir_writes_r3(c->devinfo, inst)) {
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip &&
                                    c->temp_end[i] > ip) {
                                        ra_add_node_interference(g,
                                                                 temp_to_node[i],
                                                                 acc_nodes[3]);
                                }
                        }
                }

                if (vir_writes_r4(c->devinfo, inst)) {
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip &&
                                    c->temp_end[i] > ip) {
                                        ra_add_node_interference(g,
                                                                 temp_to_node[i],
                                                                 acc_nodes[4]);
                                }
                        }
                }

                if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU) {
                        switch (inst->qpu.alu.add.op) {
                        case V3D_QPU_A_LDVPMV_IN:
                        case V3D_QPU_A_LDVPMV_OUT:
                        case V3D_QPU_A_LDVPMD_IN:
                        case V3D_QPU_A_LDVPMD_OUT:
                        case V3D_QPU_A_LDVPMP:
                        case V3D_QPU_A_LDVPMG_IN:
                        case V3D_QPU_A_LDVPMG_OUT:
                                /* LDVPMs only store to temps (the MA flag
                                 * decides whether the LDVPM is in or out)
                                 */
                                assert(inst->dst.file == QFILE_TEMP);
                                class_bits[inst->dst.index] &= CLASS_BIT_PHYS;
                                break;

                        case V3D_QPU_A_RECIP:
                        case V3D_QPU_A_RSQRT:
                        case V3D_QPU_A_EXP:
                        case V3D_QPU_A_LOG:
                        case V3D_QPU_A_SIN:
                        case V3D_QPU_A_RSQRT2:
                                /* The SFU instructions write directly to the
                                 * phys regfile.
                                 */
                                assert(inst->dst.file == QFILE_TEMP);
                                class_bits[inst->dst.index] &= CLASS_BIT_PHYS;
                                break;

                        default:
                                break;
                        }
                }

                if (inst->src[0].file == QFILE_REG) {
                        switch (inst->src[0].index) {
                        case 0:
                        case 1:
                        case 2:
                                /* Payload setup instructions: Force allocate
                                 * the dst to the given register (so the MOV
                                 * will disappear).
                                 */
                                assert(inst->qpu.alu.mul.op == V3D_QPU_M_MOV);
                                assert(inst->dst.file == QFILE_TEMP);
                                ra_set_node_reg(g,
                                                temp_to_node[inst->dst.index],
                                                PHYS_INDEX +
                                                inst->src[0].index);
                                break;
                        }
                }

                if (inst->dst.file == QFILE_TEMP) {
                        /* Only a ldunif gets to write to R5, which only has a
                         * single 32-bit channel of storage.
                         */
                        if (!inst->qpu.sig.ldunif) {
                                class_bits[inst->dst.index] &= ~CLASS_BIT_R5;
                        } else {
                                /* Until V3D 4.x, we could only load a uniform
                                 * to r5, so we'll need to spill if uniform
                                 * loads interfere with each other.
                                 */
                                if (c->devinfo->ver < 40) {
                                        class_bits[inst->dst.index] &=
                                                CLASS_BIT_R5;
                                }
                        }
                }

                if (inst->qpu.sig.thrsw) {
                        /* All accumulators are invalidated across a thread
                         * switch.
                         */
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip && c->temp_end[i] > ip)
                                        class_bits[i] &= CLASS_BIT_PHYS;
                        }
                }

                ip++;
        }

        for (uint32_t i = 0; i < c->num_temps; i++) {
                if (class_bits[i] == CLASS_BIT_PHYS) {
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_phys[thread_index]);
                } else if (class_bits[i] == (CLASS_BIT_R5)) {
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_r5[thread_index]);
                } else if (class_bits[i] == (CLASS_BIT_PHYS | CLASS_BIT_ACC)) {
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_phys_or_acc[thread_index]);
                } else {
                        assert(class_bits[i] == CLASS_BITS_ANY);
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_any[thread_index]);
                }
        }
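
        /* Any two temps whose live intervals overlap must get different
         * registers: ranges [start, end) are compatible only when one ends
         * before the other starts.
         */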
        for (uint32_t i = 0; i < c->num_temps; i++) {
                for (uint32_t j = i + 1; j < c->num_temps; j++) {
                        if (!(c->temp_start[i] >= c->temp_end[j] ||
                              c->temp_start[j] >= c->temp_end[i])) {
                                ra_add_node_interference(g,
                                                         temp_to_node[i],
                                                         temp_to_node[j]);
                        }
                }
        }

        /* Debug code to force a bit of register spilling, for running across
         * conformance tests to make sure that spilling works.
         */
        int force_register_spills = 0;
        if (c->spill_size <
            V3D_CHANNELS * sizeof(uint32_t) * force_register_spills) {
                int node = v3d_choose_spill_node(c, g, temp_to_node);
                if (node != -1) {
                        v3d_spill_reg(c, map[node].temp);
                        ralloc_free(g);
                        *spilled = true;
                        return NULL;
                }
        }

        bool ok = ra_allocate(g);
        if (!ok) {
                int node = v3d_choose_spill_node(c, g, temp_to_node);

                /* Don't emit spills using the TMU until we've dropped thread
                 * count first.
                 */
                if (node != -1 &&
                    (vir_is_mov_uniform(c, map[node].temp) ||
                     thread_index == 0)) {
                        v3d_spill_reg(c, map[node].temp);

                        /* Ask the outer loop to call back in. */
                        *spilled = true;
                }

                ralloc_free(g);
                return NULL;
        }

        struct qpu_reg *temp_registers = calloc(c->num_temps,
                                                sizeof(*temp_registers));

        for (uint32_t i = 0; i < c->num_temps; i++) {
                int ra_reg = ra_get_node_reg(g, temp_to_node[i]);
                if (ra_reg < PHYS_INDEX) {
                        temp_registers[i].magic = true;
                        temp_registers[i].index = (V3D_QPU_WADDR_R0 +
                                                   ra_reg - ACC_INDEX);
                } else {
                        temp_registers[i].magic = false;
                        temp_registers[i].index = ra_reg - PHYS_INDEX;
                }

                /* If the value's never used, just write to the NOP register
                 * for clarity in debug output.
                 */
                if (c->temp_start[i] == c->temp_end[i]) {
                        temp_registers[i].magic = true;
                        temp_registers[i].index = V3D_QPU_WADDR_NOP;
                }
        }

        ralloc_free(g);

        return temp_registers;
}