/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/ralloc.h"
#include "util/register_allocate.h"
#include "common/v3d_device_info.h"
#include "v3d_compiler.h"

#define QPU_R(i) { .magic = false, .index = i }

#define ACC_INDEX     0
#define ACC_COUNT     6
#define PHYS_INDEX    (ACC_INDEX + ACC_COUNT)
#define PHYS_COUNT    64

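/* Note: the allocator's node space is laid out with the ACC_COUNT
 * accumulators (r0-r5) first and the PHYS_COUNT regfile entries after them,
 * which is what the ACC_INDEX/PHYS_INDEX offsets above encode.
 */
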
static bool
is_last_ldtmu(struct qinst *inst, struct qblock *block)
{
        list_for_each_entry_from(struct qinst, scan_inst, inst,
                                 &block->instructions, link) {
                /* The scan starts at inst itself, which the caller already
                 * knows is an LDTMU, so only look at what follows it.
                 */
                if (scan_inst == inst)
                        continue;

                if (scan_inst->qpu.sig.ldtmu)
                        return false;
                if (v3d_qpu_writes_tmu(&scan_inst->qpu))
                        return true;
        }

        return true;
}

static bool
vir_is_mov_uniform(struct v3d_compile *c, int temp)
{
        struct qinst *def = c->defs[temp];

        return (def &&
                vir_is_raw_mov(def) &&
                def->src[0].file == QFILE_UNIF);
}

static int
v3d_choose_spill_node(struct v3d_compile *c, struct ra_graph *g,
                      uint32_t *temp_to_node)
{
        const float tmu_scale = 5;
        float block_scale = 1.0;
        float spill_costs[c->num_temps];
        bool in_tmu_operation = false;
        bool started_last_seg = false;

        for (unsigned i = 0; i < c->num_temps; i++)
                spill_costs[i] = 0.0;

        /* XXX: Scale the cost up when inside of a loop. */
        vir_for_each_block(block, c) {
                vir_for_each_inst(inst, block) {
                        /* We can't insert a new TMU operation while currently
                         * in a TMU operation, and we can't insert new thread
                         * switches after starting output writes.
                         */
                        bool no_spilling =
                                (in_tmu_operation ||
                                 (c->threads > 1 && started_last_seg));

                        for (int i = 0; i < vir_get_nsrc(inst); i++) {
                                if (inst->src[i].file != QFILE_TEMP)
                                        continue;

                                int temp = inst->src[i].index;
                                if (vir_is_mov_uniform(c, temp)) {
                                        spill_costs[temp] += block_scale;
                                } else if (!no_spilling) {
                                        spill_costs[temp] += (block_scale *
                                                              tmu_scale);
                                } else {
                                        BITSET_CLEAR(c->spillable, temp);
                                }
                        }

                        if (inst->dst.file == QFILE_TEMP) {
                                int temp = inst->dst.index;

                                if (vir_is_mov_uniform(c, temp)) {
                                        /* We just rematerialize the uniform
                                         * later.
                                         */
                                } else if (!no_spilling) {
                                        spill_costs[temp] += (block_scale *
                                                              tmu_scale);
                                } else {
                                        BITSET_CLEAR(c->spillable, temp);
                                }
                        }

                        /* Refuse to spill a ldvary's dst, because that means
                         * that ldvary's r5 would end up being used across a
                         * thrsw.
                         */
                        if (inst->qpu.sig.ldvary) {
                                assert(inst->dst.file == QFILE_TEMP);
                                BITSET_CLEAR(c->spillable, inst->dst.index);
                        }

                        if (inst->is_last_thrsw)
                                started_last_seg = true;

                        if (v3d_qpu_writes_vpm(&inst->qpu) ||
                            v3d_qpu_uses_tlb(&inst->qpu))
                                started_last_seg = true;

                        /* Track when we're in between a TMU setup and the
                         * final LDTMU or TMUWT from that TMU setup.  We can't
                         * spill/fill any temps during that time, because that
                         * involves inserting a new TMU setup/LDTMU sequence.
                         */
                        if (inst->qpu.sig.ldtmu &&
                            is_last_ldtmu(inst, block))
                                in_tmu_operation = false;

                        if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                            inst->qpu.alu.add.op == V3D_QPU_A_TMUWT)
                                in_tmu_operation = false;

                        if (v3d_qpu_writes_tmu(&inst->qpu))
                                in_tmu_operation = true;
                }
        }

        for (unsigned i = 0; i < c->num_temps; i++) {
                int node = temp_to_node[i];

                if (BITSET_TEST(c->spillable, i))
                        ra_set_node_spill_cost(g, node, spill_costs[i]);
        }

        return ra_get_best_spill_node(g);
}

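/* Only temps still marked spillable ever get a cost set above, so anything
 * whose spillable bit was cleared during the walk is left at the
 * allocator's default (zero) cost and shouldn't be picked by
 * ra_get_best_spill_node().
 */
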
/* The spill offset for this thread takes a bit of setup, so do it once at
 * program start.
 */
static void
v3d_setup_spill_base(struct v3d_compile *c)
{
        c->cursor = vir_before_block(vir_entry_block(c));

        int start_num_temps = c->num_temps;

        /* Each thread wants to be in a separate region of the scratch space
         * so that the QPUs aren't fighting over cache lines.  We have the
         * driver keep a single global spill BO rather than
         * per-spilling-program BOs, so we need a uniform from the driver for
         * what the per-thread scale is.
         */
        struct qreg thread_offset =
                vir_UMUL(c,
                         vir_TIDX(c),
                         vir_uniform(c, QUNIFORM_SPILL_SIZE_PER_THREAD, 0));

        /* Each channel in a reg is 4 bytes, so scale them up by that. */
        struct qreg element_offset = vir_SHL(c, vir_EIDX(c),
                                             vir_uniform_ui(c, 2));

        c->spill_base = vir_ADD(c,
                                vir_ADD(c, thread_offset, element_offset),
                                vir_uniform(c, QUNIFORM_SPILL_OFFSET, 0));
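
        /* So, for example, channel n of QPU thread t ends up with
         * spill_base = t * spill_size_per_thread + n * 4 + spill_offset,
         * and each spilled temp is then addressed at spill_base plus the
         * 16 * sizeof(uint32_t) record offset handed out in v3d_spill_reg().
         */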

        /* Make sure that we don't spill the spilling setup instructions. */
        for (int i = start_num_temps; i < c->num_temps; i++)
                BITSET_CLEAR(c->spillable, i);
}

static void
v3d_emit_spill_tmua(struct v3d_compile *c, uint32_t spill_offset)
{
        vir_ADD_dest(c, vir_reg(QFILE_MAGIC,
                                V3D_QPU_WADDR_TMUA),
                     c->spill_base,
                     vir_uniform_ui(c, spill_offset));
}

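/* Writing the address to TMUA only kicks off the TMU transaction: a fill
 * still needs a thrsw plus an LDTMU to read the result back, and a spill
 * needs the data written to TMUD first and a thrsw plus TMUWT after, which
 * is what v3d_spill_reg() emits around each use below.
 */
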
static void
v3d_spill_reg(struct v3d_compile *c, int spill_temp)
{
        bool is_uniform = vir_is_mov_uniform(c, spill_temp);

        uint32_t spill_offset = 0;

        if (!is_uniform) {
                spill_offset = c->spill_size;
                c->spill_size += 16 * sizeof(uint32_t);

                if (spill_offset == 0)
                        v3d_setup_spill_base(c);
        }

        struct qinst *last_thrsw = c->last_thrsw;
        assert(!last_thrsw || last_thrsw->is_last_thrsw);

        int start_num_temps = c->num_temps;

        struct qreg uniform_src = c->undef;
        if (is_uniform)
                uniform_src = c->defs[spill_temp]->src[0];

        vir_for_each_inst_inorder_safe(inst, c) {
                for (int i = 0; i < vir_get_nsrc(inst); i++) {
                        if (inst->src[i].file != QFILE_TEMP ||
                            inst->src[i].index != spill_temp) {
                                continue;
                        }

                        c->cursor = vir_before_inst(inst);

                        if (is_uniform) {
                                inst->src[i] = vir_MOV(c, uniform_src);
                        } else {
                                v3d_emit_spill_tmua(c, spill_offset);
                                vir_emit_thrsw(c);
                                inst->src[i] = vir_LDTMU(c);
                                c->fills++;
                        }
                }

                if (inst->dst.file == QFILE_TEMP &&
                    inst->dst.index == spill_temp) {
                        if (is_uniform) {
                                c->cursor.link = NULL;
                                vir_remove_instruction(c, inst);
                        } else {
                                c->cursor = vir_after_inst(inst);

                                inst->dst.index = c->num_temps++;
                                vir_MOV_dest(c, vir_reg(QFILE_MAGIC,
                                                        V3D_QPU_WADDR_TMUD),
                                             inst->dst);
                                v3d_emit_spill_tmua(c, spill_offset);
                                vir_emit_thrsw(c);
                                vir_TMUWT(c);
                                c->spills++;
                        }
                }

                /* If we didn't have a last-thrsw inserted by nir_to_vir and
                 * we've been inserting thrsws, then insert a new last_thrsw
                 * right before we start the vpm/tlb sequence for the last
                 * thread segment.
                 */
                if (!is_uniform && !last_thrsw && c->last_thrsw &&
                    (v3d_qpu_writes_vpm(&inst->qpu) ||
                     v3d_qpu_uses_tlb(&inst->qpu))) {
                        c->cursor = vir_before_inst(inst);
                        vir_emit_thrsw(c);

                        last_thrsw = c->last_thrsw;
                        last_thrsw->is_last_thrsw = true;
                }
        }

        /* Make sure c->last_thrsw is the actual last thrsw, not just one we
         * inserted in our most recent unspill.
         */
        if (last_thrsw)
                c->last_thrsw = last_thrsw;

        /* Don't allow spilling of our spilling instructions.  There's no way
         * they can help get things colored.
         */
        for (int i = start_num_temps; i < c->num_temps; i++)
                BITSET_CLEAR(c->spillable, i);
}

struct v3d_ra_select_callback_data {
        uint32_t next_acc;
        uint32_t next_phys;
};

static unsigned int
v3d_ra_select_callback(struct ra_graph *g, BITSET_WORD *regs, void *data)
{
        struct v3d_ra_select_callback_data *v3d_ra = data;

        /* Choose an accumulator if possible (I think it's lower power than
         * phys regs), but round-robin through them to give post-RA
         * instruction selection more options.
         */
        for (int i = 0; i < ACC_COUNT; i++) {
                int acc_off = (v3d_ra->next_acc + i) % ACC_COUNT;
                int acc = ACC_INDEX + acc_off;

                if (BITSET_TEST(regs, acc)) {
                        v3d_ra->next_acc = acc_off + 1;
                        return acc;
                }
        }

        for (int i = 0; i < PHYS_COUNT; i++) {
                int phys_off = (v3d_ra->next_phys + i) % PHYS_COUNT;
                int phys = PHYS_INDEX + phys_off;

                if (BITSET_TEST(regs, phys)) {
                        v3d_ra->next_phys = phys_off + 1;
                        return phys;
                }
        }

        unreachable("RA must pass us at least one possible reg.");
}

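/* ra_allocate() calls back here as it colors each node, with "regs" holding
 * the bitset of registers still legal for that node, so the round-robin
 * state above persists across the whole allocation.
 */
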
bool
vir_init_reg_sets(struct v3d_compiler *compiler)
{
        /* Allocate up to 3 regfile classes, for the ways the physical
         * register file can be divided up for fragment shader threading.
         */
        int max_thread_index = (compiler->devinfo->ver >= 40 ? 2 : 3);

        compiler->regs = ra_alloc_reg_set(compiler, PHYS_INDEX + PHYS_COUNT,
                                          true);
        if (!compiler->regs)
                return false;

        for (int threads = 0; threads < max_thread_index; threads++) {
                compiler->reg_class_phys_or_acc[threads] =
                        ra_alloc_reg_class(compiler->regs);
                compiler->reg_class_phys[threads] =
                        ra_alloc_reg_class(compiler->regs);

                for (int i = PHYS_INDEX;
                     i < PHYS_INDEX + (PHYS_COUNT >> threads); i++) {
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_phys_or_acc[threads], i);
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_phys[threads], i);
                }

                for (int i = ACC_INDEX + 0; i < ACC_INDEX + ACC_COUNT; i++) {
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_phys_or_acc[threads], i);
                }
        }

        ra_set_finalize(compiler->regs, NULL);

        return true;
}

struct node_to_temp_map {
        uint32_t temp;
        uint32_t priority;
};

static int
node_to_temp_priority(const void *in_a, const void *in_b)
{
        const struct node_to_temp_map *a = in_a;
        const struct node_to_temp_map *b = in_b;

        return a->priority - b->priority;
}

#define CLASS_BIT_PHYS          (1 << 0)
#define CLASS_BIT_ACC           (1 << 1)

/**
 * Returns a mapping from QFILE_TEMP indices to struct qpu_regs.
 *
 * The return value should be freed by the caller.
 */
struct qpu_reg *
v3d_register_allocate(struct v3d_compile *c, bool *spilled)
{
        struct node_to_temp_map map[c->num_temps];
        uint32_t temp_to_node[c->num_temps];
        uint8_t class_bits[c->num_temps];
        int acc_nodes[ACC_COUNT];
        struct v3d_ra_select_callback_data callback_data = {
                .next_acc = 0,
                /* Start at RF3, to try to keep the TLB writes from using
                 * RF0-2.
                 */
                .next_phys = 3,
        };

        *spilled = false;

        vir_calculate_live_intervals(c);

        /* Convert 1, 2, 4 threads to 0, 1, 2 index.
         *
         * V3D 4.x has double the physical register space, so 64 physical regs
         * are available at both 1x and 2x threading, and 4x has 32.
         */
        int thread_index = ffs(c->threads) - 1;
        if (c->devinfo->ver >= 40) {
                if (thread_index >= 1)
                        thread_index--;
        }
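
        /* So on V3D 3.x, 1/2/4 threads use class indices 0/1/2 (64/32/16
         * phys regs from vir_init_reg_sets()), while on 4.x both 1 and 2
         * threads use index 0 (all 64 regs) and 4 threads uses index 1 (32).
         */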

        struct ra_graph *g = ra_alloc_interference_graph(c->compiler->regs,
                                                         c->num_temps +
                                                         ARRAY_SIZE(acc_nodes));
        ra_set_select_reg_callback(g, v3d_ra_select_callback, &callback_data);

        /* Make some fixed nodes for the accumulators, which we will need to
         * interfere with when ops have implied r3/r4 writes or for the thread
         * switches.  We could represent these as classes for the nodes to
         * live in, but the classes take up a lot of memory to set up, so we
         * don't want to make too many.
         */
        for (int i = 0; i < ARRAY_SIZE(acc_nodes); i++) {
                acc_nodes[i] = c->num_temps + i;
                ra_set_node_reg(g, acc_nodes[i], ACC_INDEX + i);
        }

        for (uint32_t i = 0; i < c->num_temps; i++) {
                map[i].temp = i;
                map[i].priority = c->temp_end[i] - c->temp_start[i];
        }
        qsort(map, c->num_temps, sizeof(map[0]), node_to_temp_priority);
        for (uint32_t i = 0; i < c->num_temps; i++) {
                temp_to_node[map[i].temp] = i;
        }

        /* Figure out our register classes and preallocated registers.  We
         * start with any temp being able to be in any file, then instructions
         * incrementally remove bits that the temp definitely can't be in.
         */
        memset(class_bits,
               CLASS_BIT_PHYS | CLASS_BIT_ACC,
               sizeof(class_bits));

        int ip = 0;
        vir_for_each_inst_inorder(inst, c) {
                /* If the instruction writes r3/r4 (and optionally moves its
                 * result to a temp), nothing else can be stored in r3/r4 across
                 * it.
                 */
                if (vir_writes_r3(c->devinfo, inst)) {
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip &&
                                    c->temp_end[i] > ip) {
                                        ra_add_node_interference(g,
                                                                 temp_to_node[i],
                                                                 acc_nodes[3]);
                                }
                        }
                }
                if (vir_writes_r4(c->devinfo, inst)) {
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip &&
                                    c->temp_end[i] > ip) {
                                        ra_add_node_interference(g,
                                                                 temp_to_node[i],
                                                                 acc_nodes[4]);
                                }
                        }
                }

                if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU) {
                        switch (inst->qpu.alu.add.op) {
                        case V3D_QPU_A_LDVPMV_IN:
                        case V3D_QPU_A_LDVPMV_OUT:
                        case V3D_QPU_A_LDVPMD_IN:
                        case V3D_QPU_A_LDVPMD_OUT:
                        case V3D_QPU_A_LDVPMP:
                        case V3D_QPU_A_LDVPMG_IN:
                        case V3D_QPU_A_LDVPMG_OUT:
                                /* LDVPMs only store to temps (the MA flag
                                 * decides whether the LDVPM is in or out).
                                 */
                                assert(inst->dst.file == QFILE_TEMP);
                                class_bits[inst->dst.index] &= CLASS_BIT_PHYS;
                                break;

                        case V3D_QPU_A_RECIP:
                        case V3D_QPU_A_RSQRT:
                        case V3D_QPU_A_EXP:
                        case V3D_QPU_A_LOG:
                        case V3D_QPU_A_SIN:
                        case V3D_QPU_A_RSQRT2:
                                /* The SFU instructions write directly to the
                                 * phys regfile.
                                 */
                                assert(inst->dst.file == QFILE_TEMP);
                                class_bits[inst->dst.index] &= CLASS_BIT_PHYS;
                                break;

                        default:
                                break;
                        }
                }

                if (inst->src[0].file == QFILE_REG) {
                        switch (inst->src[0].index) {
                        case 0:
                        case 1:
                        case 2:
                        case 3:
                                /* Payload setup instructions: Force allocate
                                 * the dst to the given register (so the MOV
                                 * will disappear).
                                 */
                                assert(inst->qpu.alu.mul.op == V3D_QPU_M_MOV);
                                assert(inst->dst.file == QFILE_TEMP);
                                ra_set_node_reg(g,
                                                temp_to_node[inst->dst.index],
                                                PHYS_INDEX +
                                                inst->src[0].index);
                                break;
                        }
                }

                if (inst->qpu.sig.thrsw) {
                        /* All accumulators are invalidated across a thread
                         * switch.
                         */
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip && c->temp_end[i] > ip)
                                        class_bits[i] &= CLASS_BIT_PHYS;
                        }
                }

                ip++;
        }

        for (uint32_t i = 0; i < c->num_temps; i++) {
                if (class_bits[i] == CLASS_BIT_PHYS) {
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_phys[thread_index]);
                } else {
                        assert(class_bits[i] == (CLASS_BIT_PHYS |
                                                 CLASS_BIT_ACC));
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_phys_or_acc[thread_index]);
                }
        }

        /* Any two temps whose live ranges overlap interfere. */
        for (uint32_t i = 0; i < c->num_temps; i++) {
                for (uint32_t j = i + 1; j < c->num_temps; j++) {
                        if (!(c->temp_start[i] >= c->temp_end[j] ||
                              c->temp_start[j] >= c->temp_end[i])) {
                                ra_add_node_interference(g,
                                                         temp_to_node[i],
                                                         temp_to_node[j]);
                        }
                }
        }

        /* Debug code to force a bit of register spilling, for running across
         * conformance tests to make sure that spilling works.
         */
        int force_register_spills = 0;
        if (c->spill_size < 16 * sizeof(uint32_t) * force_register_spills) {
                int node = v3d_choose_spill_node(c, g, temp_to_node);

                if (node != -1) {
                        v3d_spill_reg(c, map[node].temp);
                        ralloc_free(g);
                        *spilled = true;
                        return NULL;
                }
        }

        bool ok = ra_allocate(g);
        if (!ok) {
                int node = v3d_choose_spill_node(c, g, temp_to_node);

                /* Don't emit spills using the TMU until we've dropped thread
                 * count first.
                 */
                if (node != -1 &&
                    (vir_is_mov_uniform(c, map[node].temp) ||
                     thread_index == 0)) {
                        v3d_spill_reg(c, map[node].temp);

                        /* Ask the outer loop to call back in. */
                        *spilled = true;
                }

                ralloc_free(g);
                return NULL;
        }

        struct qpu_reg *temp_registers = calloc(c->num_temps,
                                                sizeof(*temp_registers));

        for (uint32_t i = 0; i < c->num_temps; i++) {
                int ra_reg = ra_get_node_reg(g, temp_to_node[i]);
                if (ra_reg < PHYS_INDEX) {
                        temp_registers[i].magic = true;
                        temp_registers[i].index = (V3D_QPU_WADDR_R0 +
                                                   ra_reg - ACC_INDEX);
                } else {
                        temp_registers[i].magic = false;
                        temp_registers[i].index = ra_reg - PHYS_INDEX;
                }

                /* If the value's never used, just write to the NOP register
                 * for clarity in debug output.
                 */
                if (c->temp_start[i] == c->temp_end[i]) {
                        temp_registers[i].magic = true;
                        temp_registers[i].index = V3D_QPU_WADDR_NOP;
                }
        }

        ralloc_free(g);

        if (V3D_DEBUG & V3D_DEBUG_SHADERDB) {
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d spills\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id,
                        c->spills);

                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d fills\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id,
                        c->fills);
        }

        return temp_registers;
}