/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/ralloc.h"
#include "util/register_allocate.h"
#include "common/v3d_device_info.h"
#include "v3d_compiler.h"

#define QPU_R(i) { .magic = false, .index = i }

#define ACC_INDEX     0
#define ACC_COUNT     6
#define PHYS_INDEX    (ACC_INDEX + ACC_COUNT)
#define PHYS_COUNT    64
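
/* The allocator's register space is laid out with the six accumulators
 * (r0-r5) at ACC_INDEX through ACC_INDEX + ACC_COUNT - 1, followed by the
 * rows of the physical register file starting at PHYS_INDEX.
 */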

/* Returns true if no other LDTMU appears between "inst" and the next TMU
 * setup in the block, i.e. "inst" is the last LDTMU of its TMU operation.
 */
static bool
is_last_ldtmu(struct qinst *inst, struct qblock *block)
{
        list_for_each_entry_from(struct qinst, scan_inst, inst,
                                 &block->instructions, link) {
                if (scan_inst == inst)
                        continue;

                if (scan_inst->qpu.sig.ldtmu)
                        return false;
                if (v3d_qpu_writes_tmu(&scan_inst->qpu))
                        return true;
        }

        return true;
}
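
/* Estimates a spill cost for every temp: each use or def adds block_scale
 * to the temp's cost, while temps that are live at a point where no
 * spill/fill code can be inserted get dropped from the spillable set
 * entirely.
 */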
static int
v3d_choose_spill_node(struct v3d_compile *c, struct ra_graph *g,
                      uint32_t *temp_to_node)
{
        float block_scale = 1.0;
        float spill_costs[c->num_temps];
        bool in_tmu_operation = false;
        bool started_last_seg = false;

        for (unsigned i = 0; i < c->num_temps; i++)
                spill_costs[i] = 0.0;

        /* XXX: Scale the cost up when inside of a loop. */
        vir_for_each_block(block, c) {
                vir_for_each_inst(inst, block) {
                        /* We can't insert a new TMU operation while currently
                         * in a TMU operation, and we can't insert new thread
                         * switches after starting output writes.
                         */
                        bool no_spilling =
                                (in_tmu_operation ||
                                 (c->threads > 1 && started_last_seg));

                        for (int i = 0; i < vir_get_nsrc(inst); i++) {
                                if (inst->src[i].file != QFILE_TEMP)
                                        continue;

                                int temp = inst->src[i].index;
                                if (no_spilling) {
                                        BITSET_CLEAR(c->spillable,
                                                     temp);
                                } else {
                                        spill_costs[temp] += block_scale;
                                }
                        }

                        if (inst->dst.file == QFILE_TEMP) {
                                int temp = inst->dst.index;

                                if (no_spilling) {
                                        BITSET_CLEAR(c->spillable,
                                                     temp);
                                } else {
                                        spill_costs[temp] += block_scale;
                                }
                        }

                        if (inst->is_last_thrsw)
                                started_last_seg = true;

                        if (v3d_qpu_writes_vpm(&inst->qpu) ||
                            v3d_qpu_uses_tlb(&inst->qpu))
                                started_last_seg = true;

                        /* Track when we're in between a TMU setup and the
                         * final LDTMU from that TMU setup.  We can't
                         * spill/fill any temps during that time, because that
                         * involves inserting a new TMU setup/LDTMU sequence.
                         */
                        if (inst->qpu.sig.ldtmu &&
                            is_last_ldtmu(inst, block))
                                in_tmu_operation = false;

                        if (v3d_qpu_writes_tmu(&inst->qpu))
                                in_tmu_operation = true;
                }
        }

        for (unsigned i = 0; i < c->num_temps; i++) {
                int node = temp_to_node[i];

                if (BITSET_TEST(c->spillable, i))
                        ra_set_node_spill_cost(g, node, spill_costs[i]);
        }

        return ra_get_best_spill_node(g);
}
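
/* When allocation fails at the lowest thread count, one temp is chosen to
 * live in the scratch BO instead of a register.  The helpers below build
 * the per-thread scratch base address and emit the TMU accesses that move
 * values in and out of scratch.
 */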

/* The spill offset for this thread takes a bit of setup, so do it once at
 * program start.
 */
static void
v3d_setup_spill_base(struct v3d_compile *c)
{
        c->cursor = vir_before_block(vir_entry_block(c));

        int start_num_temps = c->num_temps;

        /* Each thread wants to be in a separate region of the scratch space
         * so that the QPUs aren't fighting over cache lines.  We have the
         * driver keep a single global spill BO rather than
         * per-spilling-program BOs, so we need a uniform from the driver for
         * what the per-thread scale is.
         */
        struct qreg thread_offset =
                vir_UMUL(c,
                         vir_TIDX(c),
                         vir_uniform(c, QUNIFORM_SPILL_SIZE_PER_THREAD, 0));

        /* Each channel in a reg is 4 bytes, so scale them up by that. */
        struct qreg element_offset = vir_SHL(c, vir_EIDX(c),
                                             vir_uniform_ui(c, 2));

        c->spill_base = vir_ADD(c,
                                vir_ADD(c, thread_offset, element_offset),
                                vir_uniform(c, QUNIFORM_SPILL_OFFSET, 0));

        /* Make sure that we don't spill the spilling setup instructions. */
        for (int i = start_num_temps; i < c->num_temps; i++)
                BITSET_CLEAR(c->spillable, i);
}
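
/* Each spilled temp gets a 16-dword slot in scratch (one dword per SIMD
 * channel); spill_base already folds in the per-thread region and the
 * per-channel byte offset.  Writing the slot address to the TMUA magic
 * register kicks off a general TMU scratch access.
 */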
static void
v3d_emit_spill_tmua(struct v3d_compile *c, uint32_t spill_offset)
{
        vir_ADD_dest(c, vir_reg(QFILE_MAGIC,
                                V3D_QPU_WADDR_TMUA),
                     c->spill_base,
                     vir_uniform_ui(c, spill_offset));
}
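
/* Rewrites every def and use of spill_temp to go through scratch: defs
 * grow a TMUD data write plus a TMUA store setup after them, and uses
 * become a TMUA load setup followed by an LDTMU into a fresh temp.  Each
 * access needs its own thread switch.
 */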
static void
v3d_spill_reg(struct v3d_compile *c, int spill_temp)
{
        uint32_t spill_offset = c->spill_size;
        c->spill_size += 16 * sizeof(uint32_t);
        if (spill_offset == 0)
                v3d_setup_spill_base(c);

        struct qinst *last_thrsw = c->last_thrsw;
        assert(!last_thrsw || last_thrsw->is_last_thrsw);

        int start_num_temps = c->num_temps;

        vir_for_each_inst_inorder(inst, c) {
                for (int i = 0; i < vir_get_nsrc(inst); i++) {
                        if (inst->src[i].file != QFILE_TEMP ||
                            inst->src[i].index != spill_temp) {
                                continue;
                        }

                        c->cursor = vir_before_inst(inst);

                        v3d_emit_spill_tmua(c, spill_offset);
                        vir_emit_thrsw(c);
                        inst->src[i] = vir_LDTMU(c);
                        c->fills++;
                }

                if (inst->dst.file == QFILE_TEMP &&
                    inst->dst.index == spill_temp) {
                        c->cursor = vir_after_inst(inst);

                        inst->dst.index = c->num_temps++;
                        vir_MOV_dest(c, vir_reg(QFILE_MAGIC, V3D_QPU_WADDR_TMUD),
                                     inst->dst);
                        v3d_emit_spill_tmua(c, spill_offset);
                        vir_emit_thrsw(c);
                        c->spills++;
                }

                /* If we didn't have a last-thrsw inserted by nir_to_vir and
                 * we've been inserting thrsws, then insert a new last_thrsw
                 * right before we start the vpm/tlb sequence for the last
                 * thread segment.
                 */
                if (!last_thrsw && c->last_thrsw &&
                    (v3d_qpu_writes_vpm(&inst->qpu) ||
                     v3d_qpu_uses_tlb(&inst->qpu))) {
                        c->cursor = vir_before_inst(inst);
                        vir_emit_thrsw(c);

                        last_thrsw = c->last_thrsw;
                        last_thrsw->is_last_thrsw = true;
                }
        }

        /* Make sure c->last_thrsw is the actual last thrsw, not just one we
         * inserted in our most recent unspill.
         */
        if (last_thrsw)
                c->last_thrsw = last_thrsw;

        /* Don't allow spilling of our spilling instructions.  There's no way
         * they can help get things colored.
         */
        for (int i = start_num_temps; i < c->num_temps; i++)
                BITSET_CLEAR(c->spillable, i);
}
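
/* Round-robin state for the register selection callback, so that
 * back-to-back allocations don't all pile onto the same accumulator or
 * phys register.
 */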
struct v3d_ra_select_callback_data {
        uint32_t next_acc;
        uint32_t next_phys;
};

static unsigned int
v3d_ra_select_callback(struct ra_graph *g, BITSET_WORD *regs, void *data)
{
        struct v3d_ra_select_callback_data *v3d_ra = data;

        /* Choose an accumulator if possible (I think it's lower power than
         * phys regs), but round-robin through them to give post-RA
         * instruction selection more options.
         */
        for (int i = 0; i < ACC_COUNT; i++) {
                int acc_off = (v3d_ra->next_acc + i) % ACC_COUNT;
                int acc = ACC_INDEX + acc_off;

                if (BITSET_TEST(regs, acc)) {
                        v3d_ra->next_acc = acc_off + 1;
                        return acc;
                }
        }

        for (int i = 0; i < PHYS_COUNT; i++) {
                int phys_off = (v3d_ra->next_phys + i) % PHYS_COUNT;
                int phys = PHYS_INDEX + phys_off;

                if (BITSET_TEST(regs, phys)) {
                        v3d_ra->next_phys = phys_off + 1;
                        return phys;
                }
        }

        unreachable("RA must pass us at least one possible reg.");
}

bool
vir_init_reg_sets(struct v3d_compiler *compiler)
{
        /* Allocate up to 3 regfile classes, for the ways the physical
         * register file can be divided up for fragment shader threading.
         */
        int max_thread_index = (compiler->devinfo->ver >= 40 ? 2 : 3);

        compiler->regs = ra_alloc_reg_set(compiler, PHYS_INDEX + PHYS_COUNT,
                                          true);
        if (!compiler->regs)
                return false;

        for (int threads = 0; threads < max_thread_index; threads++) {
                compiler->reg_class_phys_or_acc[threads] =
                        ra_alloc_reg_class(compiler->regs);
                compiler->reg_class_phys[threads] =
                        ra_alloc_reg_class(compiler->regs);

                /* Each doubling of the thread count halves the part of the
                 * phys file available to one thread.
                 */
                for (int i = PHYS_INDEX;
                     i < PHYS_INDEX + (PHYS_COUNT >> threads); i++) {
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_phys_or_acc[threads], i);
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_phys[threads], i);
                }

                for (int i = ACC_INDEX + 0; i < ACC_INDEX + ACC_COUNT; i++) {
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_phys_or_acc[threads], i);
                }
        }

        ra_set_finalize(compiler->regs, NULL);

        return true;
}
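
/* Temps are assigned RA nodes in order of this sorted priority (live-range
 * length), and temp_to_node[] inverts the mapping so instruction walks can
 * find a temp's node again.
 */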
struct node_to_temp_map {
        uint32_t temp;
        uint32_t priority;
};

static int
node_to_temp_priority(const void *in_a, const void *in_b)
{
        const struct node_to_temp_map *a = in_a;
        const struct node_to_temp_map *b = in_b;

        return a->priority - b->priority;
}

#define CLASS_BIT_PHYS          (1 << 0)
#define CLASS_BIT_R0_R2         (1 << 1)
#define CLASS_BIT_R3            (1 << 2)
#define CLASS_BIT_R4            (1 << 3)
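
/* Only two register classes actually exist (phys-only, and phys-or-acc),
 * so any temp that loses one of the accumulator bits below ends up in the
 * phys-only class; the assert in the class-assignment loop checks this.
 */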

/**
 * Returns a mapping from QFILE_TEMP indices to struct qpu_regs.
 *
 * The return value should be freed by the caller.
 */
struct qpu_reg *
v3d_register_allocate(struct v3d_compile *c, bool *spilled)
{
        struct node_to_temp_map map[c->num_temps];
        uint32_t temp_to_node[c->num_temps];
        uint8_t class_bits[c->num_temps];
        struct qpu_reg *temp_registers = calloc(c->num_temps,
                                                sizeof(*temp_registers));
        int acc_nodes[ACC_COUNT];
        struct v3d_ra_select_callback_data callback_data = {
                .next_acc = 0,
                /* Start at RF3, to try to keep the TLB writes from using
                 * RF0-2.
                 */
                .next_phys = 3,
        };

        *spilled = false;

        vir_calculate_live_intervals(c);

        /* Convert 1, 2, 4 threads to 0, 1, 2 index.
         *
         * V3D 4.x has double the physical register space, so 64 physical regs
         * are available at both 1x and 2x threading, and 4x has 32.
         */
        int thread_index = ffs(c->threads) - 1;
        if (c->devinfo->ver >= 40) {
                if (thread_index >= 1)
                        thread_index--;
        }

        struct ra_graph *g = ra_alloc_interference_graph(c->compiler->regs,
                                                         c->num_temps +
                                                         ARRAY_SIZE(acc_nodes));
        ra_set_select_reg_callback(g, v3d_ra_select_callback, &callback_data);

        /* Make some fixed nodes for the accumulators, which we will need to
         * interfere with when ops have implied r3/r4 writes or for the thread
         * switches.  We could represent these as classes for the nodes to
         * live in, but the classes take up a lot of memory to set up, so we
         * don't want to make too many.
         */
        for (int i = 0; i < ARRAY_SIZE(acc_nodes); i++) {
                acc_nodes[i] = c->num_temps + i;
                ra_set_node_reg(g, acc_nodes[i], ACC_INDEX + i);
        }

        for (uint32_t i = 0; i < c->num_temps; i++) {
                map[i].temp = i;
                map[i].priority = c->temp_end[i] - c->temp_start[i];
        }
        qsort(map, c->num_temps, sizeof(map[0]), node_to_temp_priority);
        for (uint32_t i = 0; i < c->num_temps; i++) {
                temp_to_node[map[i].temp] = i;
        }

        /* Figure out our register classes and preallocated registers.  We
         * start with any temp being able to be in any file, then instructions
         * incrementally remove bits that the temp definitely can't be in.
         */
        memset(class_bits,
               CLASS_BIT_PHYS | CLASS_BIT_R0_R2 | CLASS_BIT_R3 | CLASS_BIT_R4,
               sizeof(class_bits));

        /* ip counts instructions in emission order, for comparison against
         * the temp_start/temp_end live intervals.
         */
        int ip = 0;
        vir_for_each_inst_inorder(inst, c) {
                /* If the instruction writes r3/r4 (and optionally moves its
                 * result to a temp), nothing else can be stored in r3/r4
                 * across it.
                 */
                if (vir_writes_r3(c->devinfo, inst)) {
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip &&
                                    c->temp_end[i] > ip) {
                                        ra_add_node_interference(g,
                                                                 temp_to_node[i],
                                                                 acc_nodes[3]);
                                }
                        }
                }

                if (vir_writes_r4(c->devinfo, inst)) {
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip &&
                                    c->temp_end[i] > ip) {
                                        ra_add_node_interference(g,
                                                                 temp_to_node[i],
                                                                 acc_nodes[4]);
                                }
                        }
                }

                if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU) {
                        switch (inst->qpu.alu.add.op) {
                        case V3D_QPU_A_LDVPMV_IN:
                        case V3D_QPU_A_LDVPMV_OUT:
                        case V3D_QPU_A_LDVPMD_IN:
                        case V3D_QPU_A_LDVPMD_OUT:
                        case V3D_QPU_A_LDVPMP:
                        case V3D_QPU_A_LDVPMG_IN:
                        case V3D_QPU_A_LDVPMG_OUT:
                                /* LDVPMs only store to temps (the MA flag
                                 * decides whether the LDVPM is in or out).
                                 */
                                assert(inst->dst.file == QFILE_TEMP);
                                class_bits[inst->dst.index] &= CLASS_BIT_PHYS;
                                break;

                        case V3D_QPU_A_RECIP:
                        case V3D_QPU_A_RSQRT:
                        case V3D_QPU_A_EXP:
                        case V3D_QPU_A_LOG:
                        case V3D_QPU_A_SIN:
                        case V3D_QPU_A_RSQRT2:
                                /* The SFU instructions write directly to the
                                 * phys regfile.
                                 */
                                assert(inst->dst.file == QFILE_TEMP);
                                class_bits[inst->dst.index] &= CLASS_BIT_PHYS;
                                break;

                        default:
                                break;
                        }
                }

                if (inst->src[0].file == QFILE_REG) {
                        switch (inst->src[0].index) {
                        case 0:
                        case 1:
                        case 2:
                                /* Payload setup instructions: Force allocate
                                 * the dst to the given register (so the MOV
                                 * will disappear).
                                 */
                                assert(inst->qpu.alu.mul.op == V3D_QPU_M_MOV);
                                assert(inst->dst.file == QFILE_TEMP);
                                ra_set_node_reg(g,
                                                temp_to_node[inst->dst.index],
                                                PHYS_INDEX +
                                                inst->src[0].index);
                                break;
                        }
                }

                if (inst->qpu.sig.thrsw) {
                        /* All accumulators are invalidated across a thread
                         * switch.
                         */
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip && c->temp_end[i] > ip)
                                        class_bits[i] &= CLASS_BIT_PHYS;
                        }
                }

                ip++;
        }

        for (uint32_t i = 0; i < c->num_temps; i++) {
                if (class_bits[i] == CLASS_BIT_PHYS) {
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_phys[thread_index]);
                } else {
                        assert(class_bits[i] == (CLASS_BIT_PHYS |
                                                 CLASS_BIT_R0_R2 |
                                                 CLASS_BIT_R3 |
                                                 CLASS_BIT_R4));
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_phys_or_acc[thread_index]);
                }
        }

        for (uint32_t i = 0; i < c->num_temps; i++) {
                for (uint32_t j = i + 1; j < c->num_temps; j++) {
                        if (!(c->temp_start[i] >= c->temp_end[j] ||
                              c->temp_start[j] >= c->temp_end[i])) {
                                ra_add_node_interference(g,
                                                         temp_to_node[i],
                                                         temp_to_node[j]);
                        }
                }
        }

        bool ok = ra_allocate(g);
        if (!ok) {
                /* Try to spill, if we can't reduce threading first. */
                if (thread_index == 0) {
                        int node = v3d_choose_spill_node(c, g, temp_to_node);

                        if (node != -1) {
                                v3d_spill_reg(c, map[node].temp);
                                ralloc_free(g);
                                free(temp_registers);

                                /* Ask the outer loop to call back in. */
                                *spilled = true;
                                return NULL;
                        }
                }

                ralloc_free(g);
                free(temp_registers);
                return NULL;
        }
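
        /* Read the allocation back out of the graph: results below
         * PHYS_INDEX are accumulators (returned as magic writes R0..R5),
         * and everything else maps back to a phys regfile row.
         */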
        for (uint32_t i = 0; i < c->num_temps; i++) {
                int ra_reg = ra_get_node_reg(g, temp_to_node[i]);
                if (ra_reg < PHYS_INDEX) {
                        temp_registers[i].magic = true;
                        temp_registers[i].index = (V3D_QPU_WADDR_R0 +
                                                   ra_reg - ACC_INDEX);
                } else {
                        temp_registers[i].magic = false;
                        temp_registers[i].index = ra_reg - PHYS_INDEX;
                }

                /* If the value's never used, just write to the NOP register
                 * for clarity in debug output.
                 */
                if (c->temp_start[i] == c->temp_end[i]) {
                        temp_registers[i].magic = true;
                        temp_registers[i].index = V3D_QPU_WADDR_NOP;
                }
        }

        ralloc_free(g);

        if (V3D_DEBUG & V3D_DEBUG_SHADERDB) {
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d spills\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id,
                        c->spills);

                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d fills\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id,
                        c->fills);
        }

        return temp_registers;
}