/*
 * Copyright © 2010 Intel Corporation
 * Copyright © 2014-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
/**
 * @file
 *
 * The basic model of the list scheduler is to take a basic block, compute a
 * DAG of the dependencies, and make a list of the DAG heads. Heuristically
 * pick a DAG head, then put all the children that are now DAG heads into the
 * list of things to schedule.
 *
 * The goal of scheduling here is to pack pairs of operations together in a
 * single QPU instruction.
 */
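/* For orientation only (not driver code), the loop this file implements
 * looks roughly like:
 *
 *     build_dag(block);                        // calculate_{forward,reverse}_deps()
 *     while (!list_empty(&dag->heads)) {
 *             n = pick_best_head(dag);         // choose_instruction_to_schedule()
 *             m = pick_mergeable_head(dag, n); // optional pairing
 *             emit(merge(n, m));               // qpu_merge_inst()
 *             prune(dag, n);                   // children may become new heads
 *     }
 *
 * The helper names in this sketch are descriptive stand-ins, not real
 * functions; the corresponding real functions are noted alongside them.
 */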
#include "qpu/qpu_disasm.h"
#include "v3d_compiler.h"
#include "util/ralloc.h"
#include "util/dag.h"
static bool debug;

struct schedule_node_child;
struct schedule_node {
        struct dag_node dag;
        struct list_head link;
        struct qinst *inst;

        /* Longest cycles + instruction_latency() of any parent of this node. */
        uint32_t unblocked_time;

        /**
         * Minimum number of cycles from scheduling this instruction until the
         * end of the program, based on the slowest dependency chain through
         * the children.
         */
        uint32_t delay;

        /**
         * cycles between this instruction being scheduled and when its result
         * can be used.
         */
        uint32_t latency;
};
/* When walking the instructions in reverse, we need to swap before/after in
 * add_dep().
 */
enum direction { F, R };
struct schedule_state {
        const struct v3d_device_info *devinfo;
        struct dag *dag;
        struct schedule_node *last_r[6];
        struct schedule_node *last_rf[64];
        struct schedule_node *last_sf;
        struct schedule_node *last_vpm_read;
        struct schedule_node *last_tmu_write;
        struct schedule_node *last_tmu_config;
        struct schedule_node *last_tlb;
        struct schedule_node *last_vpm;
        struct schedule_node *last_unif;
        struct schedule_node *last_rtop;
        enum direction dir;
        /* Estimated cycle when the current instruction would start. */
        uint32_t time;
};
static void
add_dep(struct schedule_state *state,
        struct schedule_node *before,
        struct schedule_node *after,
        bool write)
{
        bool write_after_read = !write && state->dir == R;
        void *edge_data = (void *)(uintptr_t)write_after_read;

        if (!before || !after)
                return;

        assert(before != after);

        if (state->dir == F)
                dag_add_edge(&before->dag, &after->dag, edge_data);
        else
                dag_add_edge(&after->dag, &before->dag, edge_data);
}
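/* Worked example (illustrative only): given
 *
 *     A: mov rf3, rf1        ; reads rf1
 *     B: mov rf1, rf2        ; writes rf1
 *
 * the reverse (R) pass visits B first and leaves it in last_rf[1]; when A's
 * read is processed, add_dep(state, B, A, false) runs with dir == R, so
 * write_after_read is true and the edge is recorded as A -> B. That is the
 * write-after-read dependency keeping B from being hoisted above A, and the
 * nonzero edge_data is what lets pre_remove_head() drop WAR edges early.
 */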
static void
add_read_dep(struct schedule_state *state,
             struct schedule_node *before,
             struct schedule_node *after)
{
        add_dep(state, before, after, false);
}
static void
add_write_dep(struct schedule_state *state,
              struct schedule_node **before,
              struct schedule_node *after)
{
        add_dep(state, *before, after, true);
        *before = after;
}
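/* Usage note: because add_write_dep() updates *before in place, successive
 * writes to the same location chain through the tracking pointer. E.g.
 * (illustrative) three writes to rf7 each call
 * add_write_dep(state, &state->last_rf[7], n) in turn, producing the chain
 * W1 -> W2 -> W3 rather than a dependency of every write on every other.
 */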
static bool
qpu_inst_is_tlb(const struct v3d_qpu_instr *inst)
{
        if (inst->sig.ldtlb || inst->sig.ldtlbu)
                return true;

        if (inst->type != V3D_QPU_INSTR_TYPE_ALU)
                return false;

        if (inst->alu.add.magic_write &&
            (inst->alu.add.waddr == V3D_QPU_WADDR_TLB ||
             inst->alu.add.waddr == V3D_QPU_WADDR_TLBU))
                return true;

        if (inst->alu.mul.magic_write &&
            (inst->alu.mul.waddr == V3D_QPU_WADDR_TLB ||
             inst->alu.mul.waddr == V3D_QPU_WADDR_TLBU))
                return true;

        return false;
}
static void
process_mux_deps(struct schedule_state *state, struct schedule_node *n,
                 enum v3d_qpu_mux mux)
{
        switch (mux) {
        case V3D_QPU_MUX_A:
                add_read_dep(state, state->last_rf[n->inst->qpu.raddr_a], n);
                break;
        case V3D_QPU_MUX_B:
                if (!n->inst->qpu.sig.small_imm) {
                        add_read_dep(state,
                                     state->last_rf[n->inst->qpu.raddr_b], n);
                }
                break;
        default:
                add_read_dep(state, state->last_r[mux - V3D_QPU_MUX_R0], n);
                break;
        }
}
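/* Note that raddr_a/raddr_b are fields of the whole instruction, shared by
 * the add and mul halves: an op like (illustrative)
 *
 *     fadd rf10, rf1, rf2
 *
 * reads rf1 through mux A and rf2 through mux B, so process_mux_deps() is
 * called once per source and resolves each mux back to the regfile slot or
 * accumulator whose last writer we must depend on.
 */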
static void
process_waddr_deps(struct schedule_state *state, struct schedule_node *n,
                   uint32_t waddr, bool magic)
{
        if (!magic) {
                add_write_dep(state, &state->last_rf[waddr], n);
        } else if (v3d_qpu_magic_waddr_is_tmu(waddr)) {
                /* XXX perf: For V3D 4.x, we could reorder TMU writes other
                 * than the TMUS/TMUD/TMUA to improve scheduling flexibility.
                 */
                add_write_dep(state, &state->last_tmu_write, n);
                switch (waddr) {
                case V3D_QPU_WADDR_TMUS:
                case V3D_QPU_WADDR_TMUSCM:
                case V3D_QPU_WADDR_TMUSF:
                case V3D_QPU_WADDR_TMUSLOD:
                        add_write_dep(state, &state->last_tmu_config, n);
                        break;
                default:
                        break;
                }
        } else if (v3d_qpu_magic_waddr_is_sfu(waddr)) {
                /* Handled by v3d_qpu_writes_r4() check. */
        } else {
                switch (waddr) {
                case V3D_QPU_WADDR_R0:
                case V3D_QPU_WADDR_R1:
                case V3D_QPU_WADDR_R2:
                        add_write_dep(state,
                                      &state->last_r[waddr - V3D_QPU_WADDR_R0],
                                      n);
                        break;

                case V3D_QPU_WADDR_R3:
                case V3D_QPU_WADDR_R4:
                case V3D_QPU_WADDR_R5:
                        /* Handled by v3d_qpu_writes_r*() checks below. */
                        break;

                case V3D_QPU_WADDR_VPM:
                case V3D_QPU_WADDR_VPMU:
                        add_write_dep(state, &state->last_vpm, n);
                        break;

                case V3D_QPU_WADDR_TLB:
                case V3D_QPU_WADDR_TLBU:
                        add_write_dep(state, &state->last_tlb, n);
                        break;

                case V3D_QPU_WADDR_SYNC:
                case V3D_QPU_WADDR_SYNCB:
                case V3D_QPU_WADDR_SYNCU:
                        /* For CS barrier(): Sync against any other memory
                         * accesses. There doesn't appear to be any need for
                         * barriers to affect ALU operations.
                         */
                        add_write_dep(state, &state->last_tmu_write, n);
                        break;

                case V3D_QPU_WADDR_NOP:
                        break;

                default:
                        fprintf(stderr, "Unknown waddr %d\n", waddr);
                        abort();
                }
        }
}
/**
 * Common code for dependencies that need to be tracked both forward and
 * backward.
 *
 * This is for things like "all reads of r4 have to happen between the r4
 * writes that surround them".
 */
static void
calculate_deps(struct schedule_state *state, struct schedule_node *n)
{
        const struct v3d_device_info *devinfo = state->devinfo;
        struct qinst *qinst = n->inst;
        struct v3d_qpu_instr *inst = &qinst->qpu;
        /* If the input and output segments are shared, then all VPM reads to
         * a location need to happen before all writes. We handle this by
         * serializing all VPM operations for now.
         */
        bool separate_vpm_segment = false;

        if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH) {
                if (inst->branch.cond != V3D_QPU_BRANCH_COND_ALWAYS)
                        add_read_dep(state, state->last_sf, n);

                add_write_dep(state, &state->last_unif, n);
                return;
        }

        assert(inst->type == V3D_QPU_INSTR_TYPE_ALU);

        if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 0)
                process_mux_deps(state, n, inst->alu.add.a);
        if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 1)
                process_mux_deps(state, n, inst->alu.add.b);

        if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 0)
                process_mux_deps(state, n, inst->alu.mul.a);
        if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 1)
                process_mux_deps(state, n, inst->alu.mul.b);

        switch (inst->alu.add.op) {
        case V3D_QPU_A_VPMSETUP:
                /* Could distinguish read/write by unpacking the uniform. */
                add_write_dep(state, &state->last_vpm, n);
                add_write_dep(state, &state->last_vpm_read, n);
                break;

        case V3D_QPU_A_STVPMV:
        case V3D_QPU_A_STVPMD:
        case V3D_QPU_A_STVPMP:
                add_write_dep(state, &state->last_vpm, n);
                break;

        case V3D_QPU_A_LDVPMV_IN:
        case V3D_QPU_A_LDVPMD_IN:
        case V3D_QPU_A_LDVPMG_IN:
        case V3D_QPU_A_LDVPMP:
                if (!separate_vpm_segment)
                        add_write_dep(state, &state->last_vpm, n);
                break;

        case V3D_QPU_A_VPMWT:
                add_read_dep(state, state->last_vpm, n);
                break;

        case V3D_QPU_A_MSF:
                add_read_dep(state, state->last_tlb, n);
                break;

        case V3D_QPU_A_SETMSF:
        case V3D_QPU_A_SETREVF:
                add_write_dep(state, &state->last_tlb, n);
                break;

        default:
                break;
        }

        switch (inst->alu.mul.op) {
        case V3D_QPU_M_MULTOP:
        case V3D_QPU_M_UMUL24:
                /* MULTOP sets rtop, and UMUL24 implicitly reads rtop and
                 * resets it to 0. We could possibly reorder umul24s relative
                 * to each other, but for now just keep all the MUL parts in
                 * order.
                 */
                add_write_dep(state, &state->last_rtop, n);
                break;
        default:
                break;
        }

        if (inst->alu.add.op != V3D_QPU_A_NOP) {
                process_waddr_deps(state, n, inst->alu.add.waddr,
                                   inst->alu.add.magic_write);
        }
        if (inst->alu.mul.op != V3D_QPU_M_NOP) {
                process_waddr_deps(state, n, inst->alu.mul.waddr,
                                   inst->alu.mul.magic_write);
        }
        if (v3d_qpu_sig_writes_address(devinfo, &inst->sig)) {
                process_waddr_deps(state, n, inst->sig_addr,
                                   inst->sig_magic);
        }

        if (v3d_qpu_writes_r3(devinfo, inst))
                add_write_dep(state, &state->last_r[3], n);
        if (v3d_qpu_writes_r4(devinfo, inst))
                add_write_dep(state, &state->last_r[4], n);
        if (v3d_qpu_writes_r5(devinfo, inst))
                add_write_dep(state, &state->last_r[5], n);

        if (inst->sig.thrsw) {
                /* All accumulator contents and flags are undefined after the
                 * switch.
                 */
                for (int i = 0; i < ARRAY_SIZE(state->last_r); i++)
                        add_write_dep(state, &state->last_r[i], n);
                add_write_dep(state, &state->last_sf, n);
                add_write_dep(state, &state->last_rtop, n);

                /* Scoreboard-locking operations have to stay after the last
                 * thread switch.
                 */
                add_write_dep(state, &state->last_tlb, n);

                add_write_dep(state, &state->last_tmu_write, n);
                add_write_dep(state, &state->last_tmu_config, n);
        }

        if (v3d_qpu_waits_on_tmu(inst)) {
                /* TMU loads are coming from a FIFO, so ordering is important.
                 */
                add_write_dep(state, &state->last_tmu_write, n);
        }

        if (inst->sig.wrtmuc)
                add_write_dep(state, &state->last_tmu_config, n);

        if (inst->sig.ldtlb | inst->sig.ldtlbu)
                add_read_dep(state, state->last_tlb, n);

        if (inst->sig.ldvpm) {
                add_write_dep(state, &state->last_vpm_read, n);

                /* At least for now, we're doing shared I/O segments, so queue
                 * all writes after all reads.
                 */
                if (!separate_vpm_segment)
                        add_write_dep(state, &state->last_vpm, n);
        }

        /* inst->sig.ldunif or sideband uniform read */
        if (vir_has_uniform(qinst))
                add_write_dep(state, &state->last_unif, n);

        if (v3d_qpu_reads_flags(inst))
                add_read_dep(state, state->last_sf, n);
        if (v3d_qpu_writes_flags(inst))
                add_write_dep(state, &state->last_sf, n);
}
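/* As a concrete illustration (not driver code), for the block
 *
 *     I1: mov tmud, rf0        ; TMU data write
 *     I2: mov tmua, rf1        ; TMU address write
 *     I3: nop ; sig ldtmu      ; collect TMU result
 *
 * the passes above produce I1 -> I2 (both chain through last_tmu_write) and
 * I2 -> I3 (the ldtmu waits on the TMU FIFO), so only I1 starts out as a DAG
 * head and FIFO ordering is preserved.
 */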
static void
calculate_forward_deps(struct v3d_compile *c, struct dag *dag,
                       struct list_head *schedule_list)
{
        struct schedule_state state;

        memset(&state, 0, sizeof(state));
        state.dag = dag;
        state.devinfo = c->devinfo;
        state.dir = F;

        list_for_each_entry(struct schedule_node, node, schedule_list, link)
                calculate_deps(&state, node);
}
static void
calculate_reverse_deps(struct v3d_compile *c, struct dag *dag,
                       struct list_head *schedule_list)
{
        struct schedule_state state;

        memset(&state, 0, sizeof(state));
        state.dag = dag;
        state.devinfo = c->devinfo;
        state.dir = R;

        list_for_each_entry_rev(struct schedule_node, node, schedule_list,
                                link) {
                calculate_deps(&state, (struct schedule_node *)node);
        }
}
struct choose_scoreboard {
        struct dag *dag;
        int tick;
        int last_magic_sfu_write_tick;
        int last_ldvary_tick;
        int last_uniforms_reset_tick;
        int last_thrsw_tick;
        bool tlb_locked;
};
static bool
mux_reads_too_soon(struct choose_scoreboard *scoreboard,
                   const struct v3d_qpu_instr *inst, enum v3d_qpu_mux mux)
{
        switch (mux) {
        case V3D_QPU_MUX_R4:
                if (scoreboard->tick - scoreboard->last_magic_sfu_write_tick <= 2)
                        return true;
                break;

        case V3D_QPU_MUX_R5:
                if (scoreboard->tick - scoreboard->last_ldvary_tick <= 1)
                        return true;
                break;

        default:
                break;
        }

        return false;
}
static bool
reads_too_soon_after_write(struct choose_scoreboard *scoreboard,
                           struct qinst *qinst)
{
        const struct v3d_qpu_instr *inst = &qinst->qpu;

        /* XXX: Branching off of raddr. */
        if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH)
                return false;

        assert(inst->type == V3D_QPU_INSTR_TYPE_ALU);

        if (inst->alu.add.op != V3D_QPU_A_NOP) {
                if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 0 &&
                    mux_reads_too_soon(scoreboard, inst, inst->alu.add.a)) {
                        return true;
                }
                if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 1 &&
                    mux_reads_too_soon(scoreboard, inst, inst->alu.add.b)) {
                        return true;
                }
        }

        if (inst->alu.mul.op != V3D_QPU_M_NOP) {
                if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 0 &&
                    mux_reads_too_soon(scoreboard, inst, inst->alu.mul.a)) {
                        return true;
                }
                if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 1 &&
                    mux_reads_too_soon(scoreboard, inst, inst->alu.mul.b)) {
                        return true;
                }
        }

        return false;
}
static bool
writes_too_soon_after_write(const struct v3d_device_info *devinfo,
                            struct choose_scoreboard *scoreboard,
                            struct qinst *qinst)
{
        const struct v3d_qpu_instr *inst = &qinst->qpu;

        /* Don't schedule any other r4 write too soon after an SFU write.
         * This would normally be prevented by dependency tracking, but might
         * occur if a dead SFU computation makes it to scheduling.
         */
        if (scoreboard->tick - scoreboard->last_magic_sfu_write_tick < 2 &&
            v3d_qpu_writes_r4(devinfo, inst))
                return true;

        return false;
}
static bool
pixel_scoreboard_too_soon(struct choose_scoreboard *scoreboard,
                          const struct v3d_qpu_instr *inst)
{
        return (scoreboard->tick == 0 && qpu_inst_is_tlb(inst));
}
static int
get_instruction_priority(const struct v3d_qpu_instr *inst)
{
        uint32_t baseline_score;
        uint32_t next_score = 0;

        /* Schedule TLB operations as late as possible, to get more
         * parallelism between shaders.
         */
        if (qpu_inst_is_tlb(inst))
                return next_score;
        next_score++;

        /* Schedule texture read results collection late to hide latency. */
        if (v3d_qpu_waits_on_tmu(inst))
                return next_score;
        next_score++;

        /* XXX perf: We should schedule SFU ALU ops so that the reader is 2
         * instructions after the producer if possible, not just 1.
         */

        /* Default score for things that aren't otherwise special. */
        baseline_score = next_score;
        next_score++;

        /* Schedule texture read setup early to hide their latency better. */
        if (v3d_qpu_writes_tmu(inst))
                return next_score;
        next_score++;

        return baseline_score;
}
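/* The resulting ladder, lowest to highest (a summary of the code above; the
 * concrete scores are incidental): TLB ops < TMU-result reads < everything
 * else < TMU setup writes. Higher scores are preferred by
 * choose_instruction_to_schedule(), so TMU setup drifts early while TLB work
 * sinks toward the end of the block.
 */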
static bool
qpu_magic_waddr_is_periph(enum v3d_qpu_waddr waddr)
{
        return (v3d_qpu_magic_waddr_is_tmu(waddr) ||
                v3d_qpu_magic_waddr_is_sfu(waddr) ||
                v3d_qpu_magic_waddr_is_tlb(waddr) ||
                v3d_qpu_magic_waddr_is_vpm(waddr) ||
                v3d_qpu_magic_waddr_is_tsy(waddr));
}
static bool
qpu_accesses_peripheral(const struct v3d_qpu_instr *inst)
{
        if (v3d_qpu_uses_vpm(inst))
                return true;
        if (v3d_qpu_uses_sfu(inst))
                return true;

        if (inst->type == V3D_QPU_INSTR_TYPE_ALU) {
                if (inst->alu.add.op != V3D_QPU_A_NOP &&
                    inst->alu.add.magic_write &&
                    qpu_magic_waddr_is_periph(inst->alu.add.waddr)) {
                        return true;
                }

                if (inst->alu.add.op == V3D_QPU_A_TMUWT)
                        return true;

                if (inst->alu.mul.op != V3D_QPU_M_NOP &&
                    inst->alu.mul.magic_write &&
                    qpu_magic_waddr_is_periph(inst->alu.mul.waddr)) {
                        return true;
                }
        }

        return (inst->sig.ldvpm ||
                inst->sig.ldtmu ||
                inst->sig.ldtlb ||
                inst->sig.ldtlbu ||
                inst->sig.wrtmuc);
}
static bool
qpu_compatible_peripheral_access(const struct v3d_device_info *devinfo,
                                 const struct v3d_qpu_instr *a,
                                 const struct v3d_qpu_instr *b)
{
        const bool a_uses_peripheral = qpu_accesses_peripheral(a);
        const bool b_uses_peripheral = qpu_accesses_peripheral(b);

        /* We can always do one peripheral access per instruction. */
        if (!a_uses_peripheral || !b_uses_peripheral)
                return true;

        if (devinfo->ver < 41)
                return false;

        /* V3D 4.1 and later allow TMU read along with a VPM read or write, and
         * WRTMUC with a TMU magic register write (other than tmuc).
         */
        if ((a->sig.ldtmu && v3d_qpu_uses_vpm(b)) ||
            (b->sig.ldtmu && v3d_qpu_uses_vpm(a))) {
                return true;
        }

        if ((a->sig.wrtmuc && v3d_qpu_writes_tmu_not_tmuc(b)) ||
            (b->sig.wrtmuc && v3d_qpu_writes_tmu_not_tmuc(a))) {
                return true;
        }

        return false;
}
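/* For example (illustrative): on V3D 4.1+, an ldtmu signal can pair with a
 * VPM-using ALU op, and a wrtmuc can pair with a write to TMUD, but two
 * unrelated peripheral accesses still may not share one instruction, since
 * each instruction normally gets a single peripheral access.
 */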
static bool
qpu_merge_inst(const struct v3d_device_info *devinfo,
               struct v3d_qpu_instr *result,
               const struct v3d_qpu_instr *a,
               const struct v3d_qpu_instr *b)
{
        if (a->type != V3D_QPU_INSTR_TYPE_ALU ||
            b->type != V3D_QPU_INSTR_TYPE_ALU) {
                return false;
        }

        if (!qpu_compatible_peripheral_access(devinfo, a, b))
                return false;

        struct v3d_qpu_instr merge = *a;

        if (b->alu.add.op != V3D_QPU_A_NOP) {
                if (a->alu.add.op != V3D_QPU_A_NOP)
                        return false;
                merge.alu.add = b->alu.add;

                merge.flags.ac = b->flags.ac;
                merge.flags.apf = b->flags.apf;
                merge.flags.auf = b->flags.auf;
        }

        if (b->alu.mul.op != V3D_QPU_M_NOP) {
                if (a->alu.mul.op != V3D_QPU_M_NOP)
                        return false;
                merge.alu.mul = b->alu.mul;

                merge.flags.mc = b->flags.mc;
                merge.flags.mpf = b->flags.mpf;
                merge.flags.muf = b->flags.muf;
        }

        if (v3d_qpu_uses_mux(b, V3D_QPU_MUX_A)) {
                if (v3d_qpu_uses_mux(a, V3D_QPU_MUX_A) &&
                    a->raddr_a != b->raddr_a) {
                        return false;
                }
                merge.raddr_a = b->raddr_a;
        }

        if (v3d_qpu_uses_mux(b, V3D_QPU_MUX_B)) {
                if (v3d_qpu_uses_mux(a, V3D_QPU_MUX_B) &&
                    (a->raddr_b != b->raddr_b ||
                     a->sig.small_imm != b->sig.small_imm)) {
                        return false;
                }
                merge.raddr_b = b->raddr_b;
        }

        merge.sig.thrsw |= b->sig.thrsw;
        merge.sig.ldunif |= b->sig.ldunif;
        merge.sig.ldunifrf |= b->sig.ldunifrf;
        merge.sig.ldunifa |= b->sig.ldunifa;
        merge.sig.ldunifarf |= b->sig.ldunifarf;
        merge.sig.ldtmu |= b->sig.ldtmu;
        merge.sig.ldvary |= b->sig.ldvary;
        merge.sig.ldvpm |= b->sig.ldvpm;
        merge.sig.small_imm |= b->sig.small_imm;
        merge.sig.ldtlb |= b->sig.ldtlb;
        merge.sig.ldtlbu |= b->sig.ldtlbu;
        merge.sig.ucb |= b->sig.ucb;
        merge.sig.rotate |= b->sig.rotate;
        merge.sig.wrtmuc |= b->sig.wrtmuc;

        if (v3d_qpu_sig_writes_address(devinfo, &a->sig) &&
            v3d_qpu_sig_writes_address(devinfo, &b->sig))
                return false;
        merge.sig_addr |= b->sig_addr;
        merge.sig_magic |= b->sig_magic;

        uint64_t packed;
        bool ok = v3d_qpu_instr_pack(devinfo, &merge, &packed);

        *result = merge;
        /* No modifying the real instructions on failure. */
        assert(ok || (a != result && b != result));

        return ok;
}
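/* A merge example (illustrative): if A is "fadd rf10, rf1, rf2" (add ALU
 * only, reading rf1/rf2 through muxes A and B) and B is "fmul rf11, r0, r1"
 * (mul ALU only, reading accumulators), the two pack into one QPU
 * instruction. Had B instead read, say, rf3 through mux A, the raddr_a
 * conflict with A's rf1 would make qpu_merge_inst() return false.
 */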
static struct schedule_node *
choose_instruction_to_schedule(const struct v3d_device_info *devinfo,
                               struct choose_scoreboard *scoreboard,
                               struct schedule_node *prev_inst)
{
        struct schedule_node *chosen = NULL;
        int chosen_prio = 0;

        /* Don't pair up anything with a thread switch signal -- emit_thrsw()
         * will handle pairing it along with filling the delay slots.
         */
        if (prev_inst) {
                if (prev_inst->inst->qpu.sig.thrsw)
                        return NULL;
        }

        list_for_each_entry(struct schedule_node, n, &scoreboard->dag->heads,
                            dag.link) {
                const struct v3d_qpu_instr *inst = &n->inst->qpu;

                /* Don't choose the branch instruction until it's the last one
                 * left. We'll move it up to fit its delay slots after we
                 * choose it.
                 */
                if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH &&
                    !list_is_singular(&scoreboard->dag->heads)) {
                        continue;
                }

                /* "An instruction must not read from a location in physical
                 *  regfile A or B that was written to by the previous
                 *  instruction."
                 */
                if (reads_too_soon_after_write(scoreboard, n->inst))
                        continue;

                if (writes_too_soon_after_write(devinfo, scoreboard, n->inst))
                        continue;

                /* "A scoreboard wait must not occur in the first two
                 *  instructions of a fragment shader. This is either the
                 *  explicit Wait for Scoreboard signal or an implicit wait
                 *  with the first tile-buffer read or write instruction."
                 */
                if (pixel_scoreboard_too_soon(scoreboard, inst))
                        continue;

                /* ldunif and ldvary both write r5, but ldunif does so a tick
                 * sooner. If the ldvary's r5 wasn't used, then ldunif might
                 * otherwise get scheduled so ldunif and ldvary try to update
                 * r5 in the same tick.
                 *
                 * XXX perf: To get good pipelining of a sequence of varying
                 * loads, we need to figure out how to pair the ldvary signal
                 * up to the instruction before the last r5 user in the
                 * previous ldvary sequence. Currently, it usually pairs with
                 * the last r5 user.
                 */
                if ((inst->sig.ldunif || inst->sig.ldunifa) &&
                    scoreboard->tick == scoreboard->last_ldvary_tick + 1) {
                        continue;
                }

                /* If we're trying to pair with another instruction, check
                 * that they're compatible.
                 */
                if (prev_inst) {
                        /* Don't pair up a thread switch signal -- we'll
                         * handle pairing it when we pick it on its own.
                         */
                        if (inst->sig.thrsw)
                                continue;

                        if (prev_inst->inst->uniform != -1 &&
                            n->inst->uniform != -1)
                                continue;

                        /* Don't merge in something that will lock the TLB.
                         * Hopefully what we have in inst will release some
                         * other instructions, allowing us to delay the
                         * TLB-locking instruction until later.
                         */
                        if (!scoreboard->tlb_locked && qpu_inst_is_tlb(inst))
                                continue;

                        struct v3d_qpu_instr merged_inst;
                        if (!qpu_merge_inst(devinfo, &merged_inst,
                                            &prev_inst->inst->qpu, inst)) {
                                continue;
                        }
                }

                int prio = get_instruction_priority(inst);

                /* Found a valid instruction. If nothing better comes along,
                 * this one works.
                 */
                if (!chosen) {
                        chosen = n;
                        chosen_prio = prio;
                        continue;
                }

                if (prio > chosen_prio) {
                        chosen = n;
                        chosen_prio = prio;
                } else if (prio < chosen_prio) {
                        continue;
                }

                if (n->delay > chosen->delay) {
                        chosen = n;
                        chosen_prio = prio;
                } else if (n->delay < chosen->delay) {
                        continue;
                }
        }

        return chosen;
}
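/* Tie-breaking recap: priority (see get_instruction_priority()) wins first;
 * among equal priorities, the node with the larger ->delay -- the one
 * heading the longest remaining dependency chain -- is taken, a standard
 * critical-path heuristic for list scheduling.
 */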
static void
update_scoreboard_for_magic_waddr(struct choose_scoreboard *scoreboard,
                                  enum v3d_qpu_waddr waddr)
{
        if (v3d_qpu_magic_waddr_is_sfu(waddr))
                scoreboard->last_magic_sfu_write_tick = scoreboard->tick;
}
static void
update_scoreboard_for_chosen(struct choose_scoreboard *scoreboard,
                             const struct v3d_qpu_instr *inst)
{
        if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH)
                return;

        assert(inst->type == V3D_QPU_INSTR_TYPE_ALU);

        if (inst->alu.add.op != V3D_QPU_A_NOP) {
                if (inst->alu.add.magic_write) {
                        update_scoreboard_for_magic_waddr(scoreboard,
                                                          inst->alu.add.waddr);
                }
        }

        if (inst->alu.mul.op != V3D_QPU_M_NOP) {
                if (inst->alu.mul.magic_write) {
                        update_scoreboard_for_magic_waddr(scoreboard,
                                                          inst->alu.mul.waddr);
                }
        }

        if (inst->sig.ldvary)
                scoreboard->last_ldvary_tick = scoreboard->tick;

        if (qpu_inst_is_tlb(inst))
                scoreboard->tlb_locked = true;
}
static void
dump_state(const struct v3d_device_info *devinfo, struct dag *dag)
{
        list_for_each_entry(struct schedule_node, n, &dag->heads, dag.link) {
                fprintf(stderr, "         t=%4d: ", n->unblocked_time);
                v3d_qpu_dump(devinfo, &n->inst->qpu);
                fprintf(stderr, "\n");

                util_dynarray_foreach(&n->dag.edges, struct dag_edge, edge) {
                        struct schedule_node *child =
                                (struct schedule_node *)edge->child;
                        if (!child)
                                continue;

                        fprintf(stderr, "                 - ");
                        v3d_qpu_dump(devinfo, &child->inst->qpu);
                        fprintf(stderr, " (%d parents, %c)\n",
                                child->dag.parent_count,
                                edge->data ? 'w' : 'r');
                }
        }
}
static uint32_t magic_waddr_latency(enum v3d_qpu_waddr waddr,
                                    const struct v3d_qpu_instr *after)
{
        /* Apply some huge latency between texture fetch requests and getting
         * their results back.
         *
         * FIXME: This is actually pretty bogus. If we do:
         *
         * mov tmu0_s, a
         * <a bit of math>
         * mov tmu0_s, b
         * load_tmu0
         * <more math>
         * load_tmu0
         *
         * we count that as worse than
         *
         * mov tmu0_s, a
         * mov tmu0_s, b
         * <lots of math>
         * load_tmu0
         * <more math>
         * load_tmu0
         *
         * because we associate the first load_tmu0 with the *second* tmu0_s.
         */
        if (v3d_qpu_magic_waddr_is_tmu(waddr) && v3d_qpu_waits_on_tmu(after))
                return 100;

        /* Assume that anything depending on us is consuming the SFU result. */
        if (v3d_qpu_magic_waddr_is_sfu(waddr))
                return 3;

        return 1;
}
static uint32_t
instruction_latency(struct schedule_node *before, struct schedule_node *after)
{
        const struct v3d_qpu_instr *before_inst = &before->inst->qpu;
        const struct v3d_qpu_instr *after_inst = &after->inst->qpu;
        uint32_t latency = 1;

        if (before_inst->type != V3D_QPU_INSTR_TYPE_ALU ||
            after_inst->type != V3D_QPU_INSTR_TYPE_ALU)
                return latency;

        if (before_inst->alu.add.magic_write) {
                latency = MAX2(latency,
                               magic_waddr_latency(before_inst->alu.add.waddr,
                                                   after_inst));
        }

        if (before_inst->alu.mul.magic_write) {
                latency = MAX2(latency,
                               magic_waddr_latency(before_inst->alu.mul.waddr,
                                                   after_inst));
        }

        return latency;
}
/** Recursive computation of the delay member of a node. */
static void
compute_delay(struct dag_node *node, void *state)
{
        struct schedule_node *n = (struct schedule_node *)node;

        n->delay = 1;

        util_dynarray_foreach(&n->dag.edges, struct dag_edge, edge) {
                struct schedule_node *child =
                        (struct schedule_node *)edge->child;

                n->delay = MAX2(n->delay, (child->delay +
                                           instruction_latency(n, child)));
        }
}
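/* Example (illustrative): for the chain "SFU write -> reader -> end", the
 * bottom-up traversal first gives the leaf reader delay 1, then the SFU
 * write gets MAX2(1, 1 + instruction_latency()) = 1 + 3 = 4, so the
 * critical-path tie-breaker favors starting SFU work early.
 */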
/* Removes a DAG head, but removing only the WAR edges. (dag_prune_head()
 * should be called on it later to finish pruning the other edges).
 */
static void
pre_remove_head(struct dag *dag, struct schedule_node *n)
{
        list_delinit(&n->dag.link);

        util_dynarray_foreach(&n->dag.edges, struct dag_edge, edge) {
                if (edge->data)
                        dag_remove_edge(dag, edge);
        }
}
static void
mark_instruction_scheduled(struct dag *dag,
                           uint32_t time,
                           struct schedule_node *node)
{
        if (!node)
                return;

        util_dynarray_foreach(&node->dag.edges, struct dag_edge, edge) {
                struct schedule_node *child =
                        (struct schedule_node *)edge->child;

                if (!child)
                        continue;

                uint32_t latency = instruction_latency(node, child);

                child->unblocked_time = MAX2(child->unblocked_time,
                                             time + latency);
        }
        dag_prune_head(dag, &node->dag);
}
static void
insert_scheduled_instruction(struct v3d_compile *c,
                             struct qblock *block,
                             struct choose_scoreboard *scoreboard,
                             struct qinst *inst)
{
        list_addtail(&inst->link, &block->instructions);

        update_scoreboard_for_chosen(scoreboard, &inst->qpu);
        c->qpu_inst_count++;
        scoreboard->tick++;
}
static struct qinst *
vir_nop(void)
{
        struct qreg undef = vir_nop_reg();
        struct qinst *qinst = vir_add_inst(V3D_QPU_A_NOP, undef, undef, undef);

        return qinst;
}
static void
emit_nop(struct v3d_compile *c, struct qblock *block,
         struct choose_scoreboard *scoreboard)
{
        insert_scheduled_instruction(c, block, scoreboard, vir_nop());
}
static bool
qpu_instruction_valid_in_thrend_slot(struct v3d_compile *c,
                                     const struct qinst *qinst, int slot)
{
        const struct v3d_qpu_instr *inst = &qinst->qpu;

        /* Only TLB Z writes are prohibited in the last slot, but we don't
         * have those flagged so prohibit all TLB ops for now.
         */
        if (slot == 2 && qpu_inst_is_tlb(inst))
                return false;

        if (slot > 0 && qinst->uniform != ~0)
                return false;

        if (v3d_qpu_uses_vpm(inst))
                return false;

        if (inst->sig.ldvary)
                return false;

        if (inst->type == V3D_QPU_INSTR_TYPE_ALU) {
                /* GFXH-1625: TMUWT not allowed in the final instruction. */
                if (slot == 2 && inst->alu.add.op == V3D_QPU_A_TMUWT)
                        return false;

                /* No writing physical registers at the end. */
                if (!inst->alu.add.magic_write ||
                    !inst->alu.mul.magic_write) {
                        return false;
                }

                if (c->devinfo->ver < 40 && inst->alu.add.op == V3D_QPU_A_SETMSF)
                        return false;

                /* RF0-2 might be overwritten during the delay slots by
                 * fragment shader setup.
                 */
                if (inst->raddr_a < 3 &&
                    (inst->alu.add.a == V3D_QPU_MUX_A ||
                     inst->alu.add.b == V3D_QPU_MUX_A ||
                     inst->alu.mul.a == V3D_QPU_MUX_A ||
                     inst->alu.mul.b == V3D_QPU_MUX_A)) {
                        return false;
                }

                if (inst->raddr_b < 3 &&
                    !inst->sig.small_imm &&
                    (inst->alu.add.a == V3D_QPU_MUX_B ||
                     inst->alu.add.b == V3D_QPU_MUX_B ||
                     inst->alu.mul.a == V3D_QPU_MUX_B ||
                     inst->alu.mul.b == V3D_QPU_MUX_B)) {
                        return false;
                }
        }

        return true;
}
static bool
valid_thrsw_sequence(struct v3d_compile *c, struct choose_scoreboard *scoreboard,
                     struct qinst *qinst, int instructions_in_sequence,
                     bool is_thrend)
{
        /* No emitting our thrsw while the previous thrsw hasn't happened yet. */
        if (scoreboard->last_thrsw_tick + 3 >
            scoreboard->tick - instructions_in_sequence) {
                return false;
        }

        for (int slot = 0; slot < instructions_in_sequence; slot++) {
                /* No scheduling SFU when the result would land in the other
                 * thread. The simulator complains for safety, though it
                 * would only occur for dead code in our case.
                 */
                if (slot > 0 &&
                    qinst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                    (v3d_qpu_magic_waddr_is_sfu(qinst->qpu.alu.add.waddr) ||
                     v3d_qpu_magic_waddr_is_sfu(qinst->qpu.alu.mul.waddr))) {
                        return false;
                }

                if (slot > 0 && qinst->qpu.sig.ldvary)
                        return false;

                if (is_thrend &&
                    !qpu_instruction_valid_in_thrend_slot(c, qinst, slot)) {
                        return false;
                }

                /* Note that the list is circular, so we can only do this up
                 * to instructions_in_sequence.
                 */
                qinst = (struct qinst *)qinst->link.next;
        }

        return true;
}
/**
 * Emits a THRSW signal in the stream, trying to move it up to pair with
 * another instruction.
 */
static int
emit_thrsw(struct v3d_compile *c,
           struct qblock *block,
           struct choose_scoreboard *scoreboard,
           struct qinst *inst,
           bool is_thrend)
{
        int time = 0;

        /* There should be nothing in a thrsw inst being scheduled other than
         * the signal bits.
         */
        assert(inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU);
        assert(inst->qpu.alu.add.op == V3D_QPU_A_NOP);
        assert(inst->qpu.alu.mul.op == V3D_QPU_M_NOP);

        /* Find how far back into previous instructions we can put the THRSW. */
        int slots_filled = 0;
        struct qinst *merge_inst = NULL;
        vir_for_each_inst_rev(prev_inst, block) {
                struct v3d_qpu_sig sig = prev_inst->qpu.sig;
                sig.thrsw = true;
                uint32_t packed_sig;

                if (!v3d_qpu_sig_pack(c->devinfo, &sig, &packed_sig))
                        break;

                if (!valid_thrsw_sequence(c, scoreboard,
                                          prev_inst, slots_filled + 1,
                                          is_thrend)) {
                        break;
                }

                merge_inst = prev_inst;
                if (++slots_filled == 3)
                        break;
        }

        bool needs_free = false;
        if (merge_inst) {
                merge_inst->qpu.sig.thrsw = true;
                needs_free = true;
                scoreboard->last_thrsw_tick = scoreboard->tick - slots_filled;
        } else {
                scoreboard->last_thrsw_tick = scoreboard->tick;
                insert_scheduled_instruction(c, block, scoreboard, inst);
                time++;
                slots_filled++;
                merge_inst = inst;
        }

        /* Insert any extra delay slot NOPs we need. */
        for (int i = 0; i < 3 - slots_filled; i++) {
                emit_nop(c, block, scoreboard);
                time++;
        }

        /* If we're emitting the last THRSW (other than program end), then
         * signal that to the HW by emitting two THRSWs in a row.
         */
        if (inst->is_last_thrsw) {
                struct qinst *second_inst =
                        (struct qinst *)merge_inst->link.next;
                second_inst->qpu.sig.thrsw = true;
        }

        /* If we put our THRSW into another instruction, free up the
         * instruction that didn't end up scheduled into the list.
         */
        if (needs_free)
                free(inst);

        return time;
}
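/* Timeline sketch (illustrative): with slots_filled == 3 the signal is
 * folded onto an instruction three back, so the switch overlaps real work:
 *
 *     instA  ; sig thrsw       <- merge_inst
 *     instB                    <- delay slot
 *     instC                    <- delay slot
 *     <other thread runs>
 *
 * When fewer slots can be filled, the loop above pads with NOPs instead.
 */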
static uint32_t
schedule_instructions(struct v3d_compile *c,
                      struct choose_scoreboard *scoreboard,
                      struct qblock *block,
                      enum quniform_contents *orig_uniform_contents,
                      uint32_t *orig_uniform_data,
                      uint32_t *next_uniform)
{
        const struct v3d_device_info *devinfo = c->devinfo;
        uint32_t time = 0;

        while (!list_empty(&scoreboard->dag->heads)) {
                struct schedule_node *chosen =
                        choose_instruction_to_schedule(devinfo,
                                                       scoreboard,
                                                       NULL);
                struct schedule_node *merge = NULL;

                /* If there are no valid instructions to schedule, drop a NOP
                 * in.
                 */
                struct qinst *qinst = chosen ? chosen->inst : vir_nop();
                struct v3d_qpu_instr *inst = &qinst->qpu;

                if (debug) {
                        fprintf(stderr, "t=%4d: current list:\n",
                                time);
                        dump_state(devinfo, scoreboard->dag);
                        fprintf(stderr, "t=%4d: chose: ", time);
                        v3d_qpu_dump(devinfo, inst);
                        fprintf(stderr, "\n");
                }

                /* We can't mark_instruction_scheduled() the chosen inst until
                 * we're done identifying instructions to merge, so put the
                 * merged instructions on a list for a moment.
                 */
                struct list_head merged_list;
                list_inithead(&merged_list);

                /* Schedule this instruction onto the QPU list. Also try to
                 * find an instruction to pair with it.
                 */
                if (chosen) {
                        time = MAX2(chosen->unblocked_time, time);
                        pre_remove_head(scoreboard->dag, chosen);

                        while ((merge =
                                choose_instruction_to_schedule(devinfo,
                                                               scoreboard,
                                                               chosen))) {
                                time = MAX2(merge->unblocked_time, time);
                                pre_remove_head(scoreboard->dag, merge);
                                list_addtail(&merge->link, &merged_list);
                                (void)qpu_merge_inst(devinfo, inst,
                                                     inst, &merge->inst->qpu);
                                if (merge->inst->uniform != -1) {
                                        chosen->inst->uniform =
                                                merge->inst->uniform;
                                }

                                if (debug) {
                                        fprintf(stderr, "t=%4d: merging: ",
                                                time);
                                        v3d_qpu_dump(devinfo, &merge->inst->qpu);
                                        fprintf(stderr, "\n");
                                        fprintf(stderr, "         result: ");
                                        v3d_qpu_dump(devinfo, inst);
                                        fprintf(stderr, "\n");
                                }
                        }
                }

                /* Update the uniform index for the rewritten location --
                 * branch target updating will still need to change
                 * c->uniform_data[] using this index.
                 */
                if (qinst->uniform != -1) {
                        if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH)
                                block->branch_uniform = *next_uniform;

                        c->uniform_data[*next_uniform] =
                                orig_uniform_data[qinst->uniform];
                        c->uniform_contents[*next_uniform] =
                                orig_uniform_contents[qinst->uniform];
                        qinst->uniform = *next_uniform;
                        (*next_uniform)++;
                }

                if (debug)
                        fprintf(stderr, "\n");

                /* Now that we've scheduled a new instruction, some of its
                 * children can be promoted to the list of instructions ready to
                 * be scheduled. Update the children's unblocked time for this
                 * DAG edge as we do so.
                 */
                mark_instruction_scheduled(scoreboard->dag, time, chosen);
                list_for_each_entry(struct schedule_node, merge, &merged_list,
                                    link) {
                        mark_instruction_scheduled(scoreboard->dag, time, merge);

                        /* The merged VIR instruction doesn't get re-added to the
                         * block, so free it now.
                         */
                        free(merge->inst);
                }

                if (inst->sig.thrsw) {
                        time += emit_thrsw(c, block, scoreboard, qinst, false);
                } else {
                        insert_scheduled_instruction(c, block,
                                                     scoreboard, qinst);

                        if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH) {
                                block->branch_qpu_ip = c->qpu_inst_count - 1;
                                /* Fill the delay slots.
                                 *
                                 * We should fill these with actual instructions,
                                 * instead, but that will probably need to be done
                                 * after this, once we know what the leading
                                 * instructions of the successors are (so we can
                                 * handle A/B register file write latency)
                                 */
                                for (int i = 0; i < 3; i++)
                                        emit_nop(c, block, scoreboard);
                        }
                }
        }

        return time;
}
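/* Layout sketch (illustrative): after the code above, a scheduled branch
 * occupies four QPU instruction slots:
 *
 *     branch            <- block->branch_qpu_ip
 *     nop               <- delay slot 0
 *     nop               <- delay slot 1
 *     nop               <- delay slot 2
 *
 * which is why qpu_set_branch_targets() below walks back three instructions
 * from the block's tail and computes offsets relative to branch_qpu_ip + 4.
 */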
static uint32_t
qpu_schedule_instructions_block(struct v3d_compile *c,
                                struct choose_scoreboard *scoreboard,
                                struct qblock *block,
                                enum quniform_contents *orig_uniform_contents,
                                uint32_t *orig_uniform_data,
                                uint32_t *next_uniform)
{
        void *mem_ctx = ralloc_context(NULL);
        scoreboard->dag = dag_create(mem_ctx);
        struct list_head setup_list;

        list_inithead(&setup_list);

        /* Wrap each instruction in a scheduler structure. */
        while (!list_empty(&block->instructions)) {
                struct qinst *qinst = (struct qinst *)block->instructions.next;
                struct schedule_node *n =
                        rzalloc(mem_ctx, struct schedule_node);

                dag_init_node(scoreboard->dag, &n->dag);
                n->inst = qinst;

                list_del(&qinst->link);
                list_addtail(&n->link, &setup_list);
        }

        calculate_forward_deps(c, scoreboard->dag, &setup_list);
        calculate_reverse_deps(c, scoreboard->dag, &setup_list);

        dag_traverse_bottom_up(scoreboard->dag, compute_delay, NULL);

        uint32_t cycles = schedule_instructions(c, scoreboard, block,
                                                orig_uniform_contents,
                                                orig_uniform_data,
                                                next_uniform);

        ralloc_free(mem_ctx);
        scoreboard->dag = NULL;

        return cycles;
}
static void
qpu_set_branch_targets(struct v3d_compile *c)
{
        vir_for_each_block(block, c) {
                /* The end block of the program has no branch. */
                if (!block->successors[0])
                        continue;

                /* If there was no branch instruction, then the successor
                 * block must follow immediately after this one.
                 */
                if (block->branch_qpu_ip == ~0) {
                        assert(block->end_qpu_ip + 1 ==
                               block->successors[0]->start_qpu_ip);
                        continue;
                }

                /* Walk back through the delay slots to find the branch
                 * instr.
                 */
                struct list_head *entry = block->instructions.prev;
                for (int i = 0; i < 3; i++)
                        entry = entry->prev;
                struct qinst *branch = container_of(entry, branch, link);
                assert(branch->qpu.type == V3D_QPU_INSTR_TYPE_BRANCH);

                /* Make sure that the if-we-don't-jump
                 * successor was scheduled just after the
                 * delay slots.
                 */
                assert(!block->successors[1] ||
                       block->successors[1]->start_qpu_ip ==
                       block->branch_qpu_ip + 4);

                branch->qpu.branch.offset =
                        ((block->successors[0]->start_qpu_ip -
                          (block->branch_qpu_ip + 4)) *
                         sizeof(uint64_t));

                /* Set up the relative offset to jump in the
                 * uniform stream.
                 *
                 * Use a temporary here, because
                 * uniform_data[inst->uniform] may be shared
                 * between multiple instructions.
                 */
                assert(c->uniform_contents[branch->uniform] == QUNIFORM_CONSTANT);
                c->uniform_data[branch->uniform] =
                        (block->successors[0]->start_uniform -
                         (block->branch_uniform + 1)) * 4;
        }
}
uint32_t
v3d_qpu_schedule_instructions(struct v3d_compile *c)
{
        const struct v3d_device_info *devinfo = c->devinfo;
        struct qblock *end_block = list_last_entry(&c->blocks,
                                                   struct qblock, link);

        /* We reorder the uniforms as we schedule instructions, so save the
         * old data off and replace it.
         */
        uint32_t *uniform_data = c->uniform_data;
        enum quniform_contents *uniform_contents = c->uniform_contents;
        c->uniform_contents = ralloc_array(c, enum quniform_contents,
                                           c->num_uniforms);
        c->uniform_data = ralloc_array(c, uint32_t, c->num_uniforms);
        c->uniform_array_size = c->num_uniforms;
        uint32_t next_uniform = 0;

        struct choose_scoreboard scoreboard;
        memset(&scoreboard, 0, sizeof(scoreboard));
        scoreboard.last_ldvary_tick = -10;
        scoreboard.last_magic_sfu_write_tick = -10;
        scoreboard.last_uniforms_reset_tick = -10;
        scoreboard.last_thrsw_tick = -10;

        if (debug) {
                fprintf(stderr, "Pre-schedule instructions\n");
                vir_for_each_block(block, c) {
                        fprintf(stderr, "BLOCK %d\n", block->index);
                        list_for_each_entry(struct qinst, qinst,
                                            &block->instructions, link) {
                                v3d_qpu_dump(devinfo, &qinst->qpu);
                                fprintf(stderr, "\n");
                        }
                }
                fprintf(stderr, "\n");
        }

        uint32_t cycles = 0;
        vir_for_each_block(block, c) {
                block->start_qpu_ip = c->qpu_inst_count;
                block->branch_qpu_ip = ~0;
                block->start_uniform = next_uniform;

                cycles += qpu_schedule_instructions_block(c,
                                                          &scoreboard,
                                                          block,
                                                          uniform_contents,
                                                          uniform_data,
                                                          &next_uniform);

                block->end_qpu_ip = c->qpu_inst_count - 1;
        }

        /* Emit the program-end THRSW instruction. */
        struct qinst *thrsw = vir_nop();
        thrsw->qpu.sig.thrsw = true;
        emit_thrsw(c, end_block, &scoreboard, thrsw, true);

        qpu_set_branch_targets(c);

        assert(next_uniform == c->num_uniforms);

        return cycles;
}