/*
 * Copyright © 2010 Intel Corporation
 * Copyright © 2014-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
/**
 * The basic model of the list scheduler is to take a basic block, compute a
 * DAG of the dependencies, and make a list of the DAG heads.  Heuristically
 * pick a DAG head, then put all the children that are now DAG heads into the
 * list of things to schedule.
 *
 * The goal of scheduling here is to pack pairs of operations together in a
 * single QPU instruction.
 */
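/* As a rough sketch (not real disassembly syntax): the QPU has an add ALU
 * and a mul ALU per instruction, so two independent DAG heads like
 *
 *     fadd rf10, rf1, rf2
 *     fmul rf11, rf3, rf4
 *
 * can retire as one paired instruction when their raddrs and signals don't
 * conflict; qpu_merge_inst() below is what decides that.
 */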
#include "qpu/qpu_disasm.h"
#include "v3d_compiler.h"
#include "util/ralloc.h"

static bool debug;
struct schedule_node_child;
struct schedule_node {
        struct list_head link;
        struct qinst *inst;
        struct schedule_node_child *children;
        uint32_t child_count;
        uint32_t child_array_size;
        uint32_t parent_count;

        /* Longest cycles + instruction_latency() of any parent of this node. */
        uint32_t unblocked_time;

        /**
         * Minimum number of cycles from scheduling this instruction until the
         * end of the program, based on the slowest dependency chain through
         * the children.
         */
        uint32_t delay;

        /**
         * cycles between this instruction being scheduled and when its result
         * can be used.
         */
        uint32_t latency;
};
struct schedule_node_child {
        struct schedule_node *node;
        bool write_after_read;
};
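/* Each DAG edge is stored once, on the "before" node.  write_after_read
 * marks WAR edges, which carry no latency -- mark_instruction_scheduled()
 * uses that to let a register's next writer be placed immediately after
 * (or paired with) the instruction doing the last read.
 */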
/* When walking the instructions in reverse, we need to swap before/after in
 * add_dep().
 */
enum direction { F, R };
struct schedule_state {
        const struct v3d_device_info *devinfo;
        struct schedule_node *last_r[6];
        struct schedule_node *last_rf[64];
        struct schedule_node *last_sf;
        struct schedule_node *last_vpm_read;
        struct schedule_node *last_tmu_write;
        struct schedule_node *last_tmu_config;
        struct schedule_node *last_tlb;
        struct schedule_node *last_vpm;
        struct schedule_node *last_unif;
        struct schedule_node *last_rtop;
        enum direction dir;
        /* Estimated cycle when the current instruction would start. */
        uint32_t time;
};
static void
add_dep(struct schedule_state *state,
        struct schedule_node *before,
        struct schedule_node *after,
        bool write)
{
        bool write_after_read = !write && state->dir == R;

        if (!before || !after)
                return;

        assert(before != after);

        if (state->dir == R) {
                struct schedule_node *t = before;
                before = after;
                after = t;
        }

        for (int i = 0; i < before->child_count; i++) {
                if (before->children[i].node == after &&
                    (before->children[i].write_after_read == write_after_read)) {
                        return;
                }
        }

        if (before->child_array_size <= before->child_count) {
                before->child_array_size = MAX2(before->child_array_size * 2, 16);
                before->children = reralloc(before, before->children,
                                            struct schedule_node_child,
                                            before->child_array_size);
        }

        before->children[before->child_count].node = after;
        before->children[before->child_count].write_after_read =
                write_after_read;
        before->child_count++;
        after->parent_count++;
}
static void
add_read_dep(struct schedule_state *state,
             struct schedule_node *before,
             struct schedule_node *after)
{
        add_dep(state, before, after, false);
}
static void
add_write_dep(struct schedule_state *state,
              struct schedule_node **before,
              struct schedule_node *after)
{
        add_dep(state, *before, after, true);
        *before = after;
}
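/* Sketch of how these are used (hypothetical instruction): for
 * "add rf3, rf1, rf2", calculate_deps() takes read deps on the last
 * writers of rf1 and rf2, and a write dep on last_rf[3], which also makes
 * this node the new last writer of rf3 for everything that follows.
 */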
static bool
qpu_inst_is_tlb(const struct v3d_qpu_instr *inst)
{
        if (inst->type != V3D_QPU_INSTR_TYPE_ALU)
                return false;

        if (inst->alu.add.magic_write &&
            (inst->alu.add.waddr == V3D_QPU_WADDR_TLB ||
             inst->alu.add.waddr == V3D_QPU_WADDR_TLBU))
                return true;

        if (inst->alu.mul.magic_write &&
            (inst->alu.mul.waddr == V3D_QPU_WADDR_TLB ||
             inst->alu.mul.waddr == V3D_QPU_WADDR_TLBU))
                return true;

        return false;
}
static void
process_mux_deps(struct schedule_state *state, struct schedule_node *n,
                 enum v3d_qpu_mux mux)
{
        switch (mux) {
        case V3D_QPU_MUX_A:
                add_read_dep(state, state->last_rf[n->inst->qpu.raddr_a], n);
                break;
        case V3D_QPU_MUX_B:
                add_read_dep(state, state->last_rf[n->inst->qpu.raddr_b], n);
                break;
        default:
                add_read_dep(state, state->last_r[mux - V3D_QPU_MUX_R0], n);
                break;
        }
}
static void
process_waddr_deps(struct schedule_state *state, struct schedule_node *n,
                   uint32_t waddr, bool magic)
{
        if (!magic) {
                add_write_dep(state, &state->last_rf[waddr], n);
        } else if (v3d_qpu_magic_waddr_is_tmu(waddr)) {
                /* XXX perf: For V3D 4.x, we could reorder TMU writes other
                 * than the TMUS/TMUD/TMUA to improve scheduling flexibility.
                 */
                add_write_dep(state, &state->last_tmu_write, n);
                switch (waddr) {
                case V3D_QPU_WADDR_TMUS:
                case V3D_QPU_WADDR_TMUSCM:
                case V3D_QPU_WADDR_TMUSF:
                case V3D_QPU_WADDR_TMUSLOD:
                        add_write_dep(state, &state->last_tmu_config, n);
                        break;
                default:
                        break;
                }
        } else if (v3d_qpu_magic_waddr_is_sfu(waddr)) {
                /* Handled by v3d_qpu_writes_r4() check. */
        } else {
                switch (waddr) {
                case V3D_QPU_WADDR_R0:
                case V3D_QPU_WADDR_R1:
                case V3D_QPU_WADDR_R2:
                        add_write_dep(state,
                                      &state->last_r[waddr - V3D_QPU_WADDR_R0],
                                      n);
                        break;

                case V3D_QPU_WADDR_R3:
                case V3D_QPU_WADDR_R4:
                case V3D_QPU_WADDR_R5:
                        /* Handled by v3d_qpu_writes_r*() checks below. */
                        break;

                case V3D_QPU_WADDR_VPM:
                case V3D_QPU_WADDR_VPMU:
                        add_write_dep(state, &state->last_vpm, n);
                        break;

                case V3D_QPU_WADDR_TLB:
                case V3D_QPU_WADDR_TLBU:
                        add_write_dep(state, &state->last_tlb, n);
                        break;

                case V3D_QPU_WADDR_NOP:
                        break;

                default:
                        fprintf(stderr, "Unknown waddr %d\n", waddr);
                        abort();
                }
        }
}
static void
process_cond_deps(struct schedule_state *state, struct schedule_node *n,
                  enum v3d_qpu_cond cond)
{
        if (cond != V3D_QPU_COND_NONE)
                add_read_dep(state, state->last_sf, n);
}
static void
process_pf_deps(struct schedule_state *state, struct schedule_node *n,
                enum v3d_qpu_pf pf)
{
        if (pf != V3D_QPU_PF_NONE)
                add_write_dep(state, &state->last_sf, n);
}
static void
process_uf_deps(struct schedule_state *state, struct schedule_node *n,
                enum v3d_qpu_uf uf)
{
        if (uf != V3D_QPU_UF_NONE)
                add_write_dep(state, &state->last_sf, n);
}
/**
 * Common code for dependencies that need to be tracked both forward and
 * backward.
 *
 * This is for things like "all reads of r4 have to happen between the r4
 * writes that surround them".
 */
static void
calculate_deps(struct schedule_state *state, struct schedule_node *n)
{
        const struct v3d_device_info *devinfo = state->devinfo;
        struct qinst *qinst = n->inst;
        struct v3d_qpu_instr *inst = &qinst->qpu;
        /* If the input and output segments are shared, then all VPM reads to
         * a location need to happen before all writes.  We handle this by
         * serializing all VPM operations for now.
         */
        bool separate_vpm_segment = false;

        if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH) {
                if (inst->branch.cond != V3D_QPU_BRANCH_COND_ALWAYS)
                        add_read_dep(state, state->last_sf, n);

                /* XXX: BDI */
                /* XXX: BDU */
                /* XXX: ub */
                /* XXX: raddr_a */

                add_write_dep(state, &state->last_unif, n);
                return;
        }

        assert(inst->type == V3D_QPU_INSTR_TYPE_ALU);

        /* XXX: LOAD_IMM */

        if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 0)
                process_mux_deps(state, n, inst->alu.add.a);
        if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 1)
                process_mux_deps(state, n, inst->alu.add.b);

        if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 0)
                process_mux_deps(state, n, inst->alu.mul.a);
        if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 1)
                process_mux_deps(state, n, inst->alu.mul.b);

        switch (inst->alu.add.op) {
        case V3D_QPU_A_VPMSETUP:
                /* Could distinguish read/write by unpacking the uniform. */
                add_write_dep(state, &state->last_vpm, n);
                add_write_dep(state, &state->last_vpm_read, n);
                break;

        case V3D_QPU_A_STVPMV:
        case V3D_QPU_A_STVPMD:
        case V3D_QPU_A_STVPMP:
                add_write_dep(state, &state->last_vpm, n);
                break;

        case V3D_QPU_A_LDVPMV_IN:
        case V3D_QPU_A_LDVPMD_IN:
        case V3D_QPU_A_LDVPMG_IN:
        case V3D_QPU_A_LDVPMP:
                if (!separate_vpm_segment)
                        add_write_dep(state, &state->last_vpm, n);
                break;

        case V3D_QPU_A_VPMWT:
                add_read_dep(state, state->last_vpm, n);
                break;

        case V3D_QPU_A_MSF:
                add_read_dep(state, state->last_tlb, n);
                break;

        case V3D_QPU_A_SETMSF:
        case V3D_QPU_A_SETREVF:
                add_write_dep(state, &state->last_tlb, n);
                break;

        case V3D_QPU_A_FLAPUSH:
        case V3D_QPU_A_FLBPUSH:
        case V3D_QPU_A_VFLA:
        case V3D_QPU_A_VFLNA:
        case V3D_QPU_A_VFLB:
        case V3D_QPU_A_VFLNB:
                add_read_dep(state, state->last_sf, n);
                break;

        case V3D_QPU_A_FLPOP:
                add_write_dep(state, &state->last_sf, n);
                break;

        default:
                break;
        }

        switch (inst->alu.mul.op) {
        case V3D_QPU_M_MULTOP:
        case V3D_QPU_M_UMUL24:
                /* MULTOP sets rtop, and UMUL24 implicitly reads rtop and
                 * resets it to 0.  We could possibly reorder umul24s relative
                 * to each other, but for now just keep all the MUL parts in
                 * order.
                 */
                add_write_dep(state, &state->last_rtop, n);
                break;
        default:
                break;
        }

        if (inst->alu.add.op != V3D_QPU_A_NOP) {
                process_waddr_deps(state, n, inst->alu.add.waddr,
                                   inst->alu.add.magic_write);
        }
        if (inst->alu.mul.op != V3D_QPU_M_NOP) {
                process_waddr_deps(state, n, inst->alu.mul.waddr,
                                   inst->alu.mul.magic_write);
        }
        if (v3d_qpu_sig_writes_address(devinfo, &inst->sig)) {
                process_waddr_deps(state, n, inst->sig_addr,
                                   inst->sig_magic);
        }

        if (v3d_qpu_writes_r3(devinfo, inst))
                add_write_dep(state, &state->last_r[3], n);
        if (v3d_qpu_writes_r4(devinfo, inst))
                add_write_dep(state, &state->last_r[4], n);
        if (v3d_qpu_writes_r5(devinfo, inst))
                add_write_dep(state, &state->last_r[5], n);

        if (inst->sig.thrsw) {
                /* All accumulator contents and flags are undefined after the
                 * switch.
                 */
                for (int i = 0; i < ARRAY_SIZE(state->last_r); i++)
                        add_write_dep(state, &state->last_r[i], n);
                add_write_dep(state, &state->last_sf, n);
                add_write_dep(state, &state->last_rtop, n);

                /* Scoreboard-locking operations have to stay after the last
                 * thread switch.
                 */
                add_write_dep(state, &state->last_tlb, n);

                add_write_dep(state, &state->last_tmu_write, n);
                add_write_dep(state, &state->last_tmu_config, n);
        }

        if (v3d_qpu_waits_on_tmu(inst)) {
                /* TMU loads are coming from a FIFO, so ordering is important.
                 */
                add_write_dep(state, &state->last_tmu_write, n);
        }

        if (inst->sig.wrtmuc)
                add_write_dep(state, &state->last_tmu_config, n);

        if (inst->sig.ldtlb | inst->sig.ldtlbu)
                add_read_dep(state, state->last_tlb, n);

        if (inst->sig.ldvpm) {
                add_write_dep(state, &state->last_vpm_read, n);

                /* At least for now, we're doing shared I/O segments, so queue
                 * all writes after all reads.
                 */
                if (!separate_vpm_segment)
                        add_write_dep(state, &state->last_vpm, n);
        }

        /* inst->sig.ldunif or sideband uniform read */
        if (qinst->uniform != ~0)
                add_write_dep(state, &state->last_unif, n);

        process_cond_deps(state, n, inst->flags.ac);
        process_cond_deps(state, n, inst->flags.mc);
        process_pf_deps(state, n, inst->flags.apf);
        process_pf_deps(state, n, inst->flags.mpf);
        process_uf_deps(state, n, inst->flags.auf);
        process_uf_deps(state, n, inst->flags.muf);
}
static void
calculate_forward_deps(struct v3d_compile *c, struct list_head *schedule_list)
{
        struct schedule_state state;

        memset(&state, 0, sizeof(state));
        state.devinfo = c->devinfo;
        state.dir = F;

        list_for_each_entry(struct schedule_node, node, schedule_list, link)
                calculate_deps(&state, node);
}
static void
calculate_reverse_deps(struct v3d_compile *c, struct list_head *schedule_list)
{
        struct list_head *node;
        struct schedule_state state;

        memset(&state, 0, sizeof(state));
        state.devinfo = c->devinfo;
        state.dir = R;

        for (node = schedule_list->prev; schedule_list != node; node = node->prev) {
                calculate_deps(&state, (struct schedule_node *)node);
        }
}
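/* Running calculate_deps() both forward and in reverse (with before/after
 * swapped by add_dep()) yields ordering in both directions: forward, an r4
 * read depends on the previous r4 write; in reverse, the *next* r4 write
 * also depends on that read, keeping reads between the writes that
 * surround them.
 */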
struct choose_scoreboard {
        int tick;
        int last_magic_sfu_write_tick;
        int last_ldvary_tick;
        int last_uniforms_reset_tick;
        int last_thrsw_tick;
        bool tlb_locked;
};
static bool
mux_reads_too_soon(struct choose_scoreboard *scoreboard,
                   const struct v3d_qpu_instr *inst, enum v3d_qpu_mux mux)
{
        switch (mux) {
        case V3D_QPU_MUX_R4:
                if (scoreboard->tick - scoreboard->last_magic_sfu_write_tick <= 2)
                        return true;
                break;

        case V3D_QPU_MUX_R5:
                if (scoreboard->tick - scoreboard->last_ldvary_tick <= 1)
                        return true;
                break;
        default:
                break;
        }

        return false;
}
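/* The thresholds above presumably reflect result latency: an SFU result
 * lands in r4 two instructions after the write, and ldvary's r5 result one
 * instruction after, so reads inside those windows would see stale data.
 */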
static bool
reads_too_soon_after_write(struct choose_scoreboard *scoreboard,
                           struct qinst *qinst)
{
        const struct v3d_qpu_instr *inst = &qinst->qpu;

        /* XXX: Branching off of raddr. */
        if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH)
                return false;

        assert(inst->type == V3D_QPU_INSTR_TYPE_ALU);

        if (inst->alu.add.op != V3D_QPU_A_NOP) {
                if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 0 &&
                    mux_reads_too_soon(scoreboard, inst, inst->alu.add.a)) {
                        return true;
                }
                if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 1 &&
                    mux_reads_too_soon(scoreboard, inst, inst->alu.add.b)) {
                        return true;
                }
        }

        if (inst->alu.mul.op != V3D_QPU_M_NOP) {
                if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 0 &&
                    mux_reads_too_soon(scoreboard, inst, inst->alu.mul.a)) {
                        return true;
                }
                if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 1 &&
                    mux_reads_too_soon(scoreboard, inst, inst->alu.mul.b)) {
                        return true;
                }
        }

        return false;
}
static bool
writes_too_soon_after_write(const struct v3d_device_info *devinfo,
                            struct choose_scoreboard *scoreboard,
                            struct qinst *qinst)
{
        const struct v3d_qpu_instr *inst = &qinst->qpu;

        /* Don't schedule any other r4 write too soon after an SFU write.
         * This would normally be prevented by dependency tracking, but might
         * occur if a dead SFU computation makes it to scheduling.
         */
        if (scoreboard->tick - scoreboard->last_magic_sfu_write_tick < 2 &&
            v3d_qpu_writes_r4(devinfo, inst))
                return true;

        return false;
}
static bool
pixel_scoreboard_too_soon(struct choose_scoreboard *scoreboard,
                          const struct v3d_qpu_instr *inst)
{
        return (scoreboard->tick == 0 && qpu_inst_is_tlb(inst));
}
static int
get_instruction_priority(const struct v3d_qpu_instr *inst)
{
        uint32_t baseline_score;
        uint32_t next_score = 0;

        /* Schedule TLB operations as late as possible, to get more
         * parallelism between shaders.
         */
        if (qpu_inst_is_tlb(inst))
                return next_score;
        next_score++;

        /* Schedule texture read results collection late to hide latency. */
        if (v3d_qpu_waits_on_tmu(inst))
                return next_score;
        next_score++;

        /* XXX perf: We should schedule SFU ALU ops so that the reader is 2
         * instructions after the producer if possible, not just 1.
         */

        /* Default score for things that aren't otherwise special. */
        baseline_score = next_score;
        next_score++;

        /* Schedule texture read setup early to hide their latency better. */
        if (v3d_qpu_writes_tmu(inst))
                return next_score;
        next_score++;

        return baseline_score;
}
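/* The ranking this produces (higher = scheduled earlier):
 *   3: TMU setup writes, 2: everything else (baseline),
 *   1: TMU result reads, 0: TLB accesses.
 */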
static bool
qpu_magic_waddr_is_periph(enum v3d_qpu_waddr waddr)
{
        return (v3d_qpu_magic_waddr_is_tmu(waddr) ||
                v3d_qpu_magic_waddr_is_sfu(waddr) ||
                v3d_qpu_magic_waddr_is_tlb(waddr) ||
                v3d_qpu_magic_waddr_is_vpm(waddr) ||
                v3d_qpu_magic_waddr_is_tsy(waddr));
}
static bool
qpu_accesses_peripheral(const struct v3d_qpu_instr *inst)
{
        if (v3d_qpu_uses_vpm(inst))
                return true;
        if (v3d_qpu_uses_sfu(inst))
                return true;

        if (inst->type == V3D_QPU_INSTR_TYPE_ALU) {
                if (inst->alu.add.op != V3D_QPU_A_NOP &&
                    inst->alu.add.magic_write &&
                    qpu_magic_waddr_is_periph(inst->alu.add.waddr)) {
                        return true;
                }

                if (inst->alu.add.op == V3D_QPU_A_TMUWT)
                        return true;

                if (inst->alu.mul.op != V3D_QPU_M_NOP &&
                    inst->alu.mul.magic_write &&
                    qpu_magic_waddr_is_periph(inst->alu.mul.waddr)) {
                        return true;
                }
        }

        return (inst->sig.ldvpm ||
                inst->sig.ldtmu ||
                inst->sig.ldtlb ||
                inst->sig.ldtlbu ||
                inst->sig.wrtmuc);
}
static bool
qpu_merge_inst(const struct v3d_device_info *devinfo,
               struct v3d_qpu_instr *result,
               const struct v3d_qpu_instr *a,
               const struct v3d_qpu_instr *b)
{
        if (a->type != V3D_QPU_INSTR_TYPE_ALU ||
            b->type != V3D_QPU_INSTR_TYPE_ALU) {
                return false;
        }

        /* Can't do more than one peripheral access in an instruction.
         *
         * XXX: V3D 4.1 allows TMU read along with a VPM read or write, and
         * WRTMUC with a TMU magic register write (other than tmuc).
         */
        if (qpu_accesses_peripheral(a) && qpu_accesses_peripheral(b))
                return false;

        struct v3d_qpu_instr merge = *a;

        if (b->alu.add.op != V3D_QPU_A_NOP) {
                if (a->alu.add.op != V3D_QPU_A_NOP)
                        return false;
                merge.alu.add = b->alu.add;

                merge.flags.ac = b->flags.ac;
                merge.flags.apf = b->flags.apf;
                merge.flags.auf = b->flags.auf;
        }

        if (b->alu.mul.op != V3D_QPU_M_NOP) {
                if (a->alu.mul.op != V3D_QPU_M_NOP)
                        return false;
                merge.alu.mul = b->alu.mul;

                merge.flags.mc = b->flags.mc;
                merge.flags.mpf = b->flags.mpf;
                merge.flags.muf = b->flags.muf;
        }

        if (v3d_qpu_uses_mux(b, V3D_QPU_MUX_A)) {
                if (v3d_qpu_uses_mux(a, V3D_QPU_MUX_A) &&
                    a->raddr_a != b->raddr_a) {
                        return false;
                }
                merge.raddr_a = b->raddr_a;
        }

        if (v3d_qpu_uses_mux(b, V3D_QPU_MUX_B)) {
                if (v3d_qpu_uses_mux(a, V3D_QPU_MUX_B) &&
                    (a->raddr_b != b->raddr_b ||
                     a->sig.small_imm != b->sig.small_imm)) {
                        return false;
                }
                merge.raddr_b = b->raddr_b;
        }

        merge.sig.thrsw |= b->sig.thrsw;
        merge.sig.ldunif |= b->sig.ldunif;
        merge.sig.ldunifrf |= b->sig.ldunifrf;
        merge.sig.ldunifa |= b->sig.ldunifa;
        merge.sig.ldunifarf |= b->sig.ldunifarf;
        merge.sig.ldtmu |= b->sig.ldtmu;
        merge.sig.ldvary |= b->sig.ldvary;
        merge.sig.ldvpm |= b->sig.ldvpm;
        merge.sig.small_imm |= b->sig.small_imm;
        merge.sig.ldtlb |= b->sig.ldtlb;
        merge.sig.ldtlbu |= b->sig.ldtlbu;
        merge.sig.ucb |= b->sig.ucb;
        merge.sig.rotate |= b->sig.rotate;
        merge.sig.wrtmuc |= b->sig.wrtmuc;

        if (v3d_qpu_sig_writes_address(devinfo, &a->sig) &&
            v3d_qpu_sig_writes_address(devinfo, &b->sig))
                return false;
        merge.sig_addr |= b->sig_addr;
        merge.sig_magic |= b->sig_magic;

        uint64_t packed;
        bool ok = v3d_qpu_instr_pack(devinfo, &merge, &packed);

        *result = merge;
        /* No modifying the real instructions on failure. */
        assert(ok || (a != result && b != result));

        return ok;
}
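/* qpu_merge_inst() gets used two ways: choose_instruction_to_schedule()
 * calls it with a scratch result just to test mergeability, then
 * schedule_instructions() calls it with result == a to merge for real.
 * The assert above only permits aliasing when packing succeeded.
 */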
static struct schedule_node *
choose_instruction_to_schedule(const struct v3d_device_info *devinfo,
                               struct choose_scoreboard *scoreboard,
                               struct list_head *schedule_list,
                               struct schedule_node *prev_inst)
{
        struct schedule_node *chosen = NULL;
        int chosen_prio = 0;

        /* Don't pair up anything with a thread switch signal -- emit_thrsw()
         * will handle pairing it along with filling the delay slots.
         */
        if (prev_inst) {
                if (prev_inst->inst->qpu.sig.thrsw)
                        return NULL;
        }

        list_for_each_entry(struct schedule_node, n, schedule_list, link) {
                const struct v3d_qpu_instr *inst = &n->inst->qpu;

                /* Don't choose the branch instruction until it's the last one
                 * left.  We'll move it up to fit its delay slots after we
                 * choose it.
                 */
                if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH &&
                    !list_is_singular(schedule_list)) {
                        continue;
                }

                /* "An instruction must not read from a location in physical
                 *  regfile A or B that was written to by the previous
                 *  instruction."
                 */
                if (reads_too_soon_after_write(scoreboard, n->inst))
                        continue;

                if (writes_too_soon_after_write(devinfo, scoreboard, n->inst))
                        continue;

                /* "A scoreboard wait must not occur in the first two
                 *  instructions of a fragment shader.  This is either the
                 *  explicit Wait for Scoreboard signal or an implicit wait
                 *  with the first tile-buffer read or write instruction."
                 */
                if (pixel_scoreboard_too_soon(scoreboard, inst))
                        continue;

                /* ldunif and ldvary both write r5, but ldunif does so a tick
                 * sooner.  If the ldvary's r5 wasn't used, then ldunif might
                 * otherwise get scheduled so ldunif and ldvary try to update
                 * r5 in the same tick.
                 *
                 * XXX perf: To get good pipelining of a sequence of varying
                 * loads, we need to figure out how to pair the ldvary signal
                 * up to the instruction before the last r5 user in the
                 * previous ldvary sequence.  Currently, it usually pairs with
                 * the ldvary.
                 */
                if ((inst->sig.ldunif || inst->sig.ldunifa) &&
                    scoreboard->tick == scoreboard->last_ldvary_tick + 1) {
                        continue;
                }

                /* If we're trying to pair with another instruction, check
                 * that they're compatible.
                 */
                if (prev_inst) {
                        /* Don't pair up a thread switch signal -- we'll
                         * handle pairing it when we pick it on its own.
                         */
                        if (inst->sig.thrsw)
                                continue;

                        if (prev_inst->inst->uniform != -1 &&
                            n->inst->uniform != -1)
                                continue;

                        /* Don't merge in something that will lock the TLB.
                         * Hopefully what we have in inst will release some
                         * other instructions, allowing us to delay the
                         * TLB-locking instruction until later.
                         */
                        if (!scoreboard->tlb_locked && qpu_inst_is_tlb(inst))
                                continue;

                        struct v3d_qpu_instr merged_inst;
                        if (!qpu_merge_inst(devinfo, &merged_inst,
                                            &prev_inst->inst->qpu, inst)) {
                                continue;
                        }
                }

                int prio = get_instruction_priority(inst);

                /* Found a valid instruction.  If nothing better comes along,
                 * this one works.
                 */
                if (!chosen) {
                        chosen = n;
                        chosen_prio = prio;
                        continue;
                }

                if (prio > chosen_prio) {
                        chosen = n;
                        chosen_prio = prio;
                } else if (prio < chosen_prio) {
                        continue;
                }

                if (n->delay > chosen->delay) {
                        chosen = n;
                        chosen_prio = prio;
                } else if (n->delay < chosen->delay) {
                        continue;
                }
        }

        return chosen;
}
static void
update_scoreboard_for_magic_waddr(struct choose_scoreboard *scoreboard,
                                  enum v3d_qpu_waddr waddr)
{
        if (v3d_qpu_magic_waddr_is_sfu(waddr))
                scoreboard->last_magic_sfu_write_tick = scoreboard->tick;
}
static void
update_scoreboard_for_chosen(struct choose_scoreboard *scoreboard,
                             const struct v3d_qpu_instr *inst)
{
        if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH)
                return;

        assert(inst->type == V3D_QPU_INSTR_TYPE_ALU);

        if (inst->alu.add.op != V3D_QPU_A_NOP) {
                if (inst->alu.add.magic_write) {
                        update_scoreboard_for_magic_waddr(scoreboard,
                                                          inst->alu.add.waddr);
                }
        }

        if (inst->alu.mul.op != V3D_QPU_M_NOP) {
                if (inst->alu.mul.magic_write) {
                        update_scoreboard_for_magic_waddr(scoreboard,
                                                          inst->alu.mul.waddr);
                }
        }

        if (inst->sig.ldvary)
                scoreboard->last_ldvary_tick = scoreboard->tick;

        if (qpu_inst_is_tlb(inst))
                scoreboard->tlb_locked = true;
}
static void
dump_state(const struct v3d_device_info *devinfo,
           struct list_head *schedule_list)
{
        list_for_each_entry(struct schedule_node, n, schedule_list, link) {
                fprintf(stderr, "         t=%4d: ", n->unblocked_time);
                v3d_qpu_dump(devinfo, &n->inst->qpu);
                fprintf(stderr, "\n");

                for (int i = 0; i < n->child_count; i++) {
                        struct schedule_node *child = n->children[i].node;
                        if (!child)
                                continue;

                        fprintf(stderr, "                 - ");
                        v3d_qpu_dump(devinfo, &child->inst->qpu);
                        fprintf(stderr, " (%d parents, %c)\n",
                                child->parent_count,
                                n->children[i].write_after_read ? 'w' : 'r');
                }
        }
}
static uint32_t magic_waddr_latency(enum v3d_qpu_waddr waddr,
                                    const struct v3d_qpu_instr *after)
{
        /* Apply some huge latency between texture fetch requests and getting
         * their results back.
         *
         * FIXME: This is actually pretty bogus.  If we do:
         *
         * mov tmu0_s, a
         * <a bit of math>
         * mov tmu0_s, b
         * load_tmu0
         * <more math>
         * load_tmu0
         *
         * we count that as worse than
         *
         * mov tmu0_s, a
         * mov tmu0_s, b
         * <lots of math>
         * load_tmu0
         * <more math>
         * load_tmu0
         *
         * because we associate the first load_tmu0 with the *second* tmu0_s.
         */
        if (v3d_qpu_magic_waddr_is_tmu(waddr) && v3d_qpu_waits_on_tmu(after))
                return 100;

        /* Assume that anything depending on us is consuming the SFU result. */
        if (v3d_qpu_magic_waddr_is_sfu(waddr))
                return 3;

        return 1;
}
static uint32_t
instruction_latency(struct schedule_node *before, struct schedule_node *after)
{
        const struct v3d_qpu_instr *before_inst = &before->inst->qpu;
        const struct v3d_qpu_instr *after_inst = &after->inst->qpu;
        uint32_t latency = 1;

        if (before_inst->type != V3D_QPU_INSTR_TYPE_ALU ||
            after_inst->type != V3D_QPU_INSTR_TYPE_ALU)
                return latency;

        if (before_inst->alu.add.magic_write) {
                latency = MAX2(latency,
                               magic_waddr_latency(before_inst->alu.add.waddr,
                                                   after_inst));
        }

        if (before_inst->alu.mul.magic_write) {
                latency = MAX2(latency,
                               magic_waddr_latency(before_inst->alu.mul.waddr,
                                                   after_inst));
        }

        return latency;
}
989 compute_delay(struct schedule_node
*n
)
991 if (!n
->child_count
) {
994 for (int i
= 0; i
< n
->child_count
; i
++) {
995 if (!n
->children
[i
].node
->delay
)
996 compute_delay(n
->children
[i
].node
);
997 n
->delay
= MAX2(n
->delay
,
998 n
->children
[i
].node
->delay
+
999 instruction_latency(n
, n
->children
[i
].node
));
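/* delay is thus the critical-path length from this node to the end of the
 * block; choose_instruction_to_schedule() uses it as the tiebreak so the
 * longest dependency chain drains first.
 */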
static void
mark_instruction_scheduled(struct list_head *schedule_list,
                           uint32_t time,
                           struct schedule_node *node,
                           bool war_only)
{
        if (!node)
                return;

        for (int i = node->child_count - 1; i >= 0; i--) {
                struct schedule_node *child =
                        node->children[i].node;

                if (!child)
                        continue;

                if (war_only && !node->children[i].write_after_read)
                        continue;

                /* If the requirement is only that the node not appear before
                 * the last read of its destination, then it can be scheduled
                 * immediately after (or paired with!) the thing reading the
                 * destination.
                 */
                uint32_t latency = 0;
                if (!war_only) {
                        latency = instruction_latency(node,
                                                      node->children[i].node);
                }

                child->unblocked_time = MAX2(child->unblocked_time,
                                             time + latency);
                child->parent_count--;
                if (child->parent_count == 0)
                        list_add(&child->link, schedule_list);

                node->children[i].node = NULL;
        }
}
static void
insert_scheduled_instruction(struct v3d_compile *c,
                             struct qblock *block,
                             struct choose_scoreboard *scoreboard,
                             struct qinst *inst)
{
        list_addtail(&inst->link, &block->instructions);

        update_scoreboard_for_chosen(scoreboard, &inst->qpu);
        c->qpu_inst_count++;
        scoreboard->tick++;
}
static struct qinst *
vir_nop()
{
        struct qreg undef = { QFILE_NULL, 0 };
        struct qinst *qinst = vir_add_inst(V3D_QPU_A_NOP, undef, undef, undef);

        return qinst;
}
static void
emit_nop(struct v3d_compile *c, struct qblock *block,
         struct choose_scoreboard *scoreboard)
{
        insert_scheduled_instruction(c, block, scoreboard, vir_nop());
}
static bool
qpu_instruction_valid_in_thrend_slot(struct v3d_compile *c,
                                     const struct qinst *qinst, int slot)
{
        const struct v3d_qpu_instr *inst = &qinst->qpu;

        /* Only TLB Z writes are prohibited in the last slot, but we don't
         * have those flagged so prohibit all TLB ops for now.
         */
        if (slot == 2 && qpu_inst_is_tlb(inst))
                return false;

        if (slot > 0 && qinst->uniform != ~0)
                return false;

        if (v3d_qpu_uses_vpm(inst))
                return false;

        if (inst->sig.ldvary)
                return false;

        if (inst->type == V3D_QPU_INSTR_TYPE_ALU) {
                /* GFXH-1625: TMUWT not allowed in the final instruction. */
                if (slot == 2 && inst->alu.add.op == V3D_QPU_A_TMUWT)
                        return false;

                /* No writing physical registers at the end. */
                if (!inst->alu.add.magic_write ||
                    !inst->alu.mul.magic_write) {
                        return false;
                }

                if (c->devinfo->ver < 40 && inst->alu.add.op == V3D_QPU_A_SETMSF)
                        return false;

                /* RF0-2 might be overwritten during the delay slots by
                 * fragment shader setup.
                 */
                if (inst->raddr_a < 3 &&
                    (inst->alu.add.a == V3D_QPU_MUX_A ||
                     inst->alu.add.b == V3D_QPU_MUX_A ||
                     inst->alu.mul.a == V3D_QPU_MUX_A ||
                     inst->alu.mul.b == V3D_QPU_MUX_A)) {
                        return false;
                }

                if (inst->raddr_b < 3 &&
                    !inst->sig.small_imm &&
                    (inst->alu.add.a == V3D_QPU_MUX_B ||
                     inst->alu.add.b == V3D_QPU_MUX_B ||
                     inst->alu.mul.a == V3D_QPU_MUX_B ||
                     inst->alu.mul.b == V3D_QPU_MUX_B)) {
                        return false;
                }
        }

        return true;
}
static bool
valid_thrsw_sequence(struct v3d_compile *c, struct choose_scoreboard *scoreboard,
                     struct qinst *qinst, int instructions_in_sequence,
                     bool is_thrend)
{
        /* No emitting our thrsw while the previous thrsw hasn't happened yet. */
        if (scoreboard->last_thrsw_tick + 3 >
            scoreboard->tick - instructions_in_sequence) {
                return false;
        }

        for (int slot = 0; slot < instructions_in_sequence; slot++) {
                /* No scheduling SFU when the result would land in the other
                 * thread.  The simulator complains for safety, though it
                 * would only occur for dead code in our case.
                 */
                if (slot > 0 &&
                    qinst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                    (v3d_qpu_magic_waddr_is_sfu(qinst->qpu.alu.add.waddr) ||
                     v3d_qpu_magic_waddr_is_sfu(qinst->qpu.alu.mul.waddr))) {
                        return false;
                }

                if (slot > 0 && qinst->qpu.sig.ldvary)
                        return false;

                if (is_thrend &&
                    !qpu_instruction_valid_in_thrend_slot(c, qinst, slot)) {
                        return false;
                }

                /* Note that the list is circular, so we can only do this up
                 * to instructions_in_sequence.
                 */
                qinst = (struct qinst *)qinst->link.next;
        }

        return true;
}
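/* The "+ 3" in the check above matches the three thread-switch delay
 * slots that emit_thrsw() fills: a new thrsw can't start until the
 * previous one's slots have retired.
 */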
/**
 * Emits a THRSW signal in the stream, trying to move it up to pair with
 * another instruction.
 */
static int
emit_thrsw(struct v3d_compile *c,
           struct qblock *block,
           struct choose_scoreboard *scoreboard,
           struct qinst *inst,
           bool is_thrend)
{
        int time = 0;

        /* There should be nothing in a thrsw inst being scheduled other than
         * the signal bits.
         */
        assert(inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU);
        assert(inst->qpu.alu.add.op == V3D_QPU_A_NOP);
        assert(inst->qpu.alu.mul.op == V3D_QPU_M_NOP);

        /* Find how far back into previous instructions we can put the THRSW. */
        int slots_filled = 0;
        struct qinst *merge_inst = NULL;
        vir_for_each_inst_rev(prev_inst, block) {
                struct v3d_qpu_sig sig = prev_inst->qpu.sig;
                sig.thrsw = true;
                uint32_t packed_sig;

                if (!v3d_qpu_sig_pack(c->devinfo, &sig, &packed_sig))
                        break;

                if (!valid_thrsw_sequence(c, scoreboard,
                                          prev_inst, slots_filled + 1,
                                          is_thrend)) {
                        break;
                }

                merge_inst = prev_inst;
                if (++slots_filled == 3)
                        break;
        }

        bool needs_free = false;
        if (merge_inst) {
                merge_inst->qpu.sig.thrsw = true;
                needs_free = true;
                scoreboard->last_thrsw_tick = scoreboard->tick - slots_filled;
        } else {
                scoreboard->last_thrsw_tick = scoreboard->tick;
                insert_scheduled_instruction(c, block, scoreboard, inst);
                time++;
                slots_filled++;
                merge_inst = inst;
        }

        /* Insert any extra delay slot NOPs we need. */
        for (int i = 0; i < 3 - slots_filled; i++) {
                emit_nop(c, block, scoreboard);
                time++;
        }

        /* If we're emitting the last THRSW (other than program end), then
         * signal that to the HW by emitting two THRSWs in a row.
         */
        if (inst->is_last_thrsw) {
                struct qinst *second_inst =
                        (struct qinst *)merge_inst->link.next;
                second_inst->qpu.sig.thrsw = true;
        }

        /* If we put our THRSW into another instruction, free up the
         * instruction that didn't end up scheduled into the list.
         */
        if (needs_free)
                free(inst);

        return time;
}
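/* Net effect: the thrsw signal is folded onto an instruction up to three
 * back when valid_thrsw_sequence() allows it, and 3 - slots_filled NOPs
 * pad out the remaining delay slots.  The return value is how many
 * instructions this added to the block.
 */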
static uint32_t
schedule_instructions(struct v3d_compile *c,
                      struct choose_scoreboard *scoreboard,
                      struct qblock *block,
                      struct list_head *schedule_list,
                      enum quniform_contents *orig_uniform_contents,
                      uint32_t *orig_uniform_data,
                      uint32_t *next_uniform)
{
        const struct v3d_device_info *devinfo = c->devinfo;
        uint32_t time = 0;

        if (debug) {
                fprintf(stderr, "initial deps:\n");
                dump_state(devinfo, schedule_list);
                fprintf(stderr, "\n");
        }

        /* Remove non-DAG heads from the list. */
        list_for_each_entry_safe(struct schedule_node, n, schedule_list, link) {
                if (n->parent_count != 0)
                        list_del(&n->link);
        }

        while (!list_empty(schedule_list)) {
                struct schedule_node *chosen =
                        choose_instruction_to_schedule(devinfo,
                                                       scoreboard,
                                                       schedule_list,
                                                       NULL);
                struct schedule_node *merge = NULL;

                /* If there are no valid instructions to schedule, drop a NOP
                 * in.
                 */
                struct qinst *qinst = chosen ? chosen->inst : vir_nop();
                struct v3d_qpu_instr *inst = &qinst->qpu;

                if (debug) {
                        fprintf(stderr, "t=%4d: current list:\n",
                                time);
                        dump_state(devinfo, schedule_list);
                        fprintf(stderr, "t=%4d: chose:   ", time);
                        v3d_qpu_dump(devinfo, inst);
                        fprintf(stderr, "\n");
                }

                /* We can't mark_instruction_scheduled() the chosen inst until
                 * we're done identifying instructions to merge, so put the
                 * merged instructions on a list for a moment.
                 */
                struct list_head merged_list;
                list_inithead(&merged_list);

                /* Schedule this instruction onto the QPU list.  Also try to
                 * find an instruction to pair with it.
                 */
                if (chosen) {
                        time = MAX2(chosen->unblocked_time, time);
                        list_del(&chosen->link);
                        mark_instruction_scheduled(schedule_list, time,
                                                   chosen, true);

                        while ((merge =
                                choose_instruction_to_schedule(devinfo,
                                                               scoreboard,
                                                               schedule_list,
                                                               chosen))) {
                                time = MAX2(merge->unblocked_time, time);
                                list_del(&merge->link);
                                list_addtail(&merge->link, &merged_list);
                                (void)qpu_merge_inst(devinfo, inst,
                                                     inst, &merge->inst->qpu);
                                if (merge->inst->uniform != -1) {
                                        chosen->inst->uniform =
                                                merge->inst->uniform;
                                }

                                if (debug) {
                                        fprintf(stderr, "t=%4d: merging: ",
                                                time);
                                        v3d_qpu_dump(devinfo, &merge->inst->qpu);
                                        fprintf(stderr, "\n");
                                        fprintf(stderr, "         result: ");
                                        v3d_qpu_dump(devinfo, inst);
                                        fprintf(stderr, "\n");
                                }
                        }
                }

                /* Update the uniform index for the rewritten location --
                 * branch target updating will still need to change
                 * c->uniform_data[] using this index.
                 */
                if (qinst->uniform != -1) {
                        if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH)
                                block->branch_uniform = *next_uniform;

                        c->uniform_data[*next_uniform] =
                                orig_uniform_data[qinst->uniform];
                        c->uniform_contents[*next_uniform] =
                                orig_uniform_contents[qinst->uniform];
                        qinst->uniform = *next_uniform;
                        (*next_uniform)++;
                }

                if (debug) {
                        fprintf(stderr, "\n");
                }

                /* Now that we've scheduled a new instruction, some of its
                 * children can be promoted to the list of instructions ready to
                 * be scheduled.  Update the children's unblocked time for this
                 * DAG edge as we do so.
                 */
                mark_instruction_scheduled(schedule_list, time, chosen, false);
                list_for_each_entry(struct schedule_node, merge, &merged_list,
                                    link) {
                        mark_instruction_scheduled(schedule_list, time, merge,
                                                   false);

                        /* The merged VIR instruction doesn't get re-added to the
                         * block, so free it now.
                         */
                        free(merge->inst);
                }

                if (inst->sig.thrsw) {
                        time += emit_thrsw(c, block, scoreboard, qinst, false);
                } else {
                        insert_scheduled_instruction(c, block,
                                                     scoreboard, qinst);

                        if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH) {
                                block->branch_qpu_ip = c->qpu_inst_count - 1;
                                /* Fill the delay slots.
                                 *
                                 * We should fill these with actual instructions,
                                 * instead, but that will probably need to be done
                                 * after this, once we know what the leading
                                 * instructions of the successors are (so we can
                                 * handle A/B register file write latency)
                                 */
                                for (int i = 0; i < 3; i++)
                                        emit_nop(c, block, scoreboard);
                        }
                }
        }

        return time;
}
static uint32_t
qpu_schedule_instructions_block(struct v3d_compile *c,
                                struct choose_scoreboard *scoreboard,
                                struct qblock *block,
                                enum quniform_contents *orig_uniform_contents,
                                uint32_t *orig_uniform_data,
                                uint32_t *next_uniform)
{
        void *mem_ctx = ralloc_context(NULL);
        struct list_head schedule_list;

        list_inithead(&schedule_list);

        /* Wrap each instruction in a scheduler structure. */
        while (!list_empty(&block->instructions)) {
                struct qinst *qinst = (struct qinst *)block->instructions.next;
                struct schedule_node *n =
                        rzalloc(mem_ctx, struct schedule_node);

                n->inst = qinst;

                list_del(&qinst->link);
                list_addtail(&n->link, &schedule_list);
        }

        calculate_forward_deps(c, &schedule_list);
        calculate_reverse_deps(c, &schedule_list);

        list_for_each_entry(struct schedule_node, n, &schedule_list, link) {
                compute_delay(n);
        }

        uint32_t cycles = schedule_instructions(c, scoreboard, block,
                                                &schedule_list,
                                                orig_uniform_contents,
                                                orig_uniform_data,
                                                next_uniform);

        ralloc_free(mem_ctx);

        return cycles;
}
static void
qpu_set_branch_targets(struct v3d_compile *c)
{
        vir_for_each_block(block, c) {
                /* The end block of the program has no branch. */
                if (!block->successors[0])
                        continue;

                /* If there was no branch instruction, then the successor
                 * block must follow immediately after this one.
                 */
                if (block->branch_qpu_ip == ~0) {
                        assert(block->end_qpu_ip + 1 ==
                               block->successors[0]->start_qpu_ip);
                        continue;
                }

                /* Walk back through the delay slots to find the branch
                 * instr.
                 */
                struct list_head *entry = block->instructions.prev;
                for (int i = 0; i < 3; i++)
                        entry = entry->prev;
                struct qinst *branch = container_of(entry, branch, link);
                assert(branch->qpu.type == V3D_QPU_INSTR_TYPE_BRANCH);

                /* Make sure that the if-we-don't-jump
                 * successor was scheduled just after the
                 * delay slots.
                 */
                assert(!block->successors[1] ||
                       block->successors[1]->start_qpu_ip ==
                       block->branch_qpu_ip + 4);

                branch->qpu.branch.offset =
                        ((block->successors[0]->start_qpu_ip -
                          (block->branch_qpu_ip + 4)) *
                         sizeof(uint64_t));

                /* Set up the relative offset to jump in the
                 * uniform stream.
                 *
                 * Use a temporary here, because
                 * uniform_data[inst->uniform] may be shared
                 * between multiple instructions.
                 */
                assert(c->uniform_contents[branch->uniform] == QUNIFORM_CONSTANT);
                c->uniform_data[branch->uniform] =
                        (block->successors[0]->start_uniform -
                         (block->branch_uniform + 1)) * 4;
        }
}
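/* Worked example (instruction addresses made up): a branch at qpu_ip 10
 * has delay slots at 11-13, so execution resumes at 14.  Jumping to a
 * block starting at qpu_ip 20 gives offset (20 - 14) * sizeof(uint64_t) =
 * 48 bytes, and the uniform stream offset is computed the same way in
 * 4-byte uniform units.
 */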
uint32_t
v3d_qpu_schedule_instructions(struct v3d_compile *c)
{
        const struct v3d_device_info *devinfo = c->devinfo;
        struct qblock *end_block = list_last_entry(&c->blocks,
                                                   struct qblock, link);

        /* We reorder the uniforms as we schedule instructions, so save the
         * old data off and replace it.
         */
        uint32_t *uniform_data = c->uniform_data;
        enum quniform_contents *uniform_contents = c->uniform_contents;
        c->uniform_contents = ralloc_array(c, enum quniform_contents,
                                           c->num_uniforms);
        c->uniform_data = ralloc_array(c, uint32_t, c->num_uniforms);
        c->uniform_array_size = c->num_uniforms;
        uint32_t next_uniform = 0;

        struct choose_scoreboard scoreboard;
        memset(&scoreboard, 0, sizeof(scoreboard));
        scoreboard.last_ldvary_tick = -10;
        scoreboard.last_magic_sfu_write_tick = -10;
        scoreboard.last_uniforms_reset_tick = -10;
        scoreboard.last_thrsw_tick = -10;

        if (debug) {
                fprintf(stderr, "Pre-schedule instructions\n");
                vir_for_each_block(block, c) {
                        fprintf(stderr, "BLOCK %d\n", block->index);
                        list_for_each_entry(struct qinst, qinst,
                                            &block->instructions, link) {
                                v3d_qpu_dump(devinfo, &qinst->qpu);
                                fprintf(stderr, "\n");
                        }
                }
                fprintf(stderr, "\n");
        }

        uint32_t cycles = 0;
        vir_for_each_block(block, c) {
                block->start_qpu_ip = c->qpu_inst_count;
                block->branch_qpu_ip = ~0;
                block->start_uniform = next_uniform;

                cycles += qpu_schedule_instructions_block(c,
                                                          &scoreboard,
                                                          block,
                                                          uniform_contents,
                                                          uniform_data,
                                                          &next_uniform);

                block->end_qpu_ip = c->qpu_inst_count - 1;
        }

        /* Emit the program-end THRSW instruction. */
        struct qinst *thrsw = vir_nop();
        thrsw->qpu.sig.thrsw = true;
        emit_thrsw(c, end_block, &scoreboard, thrsw, true);

        qpu_set_branch_targets(c);

        assert(next_uniform == c->num_uniforms);

        return cycles;
}