/*
 * Copyright © 2010 Intel Corporation
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * @file vc4_qpu_schedule.c
 *
 * The basic model of the list scheduler is to take a basic block, compute a
 * DAG of the dependencies, and make a list of the DAG heads.  Heuristically
 * pick a DAG head, then put all the children that are now DAG heads into the
 * list of things to schedule.
 *
 * The goal of scheduling here is to pack pairs of operations together in a
 * single QPU instruction.
 */
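/* In outline (see qpu_schedule_instructions_block() below): each QPU
 * instruction in the block is wrapped in a schedule_node, dependencies are
 * computed with a forward and a reverse walk, per-node critical-path delays
 * are computed bottom-up over the DAG, and then the main loop repeatedly
 * picks a DAG head and tries to merge a second, compatible head into the
 * same 64-bit instruction.
 */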
#include "vc4_qir.h"
#include "vc4_qpu.h"
#include "util/ralloc.h"
#include "util/dag.h"

static bool debug;
struct schedule_node_child;
struct schedule_node {
        struct dag_node dag;
        struct list_head link;
        struct queued_qpu_inst *inst;

        /* Longest cycles + instruction_latency() of any parent of this node. */
        uint32_t unblocked_time;

        /**
         * Minimum number of cycles from scheduling this instruction until the
         * end of the program, based on the slowest dependency chain through
         * the children.
         */
        uint32_t delay;

        /**
         * cycles between this instruction being scheduled and when its result
         * can be used.
         */
        uint32_t latency;

        /**
         * Which uniform from uniform_data[] this instruction read, or -1 if
         * not reading a uniform.
         */
        int uniform;
};
/* When walking the instructions in reverse, we need to swap before/after in
 * the dependency setup.
 */
enum direction { F, R };
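/* add_dep() uses this to orient edges: in the forward walk (F) "before" is
 * the earlier instruction, while in the reverse walk (R) the arguments
 * arrive swapped, which is what lets the same tracking code discover
 * write-after-read dependencies.
 */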
struct schedule_state {
        struct dag *dag;
        struct schedule_node *last_r[6];
        struct schedule_node *last_ra[32];
        struct schedule_node *last_rb[32];
        struct schedule_node *last_sf;
        struct schedule_node *last_vpm_read;
        struct schedule_node *last_tmu_write;
        struct schedule_node *last_tlb;
        struct schedule_node *last_vpm;
        struct schedule_node *last_uniforms_reset;
        enum direction dir;
        /* Estimated cycle when the current instruction would start. */
        uint32_t time;
};
static void
add_dep(struct schedule_state *state,
        struct schedule_node *before,
        struct schedule_node *after,
        bool write)
{
        bool write_after_read = !write && state->dir == R;
        void *edge_data = (void *)(uintptr_t)write_after_read;

        if (!before || !after)
                return;

        assert(before != after);

        if (state->dir == F)
                dag_add_edge(&before->dag, &after->dag, edge_data);
        else
                dag_add_edge(&after->dag, &before->dag, edge_data);
}
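/* The edge data is just the write_after_read flag smuggled through the DAG
 * as a pointer; pre_remove_head() reads it back so that only WAR edges get
 * pruned early, and dump_state() prints it as 'w' or 'r'.
 */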
static void
add_read_dep(struct schedule_state *state,
             struct schedule_node *before,
             struct schedule_node *after)
{
        add_dep(state, before, after, false);
}
static void
add_write_dep(struct schedule_state *state,
              struct schedule_node **before,
              struct schedule_node *after)
{
        add_dep(state, *before, after, true);
        *before = after;
}
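/* Example: flag state is serialized through last_sf.  An instruction that
 * sets flags (QPU_SF) becomes the new last_sf via add_write_dep(), and any
 * conditional instruction takes a read dependency on it (see
 * process_cond_deps()), so conditions always see the flag write they
 * followed in the original program order.
 */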
static bool
qpu_writes_r4(uint64_t inst)
{
        uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);

        switch (sig) {
        case QPU_SIG_COLOR_LOAD:
        case QPU_SIG_LOAD_TMU0:
        case QPU_SIG_LOAD_TMU1:
        case QPU_SIG_ALPHA_MASK_LOAD:
                return true;
        default:
                return false;
        }
}
static void
process_raddr_deps(struct schedule_state *state, struct schedule_node *n,
                   uint32_t raddr, bool is_a)
{
        switch (raddr) {
        case QPU_R_VARY:
                add_write_dep(state, &state->last_r[5], n);
                break;

        case QPU_R_VPM:
                add_write_dep(state, &state->last_vpm_read, n);
                break;

        case QPU_R_UNIF:
                add_read_dep(state, state->last_uniforms_reset, n);
                break;

        case QPU_R_NOP:
        case QPU_R_ELEM_QPU:
        case QPU_R_XY_PIXEL_COORD:
        case QPU_R_MS_REV_FLAGS:
                break;

        default:
                if (raddr < 32) {
                        if (is_a)
                                add_read_dep(state, state->last_ra[raddr], n);
                        else
                                add_read_dep(state, state->last_rb[raddr], n);
                } else {
                        fprintf(stderr, "unknown raddr %d\n", raddr);
                        abort();
                }
                break;
        }
}
static bool
is_tmu_write(uint32_t waddr)
{
        switch (waddr) {
        case QPU_W_TMU0_S:
        case QPU_W_TMU0_T:
        case QPU_W_TMU0_R:
        case QPU_W_TMU0_B:
        case QPU_W_TMU1_S:
        case QPU_W_TMU1_T:
        case QPU_W_TMU1_R:
        case QPU_W_TMU1_B:
                return true;
        default:
                return false;
        }
}
static bool
reads_uniform(uint64_t inst)
{
        if (QPU_GET_FIELD(inst, QPU_SIG) == QPU_SIG_LOAD_IMM)
                return false;

        return (QPU_GET_FIELD(inst, QPU_RADDR_A) == QPU_R_UNIF ||
                (QPU_GET_FIELD(inst, QPU_RADDR_B) == QPU_R_UNIF &&
                 QPU_GET_FIELD(inst, QPU_SIG) != QPU_SIG_SMALL_IMM) ||
                is_tmu_write(QPU_GET_FIELD(inst, QPU_WADDR_ADD)) ||
                is_tmu_write(QPU_GET_FIELD(inst, QPU_WADDR_MUL)));
}
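/* Note the small-immediate case above: with QPU_SIG_SMALL_IMM the raddr_b
 * field encodes the immediate value rather than a register read, so
 * raddr_b == QPU_R_UNIF does not consume a uniform there.
 */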
static void
process_mux_deps(struct schedule_state *state, struct schedule_node *n,
                 uint32_t mux)
{
        if (mux != QPU_MUX_A && mux != QPU_MUX_B)
                add_read_dep(state, state->last_r[mux], n);
}
static void
process_waddr_deps(struct schedule_state *state, struct schedule_node *n,
                   uint32_t waddr, bool is_add)
{
        uint64_t inst = n->inst->inst;
        bool is_a = is_add ^ ((inst & QPU_WS) != 0);

        if (waddr < 32) {
                if (is_a)
                        add_write_dep(state, &state->last_ra[waddr], n);
                else
                        add_write_dep(state, &state->last_rb[waddr], n);
        } else if (is_tmu_write(waddr)) {
                add_write_dep(state, &state->last_tmu_write, n);
                add_read_dep(state, state->last_uniforms_reset, n);
        } else if (qpu_waddr_is_tlb(waddr) ||
                   waddr == QPU_W_MS_FLAGS) {
                add_write_dep(state, &state->last_tlb, n);
        } else {
                switch (waddr) {
                case QPU_W_ACC0:
                case QPU_W_ACC1:
                case QPU_W_ACC2:
                case QPU_W_ACC3:
                case QPU_W_ACC5:
                        add_write_dep(state, &state->last_r[waddr - QPU_W_ACC0],
                                      n);
                        break;

                case QPU_W_VPM:
                        add_write_dep(state, &state->last_vpm, n);
                        break;

                case QPU_W_VPMVCD_SETUP:
                        if (is_a)
                                add_write_dep(state, &state->last_vpm_read, n);
                        else
                                add_write_dep(state, &state->last_vpm, n);
                        break;

                case QPU_W_SFU_RECIP:
                case QPU_W_SFU_RECIPSQRT:
                case QPU_W_SFU_EXP:
                case QPU_W_SFU_LOG:
                        add_write_dep(state, &state->last_r[4], n);
                        break;

                case QPU_W_TLB_STENCIL_SETUP:
                        /* This isn't a TLB operation that does things like
                         * implicitly lock the scoreboard, but it does have to
                         * appear before TLB_Z, and each of the TLB_STENCILs
                         * have to schedule in the same order relative to each
                         * other.
                         */
                        add_write_dep(state, &state->last_tlb, n);
                        break;

                case QPU_W_MS_FLAGS:
                        add_write_dep(state, &state->last_tlb, n);
                        break;

                case QPU_W_UNIFORMS_ADDRESS:
                        add_write_dep(state, &state->last_uniforms_reset, n);
                        break;

                case QPU_W_NOP:
                        break;

                default:
                        fprintf(stderr, "Unknown waddr %d\n", waddr);
                        abort();
                }
        }
}
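/* The is_a computation above decodes the write-swap bit: by default the add
 * result lands in regfile A and the mul result in regfile B, and QPU_WS
 * flips the two, hence XORing is_add with the WS bit.  The same decoding is
 * mirrored in update_scoreboard_for_chosen() below.
 */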
static void
process_cond_deps(struct schedule_state *state, struct schedule_node *n,
                  uint32_t cond)
{
        switch (cond) {
        case QPU_COND_NEVER:
        case QPU_COND_ALWAYS:
                break;
        default:
                add_read_dep(state, state->last_sf, n);
                break;
        }
}
/**
 * Common code for dependencies that need to be tracked both forward and
 * backward.
 *
 * This is for things like "all reads of r4 have to happen between the r4
 * writes that surround them".
 */
static void
calculate_deps(struct schedule_state *state, struct schedule_node *n)
{
        uint64_t inst = n->inst->inst;
        uint32_t add_op = QPU_GET_FIELD(inst, QPU_OP_ADD);
        uint32_t mul_op = QPU_GET_FIELD(inst, QPU_OP_MUL);
        uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
        uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
        uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
        uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
        uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
        uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
        uint32_t mul_a = QPU_GET_FIELD(inst, QPU_MUL_A);
        uint32_t mul_b = QPU_GET_FIELD(inst, QPU_MUL_B);
        uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);

        if (sig != QPU_SIG_LOAD_IMM) {
                process_raddr_deps(state, n, raddr_a, true);
                if (sig != QPU_SIG_SMALL_IMM &&
                    sig != QPU_SIG_BRANCH)
                        process_raddr_deps(state, n, raddr_b, false);
        }

        if (add_op != QPU_A_NOP) {
                process_mux_deps(state, n, add_a);
                process_mux_deps(state, n, add_b);
        }
        if (mul_op != QPU_M_NOP) {
                process_mux_deps(state, n, mul_a);
                process_mux_deps(state, n, mul_b);
        }

        process_waddr_deps(state, n, waddr_add, true);
        process_waddr_deps(state, n, waddr_mul, false);
        if (qpu_writes_r4(inst))
                add_write_dep(state, &state->last_r[4], n);

        switch (sig) {
        case QPU_SIG_SW_BREAKPOINT:
        case QPU_SIG_NONE:
        case QPU_SIG_SMALL_IMM:
        case QPU_SIG_LOAD_IMM:
                break;

        case QPU_SIG_THREAD_SWITCH:
        case QPU_SIG_LAST_THREAD_SWITCH:
                /* All accumulator contents and flags are undefined after the
                 * switch.
                 */
                for (int i = 0; i < ARRAY_SIZE(state->last_r); i++)
                        add_write_dep(state, &state->last_r[i], n);
                add_write_dep(state, &state->last_sf, n);

                /* Scoreboard-locking operations have to stay after the last
                 * thread switch.
                 */
                add_write_dep(state, &state->last_tlb, n);

                add_write_dep(state, &state->last_tmu_write, n);
                break;

        case QPU_SIG_LOAD_TMU0:
        case QPU_SIG_LOAD_TMU1:
                /* TMU loads are coming from a FIFO, so ordering is important.
                 */
                add_write_dep(state, &state->last_tmu_write, n);
                break;

        case QPU_SIG_COLOR_LOAD:
                add_read_dep(state, state->last_tlb, n);
                break;

        case QPU_SIG_BRANCH:
                add_read_dep(state, state->last_sf, n);
                break;

        case QPU_SIG_PROG_END:
        case QPU_SIG_WAIT_FOR_SCOREBOARD:
        case QPU_SIG_SCOREBOARD_UNLOCK:
        case QPU_SIG_COVERAGE_LOAD:
        case QPU_SIG_COLOR_LOAD_END:
        case QPU_SIG_ALPHA_MASK_LOAD:
                fprintf(stderr, "Unhandled signal bits %d\n", sig);
                abort();
        }

        process_cond_deps(state, n, QPU_GET_FIELD(inst, QPU_COND_ADD));
        process_cond_deps(state, n, QPU_GET_FIELD(inst, QPU_COND_MUL));
        if ((inst & QPU_SF) && sig != QPU_SIG_BRANCH)
                add_write_dep(state, &state->last_sf, n);
}
static void
calculate_forward_deps(struct vc4_compile *c, struct dag *dag,
                       struct list_head *schedule_list)
{
        struct schedule_state state;

        memset(&state, 0, sizeof(state));
        state.dag = dag;
        state.dir = F;

        list_for_each_entry(struct schedule_node, node, schedule_list, link)
                calculate_deps(&state, node);
}
static void
calculate_reverse_deps(struct vc4_compile *c, struct dag *dag,
                       struct list_head *schedule_list)
{
        struct schedule_state state;

        memset(&state, 0, sizeof(state));
        state.dag = dag;
        state.dir = R;

        list_for_each_entry_rev(struct schedule_node, node, schedule_list,
                                link) {
                calculate_deps(&state, (struct schedule_node *)node);
        }
}
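/* Two passes over the same calculate_deps() code give all the orderings we
 * need: the forward walk keeps reads and writes after the writes they
 * follow (RAW and WAW), and the reverse walk, with before/after swapped,
 * keeps writes from migrating above earlier reads (WAR).
 */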
struct choose_scoreboard {
        struct dag *dag;
        int tick;
        int last_sfu_write_tick;
        int last_uniforms_reset_tick;
        uint32_t last_waddr_a, last_waddr_b;
        bool tlb_locked;
};
static bool
reads_too_soon_after_write(struct choose_scoreboard *scoreboard, uint64_t inst)
{
        uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
        uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
        uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);

        /* Full immediate loads don't read any registers. */
        if (sig == QPU_SIG_LOAD_IMM)
                return false;

        uint32_t src_muxes[] = {
                QPU_GET_FIELD(inst, QPU_ADD_A),
                QPU_GET_FIELD(inst, QPU_ADD_B),
                QPU_GET_FIELD(inst, QPU_MUL_A),
                QPU_GET_FIELD(inst, QPU_MUL_B),
        };
        for (int i = 0; i < ARRAY_SIZE(src_muxes); i++) {
                if ((src_muxes[i] == QPU_MUX_A &&
                     raddr_a < 32 &&
                     scoreboard->last_waddr_a == raddr_a) ||
                    (src_muxes[i] == QPU_MUX_B &&
                     sig != QPU_SIG_SMALL_IMM &&
                     raddr_b < 32 &&
                     scoreboard->last_waddr_b == raddr_b)) {
                        return true;
                }

                if (src_muxes[i] == QPU_MUX_R4) {
                        if (scoreboard->tick -
                            scoreboard->last_sfu_write_tick <= 2) {
                                return true;
                        }
                }
        }

        if (sig == QPU_SIG_SMALL_IMM &&
            QPU_GET_FIELD(inst, QPU_SMALL_IMM) >= QPU_SMALL_IMM_MUL_ROT) {
                uint32_t mux_a = QPU_GET_FIELD(inst, QPU_MUL_A);
                uint32_t mux_b = QPU_GET_FIELD(inst, QPU_MUL_B);

                if (scoreboard->last_waddr_a == mux_a + QPU_W_ACC0 ||
                    scoreboard->last_waddr_a == mux_b + QPU_W_ACC0 ||
                    scoreboard->last_waddr_b == mux_a + QPU_W_ACC0 ||
                    scoreboard->last_waddr_b == mux_b + QPU_W_ACC0) {
                        return true;
                }
        }

        if (reads_uniform(inst) &&
            scoreboard->tick - scoreboard->last_uniforms_reset_tick <= 2) {
                return true;
        }

        return false;
}
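/* Example of the regfile hazard this catches:
 *
 * mov ra1, ...
 * add ..., ra1, ...
 *
 * The read of ra1 must not land in the instruction immediately following
 * the write, so choose_instruction_to_schedule() skips such candidates.
 */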
static bool
pixel_scoreboard_too_soon(struct choose_scoreboard *scoreboard, uint64_t inst)
{
        return (scoreboard->tick < 2 && qpu_inst_is_tlb(inst));
}
static int
get_instruction_priority(uint64_t inst)
{
        uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
        uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
        uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
        uint32_t baseline_score;
        uint32_t next_score = 0;

        /* Schedule TLB operations as late as possible, to get more
         * parallelism between shaders.
         */
        if (qpu_inst_is_tlb(inst))
                return next_score;
        next_score++;

        /* Schedule texture read results collection late to hide latency. */
        if (sig == QPU_SIG_LOAD_TMU0 || sig == QPU_SIG_LOAD_TMU1)
                return next_score;
        next_score++;

        /* Default score for things that aren't otherwise special. */
        baseline_score = next_score;
        next_score++;

        /* Schedule texture read setup early to hide their latency better. */
        if (is_tmu_write(waddr_add) || is_tmu_write(waddr_mul))
                return next_score;
        next_score++;

        return baseline_score;
}
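/* The resulting priority order, lowest to highest: TLB access, TMU result
 * loads, everything else, TMU fetch setup -- texture requests issue as
 * early as possible while scoreboard-locking work is pushed as late as
 * possible.
 */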
static struct schedule_node *
choose_instruction_to_schedule(struct choose_scoreboard *scoreboard,
                               struct list_head *schedule_list,
                               struct schedule_node *prev_inst)
{
        struct schedule_node *chosen = NULL;
        int chosen_prio = 0;

        /* Don't pair up anything with a thread switch signal -- emit_thrsw()
         * will handle pairing it along with filling the delay slots.
         */
        if (prev_inst) {
                uint32_t prev_sig = QPU_GET_FIELD(prev_inst->inst->inst,
                                                  QPU_SIG);
                if (prev_sig == QPU_SIG_THREAD_SWITCH ||
                    prev_sig == QPU_SIG_LAST_THREAD_SWITCH) {
                        return NULL;
                }
        }

        list_for_each_entry(struct schedule_node, n, &scoreboard->dag->heads,
                            dag.link) {
                uint64_t inst = n->inst->inst;
                uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);

                /* Don't choose the branch instruction until it's the last one
                 * left.  XXX: We could potentially choose it before it's the
                 * last one, if the remaining instructions fit in the delay
                 * slots.
                 */
                if (sig == QPU_SIG_BRANCH &&
                    !list_is_singular(&scoreboard->dag->heads)) {
                        continue;
                }

                /* "An instruction must not read from a location in physical
                 *  regfile A or B that was written to by the previous
                 *  instruction."
                 */
                if (reads_too_soon_after_write(scoreboard, inst))
                        continue;

                /* "A scoreboard wait must not occur in the first two
                 *  instructions of a fragment shader. This is either the
                 *  explicit Wait for Scoreboard signal or an implicit wait
                 *  with the first tile-buffer read or write instruction."
                 */
                if (pixel_scoreboard_too_soon(scoreboard, inst))
                        continue;

                /* If we're trying to pair with another instruction, check
                 * that they're compatible.
                 */
                if (prev_inst) {
                        /* Don't pair up a thread switch signal -- we'll
                         * handle pairing it when we pick it on its own.
                         */
                        if (sig == QPU_SIG_THREAD_SWITCH ||
                            sig == QPU_SIG_LAST_THREAD_SWITCH) {
                                continue;
                        }

                        if (prev_inst->uniform != -1 && n->uniform != -1)
                                continue;

                        /* Don't merge in something that will lock the TLB.
                         * Hopefully what we have in inst will release some
                         * other instructions, allowing us to delay the
                         * TLB-locking instruction until later.
                         */
                        if (!scoreboard->tlb_locked && qpu_inst_is_tlb(inst))
                                continue;

                        inst = qpu_merge_inst(prev_inst->inst->inst, inst);
                        if (!inst)
                                continue;
                }

                int prio = get_instruction_priority(inst);

                /* Found a valid instruction.  If nothing better comes along,
                 * this one works.
                 */
                if (!chosen) {
                        chosen = n;
                        chosen_prio = prio;
                        continue;
                }

                if (prio > chosen_prio) {
                        chosen = n;
                        chosen_prio = prio;
                } else if (prio < chosen_prio) {
                        continue;
                }

                if (n->delay > chosen->delay) {
                        chosen = n;
                        chosen_prio = prio;
                } else if (n->delay < chosen->delay) {
                        continue;
                }
        }

        return chosen;
}
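/* Ties between equal-priority candidates go to the node with the larger
 * delay, i.e. the longer remaining dependency chain, since postponing it
 * would stretch the whole schedule.
 */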
static void
update_scoreboard_for_chosen(struct choose_scoreboard *scoreboard,
                             uint64_t inst)
{
        uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
        uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);

        if (!(inst & QPU_WS)) {
                scoreboard->last_waddr_a = waddr_add;
                scoreboard->last_waddr_b = waddr_mul;
        } else {
                scoreboard->last_waddr_b = waddr_add;
                scoreboard->last_waddr_a = waddr_mul;
        }

        if ((waddr_add >= QPU_W_SFU_RECIP && waddr_add <= QPU_W_SFU_LOG) ||
            (waddr_mul >= QPU_W_SFU_RECIP && waddr_mul <= QPU_W_SFU_LOG)) {
                scoreboard->last_sfu_write_tick = scoreboard->tick;
        }

        if (waddr_add == QPU_W_UNIFORMS_ADDRESS ||
            waddr_mul == QPU_W_UNIFORMS_ADDRESS) {
                scoreboard->last_uniforms_reset_tick = scoreboard->tick;
        }

        if (qpu_inst_is_tlb(inst))
                scoreboard->tlb_locked = true;
}
static void
dump_state(struct dag *dag)
{
        list_for_each_entry(struct schedule_node, n, &dag->heads, dag.link) {
                fprintf(stderr, "         t=%4d: ", n->unblocked_time);
                vc4_qpu_disasm(&n->inst->inst, 1);
                fprintf(stderr, "\n");

                util_dynarray_foreach(&n->dag.edges, struct dag_edge, edge) {
                        struct schedule_node *child =
                                (struct schedule_node *)edge->child;
                        if (!child)
                                continue;

                        fprintf(stderr, "                 - ");
                        vc4_qpu_disasm(&child->inst->inst, 1);
                        fprintf(stderr, " (%d parents, %c)\n",
                                child->dag.parent_count,
                                edge->data ? 'w' : 'r');
                }
        }
}
static uint32_t waddr_latency(uint32_t waddr, uint64_t after)
{
        if (waddr < 32)
                return 2;

        /* Apply some huge latency between texture fetch requests and getting
         * their results back.
         *
         * FIXME: This is actually pretty bogus.  If we do:
         *
         * mov tmu0_s, a
         * <a bit of math>
         * mov tmu0_s, b
         * load_tmu0
         * <more math>
         * load_tmu0
         *
         * we count that as worse than
         *
         * mov tmu0_s, a
         * mov tmu0_s, b
         * <lots of math>
         * load_tmu0
         * <more math>
         * load_tmu0
         *
         * because we associate the first load_tmu0 with the *second* tmu0_s.
         */
        if (waddr == QPU_W_TMU0_S) {
                if (QPU_GET_FIELD(after, QPU_SIG) == QPU_SIG_LOAD_TMU0)
                        return 100;
        }
        if (waddr == QPU_W_TMU1_S) {
                if (QPU_GET_FIELD(after, QPU_SIG) == QPU_SIG_LOAD_TMU1)
                        return 100;
        }

        switch (waddr) {
        case QPU_W_SFU_RECIP:
        case QPU_W_SFU_RECIPSQRT:
        case QPU_W_SFU_EXP:
        case QPU_W_SFU_LOG:
                return 3;

        default:
                return 1;
        }
}
static uint32_t
instruction_latency(struct schedule_node *before, struct schedule_node *after)
{
        uint64_t before_inst = before->inst->inst;
        uint64_t after_inst = after->inst->inst;

        return MAX2(waddr_latency(QPU_GET_FIELD(before_inst, QPU_WADDR_ADD),
                                  after_inst),
                    waddr_latency(QPU_GET_FIELD(before_inst, QPU_WADDR_MUL),
                                  after_inst));
}
/** Recursive computation of the delay member of a node. */
static void
compute_delay(struct dag_node *node, void *state)
{
        struct schedule_node *n = (struct schedule_node *)node;

        n->delay = 1;

        util_dynarray_foreach(&n->dag.edges, struct dag_edge, edge) {
                struct schedule_node *child =
                        (struct schedule_node *)edge->child;

                n->delay = MAX2(n->delay, (child->delay +
                                           instruction_latency(n, child)));
        }
}
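/* compute_delay() is invoked through dag_traverse_bottom_up() (see
 * qpu_schedule_instructions_block()), so each child's delay is final by the
 * time its parents are visited.
 */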
/* Removes a DAG head, but removing only the WAR edges. (dag_prune_head()
 * should be called on it later to finish pruning the other edges).
 */
static void
pre_remove_head(struct dag *dag, struct schedule_node *n)
{
        list_delinit(&n->dag.link);

        util_dynarray_foreach(&n->dag.edges, struct dag_edge, edge) {
                if (edge->data)
                        dag_remove_edge(dag, edge);
        }
}
static void
mark_instruction_scheduled(struct dag *dag,
                           uint32_t time,
                           struct schedule_node *node)
{
        if (!node)
                return;

        util_dynarray_foreach(&node->dag.edges, struct dag_edge, edge) {
                struct schedule_node *child =
                        (struct schedule_node *)edge->child;

                if (!child)
                        continue;

                uint32_t latency = instruction_latency(node, child);

                child->unblocked_time = MAX2(child->unblocked_time,
                                             time + latency);
        }
        dag_prune_head(dag, &node->dag);
}
/**
 * Emits a THRSW/LTHRSW signal in the stream, trying to move it up to pair
 * with another instruction.
 */
static void
emit_thrsw(struct vc4_compile *c,
           struct choose_scoreboard *scoreboard,
           uint64_t inst)
{
        uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);

        /* There should be nothing in a thrsw inst being scheduled other than
         * the signal bits.
         */
        assert(QPU_GET_FIELD(inst, QPU_OP_ADD) == QPU_A_NOP);
        assert(QPU_GET_FIELD(inst, QPU_OP_MUL) == QPU_M_NOP);

        /* Try to find an earlier scheduled instruction that we can merge the
         * thrsw into.
         */
        int thrsw_ip = c->qpu_inst_count;
        for (int i = 1; i <= MIN2(c->qpu_inst_count, 3); i++) {
                uint64_t prev_instr = c->qpu_insts[c->qpu_inst_count - i];
                uint32_t prev_sig = QPU_GET_FIELD(prev_instr, QPU_SIG);

                if (prev_sig == QPU_SIG_NONE)
                        thrsw_ip = c->qpu_inst_count - i;
                else
                        break;
        }

        if (thrsw_ip != c->qpu_inst_count) {
                /* Merge the thrsw into the existing instruction. */
                c->qpu_insts[thrsw_ip] =
                        QPU_UPDATE_FIELD(c->qpu_insts[thrsw_ip], sig, QPU_SIG);
        } else {
                qpu_serialize_one_inst(c, inst);
                update_scoreboard_for_chosen(scoreboard, inst);
        }

        /* Fill the delay slots. */
        while (c->qpu_inst_count < thrsw_ip + 3) {
                update_scoreboard_for_chosen(scoreboard, qpu_NOP());
                qpu_serialize_one_inst(c, qpu_NOP());
        }
}
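/* For example, if the last three serialized instructions carry no signal,
 * the thrsw signal is folded into the earliest of them, and NOPs are
 * appended as needed so that the two instructions following the switch
 * exist to act as its delay slots.
 */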
static uint32_t
schedule_instructions(struct vc4_compile *c,
                      struct choose_scoreboard *scoreboard,
                      struct qblock *block,
                      struct list_head *schedule_list,
                      enum quniform_contents *orig_uniform_contents,
                      uint32_t *orig_uniform_data,
                      uint32_t *next_uniform)
{
        uint32_t time = 0;

        while (!list_is_empty(&scoreboard->dag->heads)) {
                struct schedule_node *chosen =
                        choose_instruction_to_schedule(scoreboard,
                                                       schedule_list,
                                                       NULL);
                struct schedule_node *merge = NULL;

                /* If there are no valid instructions to schedule, drop a NOP
                 * in.
                 */
                uint64_t inst = chosen ? chosen->inst->inst : qpu_NOP();

                if (debug) {
                        fprintf(stderr, "t=%4d: current list:\n",
                                time);
                        dump_state(scoreboard->dag);
                        fprintf(stderr, "t=%4d: chose: ", time);
                        vc4_qpu_disasm(&inst, 1);
                        fprintf(stderr, "\n");
                }

                /* Schedule this instruction onto the QPU list. Also try to
                 * find an instruction to pair with it.
                 */
                if (chosen) {
                        time = MAX2(chosen->unblocked_time, time);
                        pre_remove_head(scoreboard->dag, chosen);
                        if (chosen->uniform != -1) {
                                c->uniform_data[*next_uniform] =
                                        orig_uniform_data[chosen->uniform];
                                c->uniform_contents[*next_uniform] =
                                        orig_uniform_contents[chosen->uniform];
                                (*next_uniform)++;
                        }

                        merge = choose_instruction_to_schedule(scoreboard,
                                                               schedule_list,
                                                               chosen);
                        if (merge) {
                                time = MAX2(merge->unblocked_time, time);
                                inst = qpu_merge_inst(inst, merge->inst->inst);
                                assert(inst != 0);
                                if (merge->uniform != -1) {
                                        c->uniform_data[*next_uniform] =
                                                orig_uniform_data[merge->uniform];
                                        c->uniform_contents[*next_uniform] =
                                                orig_uniform_contents[merge->uniform];
                                        (*next_uniform)++;
                                }

                                if (debug) {
                                        fprintf(stderr, "t=%4d: merging: ",
                                                time);
                                        vc4_qpu_disasm(&merge->inst->inst, 1);
                                        fprintf(stderr, "\n");
                                        fprintf(stderr, "         resulting in: ");
                                        vc4_qpu_disasm(&inst, 1);
                                        fprintf(stderr, "\n");
                                }
                        }
                }

                if (debug) {
                        fprintf(stderr, "\n");
                }

                /* Now that we've scheduled a new instruction, some of its
                 * children can be promoted to the list of instructions ready to
                 * be scheduled.  Update the children's unblocked time for this
                 * DAG edge as we do so.
                 */
                mark_instruction_scheduled(scoreboard->dag, time, chosen);
                mark_instruction_scheduled(scoreboard->dag, time, merge);

                if (QPU_GET_FIELD(inst, QPU_SIG) == QPU_SIG_THREAD_SWITCH ||
                    QPU_GET_FIELD(inst, QPU_SIG) == QPU_SIG_LAST_THREAD_SWITCH) {
                        emit_thrsw(c, scoreboard, inst);
                } else {
                        qpu_serialize_one_inst(c, inst);
                        update_scoreboard_for_chosen(scoreboard, inst);
                }

                scoreboard->tick++;
                time++;

                if (QPU_GET_FIELD(inst, QPU_SIG) == QPU_SIG_BRANCH) {
                        block->branch_qpu_ip = c->qpu_inst_count - 1;
                        /* Fill the delay slots.
                         *
                         * We should fill these with actual instructions,
                         * instead, but that will probably need to be done
                         * after this, once we know what the leading
                         * instructions of the successors are (so we can
                         * handle A/B register file write latency)
                         */
                        inst = qpu_NOP();
                        update_scoreboard_for_chosen(scoreboard, inst);
                        qpu_serialize_one_inst(c, inst);
                        qpu_serialize_one_inst(c, inst);
                        qpu_serialize_one_inst(c, inst);
                        scoreboard->tick += 3;
                }
        }

        return time;
}
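/* Note the uniform bookkeeping above: because instructions are reordered,
 * the uniform stream is rewritten in schedule order, with each consumed
 * entry copied from the original arrays into c->uniform_data and
 * c->uniform_contents as its instruction is emitted.
 */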
static uint32_t
qpu_schedule_instructions_block(struct vc4_compile *c,
                                struct choose_scoreboard *scoreboard,
                                struct qblock *block,
                                enum quniform_contents *orig_uniform_contents,
                                uint32_t *orig_uniform_data,
                                uint32_t *next_uniform)
{
        scoreboard->dag = dag_create(NULL);
        struct list_head setup_list;

        list_inithead(&setup_list);

        /* Wrap each instruction in a scheduler structure. */
        uint32_t next_sched_uniform = *next_uniform;
        while (!list_is_empty(&block->qpu_inst_list)) {
                struct queued_qpu_inst *inst =
                        (struct queued_qpu_inst *)block->qpu_inst_list.next;
                struct schedule_node *n = rzalloc(scoreboard->dag,
                                                  struct schedule_node);

                dag_init_node(scoreboard->dag, &n->dag);
                n->inst = inst;

                if (reads_uniform(inst->inst)) {
                        n->uniform = next_sched_uniform++;
                } else {
                        n->uniform = -1;
                }
                list_del(&inst->link);
                list_addtail(&n->link, &setup_list);
        }

        calculate_forward_deps(c, scoreboard->dag, &setup_list);
        calculate_reverse_deps(c, scoreboard->dag, &setup_list);

        dag_traverse_bottom_up(scoreboard->dag, compute_delay, NULL);

        uint32_t cycles = schedule_instructions(c, scoreboard, block,
                                                &setup_list,
                                                orig_uniform_contents,
                                                orig_uniform_data,
                                                next_uniform);

        ralloc_free(scoreboard->dag);
        scoreboard->dag = NULL;

        return cycles;
}
static void
qpu_set_branch_targets(struct vc4_compile *c)
{
        qir_for_each_block(block, c) {
                /* The end block of the program has no branch. */
                if (!block->successors[0])
                        continue;

                /* If there was no branch instruction, then the successor
                 * block must follow immediately after this one.
                 */
                if (block->branch_qpu_ip == ~0) {
                        assert(block->end_qpu_ip + 1 ==
                               block->successors[0]->start_qpu_ip);
                        continue;
                }

                /* Set the branch target for the block that doesn't follow
                 * immediately after ours.
                 */
                uint64_t *branch_inst = &c->qpu_insts[block->branch_qpu_ip];
                assert(QPU_GET_FIELD(*branch_inst, QPU_SIG) == QPU_SIG_BRANCH);
                assert(QPU_GET_FIELD(*branch_inst, QPU_BRANCH_TARGET) == 0);

                uint32_t branch_target =
                        (block->successors[0]->start_qpu_ip -
                         (block->branch_qpu_ip + 4)) * sizeof(uint64_t);
                *branch_inst = (*branch_inst |
                                QPU_SET_FIELD(branch_target, QPU_BRANCH_TARGET));

                /* Make sure that the if-we-don't-jump successor was scheduled
                 * just after the delay slots.
                 */
                if (block->successors[1]) {
                        assert(block->successors[1]->start_qpu_ip ==
                               block->branch_qpu_ip + 4);
                }
        }
}
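/* The branch_qpu_ip + 4 above is the instruction just past the three branch
 * delay slots: it is where the fall-through successor must start, and the
 * point branch targets are encoded relative to (in bytes, hence the
 * sizeof(uint64_t) scaling).
 */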
uint32_t
qpu_schedule_instructions(struct vc4_compile *c)
{
        /* We reorder the uniforms as we schedule instructions, so save the
         * old data off and replace it.
         */
        uint32_t *uniform_data = c->uniform_data;
        enum quniform_contents *uniform_contents = c->uniform_contents;
        c->uniform_contents = ralloc_array(c, enum quniform_contents,
                                           c->num_uniforms);
        c->uniform_data = ralloc_array(c, uint32_t, c->num_uniforms);
        c->uniform_array_size = c->num_uniforms;
        uint32_t next_uniform = 0;

        struct choose_scoreboard scoreboard;
        memset(&scoreboard, 0, sizeof(scoreboard));
        scoreboard.last_waddr_a = ~0;
        scoreboard.last_waddr_b = ~0;
        scoreboard.last_sfu_write_tick = -10;
        scoreboard.last_uniforms_reset_tick = -10;

        if (debug) {
                fprintf(stderr, "Pre-schedule instructions\n");
                qir_for_each_block(block, c) {
                        fprintf(stderr, "BLOCK %d\n", block->index);
                        list_for_each_entry(struct queued_qpu_inst, q,
                                            &block->qpu_inst_list, link) {
                                vc4_qpu_disasm(&q->inst, 1);
                                fprintf(stderr, "\n");
                        }
                }
                fprintf(stderr, "\n");
        }

        uint32_t cycles = 0;
        qir_for_each_block(block, c) {
                block->start_qpu_ip = c->qpu_inst_count;
                block->branch_qpu_ip = ~0;

                cycles += qpu_schedule_instructions_block(c,
                                                          &scoreboard,
                                                          block,
                                                          uniform_contents,
                                                          uniform_data,
                                                          &next_uniform);

                block->end_qpu_ip = c->qpu_inst_count - 1;
        }

        qpu_set_branch_targets(c);

        assert(next_uniform == c->num_uniforms);

        if (debug) {
                fprintf(stderr, "Post-schedule instructions\n");
                vc4_qpu_disasm(c->qpu_insts, c->qpu_inst_count);
                fprintf(stderr, "\n");
        }

        return cycles;
}