/*
 * Copyright © 2010 Intel Corporation
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
/**
 * @file vc4_qpu_schedule.c
 *
 * The basic model of the list scheduler is to take a basic block, compute a
 * DAG of the dependencies, and make a list of the DAG heads.  Heuristically
 * pick a DAG head, then put all the children that are now DAG heads into the
 * list of things to schedule.
 *
 * The goal of scheduling here is to pack pairs of operations together in a
 * single QPU instruction.
 */
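/* Rough shape of the selection loop in schedule_instructions() below.  The
 * helper names are the real ones from this file; this is just orientation,
 * not the exact control flow:
 *
 *     while (!list_empty(schedule_list)) {
 *             chosen = choose_instruction_to_schedule(...);
 *             merge = choose_instruction_to_schedule(..., chosen);
 *             emit qpu_merge_inst(chosen, merge), or a NOP if nothing is
 *             ready, then mark_instruction_scheduled() to promote the
 *             children that have become new DAG heads.
 *     }
 */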
#include "vc4_qir.h"
#include "vc4_qpu.h"
#include "util/ralloc.h"
static bool debug;

struct schedule_node_child;
struct schedule_node {
        struct list_head link;
        struct queued_qpu_inst *inst;
        struct schedule_node_child *children;
        uint32_t child_count;
        uint32_t child_array_size;
        uint32_t parent_count;

        /* Longest cycles + instruction_latency() of any parent of this node. */
        uint32_t unblocked_time;

        /**
         * Minimum number of cycles from scheduling this instruction until the
         * end of the program, based on the slowest dependency chain through
         * the children.
         */
        uint32_t delay;

        /**
         * Cycles between this instruction being scheduled and when its result
         * can be consumed.
         */
        uint32_t latency;

        /**
         * Which uniform from uniform_data[] this instruction read, or -1 if
         * not reading a uniform.
         */
        int uniform;
};
struct schedule_node_child {
        struct schedule_node *node;
        bool write_after_read;
};
/* When walking the instructions in reverse, we need to swap before/after in
 * add_dep().
 */
enum direction { F, R };
struct schedule_state {
        struct schedule_node *last_r[6];
        struct schedule_node *last_ra[32];
        struct schedule_node *last_rb[32];
        struct schedule_node *last_sf;
        struct schedule_node *last_vpm_read;
        struct schedule_node *last_tmu_write;
        struct schedule_node *last_tlb;
        struct schedule_node *last_vpm;
        struct schedule_node *last_uniforms_reset;
        enum direction dir;

        /* Estimated cycle when the current instruction would start. */
        uint32_t time;
};
static void
add_dep(struct schedule_state *state,
        struct schedule_node *before,
        struct schedule_node *after,
        bool write)
{
        bool write_after_read = !write && state->dir == R;

        if (!before || !after)
                return;

        assert(before != after);

        if (state->dir == R) {
                struct schedule_node *t = before;
                before = after;
                after = t;
        }

        for (int i = 0; i < before->child_count; i++) {
                if (before->children[i].node == after &&
                    (before->children[i].write_after_read == write_after_read)) {
                        return;
                }
        }

        if (before->child_array_size <= before->child_count) {
                before->child_array_size = MAX2(before->child_array_size * 2, 16);
                before->children = reralloc(before, before->children,
                                            struct schedule_node_child,
                                            before->child_array_size);
        }

        before->children[before->child_count].node = after;
        before->children[before->child_count].write_after_read =
                write_after_read;
        before->child_count++;
        after->parent_count++;
}
static void
add_read_dep(struct schedule_state *state,
             struct schedule_node *before,
             struct schedule_node *after)
{
        add_dep(state, before, after, false);
}
static void
add_write_dep(struct schedule_state *state,
              struct schedule_node **before,
              struct schedule_node *after)
{
        add_dep(state, *before, after, true);
        *before = after;
}
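/* All of the dependency tracking below funnels through the two helpers above:
 * add_read_dep() orders a node after the last writer of whatever it reads,
 * while add_write_dep() does the same and then records the node as the new
 * "last writer".  calculate_deps() is run once forward and once in reverse
 * (see enum direction), which is how the read edges also turn into
 * write-after-read edges.
 */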
static bool
qpu_writes_r4(uint64_t inst)
{
        uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);

        switch (sig) {
        case QPU_SIG_COLOR_LOAD:
        case QPU_SIG_LOAD_TMU0:
        case QPU_SIG_LOAD_TMU1:
        case QPU_SIG_ALPHA_MASK_LOAD:
                return true;
        default:
                return false;
        }
}
static void
process_raddr_deps(struct schedule_state *state, struct schedule_node *n,
                   uint32_t raddr, bool is_a)
{
        switch (raddr) {
        case QPU_R_VARY:
                add_write_dep(state, &state->last_r[5], n);
                break;

        case QPU_R_VPM:
                add_write_dep(state, &state->last_vpm_read, n);
                break;

        case QPU_R_UNIF:
                add_read_dep(state, state->last_uniforms_reset, n);
                break;

        case QPU_R_NOP:
        case QPU_R_ELEM_QPU:
        case QPU_R_XY_PIXEL_COORD:
        case QPU_R_MS_REV_FLAGS:
                break;

        default:
                if (raddr < 32) {
                        if (is_a)
                                add_read_dep(state, state->last_ra[raddr], n);
                        else
                                add_read_dep(state, state->last_rb[raddr], n);
                } else {
                        fprintf(stderr, "unknown raddr %d\n", raddr);
                        abort();
                }
                break;
        }
}
static bool
is_tmu_write(uint32_t waddr)
{
        return (waddr >= QPU_W_TMU0_S &&
                waddr <= QPU_W_TMU1_B);
}
static bool
reads_uniform(uint64_t inst)
{
        if (QPU_GET_FIELD(inst, QPU_SIG) == QPU_SIG_LOAD_IMM)
                return false;

        return (QPU_GET_FIELD(inst, QPU_RADDR_A) == QPU_R_UNIF ||
                (QPU_GET_FIELD(inst, QPU_RADDR_B) == QPU_R_UNIF &&
                 QPU_GET_FIELD(inst, QPU_SIG) != QPU_SIG_SMALL_IMM) ||
                is_tmu_write(QPU_GET_FIELD(inst, QPU_WADDR_ADD)) ||
                is_tmu_write(QPU_GET_FIELD(inst, QPU_WADDR_MUL)));
}
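/* Note that is_tmu_write() counts as a uniform read above: the TMU appears to
 * consume a uniform of its own for the texture parameters when a texture
 * coordinate is written, so those instructions also have to keep their place
 * in the uniform stream.
 */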
static void
process_mux_deps(struct schedule_state *state, struct schedule_node *n,
                 uint32_t mux)
{
        if (mux != QPU_MUX_A && mux != QPU_MUX_B)
                add_read_dep(state, state->last_r[mux], n);
}
static void
process_waddr_deps(struct schedule_state *state, struct schedule_node *n,
                   uint32_t waddr, bool is_add)
{
        uint64_t inst = n->inst->inst;
        bool is_a = is_add ^ ((inst & QPU_WS) != 0);

        if (waddr < 32) {
                if (is_a)
                        add_write_dep(state, &state->last_ra[waddr], n);
                else
                        add_write_dep(state, &state->last_rb[waddr], n);
        } else if (is_tmu_write(waddr)) {
                add_write_dep(state, &state->last_tmu_write, n);
                add_read_dep(state, state->last_uniforms_reset, n);
        } else if (qpu_waddr_is_tlb(waddr) ||
                   waddr == QPU_W_MS_FLAGS) {
                add_write_dep(state, &state->last_tlb, n);
        } else {
                switch (waddr) {
                case QPU_W_ACC0:
                case QPU_W_ACC1:
                case QPU_W_ACC2:
                case QPU_W_ACC3:
                case QPU_W_ACC5:
                        add_write_dep(state, &state->last_r[waddr - QPU_W_ACC0],
                                      n);
                        break;

                case QPU_W_VPM:
                        add_write_dep(state, &state->last_vpm, n);
                        break;

                case QPU_W_VPMVCD_SETUP:
                        if (is_a)
                                add_write_dep(state, &state->last_vpm_read, n);
                        else
                                add_write_dep(state, &state->last_vpm, n);
                        break;

                case QPU_W_SFU_RECIP:
                case QPU_W_SFU_RECIPSQRT:
                case QPU_W_SFU_EXP:
                case QPU_W_SFU_LOG:
                        add_write_dep(state, &state->last_r[4], n);
                        break;

                case QPU_W_TLB_STENCIL_SETUP:
                        /* This isn't a TLB operation that does things like
                         * implicitly lock the scoreboard, but it does have to
                         * appear before TLB_Z, and each of the TLB_STENCILs
                         * have to schedule in the same order relative to each
                         * other.
                         */
                        add_write_dep(state, &state->last_tlb, n);
                        break;

                case QPU_W_MS_FLAGS:
                        add_write_dep(state, &state->last_tlb, n);
                        break;

                case QPU_W_UNIFORMS_ADDRESS:
                        add_write_dep(state, &state->last_uniforms_reset, n);
                        break;

                case QPU_W_NOP:
                        break;

                default:
                        fprintf(stderr, "Unknown waddr %d\n", waddr);
                        abort();
                }
        }
}
static void
process_cond_deps(struct schedule_state *state, struct schedule_node *n,
                  uint32_t cond)
{
        switch (cond) {
        case QPU_COND_NEVER:
        case QPU_COND_ALWAYS:
                break;
        default:
                add_read_dep(state, state->last_sf, n);
                break;
        }
}
/**
 * Common code for dependencies that need to be tracked both forward and
 * backward.
 *
 * This is for things like "all reads of r4 have to happen between the r4
 * writes that surround them".
 */
static void
calculate_deps(struct schedule_state *state, struct schedule_node *n)
{
        uint64_t inst = n->inst->inst;
        uint32_t add_op = QPU_GET_FIELD(inst, QPU_OP_ADD);
        uint32_t mul_op = QPU_GET_FIELD(inst, QPU_OP_MUL);
        uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
        uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
        uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
        uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
        uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
        uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
        uint32_t mul_a = QPU_GET_FIELD(inst, QPU_MUL_A);
        uint32_t mul_b = QPU_GET_FIELD(inst, QPU_MUL_B);
        uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);

        if (sig != QPU_SIG_LOAD_IMM) {
                process_raddr_deps(state, n, raddr_a, true);
                if (sig != QPU_SIG_SMALL_IMM &&
                    sig != QPU_SIG_BRANCH)
                        process_raddr_deps(state, n, raddr_b, false);
        }

        if (add_op != QPU_A_NOP) {
                process_mux_deps(state, n, add_a);
                process_mux_deps(state, n, add_b);
        }
        if (mul_op != QPU_M_NOP) {
                process_mux_deps(state, n, mul_a);
                process_mux_deps(state, n, mul_b);
        }

        process_waddr_deps(state, n, waddr_add, true);
        process_waddr_deps(state, n, waddr_mul, false);
        if (qpu_writes_r4(inst))
                add_write_dep(state, &state->last_r[4], n);

        switch (sig) {
        case QPU_SIG_SW_BREAKPOINT:
        case QPU_SIG_NONE:
        case QPU_SIG_THREAD_SWITCH:
        case QPU_SIG_LAST_THREAD_SWITCH:
        case QPU_SIG_SMALL_IMM:
        case QPU_SIG_LOAD_IMM:
                break;

        case QPU_SIG_LOAD_TMU0:
        case QPU_SIG_LOAD_TMU1:
                /* TMU loads are coming from a FIFO, so ordering is important.
                 */
                add_write_dep(state, &state->last_tmu_write, n);
                break;

        case QPU_SIG_COLOR_LOAD:
                add_read_dep(state, state->last_tlb, n);
                break;

        case QPU_SIG_BRANCH:
                add_read_dep(state, state->last_sf, n);
                break;

        case QPU_SIG_PROG_END:
        case QPU_SIG_WAIT_FOR_SCOREBOARD:
        case QPU_SIG_SCOREBOARD_UNLOCK:
        case QPU_SIG_COVERAGE_LOAD:
        case QPU_SIG_COLOR_LOAD_END:
        case QPU_SIG_ALPHA_MASK_LOAD:
                fprintf(stderr, "Unhandled signal bits %d\n", sig);
                abort();
        }

        process_cond_deps(state, n, QPU_GET_FIELD(inst, QPU_COND_ADD));
        process_cond_deps(state, n, QPU_GET_FIELD(inst, QPU_COND_MUL));
        if ((inst & QPU_SF) && sig != QPU_SIG_BRANCH)
                add_write_dep(state, &state->last_sf, n);
}
static void
calculate_forward_deps(struct vc4_compile *c, struct list_head *schedule_list)
{
        struct schedule_state state;

        memset(&state, 0, sizeof(state));
        state.dir = F;

        list_for_each_entry(struct schedule_node, node, schedule_list, link)
                calculate_deps(&state, node);
}
static void
calculate_reverse_deps(struct vc4_compile *c, struct list_head *schedule_list)
{
        struct list_head *node;
        struct schedule_state state;

        memset(&state, 0, sizeof(state));
        state.dir = R;

        for (node = schedule_list->prev; schedule_list != node; node = node->prev) {
                calculate_deps(&state, (struct schedule_node *)node);
        }
}
struct choose_scoreboard {
        int tick;
        int last_sfu_write_tick;
        int last_uniforms_reset_tick;
        uint32_t last_waddr_a, last_waddr_b;
};
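/* last_waddr_a/b record which physical regfile rows the previous instruction
 * wrote, for reads_too_soon_after_write() below, while tick is the count of
 * instructions emitted so far, compared against last_sfu_write_tick and
 * last_uniforms_reset_tick to keep results from being consumed too early.
 */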
static bool
reads_too_soon_after_write(struct choose_scoreboard *scoreboard, uint64_t inst)
{
        uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
        uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
        uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
        uint32_t src_muxes[] = {
                QPU_GET_FIELD(inst, QPU_ADD_A),
                QPU_GET_FIELD(inst, QPU_ADD_B),
                QPU_GET_FIELD(inst, QPU_MUL_A),
                QPU_GET_FIELD(inst, QPU_MUL_B),
        };
        for (int i = 0; i < ARRAY_SIZE(src_muxes); i++) {
                if ((src_muxes[i] == QPU_MUX_A &&
                     raddr_a < 32 &&
                     scoreboard->last_waddr_a == raddr_a) ||
                    (src_muxes[i] == QPU_MUX_B &&
                     sig != QPU_SIG_SMALL_IMM &&
                     raddr_b < 32 &&
                     scoreboard->last_waddr_b == raddr_b)) {
                        return true;
                }

                if (src_muxes[i] == QPU_MUX_R4) {
                        if (scoreboard->tick -
                            scoreboard->last_sfu_write_tick <= 2) {
                                return true;
                        }
                }
        }

        if (sig == QPU_SIG_SMALL_IMM &&
            QPU_GET_FIELD(inst, QPU_SMALL_IMM) >= QPU_SMALL_IMM_MUL_ROT) {
                uint32_t mux_a = QPU_GET_FIELD(inst, QPU_MUL_A);
                uint32_t mux_b = QPU_GET_FIELD(inst, QPU_MUL_B);

                if (scoreboard->last_waddr_a == mux_a + QPU_W_ACC0 ||
                    scoreboard->last_waddr_a == mux_b + QPU_W_ACC0 ||
                    scoreboard->last_waddr_b == mux_a + QPU_W_ACC0 ||
                    scoreboard->last_waddr_b == mux_b + QPU_W_ACC0) {
                        return true;
                }
        }

        if (reads_uniform(inst) &&
            scoreboard->tick - scoreboard->last_uniforms_reset_tick <= 2) {
                return true;
        }

        return false;
}
static bool
pixel_scoreboard_too_soon(struct choose_scoreboard *scoreboard, uint64_t inst)
{
        return (scoreboard->tick < 2 && qpu_inst_is_tlb(inst));
}
static int
get_instruction_priority(uint64_t inst)
{
        uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
        uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
        uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
        uint32_t baseline_score;
        uint32_t next_score = 0;

        /* Schedule TLB operations as late as possible, to get more
         * parallelism between shaders.
         */
        if (qpu_inst_is_tlb(inst))
                return next_score;
        next_score++;

        /* Schedule texture read results collection late to hide latency. */
        if (sig == QPU_SIG_LOAD_TMU0 || sig == QPU_SIG_LOAD_TMU1)
                return next_score;
        next_score++;

        /* Default score for things that aren't otherwise special. */
        baseline_score = next_score;
        next_score++;

        /* Schedule texture read setup early to hide their latency better. */
        if (is_tmu_write(waddr_add) || is_tmu_write(waddr_mul))
                return next_score;
        next_score++;

        return baseline_score;
}
static struct schedule_node *
choose_instruction_to_schedule(struct choose_scoreboard *scoreboard,
                               struct list_head *schedule_list,
                               struct schedule_node *prev_inst)
{
        struct schedule_node *chosen = NULL;
        int chosen_prio = 0;

        list_for_each_entry(struct schedule_node, n, schedule_list, link) {
                uint64_t inst = n->inst->inst;

                /* Don't choose the branch instruction until it's the last one
                 * left.  XXX: We could potentially choose it before it's the
                 * last one, if the remaining instructions fit in the delay
                 * slots.
                 */
                if (QPU_GET_FIELD(inst, QPU_SIG) == QPU_SIG_BRANCH &&
                    !list_is_singular(schedule_list)) {
                        continue;
                }

                /* "An instruction must not read from a location in physical
                 *  regfile A or B that was written to by the previous
                 *  instruction."
                 */
                if (reads_too_soon_after_write(scoreboard, inst))
                        continue;

                /* "A scoreboard wait must not occur in the first two
                 *  instructions of a fragment shader. This is either the
                 *  explicit Wait for Scoreboard signal or an implicit wait
                 *  with the first tile-buffer read or write instruction."
                 */
                if (pixel_scoreboard_too_soon(scoreboard, inst))
                        continue;

                /* If we're trying to pair with another instruction, check
                 * that they're compatible.
                 */
                if (prev_inst) {
                        if (prev_inst->uniform != -1 && n->uniform != -1)
                                continue;

                        inst = qpu_merge_inst(prev_inst->inst->inst, inst);
                        if (!inst)
                                continue;
                }

                int prio = get_instruction_priority(inst);

                /* Found a valid instruction.  If nothing better comes along,
                 * this one works.
                 */
                if (!chosen) {
                        chosen = n;
                        chosen_prio = prio;
                        continue;
                }

                if (prio > chosen_prio) {
                        chosen = n;
                        chosen_prio = prio;
                } else if (prio < chosen_prio) {
                        continue;
                }

                if (n->delay > chosen->delay) {
                        chosen = n;
                        chosen_prio = prio;
                } else if (n->delay < chosen->delay) {
                        continue;
                }
        }

        return chosen;
}
static void
update_scoreboard_for_chosen(struct choose_scoreboard *scoreboard,
                             uint64_t inst)
{
        uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
        uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);

        if (!(inst & QPU_WS)) {
                scoreboard->last_waddr_a = waddr_add;
                scoreboard->last_waddr_b = waddr_mul;
        } else {
                scoreboard->last_waddr_b = waddr_add;
                scoreboard->last_waddr_a = waddr_mul;
        }

        if ((waddr_add >= QPU_W_SFU_RECIP && waddr_add <= QPU_W_SFU_LOG) ||
            (waddr_mul >= QPU_W_SFU_RECIP && waddr_mul <= QPU_W_SFU_LOG)) {
                scoreboard->last_sfu_write_tick = scoreboard->tick;
        }

        if (waddr_add == QPU_W_UNIFORMS_ADDRESS ||
            waddr_mul == QPU_W_UNIFORMS_ADDRESS) {
                scoreboard->last_uniforms_reset_tick = scoreboard->tick;
        }
}
static void
dump_state(struct list_head *schedule_list)
{
        list_for_each_entry(struct schedule_node, n, schedule_list, link) {
                fprintf(stderr, "         t=%4d: ", n->unblocked_time);
                vc4_qpu_disasm(&n->inst->inst, 1);
                fprintf(stderr, "\n");

                for (int i = 0; i < n->child_count; i++) {
                        struct schedule_node *child = n->children[i].node;
                        if (!child)
                                continue;

                        fprintf(stderr, "                 - ");
                        vc4_qpu_disasm(&child->inst->inst, 1);
                        fprintf(stderr, " (%d parents, %c)\n",
                                child->parent_count,
                                n->children[i].write_after_read ? 'w' : 'r');
                }
        }
}
static uint32_t waddr_latency(uint32_t waddr, uint64_t after)
{
        if (waddr < 32)
                return 2;

        /* Apply some huge latency between texture fetch requests and getting
         * their results back.
         */
        if (waddr == QPU_W_TMU0_S) {
                if (QPU_GET_FIELD(after, QPU_SIG) == QPU_SIG_LOAD_TMU0)
                        return 100;
        }
        if (waddr == QPU_W_TMU1_S) {
                if (QPU_GET_FIELD(after, QPU_SIG) == QPU_SIG_LOAD_TMU1)
                        return 100;
        }

        switch (waddr) {
        case QPU_W_SFU_RECIP:
        case QPU_W_SFU_RECIPSQRT:
        case QPU_W_SFU_EXP:
        case QPU_W_SFU_LOG:
                return 3;

        default:
                return 1;
        }
}
static uint32_t
instruction_latency(struct schedule_node *before, struct schedule_node *after)
{
        uint64_t before_inst = before->inst->inst;
        uint64_t after_inst = after->inst->inst;

        return MAX2(waddr_latency(QPU_GET_FIELD(before_inst, QPU_WADDR_ADD),
                                  after_inst),
                    waddr_latency(QPU_GET_FIELD(before_inst, QPU_WADDR_MUL),
                                  after_inst));
}
/** Recursive computation of the delay member of a node. */
static void
compute_delay(struct schedule_node *n)
{
        if (!n->child_count) {
                n->delay = 1;
        } else {
                for (int i = 0; i < n->child_count; i++) {
                        if (!n->children[i].node->delay)
                                compute_delay(n->children[i].node);
                        n->delay = MAX2(n->delay,
                                        n->children[i].node->delay +
                                        instruction_latency(n, n->children[i].node));
                }
        }
}
static void
mark_instruction_scheduled(struct list_head *schedule_list,
                           uint32_t time,
                           struct schedule_node *node,
                           bool war_only)
{
        if (!node)
                return;

        for (int i = node->child_count - 1; i >= 0; i--) {
                struct schedule_node *child =
                        node->children[i].node;

                if (!child)
                        continue;

                if (war_only && !node->children[i].write_after_read)
                        continue;

                /* If the requirement is only that the node not appear before
                 * the last read of its destination, then it can be scheduled
                 * immediately after (or paired with!) the thing reading the
                 * destination.
                 */
                uint32_t latency = 0;
                if (!war_only) {
                        latency = instruction_latency(node,
                                                      node->children[i].node);
                }

                child->unblocked_time = MAX2(child->unblocked_time,
                                             time + latency);
                child->parent_count--;
                if (child->parent_count == 0)
                        list_add(&child->link, schedule_list);

                node->children[i].node = NULL;
        }
}
static uint32_t
schedule_instructions(struct vc4_compile *c,
                      struct choose_scoreboard *scoreboard,
                      struct qblock *block,
                      struct list_head *schedule_list,
                      enum quniform_contents *orig_uniform_contents,
                      uint32_t *orig_uniform_data,
                      uint32_t *next_uniform)
{
        uint32_t time = 0;

        if (debug) {
                fprintf(stderr, "initial deps:\n");
                dump_state(schedule_list);
                fprintf(stderr, "\n");
        }

        /* Remove non-DAG heads from the list. */
        list_for_each_entry_safe(struct schedule_node, n, schedule_list, link) {
                if (n->parent_count != 0)
                        list_del(&n->link);
        }

        while (!list_empty(schedule_list)) {
                struct schedule_node *chosen =
                        choose_instruction_to_schedule(scoreboard,
                                                       schedule_list,
                                                       NULL);
                struct schedule_node *merge = NULL;

                /* If there are no valid instructions to schedule, drop a NOP
                 * in.
                 */
                uint64_t inst = chosen ? chosen->inst->inst : qpu_NOP();

                if (debug) {
                        fprintf(stderr, "t=%4d: current list:\n",
                                time);
                        dump_state(schedule_list);
                        fprintf(stderr, "t=%4d: chose: ", time);
                        vc4_qpu_disasm(&inst, 1);
                        fprintf(stderr, "\n");
                }

                /* Schedule this instruction onto the QPU list. Also try to
                 * find an instruction to pair with it.
                 */
                if (chosen) {
                        time = MAX2(chosen->unblocked_time, time);
                        list_del(&chosen->link);
                        mark_instruction_scheduled(schedule_list, time,
                                                   chosen, true);
                        if (chosen->uniform != -1) {
                                c->uniform_data[*next_uniform] =
                                        orig_uniform_data[chosen->uniform];
                                c->uniform_contents[*next_uniform] =
                                        orig_uniform_contents[chosen->uniform];
                                (*next_uniform)++;
                        }

                        merge = choose_instruction_to_schedule(scoreboard,
                                                               schedule_list,
                                                               chosen);
                        if (merge) {
                                time = MAX2(merge->unblocked_time, time);
                                list_del(&merge->link);
                                inst = qpu_merge_inst(inst, merge->inst->inst);

                                if (merge->uniform != -1) {
                                        c->uniform_data[*next_uniform] =
                                                orig_uniform_data[merge->uniform];
                                        c->uniform_contents[*next_uniform] =
                                                orig_uniform_contents[merge->uniform];
                                        (*next_uniform)++;
                                }

                                if (debug) {
                                        fprintf(stderr, "t=%4d: merging: ",
                                                time);
                                        vc4_qpu_disasm(&merge->inst->inst, 1);
                                        fprintf(stderr, "\n");
                                        fprintf(stderr, "            resulting in: ");
                                        vc4_qpu_disasm(&inst, 1);
                                        fprintf(stderr, "\n");
                                }
                        }
                }

                if (debug)
                        fprintf(stderr, "\n");

                qpu_serialize_one_inst(c, inst);

                update_scoreboard_for_chosen(scoreboard, inst);

                /* Now that we've scheduled a new instruction, some of its
                 * children can be promoted to the list of instructions ready to
                 * be scheduled.  Update the children's unblocked time for this
                 * DAG edge as we do so.
                 */
                mark_instruction_scheduled(schedule_list, time, chosen, false);
                mark_instruction_scheduled(schedule_list, time, merge, false);

                scoreboard->tick++;
                time++;

                if (QPU_GET_FIELD(inst, QPU_SIG) == QPU_SIG_BRANCH) {
                        block->branch_qpu_ip = c->qpu_inst_count - 1;
                        /* Fill the delay slots.
                         *
                         * We should fill these with actual instructions,
                         * instead, but that will probably need to be done
                         * after this, once we know what the leading
                         * instructions of the successors are (so we can
                         * handle A/B register file write latency)
                         */
                        inst = qpu_NOP();
                        update_scoreboard_for_chosen(scoreboard, inst);
                        qpu_serialize_one_inst(c, inst);
                        qpu_serialize_one_inst(c, inst);
                        qpu_serialize_one_inst(c, inst);
                }
        }

        return time;
}
static uint32_t
qpu_schedule_instructions_block(struct vc4_compile *c,
                                struct choose_scoreboard *scoreboard,
                                struct qblock *block,
                                enum quniform_contents *orig_uniform_contents,
                                uint32_t *orig_uniform_data,
                                uint32_t *next_uniform)
{
        void *mem_ctx = ralloc_context(NULL);
        struct list_head schedule_list;

        list_inithead(&schedule_list);

        /* Wrap each instruction in a scheduler structure. */
        uint32_t next_sched_uniform = *next_uniform;
        while (!list_empty(&block->qpu_inst_list)) {
                struct queued_qpu_inst *inst =
                        (struct queued_qpu_inst *)block->qpu_inst_list.next;
                struct schedule_node *n = rzalloc(mem_ctx, struct schedule_node);

                n->inst = inst;

                if (reads_uniform(inst->inst)) {
                        n->uniform = next_sched_uniform++;
                } else {
                        n->uniform = -1;
                }
                list_del(&inst->link);
                list_addtail(&n->link, &schedule_list);
        }

        calculate_forward_deps(c, &schedule_list);
        calculate_reverse_deps(c, &schedule_list);

        list_for_each_entry(struct schedule_node, n, &schedule_list, link) {
                compute_delay(n);
        }

        uint32_t cycles = schedule_instructions(c, scoreboard, block,
                                                &schedule_list,
                                                orig_uniform_contents,
                                                orig_uniform_data,
                                                next_uniform);

        ralloc_free(mem_ctx);

        return cycles;
}
static void
qpu_set_branch_targets(struct vc4_compile *c)
{
        qir_for_each_block(block, c) {
                /* The end block of the program has no branch. */
                if (!block->successors[0])
                        continue;

                /* If there was no branch instruction, then the successor
                 * block must follow immediately after this one.
                 */
                if (block->branch_qpu_ip == ~0) {
                        assert(block->end_qpu_ip + 1 ==
                               block->successors[0]->start_qpu_ip);
                        continue;
                }

                /* Set the branch target for the block that doesn't follow
                 * immediately after ours.
                 */
                uint64_t *branch_inst = &c->qpu_insts[block->branch_qpu_ip];
                assert(QPU_GET_FIELD(*branch_inst, QPU_SIG) == QPU_SIG_BRANCH);
                assert(QPU_GET_FIELD(*branch_inst, QPU_BRANCH_TARGET) == 0);

                uint32_t branch_target =
                        (block->successors[0]->start_qpu_ip -
                         (block->branch_qpu_ip + 4)) * sizeof(uint64_t);
                *branch_inst = (*branch_inst |
                                QPU_SET_FIELD(branch_target, QPU_BRANCH_TARGET));
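                /* Note that the target written above is in bytes, measured
                 * from the instruction four slots past the branch (the branch
                 * itself plus its three delay slots), which is where the
                 * branch actually takes effect.
                 */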
                /* Make sure that the if-we-don't-jump successor was scheduled
                 * just after the delay slots.
                 */
                if (block->successors[1]) {
                        assert(block->successors[1]->start_qpu_ip ==
                               block->branch_qpu_ip + 4);
                }
        }
}
uint32_t
qpu_schedule_instructions(struct vc4_compile *c)
{
        /* We reorder the uniforms as we schedule instructions, so save the
         * old data off and replace it.
         */
        uint32_t *uniform_data = c->uniform_data;
        enum quniform_contents *uniform_contents = c->uniform_contents;
        c->uniform_contents = ralloc_array(c, enum quniform_contents,
                                           c->num_uniforms);
        c->uniform_data = ralloc_array(c, uint32_t, c->num_uniforms);
        c->uniform_array_size = c->num_uniforms;
        uint32_t next_uniform = 0;

        struct choose_scoreboard scoreboard;
        memset(&scoreboard, 0, sizeof(scoreboard));
        scoreboard.last_waddr_a = ~0;
        scoreboard.last_waddr_b = ~0;
        scoreboard.last_sfu_write_tick = -10;
        scoreboard.last_uniforms_reset_tick = -10;

        if (debug) {
                fprintf(stderr, "Pre-schedule instructions\n");
                qir_for_each_block(block, c) {
                        fprintf(stderr, "BLOCK %d\n", block->index);
                        list_for_each_entry(struct queued_qpu_inst, q,
                                            &block->qpu_inst_list, link) {
                                vc4_qpu_disasm(&q->inst, 1);
                                fprintf(stderr, "\n");
                        }
                }
                fprintf(stderr, "\n");
        }

        uint32_t cycles = 0;
        qir_for_each_block(block, c) {
                block->start_qpu_ip = c->qpu_inst_count;
                block->branch_qpu_ip = ~0;

                cycles += qpu_schedule_instructions_block(c,
                                                          &scoreboard,
                                                          block,
                                                          uniform_contents,
                                                          uniform_data,
                                                          &next_uniform);

                block->end_qpu_ip = c->qpu_inst_count - 1;
        }

        qpu_set_branch_targets(c);

        assert(next_uniform == c->num_uniforms);

        if (debug) {
                fprintf(stderr, "Post-schedule instructions\n");
                vc4_qpu_disasm(c->qpu_insts, c->qpu_inst_count);
                fprintf(stderr, "\n");
        }

        return cycles;
}