/*
 * Copyright © 2010 Intel Corporation
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
/**
 * @file vc4_qpu_schedule.c
 *
 * The basic model of the list scheduler is to take a basic block, compute a
 * DAG of the dependencies, and make a list of the DAG heads.  Heuristically
 * pick a DAG head, then put all the children that are now DAG heads into the
 * list of things to schedule.
 *
 * The goal of scheduling here is to pack pairs of operations together in a
 * single QPU instruction.
 */
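/* Rough shape of the loop this file implements -- a sketch, not the exact
 * code (see schedule_instructions() below for the real version):
 *
 *     build the dependency DAG for the block
 *     ready list = DAG heads (nodes with no unscheduled parents)
 *     while the ready list is not empty:
 *             pick the highest-priority node the hardware rules allow
 *             try to merge a second compatible node into the same instruction
 *             emit the (possibly paired) instruction
 *             move children whose last parent just scheduled onto the list
 */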
#include "vc4_qir.h"
#include "vc4_qpu.h"
#include "util/ralloc.h"

static bool debug;
struct schedule_node_child;
struct schedule_node {
        struct list_head link;
        struct queued_qpu_inst *inst;
        struct schedule_node_child *children;
        uint32_t child_count;
        uint32_t child_array_size;
        uint32_t parent_count;

        /* Longest cycles + instruction_latency() of any parent of this node. */
        uint32_t unblocked_time;

        /* Minimum number of cycles from scheduling this instruction until the
         * end of the program, based on the slowest dependency chain through
         * the children.
         */
        uint32_t delay;

        /* Number of cycles between this instruction being scheduled and when
         * its result can be consumed.
         */
        uint32_t latency;

        /* Which uniform from uniform_data[] this instruction read, or -1 if
         * not reading a uniform.
         */
        int uniform;
};
struct schedule_node_child {
        struct schedule_node *node;
        bool write_after_read;
};
/* When walking the instructions in reverse, we need to swap before/after in
 * add_dep().
 */
enum direction { F, R };
struct schedule_state {
        struct schedule_node *last_r[6];
        struct schedule_node *last_ra[32];
        struct schedule_node *last_rb[32];
        struct schedule_node *last_sf;
        struct schedule_node *last_vpm_read;
        struct schedule_node *last_tmu_write;
        struct schedule_node *last_tlb;
        struct schedule_node *last_vpm;
        enum direction dir;

        /* Estimated cycle when the current instruction would start. */
        uint32_t time;
};
static void
add_dep(struct schedule_state *state,
        struct schedule_node *before,
        struct schedule_node *after,
        bool write)
{
        bool write_after_read = !write && state->dir == R;

        if (!before || !after)
                return;

        assert(before != after);

        if (state->dir == R) {
                struct schedule_node *t = before;
                before = after;
                after = t;
        }

        for (int i = 0; i < before->child_count; i++) {
                if (before->children[i].node == after &&
                    (before->children[i].write_after_read == write_after_read)) {
                        return;
                }
        }

        if (before->child_array_size <= before->child_count) {
                before->child_array_size = MAX2(before->child_array_size * 2, 16);
                before->children = reralloc(before, before->children,
                                            struct schedule_node_child,
                                            before->child_array_size);
        }

        before->children[before->child_count].node = after;
        before->children[before->child_count].write_after_read =
                write_after_read;
        before->child_count++;
        after->parent_count++;
}
static void
add_read_dep(struct schedule_state *state,
             struct schedule_node *before,
             struct schedule_node *after)
{
        add_dep(state, before, after, false);
}
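/* Besides recording the ordering edge, add_write_dep() makes the new node
 * the tracked "last writer" of the resource (that's why it takes a pointer
 * to the schedule_state slot), so later readers and writers depend on it.
 */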
static void
add_write_dep(struct schedule_state *state,
              struct schedule_node **before,
              struct schedule_node *after)
{
        add_dep(state, *before, after, true);
        *before = after;
}
static bool
qpu_writes_r4(uint64_t inst)
{
        uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);

        switch (sig) {
        case QPU_SIG_COLOR_LOAD:
        case QPU_SIG_LOAD_TMU0:
        case QPU_SIG_LOAD_TMU1:
        case QPU_SIG_ALPHA_MASK_LOAD:
                return true;
        default:
                return false;
        }
}
static void
process_raddr_deps(struct schedule_state *state, struct schedule_node *n,
                   uint32_t raddr, bool is_a)
{
        switch (raddr) {
        case QPU_R_VARY:
                add_write_dep(state, &state->last_r[5], n);
                break;

        case QPU_R_VPM:
                add_write_dep(state, &state->last_vpm_read, n);
                break;

        case QPU_R_UNIF:
        case QPU_R_NOP:
        case QPU_R_ELEM_QPU:
        case QPU_R_XY_PIXEL_COORD:
        case QPU_R_MS_REV_FLAGS:
                break;

        default:
                if (raddr < 32) {
                        if (is_a)
                                add_read_dep(state, state->last_ra[raddr], n);
                        else
                                add_read_dep(state, state->last_rb[raddr], n);
                } else {
                        fprintf(stderr, "unknown raddr %d\n", raddr);
                        abort();
                }
                break;
        }
}
static bool
is_tmu_write(uint32_t waddr)
{
        switch (waddr) {
        case QPU_W_TMU0_S:
        case QPU_W_TMU0_T:
        case QPU_W_TMU0_R:
        case QPU_W_TMU0_B:
        case QPU_W_TMU1_S:
        case QPU_W_TMU1_T:
        case QPU_W_TMU1_R:
        case QPU_W_TMU1_B:
                return true;
        default:
                return false;
        }
}

static bool
reads_uniform(uint64_t inst)
{
        if (QPU_GET_FIELD(inst, QPU_SIG) == QPU_SIG_LOAD_IMM)
                return false;

        return (QPU_GET_FIELD(inst, QPU_RADDR_A) == QPU_R_UNIF ||
                (QPU_GET_FIELD(inst, QPU_RADDR_B) == QPU_R_UNIF &&
                 QPU_GET_FIELD(inst, QPU_SIG) != QPU_SIG_SMALL_IMM) ||
                is_tmu_write(QPU_GET_FIELD(inst, QPU_WADDR_ADD)) ||
                is_tmu_write(QPU_GET_FIELD(inst, QPU_WADDR_MUL)));
}
static void
process_mux_deps(struct schedule_state *state, struct schedule_node *n,
                 uint32_t mux)
{
        if (mux != QPU_MUX_A && mux != QPU_MUX_B)
                add_read_dep(state, state->last_r[mux], n);
}
static void
process_waddr_deps(struct schedule_state *state, struct schedule_node *n,
                   uint32_t waddr, bool is_add)
{
        uint64_t inst = n->inst->inst;
        bool is_a = is_add ^ ((inst & QPU_WS) != 0);

        if (waddr < 32) {
                if (is_a)
                        add_write_dep(state, &state->last_ra[waddr], n);
                else
                        add_write_dep(state, &state->last_rb[waddr], n);
        } else if (is_tmu_write(waddr)) {
                add_write_dep(state, &state->last_tmu_write, n);
        } else if (qpu_waddr_is_tlb(waddr) ||
                   waddr == QPU_W_MS_FLAGS) {
                add_write_dep(state, &state->last_tlb, n);
        } else {
                switch (waddr) {
                case QPU_W_ACC0:
                case QPU_W_ACC1:
                case QPU_W_ACC2:
                case QPU_W_ACC3:
                        add_write_dep(state, &state->last_r[waddr - QPU_W_ACC0],
                                      n);
                        break;

                case QPU_W_VPM:
                        add_write_dep(state, &state->last_vpm, n);
                        break;

                case QPU_W_VPMVCD_SETUP:
                        if (is_a)
                                add_write_dep(state, &state->last_vpm_read, n);
                        else
                                add_write_dep(state, &state->last_vpm, n);
                        break;

                case QPU_W_SFU_RECIP:
                case QPU_W_SFU_RECIPSQRT:
                case QPU_W_SFU_EXP:
                case QPU_W_SFU_LOG:
                        add_write_dep(state, &state->last_r[4], n);
                        break;

                case QPU_W_TLB_STENCIL_SETUP:
                        /* This isn't a TLB operation that does things like
                         * implicitly lock the scoreboard, but it does have to
                         * appear before TLB_Z, and each of the TLB_STENCILs
                         * have to schedule in the same order relative to each
                         * other.
                         */
                        add_write_dep(state, &state->last_tlb, n);
                        break;

                case QPU_W_MS_FLAGS:
                        add_write_dep(state, &state->last_tlb, n);
                        break;

                case QPU_W_NOP:
                        break;

                default:
                        fprintf(stderr, "Unknown waddr %d\n", waddr);
                        abort();
                }
        }
}
static void
process_cond_deps(struct schedule_state *state, struct schedule_node *n,
                  uint32_t cond)
{
        switch (cond) {
        case QPU_COND_NEVER:
        case QPU_COND_ALWAYS:
                break;
        default:
                add_read_dep(state, state->last_sf, n);
                break;
        }
}
/* Common code for dependencies that need to be tracked both forward and
 * backward.
 *
 * This is for things like "all reads of r4 have to happen between the r4
 * writes that surround them".
 */
static void
calculate_deps(struct schedule_state *state, struct schedule_node *n)
{
        uint64_t inst = n->inst->inst;
        uint32_t add_op = QPU_GET_FIELD(inst, QPU_OP_ADD);
        uint32_t mul_op = QPU_GET_FIELD(inst, QPU_OP_MUL);
        uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
        uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
        uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
        uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
        uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
        uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
        uint32_t mul_a = QPU_GET_FIELD(inst, QPU_MUL_A);
        uint32_t mul_b = QPU_GET_FIELD(inst, QPU_MUL_B);
        uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);

        if (sig != QPU_SIG_LOAD_IMM) {
                process_raddr_deps(state, n, raddr_a, true);
                if (sig != QPU_SIG_SMALL_IMM)
                        process_raddr_deps(state, n, raddr_b, false);
        }

        if (add_op != QPU_A_NOP) {
                process_mux_deps(state, n, add_a);
                process_mux_deps(state, n, add_b);
        }
        if (mul_op != QPU_M_NOP) {
                process_mux_deps(state, n, mul_a);
                process_mux_deps(state, n, mul_b);
        }

        process_waddr_deps(state, n, waddr_add, true);
        process_waddr_deps(state, n, waddr_mul, false);
        if (qpu_writes_r4(inst))
                add_write_dep(state, &state->last_r[4], n);

        switch (sig) {
        case QPU_SIG_SW_BREAKPOINT:
        case QPU_SIG_NONE:
        case QPU_SIG_THREAD_SWITCH:
        case QPU_SIG_LAST_THREAD_SWITCH:
        case QPU_SIG_SMALL_IMM:
        case QPU_SIG_LOAD_IMM:
                break;

        case QPU_SIG_LOAD_TMU0:
        case QPU_SIG_LOAD_TMU1:
                /* TMU loads are coming from a FIFO, so ordering is important.
                 */
                add_write_dep(state, &state->last_tmu_write, n);
                break;

        case QPU_SIG_COLOR_LOAD:
                add_read_dep(state, state->last_tlb, n);
                break;

        case QPU_SIG_PROG_END:
        case QPU_SIG_WAIT_FOR_SCOREBOARD:
        case QPU_SIG_SCOREBOARD_UNLOCK:
        case QPU_SIG_COVERAGE_LOAD:
        case QPU_SIG_COLOR_LOAD_END:
        case QPU_SIG_ALPHA_MASK_LOAD:
        default:
                fprintf(stderr, "Unhandled signal bits %d\n", sig);
                abort();
        }

        process_cond_deps(state, n, QPU_GET_FIELD(inst, QPU_COND_ADD));
        process_cond_deps(state, n, QPU_GET_FIELD(inst, QPU_COND_MUL));
        if ((inst & QPU_SF) != 0)
                add_write_dep(state, &state->last_sf, n);
}
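/* calculate_deps() is run once over the block in each direction: the
 * forward pass links each node to the last writer of what it touches, and
 * the reverse pass (which swaps before/after in add_dep()) picks up the
 * write-after-read ordering.
 */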
static void
calculate_forward_deps(struct vc4_compile *c, struct list_head *schedule_list)
{
        struct schedule_state state;

        memset(&state, 0, sizeof(state));
        state.dir = F;

        list_for_each_entry(struct schedule_node, node, schedule_list, link)
                calculate_deps(&state, node);
}
static void
calculate_reverse_deps(struct vc4_compile *c, struct list_head *schedule_list)
{
        struct list_head *node;
        struct schedule_state state;

        memset(&state, 0, sizeof(state));
        state.dir = R;

        for (node = schedule_list->prev; schedule_list != node; node = node->prev) {
                calculate_deps(&state, (struct schedule_node *)node);
        }
}
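/* Per-emission hazard tracking: which physical regfile A/B addresses the
 * previous instruction wrote (they can't be read back in the very next
 * instruction) and how recently the SFU was written, since its result only
 * lands in r4 two instructions later.
 */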
struct choose_scoreboard {
        int tick;
        int last_sfu_write_tick;
        uint32_t last_waddr_a, last_waddr_b;
};
static bool
reads_too_soon_after_write(struct choose_scoreboard *scoreboard, uint64_t inst)
{
        uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
        uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
        uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
        uint32_t src_muxes[] = {
                QPU_GET_FIELD(inst, QPU_ADD_A),
                QPU_GET_FIELD(inst, QPU_ADD_B),
                QPU_GET_FIELD(inst, QPU_MUL_A),
                QPU_GET_FIELD(inst, QPU_MUL_B),
        };

        for (int i = 0; i < ARRAY_SIZE(src_muxes); i++) {
                if ((src_muxes[i] == QPU_MUX_A &&
                     raddr_a < 32 &&
                     scoreboard->last_waddr_a == raddr_a) ||
                    (src_muxes[i] == QPU_MUX_B &&
                     sig != QPU_SIG_SMALL_IMM &&
                     raddr_b < 32 &&
                     scoreboard->last_waddr_b == raddr_b)) {
                        return true;
                }

                if (src_muxes[i] == QPU_MUX_R4) {
                        if (scoreboard->tick -
                            scoreboard->last_sfu_write_tick <= 2) {
                                return true;
                        }
                }
        }

        return false;
}
static bool
pixel_scoreboard_too_soon(struct choose_scoreboard *scoreboard, uint64_t inst)
{
        return (scoreboard->tick < 2 && qpu_inst_is_tlb(inst));
}
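/* Relative priorities, lowest (scheduled latest) to highest (earliest):
 * TLB access, then TMU result collection, then ordinary ALU work, then TMU
 * fetch setup.  This hides texture latency and pushes tile-buffer access as
 * late as possible for more parallelism between shaders.
 */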
static int
get_instruction_priority(uint64_t inst)
{
        uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
        uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
        uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
        uint32_t baseline_score;
        uint32_t next_score = 0;

        /* Schedule TLB operations as late as possible, to get more
         * parallelism between shaders.
         */
        if (qpu_inst_is_tlb(inst))
                return next_score;
        next_score++;

        /* Schedule texture read results collection late to hide latency. */
        if (sig == QPU_SIG_LOAD_TMU0 || sig == QPU_SIG_LOAD_TMU1)
                return next_score;
        next_score++;

        /* Default score for things that aren't otherwise special. */
        baseline_score = next_score;
        next_score++;

        /* Schedule texture read setup early to hide their latency better. */
        if (is_tmu_write(waddr_add) || is_tmu_write(waddr_mul))
                return next_score;
        next_score++;

        return baseline_score;
}
static struct schedule_node *
choose_instruction_to_schedule(struct choose_scoreboard *scoreboard,
                               struct list_head *schedule_list,
                               struct schedule_node *prev_inst)
{
        struct schedule_node *chosen = NULL;
        int chosen_prio = 0;

        list_for_each_entry(struct schedule_node, n, schedule_list, link) {
                uint64_t inst = n->inst->inst;

                /* "An instruction must not read from a location in physical
                 *  regfile A or B that was written to by the previous
                 *  instruction."
                 */
                if (reads_too_soon_after_write(scoreboard, inst))
                        continue;

                /* "A scoreboard wait must not occur in the first two
                 *  instructions of a fragment shader. This is either the
                 *  explicit Wait for Scoreboard signal or an implicit wait
                 *  with the first tile-buffer read or write instruction."
                 */
                if (pixel_scoreboard_too_soon(scoreboard, inst))
                        continue;

                /* If we're trying to pair with another instruction, check
                 * that they're compatible.
                 */
                if (prev_inst) {
                        if (prev_inst->uniform != -1 && n->uniform != -1)
                                continue;

                        inst = qpu_merge_inst(prev_inst->inst->inst, inst);
                        if (!inst)
                                continue;
                }

                int prio = get_instruction_priority(inst);

                /* Found a valid instruction.  If nothing better comes along,
                 * this one works.
                 */
                if (!chosen) {
                        chosen = n;
                        chosen_prio = prio;
                        continue;
                }

                /* Prefer the highest-priority instruction, then break ties
                 * by the longest remaining dependency chain.
                 */
                if (prio > chosen_prio) {
                        chosen = n;
                        chosen_prio = prio;
                } else if (prio < chosen_prio) {
                        continue;
                }

                if (n->delay > chosen->delay) {
                        chosen = n;
                        chosen_prio = prio;
                } else if (n->delay < chosen->delay) {
                        continue;
                }
        }

        return chosen;
}
static void
update_scoreboard_for_chosen(struct choose_scoreboard *scoreboard,
                             uint64_t inst)
{
        uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
        uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);

        if (!(inst & QPU_WS)) {
                scoreboard->last_waddr_a = waddr_add;
                scoreboard->last_waddr_b = waddr_mul;
        } else {
                scoreboard->last_waddr_b = waddr_add;
                scoreboard->last_waddr_a = waddr_mul;
        }

        if ((waddr_add >= QPU_W_SFU_RECIP && waddr_add <= QPU_W_SFU_LOG) ||
            (waddr_mul >= QPU_W_SFU_RECIP && waddr_mul <= QPU_W_SFU_LOG)) {
                scoreboard->last_sfu_write_tick = scoreboard->tick;
        }
}
static void
dump_state(struct list_head *schedule_list)
{
        list_for_each_entry(struct schedule_node, n, schedule_list, link) {
                fprintf(stderr, " t=%4d: ", n->unblocked_time);
                vc4_qpu_disasm(&n->inst->inst, 1);
                fprintf(stderr, "\n");

                for (int i = 0; i < n->child_count; i++) {
                        struct schedule_node *child = n->children[i].node;
                        if (!child)
                                continue;

                        fprintf(stderr, " - ");
                        vc4_qpu_disasm(&child->inst->inst, 1);
                        fprintf(stderr, " (%d parents, %c)\n",
                                child->parent_count,
                                n->children[i].write_after_read ? 'w' : 'r');
                }
        }
}
static uint32_t waddr_latency(uint32_t waddr, uint64_t after)
{
        if (waddr < 32)
                return 2;

        /* Apply some huge latency between texture fetch requests and getting
         * their results back.
         */
        if (waddr == QPU_W_TMU0_S) {
                if (QPU_GET_FIELD(after, QPU_SIG) == QPU_SIG_LOAD_TMU0)
                        return 100;
        }
        if (waddr == QPU_W_TMU1_S) {
                if (QPU_GET_FIELD(after, QPU_SIG) == QPU_SIG_LOAD_TMU1)
                        return 100;
        }

        switch (waddr) {
        case QPU_W_SFU_RECIP:
        case QPU_W_SFU_RECIPSQRT:
        case QPU_W_SFU_EXP:
        case QPU_W_SFU_LOG:
                return 3;
        default:
                return 1;
        }
}
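/* instruction_latency() below scores an edge by the slower of the two
 * writes the earlier instruction performs, so a TMU fetch followed by its
 * result load gets the huge latency above and everything else stays cheap.
 */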
static uint32_t
instruction_latency(struct schedule_node *before, struct schedule_node *after)
{
        uint64_t before_inst = before->inst->inst;
        uint64_t after_inst = after->inst->inst;

        return MAX2(waddr_latency(QPU_GET_FIELD(before_inst, QPU_WADDR_ADD),
                                  after_inst),
                    waddr_latency(QPU_GET_FIELD(before_inst, QPU_WADDR_MUL),
                                  after_inst));
}
/** Recursive computation of the delay member of a node. */
static void
compute_delay(struct schedule_node *n)
{
        if (!n->child_count) {
                n->delay = 1;
        } else {
                for (int i = 0; i < n->child_count; i++) {
                        if (!n->children[i].node->delay)
                                compute_delay(n->children[i].node);
                        n->delay = MAX2(n->delay,
                                        n->children[i].node->delay +
                                        instruction_latency(n, n->children[i].node));
                }
        }
}
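/* Releases a scheduled (or paired) node's outgoing DAG edges: children
 * whose last unscheduled parent this was get moved onto schedule_list, with
 * their unblocked_time pushed out by the edge's latency.  With war_only
 * set, only write-after-read edges are released, which is what lets an
 * instruction pair with the one reading its destination.
 */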
static void
mark_instruction_scheduled(struct list_head *schedule_list,
                           uint32_t time,
                           struct schedule_node *node,
                           bool war_only)
{
        if (!node)
                return;

        for (int i = node->child_count - 1; i >= 0; i--) {
                struct schedule_node *child =
                        node->children[i].node;

                if (!child)
                        continue;

                if (war_only && !node->children[i].write_after_read)
                        continue;

                /* If the requirement is only that the node not appear before
                 * the last read of its destination, then it can be scheduled
                 * immediately after (or paired with!) the thing reading the
                 * destination.
                 */
                uint32_t latency = 0;
                if (!war_only) {
                        latency = instruction_latency(node,
                                                      node->children[i].node);
                }

                child->unblocked_time = MAX2(child->unblocked_time,
                                             time + latency);
                child->parent_count--;
                if (child->parent_count == 0)
                        list_add(&child->link, schedule_list);

                node->children[i].node = NULL;
        }
}
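/* Main scheduling loop: each iteration picks the best ready node, tries to
 * pair a second compatible node into the same QPU instruction, emits the
 * result, and then unblocks the children of whatever got scheduled.
 */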
static uint32_t
schedule_instructions(struct vc4_compile *c, struct list_head *schedule_list)
{
        struct choose_scoreboard scoreboard;
        uint32_t time = 0;

        /* We reorder the uniforms as we schedule instructions, so save the
         * old data off and replace it.
         */
        uint32_t *uniform_data = c->uniform_data;
        enum quniform_contents *uniform_contents = c->uniform_contents;
        c->uniform_contents = ralloc_array(c, enum quniform_contents,
                                           c->num_uniforms);
        c->uniform_data = ralloc_array(c, uint32_t, c->num_uniforms);
        c->uniform_array_size = c->num_uniforms;
        uint32_t next_uniform = 0;

        memset(&scoreboard, 0, sizeof(scoreboard));
        scoreboard.last_waddr_a = ~0;
        scoreboard.last_waddr_b = ~0;
        scoreboard.last_sfu_write_tick = -10;

        if (debug) {
                fprintf(stderr, "initial deps:\n");
                dump_state(schedule_list);
                fprintf(stderr, "\n");
        }

        /* Remove non-DAG heads from the list. */
        list_for_each_entry_safe(struct schedule_node, n, schedule_list, link) {
                if (n->parent_count != 0)
                        list_del(&n->link);
        }
        while (!list_empty(schedule_list)) {
                struct schedule_node *chosen =
                        choose_instruction_to_schedule(&scoreboard,
                                                       schedule_list,
                                                       NULL);
                struct schedule_node *merge = NULL;

                /* If there are no valid instructions to schedule, drop a NOP
                 * in.
                 */
                uint64_t inst = chosen ? chosen->inst->inst : qpu_NOP();

                if (debug) {
                        fprintf(stderr, "t=%4d: current list:\n",
                                time);
                        dump_state(schedule_list);
                        fprintf(stderr, "t=%4d: chose: ", time);
                        vc4_qpu_disasm(&inst, 1);
                        fprintf(stderr, "\n");
                }

                /* Schedule this instruction onto the QPU list. Also try to
                 * find an instruction to pair with it.
                 */
                if (chosen) {
                        time = MAX2(chosen->unblocked_time, time);
                        list_del(&chosen->link);
                        mark_instruction_scheduled(schedule_list, time,
                                                   chosen, true);
                        if (chosen->uniform != -1) {
                                c->uniform_data[next_uniform] =
                                        uniform_data[chosen->uniform];
                                c->uniform_contents[next_uniform] =
                                        uniform_contents[chosen->uniform];
                                next_uniform++;
                        }

                        merge = choose_instruction_to_schedule(&scoreboard,
                                                               schedule_list,
                                                               chosen);
                        if (merge) {
                                time = MAX2(merge->unblocked_time, time);
                                list_del(&merge->link);
                                inst = qpu_merge_inst(inst, merge->inst->inst);

                                if (merge->uniform != -1) {
                                        c->uniform_data[next_uniform] =
                                                uniform_data[merge->uniform];
                                        c->uniform_contents[next_uniform] =
                                                uniform_contents[merge->uniform];
                                        next_uniform++;
                                }

                                if (debug) {
                                        fprintf(stderr, "t=%4d: merging: ",
                                                time);
                                        vc4_qpu_disasm(&merge->inst->inst, 1);
                                        fprintf(stderr, "\n");
                                        fprintf(stderr, " resulting in: ");
                                        vc4_qpu_disasm(&inst, 1);
                                        fprintf(stderr, "\n");
                                }
                        }
                }

                if (debug)
                        fprintf(stderr, "\n");

                qpu_serialize_one_inst(c, inst);

                update_scoreboard_for_chosen(&scoreboard, inst);

                /* Now that we've scheduled a new instruction, some of its
                 * children can be promoted to the list of instructions ready to
                 * be scheduled.  Update the children's unblocked time for this
                 * DAG edge as we do so.
                 */
                mark_instruction_scheduled(schedule_list, time, chosen, false);
                mark_instruction_scheduled(schedule_list, time, merge, false);

                scoreboard.tick++;
                time++;
        }

        assert(next_uniform == c->num_uniforms);

        return time;
}
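/* Entry point: wraps each queued instruction in a schedule_node, builds the
 * dependency DAG in both directions, computes per-node critical-path
 * delays, and then runs the scheduler over the whole list.
 */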
uint32_t
qpu_schedule_instructions(struct vc4_compile *c)
{
        void *mem_ctx = ralloc_context(NULL);
        struct list_head schedule_list;

        list_inithead(&schedule_list);

        if (debug) {
                fprintf(stderr, "Pre-schedule instructions\n");
                list_for_each_entry(struct queued_qpu_inst, q,
                                    &c->qpu_inst_list, link) {
                        vc4_qpu_disasm(&q->inst, 1);
                        fprintf(stderr, "\n");
                }
                fprintf(stderr, "\n");
        }

        /* Wrap each instruction in a scheduler structure. */
        uint32_t next_uniform = 0;
        while (!list_empty(&c->qpu_inst_list)) {
                struct queued_qpu_inst *inst =
                        (struct queued_qpu_inst *)c->qpu_inst_list.next;
                struct schedule_node *n = rzalloc(mem_ctx, struct schedule_node);

                n->inst = inst;

                if (reads_uniform(inst->inst)) {
                        n->uniform = next_uniform++;
                } else {
                        n->uniform = -1;
                }

                list_del(&inst->link);
                list_addtail(&n->link, &schedule_list);
        }
        assert(next_uniform == c->num_uniforms);

        calculate_forward_deps(c, &schedule_list);
        calculate_reverse_deps(c, &schedule_list);

        list_for_each_entry(struct schedule_node, n, &schedule_list, link) {
                compute_delay(n);
        }

        uint32_t cycles = schedule_instructions(c, &schedule_list);

        if (debug) {
                fprintf(stderr, "Post-schedule instructions\n");
                vc4_qpu_disasm(c->qpu_insts, c->qpu_inst_count);
                fprintf(stderr, "\n");
        }

        ralloc_free(mem_ctx);

        return cycles;
}