/*
 * Copyright © 2010 Intel Corporation
 * Copyright © 2014-2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
/**
 * @file vc4_qir_schedule.c
 *
 * The basic model of the list scheduler is to take a basic block, compute a
 * DAG of the dependencies from the bottom up, and make a list of the DAG
 * heads.  Heuristically pick a DAG head and schedule (remove) it, then put
 * all the parents that are now DAG heads into the list of things to
 * schedule.
 *
 * The goal of scheduling here, before register allocation and conversion to
 * QPU instructions, is to reduce register pressure by reordering instructions
 * to consume values when possible.
 */

#include "vc4_qir.h"
#include "util/dag.h"

static bool debug;
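/* A rough sketch of the per-block flow, matching the functions below (added
 * here as orientation, not part of the original comments):
 *
 *     qir_schedule_instructions_block():
 *         wrap each qinst in a schedule_node and a DAG node
 *         calculate_forward_deps() / calculate_reverse_deps()
 *         dag_traverse_bottom_up(..., compute_delay, ...)
 *         schedule_instructions(): repeatedly choose_instruction() among the
 *         DAG heads and prepend it to the block, rebuilding it bottom-up.
 */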
struct schedule_node {
        struct list_head link;
        struct qinst *inst;

        struct dag_node dag;

        /* Length of the longest (latency) chain from a DAG head to this
         * node in the DAG.
         */
        uint32_t delay;

        /* Longest time + latency_between(parent, this) of any parent of this
         * node.
         */
        uint32_t unblocked_time;
};
struct schedule_state {
        struct dag *dag;

        uint32_t *temp_writes;

        BITSET_WORD *temp_live;

        uint32_t time;
};
/* When walking the instructions in reverse, we need to swap before/after in
 * add_dep().
 */
enum direction { F, R };
/**
 * Marks a dependency between two instructions, that \p after must appear
 * after \p before.
 *
 * Our dependencies are tracked as a DAG.  Since we're scheduling bottom-up,
 * the latest instructions with nothing left to schedule are the DAG heads,
 * and their inputs are their children.
 */
static void
add_dep(enum direction dir,
        struct schedule_node *before,
        struct schedule_node *after)
{
        if (!before || !after)
                return;

        assert(before != after);

        if (dir == R) {
                struct schedule_node *t = before;
                before = after;
                after = t;
        }

        dag_add_edge(&after->dag, &before->dag, NULL);
}
static void
add_write_dep(enum direction dir,
              struct schedule_node **before,
              struct schedule_node *after)
{
        add_dep(dir, *before, after);
        *before = after;
}
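/* Illustrative note (added commentary): add_dep() records a single ordering
 * edge, while add_write_dep() additionally replaces the tracked "last writer"
 * pointer.  So for a sequence like "write X; read X; write X", each read uses
 * add_dep() against the current *before, and each write uses add_write_dep()
 * so that later accesses serialize against the most recent write.
 */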
struct schedule_setup_state {
        struct schedule_node **last_temp_write;
        struct schedule_node *last_sf;
        struct schedule_node *last_vary_read;
        struct schedule_node *last_vpm_read;
        struct schedule_node *last_vpm_write;
        struct schedule_node *last_tex_coord;
        struct schedule_node *last_tex_result;
        struct schedule_node *last_tlb;
        struct schedule_node *last_uniforms_reset;
        enum direction dir;

        /**
         * Texture FIFO tracking.  This is done top-to-bottom, and is used to
         * track the QOP_TEX_RESULTs and add dependencies on previous ones
         * when trying to submit texture coords with TFREQ full or new texture
         * fetches with TXRCV full.
         */
        struct {
                struct schedule_node *node;
                int coords;
        } tex_fifo[8];
        int tex_fifo_pos;

        int tfreq_count; /**< Number of texture coords outstanding. */
        int tfrcv_count; /**< Number of texture results outstanding. */
};
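/* Worked example of the FIFO accounting (added commentary, based on the VC4
 * spec text quoted in calculate_forward_deps() below): a lookup that writes
 * (s, t) consumes two TFREQ slots and, once its QOP_TEX_RESULT is queued, one
 * TFRCV slot.  With 8 TFREQ slots and 4 TFRCV slots per QPU, four such
 * lookups fill both FIFOs; a threaded fragment shader may only use half of
 * each, hence the (c->fs_threaded ? 4 : 8) and (c->fs_threaded ? 2 : 4)
 * limits below.
 */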
static void
block_until_tex_result(struct schedule_setup_state *state,
                       struct schedule_node *n)
{
        add_dep(state->dir, state->tex_fifo[0].node, n);

        state->tfreq_count -= state->tex_fifo[0].coords;
        state->tfrcv_count--;

        /* Shift the remaining FIFO entries down a slot. */
        memmove(&state->tex_fifo[0],
                &state->tex_fifo[1],
                state->tex_fifo_pos * sizeof(state->tex_fifo[0]));
        state->tex_fifo_pos--;
}
/**
 * Common code for dependencies that need to be tracked both forward and
 * backward.
 *
 * This is for things like "all VPM reads have to happen in order."
 */
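/* Illustrative example (added commentary): in a sequence like
 *
 *     mov t0, ...
 *     add t1, t0, t0
 *     mov t0, ...
 *
 * the read of t0 depends on the first write through last_temp_write[]
 * (read-after-write), and the second write serializes against the first
 * (write-after-write).  The write-after-read ordering between the add and
 * the second mov is what the bottom-to-top pass (dir == R) catches.
 */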
static void
calculate_deps(struct schedule_setup_state *state, struct schedule_node *n)
{
        struct qinst *inst = n->inst;
        enum direction dir = state->dir;
        /* Add deps for temp registers and varyings accesses.  Note that we
         * ignore uniforms accesses, because qir_reorder_uniforms() happens
         * after this.
         */
        for (int i = 0; i < qir_get_nsrc(inst); i++) {
                switch (inst->src[i].file) {
                case QFILE_TEMP:
                        add_dep(dir,
                                state->last_temp_write[inst->src[i].index], n);
                        break;

                case QFILE_VARY:
                        add_write_dep(dir, &state->last_vary_read, n);
                        break;

                case QFILE_VPM:
                        add_write_dep(dir, &state->last_vpm_read, n);
                        break;

                default:
                        break;
                }
        }
        switch (inst->op) {
        case QOP_VARY_ADD_C:
                add_dep(dir, state->last_vary_read, n);
                break;

        case QOP_TEX_RESULT:
                /* Results have to be fetched in order. */
                add_write_dep(dir, &state->last_tex_result, n);
                break;

        case QOP_THRSW:
                /* After a new THRSW, one must collect all texture samples
                 * queued since the previous THRSW/program start.  For now, we
                 * have one THRSW in between each texture setup and its
                 * results collection as our input, and we just make sure
                 * that ordering is maintained.
                 */
                add_write_dep(dir, &state->last_tex_coord, n);
                add_write_dep(dir, &state->last_tex_result, n);

                /* Accumulators and flags are lost across thread switches. */
                add_write_dep(dir, &state->last_sf, n);

                /* Setup, like the varyings, will need to be drained before we
                 * thread switch.
                 */
                add_write_dep(dir, &state->last_vary_read, n);

                /* The TLB-locking operations have to stay after the last
                 * thread switch.
                 */
                add_write_dep(dir, &state->last_tlb, n);
                break;

        case QOP_TLB_COLOR_READ:
        case QOP_MS_MASK:
                add_write_dep(dir, &state->last_tlb, n);
                break;

        default:
                break;
        }
        switch (inst->dst.file) {
        case QFILE_VPM:
                add_write_dep(dir, &state->last_vpm_write, n);
                break;

        case QFILE_TEMP:
                add_write_dep(dir,
                              &state->last_temp_write[inst->dst.index], n);
                break;

        case QFILE_TLB_COLOR_WRITE:
        case QFILE_TLB_COLOR_WRITE_MS:
        case QFILE_TLB_Z_WRITE:
        case QFILE_TLB_STENCIL_SETUP:
                add_write_dep(dir, &state->last_tlb, n);
                break;

        case QFILE_TEX_S_DIRECT:
        case QFILE_TEX_S:
        case QFILE_TEX_T:
        case QFILE_TEX_R:
        case QFILE_TEX_B:
                /* Texturing setup gets scheduled in order, because
                 * the uniforms referenced by them have to land in a
                 * specific order.
                 */
                add_write_dep(dir, &state->last_tex_coord, n);
                break;

        default:
                break;
        }
        if (qir_depends_on_flags(inst))
                add_dep(dir, state->last_sf, n);

        if (inst->sf)
                add_write_dep(dir, &state->last_sf, n);
}
static void
calculate_forward_deps(struct vc4_compile *c, void *mem_ctx,
                       struct list_head *schedule_list)
{
        struct schedule_setup_state state;

        memset(&state, 0, sizeof(state));
        state.last_temp_write = rzalloc_array(mem_ctx, struct schedule_node *,
                                              c->num_temps);
        state.dir = F;

        list_for_each_entry(struct schedule_node, n, schedule_list, link) {
                struct qinst *inst = n->inst;

                calculate_deps(&state, n);

                for (int i = 0; i < qir_get_nsrc(inst); i++) {
                        switch (inst->src[i].file) {
                        case QFILE_UNIF:
                                add_dep(state.dir,
                                        state.last_uniforms_reset, n);
                                break;
                        default:
                                break;
                        }
                }
                switch (inst->dst.file) {
                case QFILE_TEX_S_DIRECT:
                case QFILE_TEX_S:
                case QFILE_TEX_T:
                case QFILE_TEX_R:
                case QFILE_TEX_B:
                        /* From the VC4 spec:
                         *
                         *     "The TFREQ input FIFO holds two full lots of s,
                         *      t, r, b data, plus associated setup data, per
                         *      QPU, that is, there are eight data slots. For
                         *      each texture request, slots are only consumed
                         *      for the components of s, t, r, and b actually
                         *      written. Thus the FIFO can hold four requests
                         *      of just (s, t) data, or eight requests of just
                         *      s data (for direct addressed data lookups).
                         *
                         *      Note that there is one FIFO per QPU, and the
                         *      FIFO has no concept of threads - that is,
                         *      multi-threaded shaders must be careful to use
                         *      only 1/2 the FIFO depth before reading
                         *      back. Multi-threaded programs must also
                         *      therefore always thread switch on texture
                         *      fetch as the other thread may have data
                         *      waiting in the FIFO."
                         *
                         * If the texture coordinate fifo is full, block this
                         * on the last QOP_TEX_RESULT.
                         */
                        if (state.tfreq_count == (c->fs_threaded ? 4 : 8)) {
                                block_until_tex_result(&state, n);
                        }

                        /* From the VC4 spec:
                         *
                         *     "Since the maximum number of texture requests
                         *      in the input (TFREQ) FIFO is four lots of (s,
                         *      t) data, the output (TFRCV) FIFO is sized to
                         *      holds four lots of max-size color data per
                         *      QPU. For non-float color, reads are packed
                         *      RGBA8888 data (one read per pixel). For 16-bit
                         *      float color, two reads are necessary per
                         *      pixel, with reads packed as RG1616 then
                         *      BA1616. So per QPU there are eight color slots
                         *      in the TFRCV FIFO."
                         *
                         * If the texture result fifo is full, block adding
                         * any more to it until the last QOP_TEX_RESULT.
                         */
                        if (inst->dst.file == QFILE_TEX_S ||
                            inst->dst.file == QFILE_TEX_S_DIRECT) {
                                if (state.tfrcv_count ==
                                    (c->fs_threaded ? 2 : 4))
                                        block_until_tex_result(&state, n);
                                state.tfrcv_count++;
                        }

                        state.tex_fifo[state.tex_fifo_pos].coords++;
                        state.tfreq_count++;
                        break;

                default:
                        break;
                }
                switch (inst->op) {
                case QOP_TEX_RESULT:
                        /* Results have to be fetched after the
                         * coordinate setup.  Note that we're assuming
                         * here that our input shader has the texture
                         * coord setup and result fetch in order,
                         * which is true initially but not of our
                         * instruction stream after this pass.
                         */
                        add_dep(state.dir, state.last_tex_coord, n);

                        state.tex_fifo[state.tex_fifo_pos].node = n;

                        state.tex_fifo_pos++;
                        memset(&state.tex_fifo[state.tex_fifo_pos], 0,
                               sizeof(state.tex_fifo[0]));
                        break;
                case QOP_UNIFORMS_RESET:
                        add_write_dep(state.dir,
                                      &state.last_uniforms_reset, n);
                        break;

                default:
                        break;
                }
        }
}
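/* Note (added commentary): the pass below walks the block bottom-to-top with
 * dir == R, so the shared calculate_deps() code records each constraint in
 * the mirror-image direction (see the enum direction comment above).
 */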
static void
calculate_reverse_deps(struct vc4_compile *c, void *mem_ctx,
                       struct list_head *schedule_list)
{
        struct schedule_setup_state state;

        memset(&state, 0, sizeof(state));
        state.last_temp_write = rzalloc_array(mem_ctx, struct schedule_node *,
                                              c->num_temps);
        state.dir = R;

        list_for_each_entry_rev(struct schedule_node, n, schedule_list,
                                link) {
                calculate_deps(&state, n);
        }
}
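/* Sketch of the cost convention (added commentary): a negative return value
 * means scheduling this instruction reduces register pressure (it is the
 * only write of its destination temp, so the temp's live range closes here
 * going bottom-up), while each distinct source temp that is not yet live
 * costs +1, since consuming it makes it live.
 */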
static int
get_register_pressure_cost(struct schedule_state *state, struct qinst *inst)
{
        int cost = 0;

        if (inst->dst.file == QFILE_TEMP &&
            state->temp_writes[inst->dst.index] == 1)
                cost--;

        for (int i = 0; i < qir_get_nsrc(inst); i++) {
                if (inst->src[i].file != QFILE_TEMP ||
                    BITSET_TEST(state->temp_live, inst->src[i].index)) {
                        continue;
                }

                bool already_counted = false;
                for (int j = 0; j < i; j++) {
                        if (inst->src[i].file == inst->src[j].file &&
                            inst->src[i].index == inst->src[j].index) {
                                already_counted = true;
                                break;
                        }
                }
                if (!already_counted)
                        cost++;
        }

        return cost;
}
static bool
locks_scoreboard(struct qinst *inst)
{
        if (inst->op == QOP_TLB_COLOR_READ)
                return true;

        switch (inst->dst.file) {
        case QFILE_TLB_Z_WRITE:
        case QFILE_TLB_COLOR_WRITE:
        case QFILE_TLB_COLOR_WRITE_MS:
                return true;
        default:
                return false;
        }
}
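/* Summary of the heuristic priority order below (added commentary):
 * QOP_BRANCH is always taken first (it must stay last in the block), then
 * scoreboard-locking instructions, then whichever node blocks the least,
 * then lower register-pressure cost, then the deepest delay chain.
 */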
static struct schedule_node *
choose_instruction(struct schedule_state *state)
{
        struct schedule_node *chosen = NULL;

        list_for_each_entry(struct schedule_node, n, &state->dag->heads,
                            dag.link) {
                /* The branches aren't being tracked as dependencies.  Make
                 * sure that they stay scheduled as the last instruction of
                 * the block, which is to say the first one we choose to
                 * schedule.
                 */
                if (n->inst->op == QOP_BRANCH)
                        return n;

                if (!chosen) {
                        chosen = n;
                        continue;
                }
                /* Prefer scheduling things that lock the scoreboard, so that
                 * they appear late in the program and we get more parallelism
                 * between shaders on multiple QPUs hitting the same fragment.
                 */
                if (locks_scoreboard(n->inst) &&
                    !locks_scoreboard(chosen->inst)) {
                        chosen = n;
                        continue;
                } else if (!locks_scoreboard(n->inst) &&
                           locks_scoreboard(chosen->inst)) {
                        continue;
                }

                /* If we would block on the previously chosen node, but would
                 * block less on this one, then prefer it.
                 */
                if (chosen->unblocked_time > state->time &&
                    n->unblocked_time < chosen->unblocked_time) {
                        chosen = n;
                        continue;
                } else if (n->unblocked_time > state->time &&
                           n->unblocked_time > chosen->unblocked_time) {
                        continue;
                }
                /* If we can definitely reduce register pressure, do so
                 * immediately.
                 */
                int register_pressure_cost =
                        get_register_pressure_cost(state, n->inst);
                int chosen_register_pressure_cost =
                        get_register_pressure_cost(state, chosen->inst);

                if (register_pressure_cost < chosen_register_pressure_cost) {
                        chosen = n;
                        continue;
                } else if (register_pressure_cost >
                           chosen_register_pressure_cost) {
                        continue;
                }

                /* Otherwise, prefer instructions with the deepest chain to
                 * the end of the program.  This avoids the problem of
                 * "everything generates a temp, nothing finishes freeing one,
                 * guess I'll just keep emitting varying mul/adds".
                 */
                if (n->delay > chosen->delay) {
                        chosen = n;
                        continue;
                } else if (n->delay < chosen->delay) {
                        continue;
                }
        }

        return chosen;
}
static void
dump_state(struct vc4_compile *c, struct schedule_state *state)
{
        uint32_t i = 0;

        list_for_each_entry(struct schedule_node, n, &state->dag->heads,
                            dag.link) {
                fprintf(stderr, "%3d: ", i++);
                qir_dump_inst(c, n->inst);
                fprintf(stderr, " (%d cost)\n",
                        get_register_pressure_cost(state, n->inst));

                util_dynarray_foreach(&n->dag.edges, struct dag_edge, edge) {
                        struct schedule_node *child =
                                (struct schedule_node *)edge->child;
                        fprintf(stderr, " - ");
                        qir_dump_inst(c, child->inst);
                        fprintf(stderr, " (%d parents)\n",
                                child->dag.parent_count);
                }
        }
}
/* Estimate of how many instructions we should schedule between operations.
 *
 * These aren't in real cycle counts, because we're just estimating cycle
 * times anyway.  QIR instructions will get paired up when turned into QPU
 * instructions, or extra NOP delays will have to be added due to register
 * allocation choices.
 */
static uint32_t
latency_between(struct schedule_node *before, struct schedule_node *after)
{
        if ((before->inst->dst.file == QFILE_TEX_S ||
             before->inst->dst.file == QFILE_TEX_S_DIRECT) &&
            after->inst->op == QOP_TEX_RESULT)
                return 100;

        switch (before->inst->op) {
        case QOP_RCP:
        case QOP_RSQ:
        case QOP_EXP2:
        case QOP_LOG2:
                for (int i = 0; i < qir_get_nsrc(after->inst); i++) {
                        if (after->inst->src[i].file ==
                            before->inst->dst.file &&
                            after->inst->src[i].index ==
                            before->inst->dst.index) {
                                /* There are two QPU delay slots before we can
                                 * read a math result, which could be up to 4
                                 * QIR instructions if they packed well.
                                 */
                                return 4;
                        }
                }
                break;

        default:
                break;
        }

        return 1;
}
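/* Note (added commentary): these latencies feed two places: compute_delay()
 * accumulates them along DAG edges into each node's delay, and
 * schedule_instructions() adds them to state->time to compute each child's
 * unblocked_time, which choose_instruction() uses to keep long-latency
 * producer/consumer pairs apart.
 */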
/** Recursive computation of the delay member of a node. */
static void
compute_delay(struct dag_node *node, void *state)
{
        struct schedule_node *n = (struct schedule_node *)node;

        /* The color read needs to be scheduled late, to avoid locking the
         * scoreboard early.  This is our best tool for encouraging that.  The
         * other scoreboard locking ops will have this happen by default,
         * since they are generally the DAG heads or close to them.
         */
        if (n->inst->op == QOP_TLB_COLOR_READ)
                n->delay = 1000;
        else
                n->delay = 1;

        util_dynarray_foreach(&n->dag.edges, struct dag_edge, edge) {
                struct schedule_node *child =
                        (struct schedule_node *)edge->child;

                n->delay = MAX2(n->delay, (child->delay +
                                           latency_between(child, n)));
        }
}
static void
schedule_instructions(struct vc4_compile *c,
                      struct qblock *block, struct schedule_state *state)
{
        if (debug) {
                fprintf(stderr, "initial deps:\n");
                dump_state(c, state);
        }

        while (!list_empty(&state->dag->heads)) {
                struct schedule_node *chosen = choose_instruction(state);
                struct qinst *inst = chosen->inst;

                if (debug) {
                        fprintf(stderr, "current list:\n");
                        dump_state(c, state);
                        fprintf(stderr, "chose: ");
                        qir_dump_inst(c, inst);
                        fprintf(stderr, " (%d cost)\n",
                                get_register_pressure_cost(state, inst));
                }
                state->time = MAX2(state->time, chosen->unblocked_time);

                /* Schedule this instruction back onto the QIR list.  Since
                 * we're scheduling bottom-up, list_add() prepends it, so the
                 * block ends up in forward order.
                 */
                list_add(&inst->link, &block->instructions);

                /* Now that we've scheduled a new instruction, some of its
                 * children can be promoted to the list of instructions ready
                 * to be scheduled.  Update the children's unblocked time for
                 * this DAG edge as we do so.
                 */
                util_dynarray_foreach(&chosen->dag.edges,
                                      struct dag_edge, edge) {
                        struct schedule_node *child =
                                (struct schedule_node *)edge->child;

                        child->unblocked_time = MAX2(child->unblocked_time,
                                                     state->time +
                                                     latency_between(child,
                                                                     chosen));
                }
                dag_prune_head(state->dag, &chosen->dag);

                state->time++;
                /* Update our tracking of register pressure. */
                for (int i = 0; i < qir_get_nsrc(inst); i++) {
                        if (inst->src[i].file == QFILE_TEMP)
                                BITSET_SET(state->temp_live,
                                           inst->src[i].index);
                }
                if (inst->dst.file == QFILE_TEMP) {
                        state->temp_writes[inst->dst.index]--;
                        if (state->temp_writes[inst->dst.index] == 0)
                                BITSET_CLEAR(state->temp_live,
                                             inst->dst.index);
                }
        }
}
static void
qir_schedule_instructions_block(struct vc4_compile *c,
                                struct qblock *block)
{
        struct schedule_state *state = rzalloc(NULL, struct schedule_state);

        state->temp_writes = rzalloc_array(state, uint32_t, c->num_temps);
        state->temp_live = rzalloc_array(state, BITSET_WORD,
                                         BITSET_WORDS(c->num_temps));
        state->dag = dag_create(state);

        struct list_head setup_list;
        list_inithead(&setup_list);

        /* Wrap each instruction in a scheduler structure. */
        qir_for_each_inst_safe(inst, block) {
                struct schedule_node *n = rzalloc(state,
                                                  struct schedule_node);

                n->inst = inst;
                list_del(&inst->link);
                list_addtail(&n->link, &setup_list);
                dag_init_node(state->dag, &n->dag);

                if (inst->dst.file == QFILE_TEMP)
                        state->temp_writes[inst->dst.index]++;
        }

        /* Dependencies tracked top-to-bottom. */
        calculate_forward_deps(c, state, &setup_list);
        /* Dependencies tracked bottom-to-top. */
        calculate_reverse_deps(c, state, &setup_list);

        dag_traverse_bottom_up(state->dag, compute_delay, NULL);

        schedule_instructions(c, block, state);

        ralloc_free(state);
}
void
qir_schedule_instructions(struct vc4_compile *c)
{
        if (debug) {
                fprintf(stderr, "Pre-schedule instructions\n");
                qir_dump(c);
        }

        qir_for_each_block(block, c)
                qir_schedule_instructions_block(c, block);

        if (debug) {
                fprintf(stderr, "Post-schedule instructions\n");
                qir_dump(c);
        }
}