/*
 * Copyright © 2010 Intel Corporation
 * Copyright © 2014-2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
/**
 * @file vc4_qir_schedule.c
 *
 * The basic model of the list scheduler is to take a basic block, compute a
 * DAG of the dependencies from the bottom up, and make a list of the DAG
 * heads.  Heuristically pick a DAG head and schedule (remove) it, then put
 * all the parents that are now DAG heads into the list of things to
 * schedule.
 *
 * The goal of scheduling here, before register allocation and conversion to
 * QPU instructions, is to reduce register pressure by reordering instructions
 * to consume values when possible.
 */

#include "vc4_qir.h"

static bool debug;
struct schedule_node {
        struct list_head link;
        struct qinst *inst;

        struct schedule_node **children;
        uint32_t child_count;
        uint32_t child_array_size;
        uint32_t parent_count;

        /* Length of the longest (latency) chain from a DAG head to this
         * node.
         */
        uint32_t delay;

        /* Longest time + latency_between(parent, this) of any parent of this
         * node.
         */
        uint32_t unblocked_time;
};
struct schedule_state {
        /* List of struct schedule_node *.  This starts out with all
         * instructions, and after dependency updates it's trimmed to be just
         * the DAG heads.
         */
        struct list_head worklist;

        uint32_t time;

        uint32_t *temp_writes;

        BITSET_WORD *temp_live;
};
/* When walking the instructions in reverse, we need to swap before/after in
 * add_dep().  F is the forward (top-to-bottom) walk, R the reverse one.
 */
enum direction { F, R };
/**
 * Marks a dependency between two instructions, that \p after must appear
 * after \p before.
 *
 * Our dependencies are tracked as a DAG.  Since we're scheduling bottom-up,
 * the latest instructions with nothing left to schedule are the DAG heads,
 * and their inputs are their children.
 */
static void
add_dep(enum direction dir,
        struct schedule_node *before,
        struct schedule_node *after)
{
        if (!before || !after)
                return;

        assert(before != after);

        if (dir == R) {
                struct schedule_node *t = before;
                before = after;
                after = t;
        }

        /* Don't record the same edge twice. */
        for (int i = 0; i < after->child_count; i++) {
                if (after->children[i] == before)
                        return;
        }

        if (after->child_array_size <= after->child_count) {
                after->child_array_size = MAX2(after->child_array_size * 2, 16);
                after->children = reralloc(after, after->children,
                                           struct schedule_node *,
                                           after->child_array_size);
        }

        after->children[after->child_count] = before;
        after->child_count++;
        before->parent_count++;
}
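
/**
 * Orders \p after against the last node recorded in *before, then makes
 * \p after the new "last" node for that resource.
 */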
static void
add_write_dep(enum direction dir,
              struct schedule_node **before,
              struct schedule_node *after)
{
        add_dep(dir, *before, after);
        *before = after;
}
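
/* Per-direction bookkeeping while building the DAG: each "last_*" field
 * remembers the most recent node that touched the corresponding hardware
 * resource, so later accesses can be ordered against it.
 */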
struct schedule_setup_state {
        struct schedule_node **last_temp_write;
        struct schedule_node *last_sf;
        struct schedule_node *last_vary_read;
        struct schedule_node *last_vpm_read;
        struct schedule_node *last_vpm_write;
        struct schedule_node *last_tex_coord;
        struct schedule_node *last_tex_result;
        struct schedule_node *last_tlb;
        struct schedule_node *last_uniforms_reset;
        enum direction dir;

        /**
         * Texture FIFO tracking.  This is done top-to-bottom, and is used to
         * track the QOP_TEX_RESULTs and add dependencies on previous ones
         * when trying to submit texture coords with TFREQ full or new texture
         * fetches with TFRCV full.
         */
        struct {
                struct schedule_node *node;
                int coords;
        } tex_fifo[8];
        int tfreq_count; /**< Number of texture coords outstanding. */
        int tfrcv_count; /**< Number of texture results outstanding. */
        int tex_fifo_pos;
};
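
/* Pops the oldest outstanding texture fetch off the FIFO tracking and makes
 * \p n depend on its QOP_TEX_RESULT, so that \p n can't be scheduled until
 * that result has been collected and FIFO space has been freed.
 */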
static void
block_until_tex_result(struct schedule_setup_state *state, struct schedule_node *n)
{
        add_dep(state->dir, state->tex_fifo[0].node, n);

        state->tfreq_count -= state->tex_fifo[0].coords;
        state->tfrcv_count--;

        memmove(&state->tex_fifo[0],
                &state->tex_fifo[1],
                state->tex_fifo_pos * sizeof(state->tex_fifo[0]));
        state->tex_fifo_pos--;
}
/**
 * Common code for dependencies that need to be tracked both forward and
 * backward.
 *
 * This is for things like "all VPM reads have to happen in order."
 */
static void
calculate_deps(struct schedule_setup_state *state, struct schedule_node *n)
{
        struct qinst *inst = n->inst;
        enum direction dir = state->dir;

        /* Add deps for temp registers and varyings accesses.  Note that we
         * ignore uniforms accesses, because qir_reorder_uniforms() happens
         * after this.
         */
        for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                switch (inst->src[i].file) {
                case QFILE_TEMP:
                        add_dep(dir,
                                state->last_temp_write[inst->src[i].index], n);
                        break;

                case QFILE_VARY:
                        add_write_dep(dir, &state->last_vary_read, n);
                        break;

                case QFILE_VPM:
                        add_write_dep(dir, &state->last_vpm_read, n);
                        break;

                default:
                        break;
                }
        }

        switch (inst->op) {
        case QOP_VARY_ADD_C:
                add_dep(dir, state->last_vary_read, n);
                break;

        case QOP_TEX_S:
        case QOP_TEX_T:
        case QOP_TEX_R:
        case QOP_TEX_B:
        case QOP_TEX_DIRECT:
                /* Texturing setup gets scheduled in order, because
                 * the uniforms referenced by them have to land in a
                 * specific order.
                 */
                add_write_dep(dir, &state->last_tex_coord, n);
                break;

        case QOP_TEX_RESULT:
                /* Results have to be fetched in order. */
                add_write_dep(dir, &state->last_tex_result, n);
                break;

        case QOP_THRSW:
                /* After a new THRSW, one must collect all texture samples
                 * queued since the previous THRSW/program start.  For now, we
                 * have one THRSW in between each texture setup and its
                 * results collection as our input, and we just make sure that
                 * that ordering is maintained.
                 */
                add_write_dep(dir, &state->last_tex_coord, n);
                add_write_dep(dir, &state->last_tex_result, n);

                /* accumulators and flags are lost across thread switches. */
                add_write_dep(dir, &state->last_sf, n);

                /* Setup, like the varyings, will need to be drained before we
                 * thread switch.
                 */
                add_write_dep(dir, &state->last_vary_read, n);

                /* The TLB-locking operations have to stay after the last
                 * thread switch.
                 */
                add_write_dep(dir, &state->last_tlb, n);
                break;

        case QOP_TLB_COLOR_READ:
                add_write_dep(dir, &state->last_tlb, n);
                break;

        default:
                break;
        }

        switch (inst->dst.file) {
        case QFILE_VPM:
                add_write_dep(dir, &state->last_vpm_write, n);
                break;

        case QFILE_TEMP:
                add_write_dep(dir,
                              &state->last_temp_write[inst->dst.index], n);
                break;

        case QFILE_TLB_COLOR_WRITE:
        case QFILE_TLB_COLOR_WRITE_MS:
        case QFILE_TLB_Z_WRITE:
        case QFILE_TLB_STENCIL_SETUP:
                add_write_dep(dir, &state->last_tlb, n);
                break;

        default:
                break;
        }

        if (qir_depends_on_flags(inst))
                add_dep(dir, state->last_sf, n);

        if (inst->sf)
                add_write_dep(dir, &state->last_sf, n);
}
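
/* Walks the block top-to-bottom (dir == F), adding the dependencies that only
 * make sense in program order, such as the texture FIFO capacity limits.
 */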
static void
calculate_forward_deps(struct vc4_compile *c, void *mem_ctx,
                       struct list_head *schedule_list)
{
        struct schedule_setup_state state;

        memset(&state, 0, sizeof(state));
        state.last_temp_write = rzalloc_array(mem_ctx, struct schedule_node *,
                                              c->num_temps);
        state.dir = F;

        list_for_each_entry(struct schedule_node, n, schedule_list, link) {
                struct qinst *inst = n->inst;

                calculate_deps(&state, n);

                for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                        switch (inst->src[i].file) {
                        case QFILE_UNIF:
                                add_dep(state.dir, state.last_uniforms_reset, n);
                                break;
                        default:
                                break;
                        }
                }

                switch (inst->op) {
                case QOP_TEX_S:
                case QOP_TEX_T:
                case QOP_TEX_R:
                case QOP_TEX_B:
                case QOP_TEX_DIRECT:
                        /* From the VC4 spec:
                         *
                         *    "The TFREQ input FIFO holds two full lots of s,
                         *     t, r, b data, plus associated setup data, per
                         *     QPU, that is, there are eight data slots. For
                         *     each texture request, slots are only consumed
                         *     for the components of s, t, r, and b actually
                         *     written. Thus the FIFO can hold four requests
                         *     of just (s, t) data, or eight requests of just
                         *     s data (for direct addressed data lookups).
                         *
                         *     Note that there is one FIFO per QPU, and the
                         *     FIFO has no concept of threads - that is,
                         *     multi-threaded shaders must be careful to use
                         *     only 1/2 the FIFO depth before reading
                         *     back. Multi-threaded programs must also
                         *     therefore always thread switch on texture
                         *     fetch as the other thread may have data
                         *     waiting in the FIFO."
                         *
                         * If the texture coordinate fifo is full, block this
                         * on the last QOP_TEX_RESULT.
                         */
                        if (state.tfreq_count == (c->fs_threaded ? 4 : 8)) {
                                block_until_tex_result(&state, n);
                        }

                        /* From the VC4 spec:
                         *
                         *    "Since the maximum number of texture requests
                         *     in the input (TFREQ) FIFO is four lots of (s,
                         *     t) data, the output (TFRCV) FIFO is sized to
                         *     holds four lots of max-size color data per
                         *     QPU. For non-float color, reads are packed
                         *     RGBA8888 data (one read per pixel). For 16-bit
                         *     float color, two reads are necessary per
                         *     pixel, with reads packed as RG1616 then
                         *     BA1616. So per QPU there are eight color slots
                         *     in the TFRCV FIFO."
                         *
                         * If the texture result fifo is full, block adding
                         * any more to it until the last QOP_TEX_RESULT.
                         */
                        if (inst->op == QOP_TEX_S ||
                            inst->op == QOP_TEX_DIRECT) {
                                if (state.tfrcv_count ==
                                    (c->fs_threaded ? 2 : 4))
                                        block_until_tex_result(&state, n);
                                state.tfrcv_count++;
                        }

                        state.tex_fifo[state.tex_fifo_pos].coords++;
                        state.tfreq_count++;
                        break;

                case QOP_TEX_RESULT:
                        /* Results have to be fetched after the
                         * coordinate setup.  Note that we're assuming
                         * here that our input shader has the texture
                         * coord setup and result fetch in order,
                         * which is true initially but not of our
                         * instruction stream after this pass.
                         */
                        add_dep(state.dir, state.last_tex_coord, n);

                        state.tex_fifo[state.tex_fifo_pos].node = n;

                        state.tex_fifo_pos++;
                        memset(&state.tex_fifo[state.tex_fifo_pos], 0,
                               sizeof(state.tex_fifo[0]));
                        break;

                case QOP_UNIFORMS_RESET:
                        add_write_dep(state.dir, &state.last_uniforms_reset, n);
                        break;

                default:
                        assert(!qir_is_tex(inst));
                        break;
                }
        }
}
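
/* Walks the block bottom-to-top (dir == R), so write-after-read orderings
 * (a later write of a temp or resource staying below its earlier readers)
 * fall out of the same calculate_deps() code with before/after swapped.
 */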
static void
calculate_reverse_deps(struct vc4_compile *c, void *mem_ctx,
                       struct list_head *schedule_list)
{
        struct schedule_setup_state state;

        memset(&state, 0, sizeof(state));
        state.last_temp_write = rzalloc_array(mem_ctx, struct schedule_node *,
                                              c->num_temps);
        state.dir = R;

        list_for_each_entry_rev(struct schedule_node, n, schedule_list, link) {
                calculate_deps(&state, n);
        }
}
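
/* Estimates the change in the number of live temps if \p inst were scheduled
 * next.  Since we schedule bottom-up, emitting the only remaining write of a
 * temp ends its live range (negative cost), while reading a temp that isn't
 * live yet starts one (positive cost).
 */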
static int
get_register_pressure_cost(struct schedule_state *state, struct qinst *inst)
{
        int cost = 0;

        if (inst->dst.file == QFILE_TEMP &&
            state->temp_writes[inst->dst.index] == 1)
                cost--;

        for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                if (inst->src[i].file == QFILE_TEMP &&
                    !BITSET_TEST(state->temp_live, inst->src[i].index)) {
                        cost++;
                }
        }

        return cost;
}
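
/* Returns true if this instruction accesses the TLB and therefore takes the
 * per-fragment scoreboard lock.
 */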
static bool
locks_scoreboard(struct qinst *inst)
{
        if (inst->op == QOP_TLB_COLOR_READ)
                return true;

        switch (inst->dst.file) {
        case QFILE_TLB_Z_WRITE:
        case QFILE_TLB_COLOR_WRITE:
        case QFILE_TLB_COLOR_WRITE_MS:
                return true;
        default:
                return false;
        }
}
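
/**
 * Picks the next node to schedule from the worklist of DAG heads, applying
 * the heuristics below in priority order: keep the branch as the block's last
 * instruction (i.e. choose it first when scheduling bottom-up), place
 * scoreboard-locking instructions late, avoid stalling on unblocked_time,
 * reduce register pressure, and finally prefer the deepest chain to the end
 * of the program.
 */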
static struct schedule_node *
choose_instruction(struct schedule_state *state)
{
        struct schedule_node *chosen = NULL;

        list_for_each_entry(struct schedule_node, n, &state->worklist, link) {
                /* The branches aren't being tracked as dependencies.  Make
                 * sure that they stay scheduled as the last instruction of
                 * the block, which is to say the first one we choose to
                 * schedule.
                 */
                if (n->inst->op == QOP_BRANCH)
                        return n;

                if (!chosen) {
                        chosen = n;
                        continue;
                }

                /* Prefer scheduling things that lock the scoreboard, so that
                 * they appear late in the program and we get more parallelism
                 * between shaders on multiple QPUs hitting the same fragment.
                 */
                if (locks_scoreboard(n->inst) &&
                    !locks_scoreboard(chosen->inst)) {
                        chosen = n;
                        continue;
                } else if (!locks_scoreboard(n->inst) &&
                           locks_scoreboard(chosen->inst)) {
                        continue;
                }

                /* If we would block on the previously chosen node, but would
                 * block less on this one, then prefer it.
                 */
                if (chosen->unblocked_time > state->time &&
                    n->unblocked_time < chosen->unblocked_time) {
                        chosen = n;
                        continue;
                } else if (n->unblocked_time > state->time &&
                           n->unblocked_time > chosen->unblocked_time) {
                        continue;
                }

                /* If we can definitely reduce register pressure, do so
                 * immediately.
                 */
                int register_pressure_cost =
                        get_register_pressure_cost(state, n->inst);
                int chosen_register_pressure_cost =
                        get_register_pressure_cost(state, chosen->inst);

                if (register_pressure_cost < chosen_register_pressure_cost) {
                        chosen = n;
                        continue;
                } else if (register_pressure_cost >
                           chosen_register_pressure_cost) {
                        continue;
                }

                /* Otherwise, prefer instructions with the deepest chain to
                 * the end of the program.  This avoids the problem of
                 * "everything generates a temp, nothing finishes freeing one,
                 * guess I'll just keep emitting varying mul/adds".
                 */
                if (n->delay > chosen->delay) {
                        chosen = n;
                        continue;
                } else if (n->delay < chosen->delay) {
                        continue;
                }
        }

        return chosen;
}
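
/* Debug helper: prints the current worklist, each node's register pressure
 * cost, and its children in the dependency DAG.
 */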
static void
dump_state(struct vc4_compile *c, struct schedule_state *state)
{
        uint32_t i = 0;

        list_for_each_entry(struct schedule_node, n, &state->worklist, link) {
                fprintf(stderr, "%3d: ", i++);
                qir_dump_inst(c, n->inst);
                fprintf(stderr, " (%d cost)\n",
                        get_register_pressure_cost(state, n->inst));

                for (int i = 0; i < n->child_count; i++) {
                        struct schedule_node *child = n->children[i];
                        fprintf(stderr, "   - ");
                        qir_dump_inst(c, child->inst);
                        fprintf(stderr, " (%d parents)\n", child->parent_count);
                }
        }
}
/* Estimate of how many instructions we should schedule between operations.
 *
 * These aren't in real cycle counts, because we're just estimating cycle
 * times anyway.  QIR instructions will get paired up when turned into QPU
 * instructions, or extra NOP delays will have to be added due to register
 * allocation choices.
 */
static int
latency_between(struct schedule_node *before, struct schedule_node *after)
{
        if ((before->inst->op == QOP_TEX_S ||
             before->inst->op == QOP_TEX_DIRECT) &&
            after->inst->op == QOP_TEX_RESULT)
                return 100;

        return 1;
}
/** Recursive computation of the delay member of a node. */
static void
compute_delay(struct schedule_node *n)
{
        if (!n->child_count) {
                /* The color read needs to be scheduled late, to avoid locking
                 * the scoreboard early.  This is our best tool for
                 * encouraging that.  The other scoreboard locking ops will
                 * have this happen by default, since they are generally the
                 * DAG heads or close to them.
                 */
                if (n->inst->op == QOP_TLB_COLOR_READ)
                        n->delay = 1000;
                else
                        n->delay = 1;
        } else {
                for (int i = 0; i < n->child_count; i++) {
                        if (!n->children[i]->delay)
                                compute_delay(n->children[i]);
                        n->delay = MAX2(n->delay,
                                        n->children[i]->delay +
                                        latency_between(n, n->children[i]));
                }
        }
}
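
/**
 * The main bottom-up scheduling loop: repeatedly picks a DAG head from the
 * worklist, prepends it to the block's instruction list (we're scheduling
 * bottom-up), and promotes any children whose last remaining parent was just
 * scheduled.
 */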
static void
schedule_instructions(struct vc4_compile *c,
                      struct qblock *block, struct schedule_state *state)
{
        if (debug) {
                fprintf(stderr, "initial deps:\n");
                dump_state(c, state);
        }

        /* Remove non-DAG heads from the list. */
        list_for_each_entry_safe(struct schedule_node, n,
                                 &state->worklist, link) {
                if (n->parent_count != 0)
                        list_del(&n->link);
        }

        while (!list_empty(&state->worklist)) {
                struct schedule_node *chosen = choose_instruction(state);
                struct qinst *inst = chosen->inst;

                if (debug) {
                        fprintf(stderr, "current list:\n");
                        dump_state(c, state);
                        fprintf(stderr, "chose: ");
                        qir_dump_inst(c, inst);
                        fprintf(stderr, " (%d cost)\n",
                                get_register_pressure_cost(state, inst));
                }

                state->time = MAX2(state->time, chosen->unblocked_time);

                /* Schedule this instruction back onto the QIR list. */
                list_del(&chosen->link);
                list_add(&inst->link, &block->instructions);

                /* Now that we've scheduled a new instruction, some of its
                 * children can be promoted to the list of instructions ready to
                 * be scheduled.  Update the children's unblocked time for this
                 * DAG edge as we do so.
                 */
                for (int i = chosen->child_count - 1; i >= 0; i--) {
                        struct schedule_node *child = chosen->children[i];

                        child->unblocked_time = MAX2(child->unblocked_time,
                                                     state->time +
                                                     latency_between(chosen,
                                                                     child));
                        child->parent_count--;
                        if (child->parent_count == 0)
                                list_add(&child->link, &state->worklist);
                }

                /* Update our tracking of register pressure. */
                for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                        if (inst->src[i].file == QFILE_TEMP)
                                BITSET_SET(state->temp_live, inst->src[i].index);
                }
                if (inst->dst.file == QFILE_TEMP) {
                        state->temp_writes[inst->dst.index]--;
                        if (state->temp_writes[inst->dst.index] == 0)
                                BITSET_CLEAR(state->temp_live, inst->dst.index);
                }
        }
}
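
/* Schedules a single basic block: wraps each instruction in a schedule_node,
 * builds the dependency DAG in both directions, computes the critical-path
 * delays, and then re-emits the instructions in the chosen order.
 */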
static void
qir_schedule_instructions_block(struct vc4_compile *c,
                                struct qblock *block)
{
        void *mem_ctx = ralloc_context(NULL);
        struct schedule_state state = { { 0 } };

        state.temp_writes = rzalloc_array(mem_ctx, uint32_t, c->num_temps);
        state.temp_live = rzalloc_array(mem_ctx, BITSET_WORD,
                                        BITSET_WORDS(c->num_temps));
        list_inithead(&state.worklist);

        /* Wrap each instruction in a scheduler structure. */
        qir_for_each_inst_safe(inst, block) {
                struct schedule_node *n = rzalloc(mem_ctx, struct schedule_node);

                n->inst = inst;
                list_del(&inst->link);
                list_addtail(&n->link, &state.worklist);

                if (inst->dst.file == QFILE_TEMP)
                        state.temp_writes[inst->dst.index]++;
        }

        /* Dependencies tracked top-to-bottom. */
        calculate_forward_deps(c, mem_ctx, &state.worklist);
        /* Dependencies tracked bottom-to-top. */
        calculate_reverse_deps(c, mem_ctx, &state.worklist);

        list_for_each_entry(struct schedule_node, n, &state.worklist, link)
                compute_delay(n);

        schedule_instructions(c, block, &state);

        ralloc_free(mem_ctx);
}
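
/* Pass entry point: pre-RA scheduling of each block in the shader, with
 * optional before/after dumps when debug is set.
 */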
void
qir_schedule_instructions(struct vc4_compile *c)
{
        if (debug) {
                fprintf(stderr, "Pre-schedule instructions\n");
                qir_dump(c);
        }

        qir_for_each_block(block, c)
                qir_schedule_instructions_block(c, block);

        if (debug) {
                fprintf(stderr, "Post-schedule instructions\n");
                qir_dump(c);
        }
}