/*
 * Copyright © 2010 Intel Corporation
 * Copyright © 2014-2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
/**
 * @file vc4_qir_schedule.c
 *
 * The basic model of the list scheduler is to take a basic block, compute a
 * DAG of the dependencies from the bottom up, and make a list of the DAG
 * heads.  Heuristically pick a DAG head and schedule (remove) it, then put
 * all the parents that are now DAG heads into the list of things to
 * schedule.
 *
 * The goal of scheduling here, before register allocation and conversion to
 * QPU instructions, is to reduce register pressure by reordering instructions
 * to consume values when possible.
 */

#include "vc4_qir.h"

static bool debug;
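/* A small worked example of the model above (instruction names are only for
 * exposition, not real QIR mnemonics): given a block
 *
 *     t0 = load_varying
 *     t1 = fmul t0, t0
 *     tlb_color_write t1
 *
 * the bottom-up DAG has the TLB write as its sole head; the fmul is the
 * write's child, and the varying load is the fmul's child.  The scheduler
 * repeatedly pops a head and promotes any child whose parents have all been
 * scheduled.
 */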
struct schedule_node {
        struct list_head link;
        struct qinst *inst;

        struct schedule_node **children;
        uint32_t child_count;
        uint32_t child_array_size;
        uint32_t parent_count;

        /* Length of the longest (latency) chain from a DAG head to this
         * node.
         */
        uint32_t delay;

        /* Longest time + latency_between(parent, this) of any parent of this
         * node.
         */
        uint32_t unblocked_time;
};
struct schedule_state {
        /* List of struct schedule_node *.  This starts out with all
         * instructions, and after dependency updates it's trimmed to be just
         * the DAG heads.
         */
        struct list_head worklist;

        uint32_t time;

        uint32_t *temp_writes;

        BITSET_WORD *temp_live;
};
/* When walking the instructions in reverse, we need to swap before/after in
 * add_dep().
 */
enum direction { F, R };
/**
 * Marks a dependency between two instructions, that \p after must appear
 * after \p before.
 *
 * Our dependencies are tracked as a DAG.  Since we're scheduling bottom-up,
 * the latest instructions with nothing left to schedule are the DAG heads,
 * and their inputs are their children.
 */
static void
add_dep(enum direction dir,
        struct schedule_node *before,
        struct schedule_node *after)
{
        if (!before || !after)
                return;

        assert(before != after);

        if (dir == R) {
                struct schedule_node *t = before;
                before = after;
                after = t;
        }

        /* Don't record a duplicate edge if this dependency already exists.
         * (This previously compared against "after", which the assert above
         * guarantees can never match.)
         */
        for (int i = 0; i < after->child_count; i++) {
                if (after->children[i] == before)
                        return;
        }

        if (after->child_array_size <= after->child_count) {
                after->child_array_size = MAX2(after->child_array_size * 2, 16);
                after->children = reralloc(after, after->children,
                                           struct schedule_node *,
                                           after->child_array_size);
        }

        after->children[after->child_count] = before;
        after->child_count++;
        before->parent_count++;
}
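/**
 * Records an ordering dependency on the last node tracked in \p before, then
 * makes \p after the new tracked node, so that subsequent calls serialize
 * against it.
 */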
static void
add_write_dep(enum direction dir,
              struct schedule_node **before,
              struct schedule_node *after)
{
        add_dep(dir, *before, after);
        *before = after;
}
struct schedule_setup_state {
        struct schedule_node **last_temp_write;
        struct schedule_node *last_sf;
        struct schedule_node *last_vary_read;
        struct schedule_node *last_vpm_read;
        struct schedule_node *last_vpm_write;
        struct schedule_node *last_tex_coord;
        struct schedule_node *last_tex_result;
        struct schedule_node *last_tlb;
        struct schedule_node *last_uniforms_reset;
        enum direction dir;

        /**
         * Texture FIFO tracking.  This is done top-to-bottom, and is used to
         * track the QOP_TEX_RESULTs and add dependencies on previous ones
         * when trying to submit texture coords with TFREQ full or new texture
         * fetches with TXRCV full.
         */
        struct {
                struct schedule_node *node;
                int coords;
        } tex_fifo[8];
        int tfreq_count; /**< Number of texture coords outstanding. */
        int tfrcv_count; /**< Number of texture results outstanding. */
        int tex_fifo_pos;
};
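/**
 * Blocks \p n on the oldest outstanding QOP_TEX_RESULT, then retires that
 * FIFO entry: its coordinate and result counts are released and the
 * remaining entries shift down.
 */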
static void
block_until_tex_result(struct schedule_setup_state *state, struct schedule_node *n)
{
        add_dep(state->dir, state->tex_fifo[0].node, n);

        state->tfreq_count -= state->tex_fifo[0].coords;
        state->tfrcv_count--;

        memmove(&state->tex_fifo[0],
                &state->tex_fifo[1],
                state->tex_fifo_pos * sizeof(state->tex_fifo[0]));
        state->tex_fifo_pos--;
}
/**
 * Common code for dependencies that need to be tracked both forward and
 * backward.
 *
 * This is for things like "all VPM reads have to happen in order."
 */
static void
calculate_deps(struct schedule_setup_state *state, struct schedule_node *n)
{
        struct qinst *inst = n->inst;
        enum direction dir = state->dir;

        /* Add deps for temp registers and varyings accesses.  Note that we
         * ignore uniforms accesses, because qir_reorder_uniforms() happens
         * after this.
         */
        for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                switch (inst->src[i].file) {
                case QFILE_TEMP:
                        add_dep(dir,
                                state->last_temp_write[inst->src[i].index], n);
                        break;

                case QFILE_VARY:
                        add_write_dep(dir, &state->last_vary_read, n);
                        break;

                case QFILE_VPM:
                        add_write_dep(dir, &state->last_vpm_read, n);
                        break;

                default:
                        break;
                }
        }

        switch (inst->op) {
        case QOP_VARY_ADD_C:
                add_dep(dir, state->last_vary_read, n);
                break;

        case QOP_TEX_S:
        case QOP_TEX_T:
        case QOP_TEX_R:
        case QOP_TEX_B:
        case QOP_TEX_DIRECT:
                /* Texturing setup gets scheduled in order, because
                 * the uniforms referenced by them have to land in a
                 * specific order.
                 */
                add_write_dep(dir, &state->last_tex_coord, n);
                break;

        case QOP_TEX_RESULT:
                /* Results have to be fetched in order. */
                add_write_dep(dir, &state->last_tex_result, n);
                break;

        case QOP_TLB_COLOR_READ:
                add_write_dep(dir, &state->last_tlb, n);
                break;

        default:
                break;
        }

        switch (inst->dst.file) {
        case QFILE_VPM:
                add_write_dep(dir, &state->last_vpm_write, n);
                break;

        case QFILE_TEMP:
                add_write_dep(dir,
                              &state->last_temp_write[inst->dst.index], n);
                break;

        case QFILE_TLB_COLOR_WRITE:
        case QFILE_TLB_COLOR_WRITE_MS:
        case QFILE_TLB_Z_WRITE:
        case QFILE_TLB_STENCIL_SETUP:
                add_write_dep(dir, &state->last_tlb, n);
                break;

        default:
                break;
        }

        if (qir_depends_on_flags(inst))
                add_dep(dir, state->last_sf, n);

        if (inst->sf)
                add_write_dep(dir, &state->last_sf, n);
}
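/**
 * Adds the dependencies that only make sense walking top-to-bottom: ordering
 * against the uniform stream, and modeling the texture coordinate (TFREQ)
 * and texture result (TFRCV) FIFO capacities.
 */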
static void
calculate_forward_deps(struct vc4_compile *c, void *mem_ctx,
                       struct list_head *schedule_list)
{
        struct schedule_setup_state state;

        memset(&state, 0, sizeof(state));
        state.last_temp_write = rzalloc_array(mem_ctx, struct schedule_node *,
                                              c->num_temps);
        state.dir = F;

        list_for_each_entry(struct schedule_node, n, schedule_list, link) {
                struct qinst *inst = n->inst;

                calculate_deps(&state, n);

                for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                        switch (inst->src[i].file) {
                        case QFILE_UNIF:
                                add_dep(state.dir, state.last_uniforms_reset, n);
                                break;
                        default:
                                break;
                        }
                }

                switch (inst->op) {
                case QOP_TEX_S:
                case QOP_TEX_T:
                case QOP_TEX_R:
                case QOP_TEX_B:
                case QOP_TEX_DIRECT:
                        /* If the texture coordinate fifo is full,
                         * block this on the last QOP_TEX_RESULT.
                         */
                        if (state.tfreq_count == 8) {
                                block_until_tex_result(&state, n);
                        }

                        /* If the texture result fifo is full, block
                         * adding any more to it until the last
                         * QOP_TEX_RESULT.
                         */
                        if (inst->op == QOP_TEX_S ||
                            inst->op == QOP_TEX_DIRECT) {
                                if (state.tfrcv_count == 4)
                                        block_until_tex_result(&state, n);
                                state.tfrcv_count++;
                        }

                        state.tex_fifo[state.tex_fifo_pos].coords++;
                        state.tfreq_count++;
                        break;

                case QOP_TEX_RESULT:
                        /* Results have to be fetched after the
                         * coordinate setup.  Note that we're assuming
                         * here that our input shader has the texture
                         * coord setup and result fetch in order,
                         * which is true initially but not of our
                         * instruction stream after this pass.
                         */
                        add_dep(state.dir, state.last_tex_coord, n);

                        state.tex_fifo[state.tex_fifo_pos].node = n;

                        state.tex_fifo_pos++;
                        memset(&state.tex_fifo[state.tex_fifo_pos], 0,
                               sizeof(state.tex_fifo[0]));
                        break;

                case QOP_UNIFORMS_RESET:
                        add_write_dep(state.dir, &state.last_uniforms_reset, n);
                        break;

                default:
                        assert(!qir_is_tex(inst));
                        break;
                }
        }
}
static void
calculate_reverse_deps(struct vc4_compile *c, void *mem_ctx,
                       struct list_head *schedule_list)
{
        struct schedule_setup_state state;

        memset(&state, 0, sizeof(state));
        state.dir = R;
        state.last_temp_write = rzalloc_array(mem_ctx, struct schedule_node *,
                                              c->num_temps);

        list_for_each_entry_rev(struct schedule_node, n, schedule_list, link) {
                calculate_deps(&state, n);
        }
}
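/**
 * Estimates how scheduling \p inst would change register pressure: -1 when
 * this is the only write of its destination temp (placing it bottom-up
 * closes that temp's live range), +1 for each source temp that isn't
 * already live below us.  Lower is better.
 */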
static int
get_register_pressure_cost(struct schedule_state *state, struct qinst *inst)
{
        int cost = 0;

        if (inst->dst.file == QFILE_TEMP &&
            state->temp_writes[inst->dst.index] == 1)
                cost--;

        for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                if (inst->src[i].file == QFILE_TEMP &&
                    !BITSET_TEST(state->temp_live, inst->src[i].index)) {
                        cost++;
                }
        }

        return cost;
}
static bool
locks_scoreboard(struct qinst *inst)
{
        if (inst->op == QOP_TLB_COLOR_READ)
                return true;

        switch (inst->dst.file) {
        case QFILE_TLB_Z_WRITE:
        case QFILE_TLB_COLOR_WRITE:
        case QFILE_TLB_COLOR_WRITE_MS:
                return true;
        default:
                return false;
        }
}
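/**
 * Picks the next DAG head to schedule.  Working bottom-up, the preferences
 * apply in order: block-ending branches first, then scoreboard-locking ops
 * (so they land late in the final program), then whichever node stalls us
 * least, then lower register-pressure cost, then the deepest delay chain.
 */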
static struct schedule_node *
choose_instruction(struct schedule_state *state)
{
        struct schedule_node *chosen = NULL;

        list_for_each_entry(struct schedule_node, n, &state->worklist, link) {
                /* The branches aren't being tracked as dependencies.  Make
                 * sure that they stay scheduled as the last instruction of
                 * the block, which is to say the first one we choose to
                 * schedule.
                 */
                if (n->inst->op == QOP_BRANCH)
                        return n;

                if (!chosen) {
                        chosen = n;
                        continue;
                }

                /* Prefer scheduling things that lock the scoreboard, so that
                 * they appear late in the program and we get more parallelism
                 * between shaders on multiple QPUs hitting the same fragment.
                 */
                if (locks_scoreboard(n->inst) &&
                    !locks_scoreboard(chosen->inst)) {
                        chosen = n;
                        continue;
                } else if (!locks_scoreboard(n->inst) &&
                           locks_scoreboard(chosen->inst)) {
                        continue;
                }

                /* If we would block on the previously chosen node, but would
                 * block less on this one, then prefer it.
                 */
                if (chosen->unblocked_time > state->time &&
                    n->unblocked_time < chosen->unblocked_time) {
                        chosen = n;
                        continue;
                } else if (n->unblocked_time > state->time &&
                           n->unblocked_time > chosen->unblocked_time) {
                        continue;
                }

                /* If we can definitely reduce register pressure, do so
                 * immediately.
                 */
                int register_pressure_cost =
                        get_register_pressure_cost(state, n->inst);
                int chosen_register_pressure_cost =
                        get_register_pressure_cost(state, chosen->inst);

                if (register_pressure_cost < chosen_register_pressure_cost) {
                        chosen = n;
                        continue;
                } else if (register_pressure_cost >
                           chosen_register_pressure_cost) {
                        continue;
                }

                /* Otherwise, prefer instructions with the deepest chain to
                 * the end of the program.  This avoids the problem of
                 * "everything generates a temp, nothing finishes freeing one,
                 * guess I'll just keep emitting varying mul/adds".
                 */
                if (n->delay > chosen->delay) {
                        chosen = n;
                        continue;
                } else if (n->delay < chosen->delay) {
                        continue;
                }
        }

        return chosen;
}
static void
dump_state(struct vc4_compile *c, struct schedule_state *state)
{
        uint32_t i = 0;

        list_for_each_entry(struct schedule_node, n, &state->worklist, link) {
                fprintf(stderr, "%3d: ", i++);
                qir_dump_inst(c, n->inst);
                fprintf(stderr, " (%d cost)\n",
                        get_register_pressure_cost(state, n->inst));

                for (int i = 0; i < n->child_count; i++) {
                        struct schedule_node *child = n->children[i];
                        fprintf(stderr, " - ");
                        qir_dump_inst(c, child->inst);
                        fprintf(stderr, " (%d parents)\n", child->parent_count);
                }
        }
}
/* Estimate of how many instructions we should schedule between operations.
 *
 * These aren't in real cycle counts, because we're just estimating cycle
 * times anyway.  QIR instructions will get paired up when turned into QPU
 * instructions, or extra NOP delays will have to be added due to register
 * allocation choices.
 */
static uint32_t
latency_between(struct schedule_node *before, struct schedule_node *after)
{
        /* Model the long texture fetch latency so that consumers of a
         * texture result get pushed far away from the request.
         */
        if ((before->inst->op == QOP_TEX_S ||
             before->inst->op == QOP_TEX_DIRECT) &&
            after->inst->op == QOP_TEX_RESULT)
                return 100;

        return 1;
}
/** Recursive computation of the delay member of a node. */
static void
compute_delay(struct schedule_node *n)
{
        if (!n->child_count) {
                /* The color read needs to be scheduled late, to avoid locking
                 * the scoreboard early.  This is our best tool for
                 * encouraging that.  The other scoreboard locking ops will
                 * have this happen by default, since they are generally the
                 * DAG heads or close to them.
                 */
                if (n->inst->op == QOP_TLB_COLOR_READ)
                        n->delay = 1000;
                else
                        n->delay = 1;
        } else {
                for (int i = 0; i < n->child_count; i++) {
                        if (!n->children[i]->delay)
                                compute_delay(n->children[i]);
                        n->delay = MAX2(n->delay,
                                        n->children[i]->delay +
                                        latency_between(n, n->children[i]));
                }
        }
}
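/**
 * The main bottom-up loop: repeatedly choose a DAG head, prepend it to the
 * block's (emptied) instruction list, promote children whose last parent
 * just scheduled, and update the liveness and pressure bookkeeping.  Since
 * we pick instructions from the end of the block upward, prepending yields
 * the final top-to-bottom order.
 */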
static void
schedule_instructions(struct vc4_compile *c,
                      struct qblock *block, struct schedule_state *state)
{
        if (debug) {
                fprintf(stderr, "initial deps:\n");
                dump_state(c, state);
        }

        /* Remove non-DAG heads from the list. */
        list_for_each_entry_safe(struct schedule_node, n,
                                 &state->worklist, link) {
                if (n->parent_count != 0)
                        list_del(&n->link);
        }

        while (!list_empty(&state->worklist)) {
                struct schedule_node *chosen = choose_instruction(state);
                struct qinst *inst = chosen->inst;

                if (debug) {
                        fprintf(stderr, "current list:\n");
                        dump_state(c, state);
                        fprintf(stderr, "chose: ");
                        qir_dump_inst(c, inst);
                        fprintf(stderr, " (%d cost)\n",
                                get_register_pressure_cost(state, inst));
                }

                state->time = MAX2(state->time, chosen->unblocked_time);

                /* Schedule this instruction back onto the QIR list. */
                list_del(&chosen->link);
                list_add(&inst->link, &block->instructions);

                /* Now that we've scheduled a new instruction, some of its
                 * children can be promoted to the list of instructions ready to
                 * be scheduled.  Update the children's unblocked time for this
                 * DAG edge as we do so.
                 */
                for (int i = chosen->child_count - 1; i >= 0; i--) {
                        struct schedule_node *child = chosen->children[i];

                        child->unblocked_time = MAX2(child->unblocked_time,
                                                     state->time +
                                                     latency_between(chosen,
                                                                     child));
                        child->parent_count--;
                        if (child->parent_count == 0)
                                list_add(&child->link, &state->worklist);
                }

                /* Update our tracking of register pressure. */
                for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                        if (inst->src[i].file == QFILE_TEMP)
                                BITSET_SET(state->temp_live, inst->src[i].index);
                }
                if (inst->dst.file == QFILE_TEMP) {
                        state->temp_writes[inst->dst.index]--;
                        if (state->temp_writes[inst->dst.index] == 0)
                                BITSET_CLEAR(state->temp_live, inst->dst.index);
                }

                state->time++;
        }
}
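/**
 * Schedules one basic block: wraps each instruction in a schedule_node,
 * builds the dependency DAG in both walk directions, seeds the delay
 * estimates, and then runs the list scheduler proper.
 */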
static void
qir_schedule_instructions_block(struct vc4_compile *c,
                                struct qblock *block)
{
        void *mem_ctx = ralloc_context(NULL);
        struct schedule_state state = { { 0 } };

        state.temp_writes = rzalloc_array(mem_ctx, uint32_t, c->num_temps);
        state.temp_live = rzalloc_array(mem_ctx, BITSET_WORD,
                                        BITSET_WORDS(c->num_temps));
        list_inithead(&state.worklist);

        /* Wrap each instruction in a scheduler structure. */
        qir_for_each_inst_safe(inst, block) {
                struct schedule_node *n = rzalloc(mem_ctx, struct schedule_node);

                n->inst = inst;
                list_del(&inst->link);
                list_addtail(&n->link, &state.worklist);

                if (inst->dst.file == QFILE_TEMP)
                        state.temp_writes[inst->dst.index]++;
        }

        /* Dependencies tracked top-to-bottom. */
        calculate_forward_deps(c, mem_ctx, &state.worklist);
        /* Dependencies tracked bottom-to-top. */
        calculate_reverse_deps(c, mem_ctx, &state.worklist);

        list_for_each_entry(struct schedule_node, n, &state.worklist, link)
                compute_delay(n);

        schedule_instructions(c, block, &state);

        ralloc_free(mem_ctx);
}
void
qir_schedule_instructions(struct vc4_compile *c)
{
        if (debug) {
                fprintf(stderr, "Pre-schedule instructions\n");
                qir_dump(c);
        }

        qir_for_each_block(block, c)
                qir_schedule_instructions_block(c, block);

        if (debug) {
                fprintf(stderr, "Post-schedule instructions\n");
                qir_dump(c);
        }
}