/*
 * Copyright © 2019 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "nir.h"
#include "util/dag.h"
#include "util/u_dynarray.h"
/** @file
 *
 * Implements basic-block-level prepass instruction scheduling in NIR to
 * manage register pressure.
 *
 * This is based on the Goodman/Hsu paper (1988, cached copy at
 * https://people.freedesktop.org/~anholt/scheduling-goodman-hsu.pdf).  We
 * build the DDG for NIR (which can be done mostly using the NIR def/use
 * chains for SSA instructions, plus some edges for ordering register writes
 * vs reads, and some more for ordering intrinsics).  Then we pick heads off
 * of the DDG using their heuristic to emit the NIR instructions back into
 * the block in their new order.
 *
 * The hard case for prepass scheduling on GPUs seems to always be consuming
 * texture/ubo results.  The register pressure heuristic doesn't want to pick
 * an instr that starts consuming texture results, because that instr usually
 * won't be the only user, so scheduling it increases pressure.
 *
 * If you instead always force consumption of tex results, then in a case
 * where a single sample is used for many outputs, you'll end up picking
 * every other user and expanding register pressure.  The
 * partially_evaluated_path flag helps tremendously, in that if you happen
 * for whatever reason to pick a texture sample's output, then you'll try to
 * finish off that sample.  Future work may include doing some local search
 * before locking in a choice, to try to more reliably find the case where
 * just a few choices going against the heuristic can manage to free the
 * whole vector.
 */
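/* A hedged example of the failure mode described above (instruction names
 * and channel counts are illustrative, not from a real shader): a vec4 tex
 * result feeding four separate scalar ALU instructions only frees its 4
 * channels once the last of those consumers is scheduled.  Picking just one
 * consumer makes that consumer's own def live without killing the vec4, so
 * net pressure rises and the pure pressure heuristic keeps avoiding the
 * remaining consumers.
 */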
static bool debug;

/**
 * Represents a node in the DDG for a NIR instruction.
 */
typedef struct {
   struct dag_node dag; /* must be first for our u_dynarray_foreach */
   nir_instr *instr;
   bool partially_evaluated_path;

   /* Approximate estimate of the delay between starting this instruction and
    * its results being available.
    *
    * Accuracy is not too important, given that we're prepass scheduling here
    * and just trying to reduce excess dependencies introduced by a register
    * allocator by stretching out the live intervals of expensive
    * instructions.
    */
   uint32_t delay;

   /* Cost of the maximum-delay path from this node to the leaves. */
   uint32_t max_delay;

   /* scoreboard->time value when this instruction can be scheduled without
    * any stalls expected.
    */
   uint32_t ready_time;
} nir_schedule_node;
typedef struct {
   struct dag *dag;

   nir_shader *shader;

   /* Mapping from nir_register * or nir_ssa_def * to a struct set of
    * instructions remaining to be scheduled using the register.
    */
   struct hash_table *remaining_uses;

   /* Map from nir_instr to nir_schedule_node * */
   struct hash_table *instr_map;

   /* Set of nir_register * or nir_ssa_def * that have had any instruction
    * scheduled on them.
    */
   struct set *live_values;

   /* An abstract approximation of the number of nir_scheduler_node->delay
    * units since the start of the shader.
    */
   uint32_t time;

   /* Number of channels currently used by the NIR instructions that have
    * been scheduled.
    */
   int pressure;

   /* Number of channels that may be in use before we switch to the
    * pressure-prioritizing scheduling heuristic.
    */
   int threshold;
} nir_schedule_scoreboard;
/* When walking the instructions in reverse, we use this flag to swap
 * before/after in add_dep().
 */
enum direction { F, R };

typedef struct {
   nir_shader *shader;

   /* Map from nir_instr to nir_schedule_node * */
   struct hash_table *instr_map;
   /* Map from nir_register to nir_schedule_node * */
   struct hash_table *reg_map;

   /* Scheduler nodes for last instruction involved in some class of
    * dependency.
    */
   nir_schedule_node *load_input;
   nir_schedule_node *store_shared;
   nir_schedule_node *unknown_intrinsic;
   nir_schedule_node *discard;
   nir_schedule_node *jump;

   enum direction dir;
} nir_deps_state;
static void *
_mesa_hash_table_search_data(struct hash_table *ht, void *key)
{
   struct hash_entry *entry = _mesa_hash_table_search(ht, key);
   if (!entry)
      return NULL;
   return entry->data;
}
static nir_schedule_node *
nir_schedule_get_node(struct hash_table *instr_map, nir_instr *instr)
{
   return _mesa_hash_table_search_data(instr_map, instr);
}
static struct set *
nir_schedule_scoreboard_get_src(nir_schedule_scoreboard *scoreboard,
                                nir_src *src)
{
   if (src->is_ssa)
      return _mesa_hash_table_search_data(scoreboard->remaining_uses, src->ssa);
   else
      return _mesa_hash_table_search_data(scoreboard->remaining_uses,
                                          src->reg.reg);
}
static int
nir_schedule_def_pressure(nir_ssa_def *def)
{
   return def->num_components;
}

static int
nir_schedule_src_pressure(nir_src *src)
{
   if (src->is_ssa)
      return nir_schedule_def_pressure(src->ssa);
   else
      return src->reg.reg->num_components;
}

static int
nir_schedule_dest_pressure(nir_dest *dest)
{
   if (dest->is_ssa)
      return nir_schedule_def_pressure(&dest->ssa);
   else
      return dest->reg.reg->num_components;
}
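/* Pressure is counted in scalar channels, so for example (hypothetical
 * defs): a vec4 SSA def contributes 4 to pressure while a single float
 * contributes 1, regardless of bit size.
 */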
/**
 * Adds a dependency such that @after must appear in the final program after
 * @before.
 *
 * We add @before as a child of @after, so that DAG heads are the outputs of
 * the program and we make our scheduling decisions bottom to top.
 */
static void
add_dep(nir_deps_state *state,
        nir_schedule_node *before,
        nir_schedule_node *after)
{
   if (!before || !after)
      return;

   assert(before != after);

   if (state->dir == F)
      dag_add_edge(&before->dag, &after->dag, NULL);
   else
      dag_add_edge(&after->dag, &before->dag, NULL);
}
static void
add_read_dep(nir_deps_state *state,
             nir_schedule_node *before,
             nir_schedule_node *after)
{
   add_dep(state, before, after);
}

static void
add_write_dep(nir_deps_state *state,
              nir_schedule_node **before,
              nir_schedule_node *after)
{
   add_dep(state, *before, after);
   *before = after;
}
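/* Note that add_write_dep() both orders @after against the tracked node and
 * installs @after as the new tracked node, so a sequence of writes ends up
 * chained write -> write -> write rather than all fanning out from the
 * first one.
 */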
static bool
nir_schedule_reg_src_deps(nir_src *src, void *in_state)
{
   nir_deps_state *state = in_state;

   if (src->is_ssa)
      return true;

   struct hash_entry *entry = _mesa_hash_table_search(state->reg_map,
                                                      src->reg.reg);
   if (!entry)
      return true;

   nir_schedule_node *dst_n = entry->data;

   nir_schedule_node *src_n = nir_schedule_get_node(state->instr_map,
                                                    src->parent_instr);

   add_dep(state, dst_n, src_n);

   return true;
}
static bool
nir_schedule_reg_dest_deps(nir_dest *dest, void *in_state)
{
   nir_deps_state *state = in_state;

   if (dest->is_ssa)
      return true;

   nir_schedule_node *dest_n = nir_schedule_get_node(state->instr_map,
                                                     dest->reg.parent_instr);

   struct hash_entry *entry = _mesa_hash_table_search(state->reg_map,
                                                      dest->reg.reg);
   if (!entry) {
      _mesa_hash_table_insert(state->reg_map, dest->reg.reg, dest_n);
      return true;
   }

   nir_schedule_node **before = (nir_schedule_node **)&entry->data;

   add_write_dep(state, before, dest_n);

   return true;
}
static bool
nir_schedule_ssa_deps(nir_ssa_def *def, void *in_state)
{
   nir_deps_state *state = in_state;
   nir_schedule_node *def_n = nir_schedule_get_node(state->instr_map,
                                                    def->parent_instr);

   nir_foreach_use(src, def) {
      nir_schedule_node *use_n = nir_schedule_get_node(state->instr_map,
                                                       src->parent_instr);

      add_read_dep(state, def_n, use_n);
   }

   return true;
}
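/* E.g. for a def with two fmul users (instruction choice is illustrative),
 * this adds a read dep from the defining instruction to each fmul, so both
 * users must schedule after the def.
 */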
static void
nir_schedule_intrinsic_deps(nir_deps_state *state,
                            nir_intrinsic_instr *instr)
{
   nir_schedule_node *n = nir_schedule_get_node(state->instr_map,
                                                &instr->instr);

   switch (instr->intrinsic) {
   case nir_intrinsic_load_uniform:
   case nir_intrinsic_load_ubo:
   case nir_intrinsic_load_front_face:
      break;

   case nir_intrinsic_discard:
   case nir_intrinsic_discard_if:
      /* We are adding two dependencies:
       *
       * * An individual one that we could use to add a read_dep while
       *   handling nir_instr_type_tex.
       *
       * * Inclusion in the unknown intrinsic set, as we want discard to be
       *   serialized in the same order relative to intervening stores or
       *   atomic accesses to SSBOs and images.
       */
      add_write_dep(state, &state->discard, n);
      add_write_dep(state, &state->unknown_intrinsic, n);
      break;

   case nir_intrinsic_store_output:
      /* For some non-FS shader stages, or for some hardware, output stores
       * affect the same shared memory as input loads.
       */
      if (state->shader->info.stage != MESA_SHADER_FRAGMENT)
         add_write_dep(state, &state->load_input, n);

      /* Make sure that preceding discards stay before the store_output. */
      add_read_dep(state, state->discard, n);
      break;

   case nir_intrinsic_load_input:
      add_read_dep(state, state->load_input, n);
      break;

   case nir_intrinsic_load_shared:
      /* Don't move load_shared beyond a following store_shared, as it could
       * change the loaded value.
       */
      add_read_dep(state, state->store_shared, n);
      break;

   case nir_intrinsic_store_shared:
      add_write_dep(state, &state->store_shared, n);
      break;

   case nir_intrinsic_control_barrier:
   case nir_intrinsic_memory_barrier_shared:
      add_write_dep(state, &state->store_shared, n);

      /* Serialize against ssbos/atomics/etc. */
      add_write_dep(state, &state->unknown_intrinsic, n);
      break;

   default:
      /* Attempt to handle other intrinsics that we haven't individually
       * categorized by serializing them in the same order relative to each
       * other.
       */
      add_write_dep(state, &state->unknown_intrinsic, n);
      break;
   }
}
/**
 * Common code for dependencies that need to be tracked both forward and
 * backward.
 *
 * This is for things like "all reads of r4 have to happen between the r4
 * writes that surround them".
 */
static void
nir_schedule_calculate_deps(nir_deps_state *state, nir_schedule_node *n)
{
   nir_instr *instr = n->instr;

   /* For NIR SSA defs, we only need to do a single pass of making the uses
    * depend on the def.
    */
   if (state->dir == F)
      nir_foreach_ssa_def(instr, nir_schedule_ssa_deps, state);

   /* For NIR regs, track the last writer in the scheduler state so that we
    * can keep the writes in order and let reads get reordered only between
    * each write.
    */
   nir_foreach_src(instr, nir_schedule_reg_src_deps, state);

   nir_foreach_dest(instr, nir_schedule_reg_dest_deps, state);

   /* Make sure any other instructions keep their positions relative to
    * jumps.
    */
   if (instr->type != nir_instr_type_jump)
      add_read_dep(state, state->jump, n);

   switch (instr->type) {
   case nir_instr_type_ssa_undef:
   case nir_instr_type_load_const:
   case nir_instr_type_alu:
   case nir_instr_type_deref:
      break;

   case nir_instr_type_tex:
      /* Don't move texture ops before a discard, as that could increase
       * memory bandwidth for reading the discarded samples.
       */
      add_read_dep(state, state->discard, n);
      break;

   case nir_instr_type_jump:
      add_write_dep(state, &state->jump, n);
      break;

   case nir_instr_type_call:
      unreachable("Calls should have been lowered");
      break;

   case nir_instr_type_parallel_copy:
      unreachable("Parallel copies should have been lowered");
      break;

   case nir_instr_type_phi:
      unreachable("nir_schedule() should be called after lowering from SSA");
      break;

   case nir_instr_type_intrinsic:
      nir_schedule_intrinsic_deps(state, nir_instr_as_intrinsic(instr));
      break;
   }
}
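/* Worked example of the two passes for a hypothetical register r4:
 *
 *    write r4   (A)
 *    read  r4   (B)   forward pass adds B-after-A
 *    write r4   (C)   forward pass adds C-after-A, reverse pass adds
 *                     C-after-B
 *
 * so B can be reordered freely between A and C but never out of that window.
 */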
static void
calculate_forward_deps(nir_schedule_scoreboard *scoreboard, nir_block *block)
{
   nir_deps_state state = {
      .shader = scoreboard->shader,
      .dir = F,
      .instr_map = scoreboard->instr_map,
      .reg_map = _mesa_pointer_hash_table_create(NULL),
   };

   nir_foreach_instr(instr, block) {
      nir_schedule_node *node = nir_schedule_get_node(scoreboard->instr_map,
                                                      instr);
      nir_schedule_calculate_deps(&state, node);
   }

   ralloc_free(state.reg_map);
}
static void
calculate_reverse_deps(nir_schedule_scoreboard *scoreboard, nir_block *block)
{
   nir_deps_state state = {
      .shader = scoreboard->shader,
      .dir = R,
      .instr_map = scoreboard->instr_map,
      .reg_map = _mesa_pointer_hash_table_create(NULL),
   };

   nir_foreach_instr_reverse(instr, block) {
      nir_schedule_node *node = nir_schedule_get_node(scoreboard->instr_map,
                                                      instr);
      nir_schedule_calculate_deps(&state, node);
   }

   ralloc_free(state.reg_map);
}
typedef struct {
   nir_schedule_scoreboard *scoreboard;
   int regs_freed;
} nir_schedule_regs_freed_state;
static bool
nir_schedule_regs_freed_src_cb(nir_src *src, void *in_state)
{
   nir_schedule_regs_freed_state *state = in_state;
   nir_schedule_scoreboard *scoreboard = state->scoreboard;
   struct set *remaining_uses = nir_schedule_scoreboard_get_src(scoreboard, src);

   if (remaining_uses->entries == 1 &&
       _mesa_set_search(remaining_uses, src->parent_instr)) {
      state->regs_freed += nir_schedule_src_pressure(src);
   }

   return true;
}
static bool
nir_schedule_regs_freed_def_cb(nir_ssa_def *def, void *in_state)
{
   nir_schedule_regs_freed_state *state = in_state;

   state->regs_freed -= nir_schedule_def_pressure(def);

   return true;
}
static bool
nir_schedule_regs_freed_dest_cb(nir_dest *dest, void *in_state)
{
   nir_schedule_regs_freed_state *state = in_state;
   nir_schedule_scoreboard *scoreboard = state->scoreboard;

   if (dest->is_ssa)
      return true;

   nir_register *reg = dest->reg.reg;

   /* Only the first def of a reg counts against register pressure. */
   if (!_mesa_set_search(scoreboard->live_values, reg))
      state->regs_freed -= nir_schedule_dest_pressure(dest);

   return true;
}
static int
nir_schedule_regs_freed(nir_schedule_scoreboard *scoreboard,
                        nir_schedule_node *n)
{
   nir_schedule_regs_freed_state state = {
      .scoreboard = scoreboard,
   };

   nir_foreach_src(n->instr, nir_schedule_regs_freed_src_cb, &state);

   nir_foreach_ssa_def(n->instr, nir_schedule_regs_freed_def_cb, &state);

   nir_foreach_dest(n->instr, nir_schedule_regs_freed_dest_cb, &state);

   return state.regs_freed;
}
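/* Worked example (hypothetical instruction): for "ssa_3 = fadd ssa_1, ssa_2"
 * where this fadd is the last remaining use of both scalar sources, the
 * estimate is +1 +1 for the dying sources and -1 for the new def, i.e. one
 * channel freed on net.
 */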
/**
 * Chooses an instruction to schedule using the Goodman/Hsu (1988) CSP (Code
 * Scheduling for Parallelism) heuristic.
 *
 * Picks an instruction on the critical path that's ready to execute without
 * stalls, if possible, otherwise picks the instruction on the critical path.
 */
static nir_schedule_node *
nir_schedule_choose_instruction_csp(nir_schedule_scoreboard *scoreboard)
{
   nir_schedule_node *chosen = NULL;

   /* Find the leader in the ready (shouldn't-stall) set with the maximum
    * cost.
    */
   list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
      if (scoreboard->time < n->ready_time)
         continue;

      if (!chosen || chosen->max_delay < n->max_delay)
         chosen = n;
   }
   if (chosen) {
      if (debug) {
         fprintf(stderr, "chose (ready): ");
         nir_print_instr(chosen->instr, stderr);
         fprintf(stderr, "\n");
      }

      return chosen;
   }

   /* Otherwise, choose the leader with the maximum cost. */
   list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
      if (!chosen || chosen->max_delay < n->max_delay)
         chosen = n;
   }
   if (debug) {
      fprintf(stderr, "chose (leader): ");
      nir_print_instr(chosen->instr, stderr);
      fprintf(stderr, "\n");
   }

   return chosen;
}
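/* In other words: under CSP we only accept a stall when nothing at all is
 * ready, and ties are always broken toward the longer critical path.
 */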
/**
 * Chooses an instruction to schedule using the Goodman/Hsu (1988) CSR (Code
 * Scheduling for Register pressure) heuristic.
 */
static nir_schedule_node *
nir_schedule_choose_instruction_csr(nir_schedule_scoreboard *scoreboard)
{
   nir_schedule_node *chosen = NULL;

   /* Find a ready inst with regs freed and pick the one with max cost. */
   list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
      if (n->ready_time > scoreboard->time)
         continue;

      int regs_freed = nir_schedule_regs_freed(scoreboard, n);

      if (regs_freed > 0 && (!chosen || chosen->max_delay < n->max_delay)) {
         chosen = n;
      }
   }
   if (chosen) {
      if (debug) {
         fprintf(stderr, "chose (freed+ready): ");
         nir_print_instr(chosen->instr, stderr);
         fprintf(stderr, "\n");
      }

      return chosen;
   }

   /* Find a leader with regs freed and pick the one with max cost. */
   list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
      int regs_freed = nir_schedule_regs_freed(scoreboard, n);

      if (regs_freed > 0 && (!chosen || chosen->max_delay < n->max_delay)) {
         chosen = n;
      }
   }
   if (chosen) {
      if (debug) {
         fprintf(stderr, "chose (regs freed): ");
         nir_print_instr(chosen->instr, stderr);
         fprintf(stderr, "\n");
      }

      return chosen;
   }

   /* Find a partially evaluated path and try to finish it off. */
   list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
      if (n->partially_evaluated_path &&
          (!chosen || chosen->max_delay < n->max_delay)) {
         chosen = n;
      }
   }
   if (chosen) {
      if (debug) {
         fprintf(stderr, "chose (partial path): ");
         nir_print_instr(chosen->instr, stderr);
         fprintf(stderr, "\n");
      }

      return chosen;
   }

   /* Contra the paper, pick a leader with no effect on used regs.  This may
    * open up new opportunities, as otherwise a single-operand instr
    * consuming a value will tend to block us from finding the instruction
    * that frees that value.  This had a massive effect on reducing spilling
    * on V3D.
    *
    * XXX: Should this prioritize ready?
    */
   list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
      if (nir_schedule_regs_freed(scoreboard, n) != 0)
         continue;

      if (!chosen || chosen->max_delay < n->max_delay)
         chosen = n;
   }
   if (chosen) {
      if (debug) {
         fprintf(stderr, "chose (regs no-op): ");
         nir_print_instr(chosen->instr, stderr);
         fprintf(stderr, "\n");
      }

      return chosen;
   }

   /* Pick the max delay of the remaining ready set. */
   list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
      if (n->ready_time > scoreboard->time)
         continue;

      if (!chosen || chosen->max_delay < n->max_delay)
         chosen = n;
   }
   if (chosen) {
      if (debug) {
         fprintf(stderr, "chose (ready max delay): ");
         nir_print_instr(chosen->instr, stderr);
         fprintf(stderr, "\n");
      }

      return chosen;
   }

   /* Pick the max delay of the remaining leaders. */
   list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
      if (!chosen || chosen->max_delay < n->max_delay)
         chosen = n;
   }

   if (debug) {
      fprintf(stderr, "chose (max delay): ");
      nir_print_instr(chosen->instr, stderr);
      fprintf(stderr, "\n");
   }

   return chosen;
}
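/* Summary of the CSR fallback chain above, in priority order: ready and
 * frees regs, frees regs, continues a partially evaluated path,
 * pressure-neutral, ready with max delay, and finally plain max delay.
 * Each stage is tried only when every earlier stage found nothing.
 */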
static void
dump_state(nir_schedule_scoreboard *scoreboard)
{
   list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
      fprintf(stderr, "maxdel %5d ", n->max_delay);
      nir_print_instr(n->instr, stderr);
      fprintf(stderr, "\n");

      util_dynarray_foreach(&n->dag.edges, struct dag_edge, edge) {
         nir_schedule_node *child = (nir_schedule_node *)edge->child;

         fprintf(stderr, " -> (%d parents) ", child->dag.parent_count);
         nir_print_instr(child->instr, stderr);
         fprintf(stderr, "\n");
      }
   }
}
static void
nir_schedule_mark_use(nir_schedule_scoreboard *scoreboard,
                      void *reg_or_def,
                      nir_instr *reg_or_def_parent,
                      int pressure)
{
   /* Make the value live if it's the first time it's been used. */
   if (!_mesa_set_search(scoreboard->live_values, reg_or_def)) {
      _mesa_set_add(scoreboard->live_values, reg_or_def);
      scoreboard->pressure += pressure;
   }

   /* Make the value dead if it's the last remaining use.  Be careful when
    * one instruction uses a value twice to not decrement pressure twice.
    */
   struct set *remaining_uses =
      _mesa_hash_table_search_data(scoreboard->remaining_uses, reg_or_def);
   struct set_entry *entry = _mesa_set_search(remaining_uses,
                                              reg_or_def_parent);
   if (entry) {
      _mesa_set_remove(remaining_uses, entry);

      if (remaining_uses->entries == 0)
         scoreboard->pressure -= pressure;
   }
}
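/* Bookkeeping example (hypothetical vec4 value): the defining instruction is
 * tracked as a "use" of its own value too, so the first of {def, users} to
 * be marked makes the value live (pressure += 4), and pressure drops by 4
 * only once the set of remaining uses has been fully drained.
 */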
static bool
nir_schedule_mark_src_scheduled(nir_src *src, void *state)
{
   nir_schedule_scoreboard *scoreboard = state;
   struct set *remaining_uses = nir_schedule_scoreboard_get_src(scoreboard, src);

   struct set_entry *entry = _mesa_set_search(remaining_uses,
                                              src->parent_instr);
   if (entry) {
      /* Once we've used an SSA value in one instruction, bump the priority
       * of the other uses so the SSA value can get fully consumed.
       *
       * We don't do this for registers, as it would be a hassle and it's
       * unclear if that would help or not.  Also, skip it for constants, as
       * they're often folded as immediates into backend instructions and
       * have many unrelated instructions all referencing the same value (0).
       */
      if (src->is_ssa &&
          src->ssa->parent_instr->type != nir_instr_type_load_const) {
         nir_foreach_use(other_src, src->ssa) {
            if (other_src->parent_instr == src->parent_instr)
               continue;

            nir_schedule_node *n =
               nir_schedule_get_node(scoreboard->instr_map,
                                     other_src->parent_instr);

            if (n && !n->partially_evaluated_path) {
               if (debug) {
                  fprintf(stderr, "  New partially evaluated path: ");
                  nir_print_instr(n->instr, stderr);
                  fprintf(stderr, "\n");
               }

               n->partially_evaluated_path = true;
            }
         }
      }
   }

   nir_schedule_mark_use(scoreboard,
                         src->is_ssa ? (void *)src->ssa : (void *)src->reg.reg,
                         src->parent_instr,
                         nir_schedule_src_pressure(src));

   return true;
}
static bool
nir_schedule_mark_def_scheduled(nir_ssa_def *def, void *state)
{
   nir_schedule_scoreboard *scoreboard = state;

   nir_schedule_mark_use(scoreboard, def, def->parent_instr,
                         nir_schedule_def_pressure(def));

   return true;
}
static bool
nir_schedule_mark_dest_scheduled(nir_dest *dest, void *state)
{
   nir_schedule_scoreboard *scoreboard = state;

   /* SSA defs were handled in nir_schedule_mark_def_scheduled(). */
   if (dest->is_ssa)
      return true;

   /* XXX: This is not actually accurate for regs -- the last use of a reg may
    * have a live interval that extends across control flow.  We should
    * calculate the live ranges of regs, and have scheduler nodes for the CF
    * nodes that also "use" the reg.
    */
   nir_schedule_mark_use(scoreboard, dest->reg.reg,
                         dest->reg.parent_instr,
                         nir_schedule_dest_pressure(dest));

   return true;
}
static void
nir_schedule_mark_node_scheduled(nir_schedule_scoreboard *scoreboard,
                                 nir_schedule_node *n)
{
   nir_foreach_src(n->instr, nir_schedule_mark_src_scheduled, scoreboard);
   nir_foreach_ssa_def(n->instr, nir_schedule_mark_def_scheduled, scoreboard);
   nir_foreach_dest(n->instr, nir_schedule_mark_dest_scheduled, scoreboard);

   util_dynarray_foreach(&n->dag.edges, struct dag_edge, edge) {
      nir_schedule_node *child = (nir_schedule_node *)edge->child;

      child->ready_time = MAX2(child->ready_time,
                               scoreboard->time + n->delay);

      if (child->dag.parent_count == 1) {
         if (debug) {
            fprintf(stderr, "  New DAG head: ");
            nir_print_instr(child->instr, stderr);
            fprintf(stderr, "\n");
         }
      }
   }

   dag_prune_head(scoreboard->dag, &n->dag);

   scoreboard->time = MAX2(n->ready_time, scoreboard->time);
   scoreboard->time++;
}
static void
nir_schedule_instructions(nir_schedule_scoreboard *scoreboard, nir_block *block)
{
   while (!list_is_empty(&scoreboard->dag->heads)) {
      if (debug) {
         fprintf(stderr, "current list:\n");
         dump_state(scoreboard);
      }

      nir_schedule_node *chosen;
      if (scoreboard->pressure < scoreboard->threshold)
         chosen = nir_schedule_choose_instruction_csp(scoreboard);
      else
         chosen = nir_schedule_choose_instruction_csr(scoreboard);

      /* Now that we've scheduled a new instruction, some of its children may
       * be promoted to the list of instructions ready to be scheduled.
       */
      nir_schedule_mark_node_scheduled(scoreboard, chosen);

      /* Move the instruction to the end (so our first chosen instructions
       * are the start of the program).
       */
      exec_node_remove(&chosen->instr->node);
      exec_list_push_tail(&block->instr_list, &chosen->instr->node);
   }

   if (debug)
      fprintf(stderr, "\n");
}
static uint32_t
nir_schedule_get_delay(nir_instr *instr)
{
   switch (instr->type) {
   case nir_instr_type_ssa_undef:
   case nir_instr_type_load_const:
   case nir_instr_type_alu:
   case nir_instr_type_deref:
   case nir_instr_type_jump:
   case nir_instr_type_parallel_copy:
   case nir_instr_type_call:
   case nir_instr_type_phi:
      return 1;

   case nir_instr_type_intrinsic:
      /* XXX: Pick a large number for UBO/SSBO/image/shared loads */
      return 1;

   case nir_instr_type_tex:
      /* Pick some large number to try to fetch textures early and sample
       * them late.
       */
      return 100;
   }

   return 0;
}
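/* These delay values are unitless and only meaningful relative to each
 * other: with 1 vs 100 above, roughly a hundred cheap instructions can be
 * scheduled between a tex and its first consumer before the consumer stops
 * being charged a stall.
 */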
static void
nir_schedule_dag_max_delay_cb(struct dag_node *node, void *state)
{
   nir_schedule_node *n = (nir_schedule_node *)node;
   uint32_t max_delay = 0;

   util_dynarray_foreach(&n->dag.edges, struct dag_edge, edge) {
      nir_schedule_node *child = (nir_schedule_node *)edge->child;
      max_delay = MAX2(child->max_delay, max_delay);
   }

   n->max_delay = MAX2(n->max_delay, max_delay + n->delay);
}
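/* Worked example: for a chain tex (delay 100) -> fmul (delay 1) -> fadd
 * (delay 1) with the fadd as a leaf, the bottom-up traversal yields
 * max_delay(fadd) = 1, max_delay(fmul) = 2, and max_delay(tex) = 102,
 * making the tex's chain the critical path that both choosers prefer.
 */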
static void
nir_schedule_block(nir_schedule_scoreboard *scoreboard, nir_block *block)
{
   void *mem_ctx = ralloc_context(NULL);
   scoreboard->instr_map = _mesa_pointer_hash_table_create(mem_ctx);

   scoreboard->dag = dag_create(mem_ctx);

   nir_foreach_instr(instr, block) {
      nir_schedule_node *n =
         rzalloc(mem_ctx, nir_schedule_node);

      n->instr = instr;
      n->delay = nir_schedule_get_delay(instr);
      dag_init_node(scoreboard->dag, &n->dag);

      _mesa_hash_table_insert(scoreboard->instr_map, instr, n);
   }

   calculate_forward_deps(scoreboard, block);
   calculate_reverse_deps(scoreboard, block);

   dag_traverse_bottom_up(scoreboard->dag, nir_schedule_dag_max_delay_cb, NULL);

   nir_schedule_instructions(scoreboard, block);

   ralloc_free(mem_ctx);
   scoreboard->instr_map = NULL;
}
static bool
nir_schedule_ssa_def_init_scoreboard(nir_ssa_def *def, void *state)
{
   nir_schedule_scoreboard *scoreboard = state;
   struct set *def_uses = _mesa_pointer_set_create(scoreboard);

   _mesa_hash_table_insert(scoreboard->remaining_uses, def, def_uses);

   _mesa_set_add(def_uses, def->parent_instr);

   nir_foreach_use(src, def) {
      _mesa_set_add(def_uses, src->parent_instr);
   }

   /* XXX: Handle if uses */

   return true;
}
static nir_schedule_scoreboard *
nir_schedule_get_scoreboard(nir_shader *shader, int threshold)
{
   nir_schedule_scoreboard *scoreboard = rzalloc(NULL, nir_schedule_scoreboard);

   scoreboard->shader = shader;
   scoreboard->live_values = _mesa_pointer_set_create(scoreboard);
   scoreboard->remaining_uses = _mesa_pointer_hash_table_create(scoreboard);
   scoreboard->threshold = threshold;
   scoreboard->pressure = 0;

   nir_foreach_function(function, shader) {
      nir_foreach_register(reg, &function->impl->registers) {
         struct set *register_uses =
            _mesa_pointer_set_create(scoreboard);

         _mesa_hash_table_insert(scoreboard->remaining_uses, reg, register_uses);

         nir_foreach_use(src, reg) {
            _mesa_set_add(register_uses, src->parent_instr);
         }

         /* XXX: Handle if uses */

         nir_foreach_def(dest, reg) {
            _mesa_set_add(register_uses, dest->reg.parent_instr);
         }
      }

      nir_foreach_block(block, function->impl) {
         nir_foreach_instr(instr, block) {
            nir_foreach_ssa_def(instr, nir_schedule_ssa_def_init_scoreboard,
                                scoreboard);
         }
      }

      /* XXX: We're ignoring if uses, which may prioritize scheduling other
       * uses of the if src even when it doesn't help.  That's not many
       * values, though, so meh.
       */
   }

   return scoreboard;
}
static void
nir_schedule_validate_uses(nir_schedule_scoreboard *scoreboard)
{
   bool any_uses = false;

   hash_table_foreach(scoreboard->remaining_uses, entry) {
      struct set *remaining_uses = entry->data;

      set_foreach(remaining_uses, instr_entry) {
         if (!any_uses) {
            fprintf(stderr, "Tracked uses remain after scheduling.  "
                    "Affected instructions:\n");
            any_uses = true;
         }
         nir_print_instr(instr_entry->key, stderr);
         fprintf(stderr, "\n");
      }
   }

   assert(!any_uses);
}
/**
 * Schedules the NIR instructions to try to decrease stalls (for example,
 * delaying texture reads) while managing register pressure.
 *
 * The threshold represents "number of NIR register/SSA def channels live
 * before switching the scheduling heuristic to reduce register pressure",
 * since most of our GPU architectures are scalar (extending to vector with a
 * flag wouldn't be hard).  This number should be a bit below the number of
 * registers available (counting any that may be occupied by system value
 * payload values, for example), since the heuristic may not always be able
 * to free a register immediately.  How far below the limit to go is up to
 * you to tune.
 */
void
nir_schedule(nir_shader *shader, int threshold)
{
   nir_schedule_scoreboard *scoreboard = nir_schedule_get_scoreboard(shader,
                                                                     threshold);

   if (debug) {
      fprintf(stderr, "NIR shader before scheduling:\n");
      nir_print_shader(shader, stderr);
   }

   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_foreach_block(block, function->impl) {
         nir_schedule_block(scoreboard, block);
      }
   }

   nir_schedule_validate_uses(scoreboard);

   ralloc_free(scoreboard);
}
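/* Hedged usage sketch for a backend (the channel count and the
 * "payload_channels"/"slack" names are hypothetical, not from any driver):
 *
 *    nir_schedule(shader, 64 - payload_channels - slack);
 *
 * where slack leaves the register allocator a few channels of headroom, per
 * the threshold comment above.
 */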