nir/schedule: Add a callback for backend-specific dependencies
[mesa.git] / src / compiler / nir / nir_schedule.c
1 /*
2 * Copyright © 2019 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "nir_schedule.h"
25 #include "util/dag.h"
26 #include "util/u_dynarray.h"
27
28 /** @file
29 *
30 * Implements basic-block-level prepass instruction scheduling in NIR to
31 * manage register pressure.
32 *
33 * This is based on the Goodman/Hsu paper (1988, cached copy at
34 * https://people.freedesktop.org/~anholt/scheduling-goodman-hsu.pdf). We
35 * build the DDG (data dependency graph) for NIR (mostly from the NIR def/use
36 * chains for SSA instructions, plus some edges for ordering register writes
37 * vs reads, and some more for ordering intrinsics). Then we pick heads off
38 * of the DDG using their heuristic to emit the NIR instructions back into the
39 * block in their new order.
40 *
41 * The hard case for prepass scheduling on GPUs seems to always be consuming
42 * texture/ubo results. The register pressure heuristic doesn't want to pick
43 * an instr that starts consuming texture results, because it usually won't
44 * be the only use of those results, so scheduling it will increase pressure.
45 *
46 * If you try to force consumption of tex results always, then in a case where
47 * a single sample is used for many outputs, you'll end up picking every other
48 * user and expanding register pressure. The partially_evaluated_path flag
49 * helps tremendously, in that if you happen for whatever reason to pick a
50 * texture sample's output, then you'll try to finish off that sample. Future
51 * work may include doing some local search before locking in a choice, to try
52 * to more reliably find the case where just a few choices going against the
53 * heuristic can manage to free the whole vector.
54 */
55
56 static bool debug;
57
58 /**
59 * Represents a node in the DDG for a NIR instruction.
60 */
61 typedef struct {
62 struct dag_node dag; /* must be first for our u_dynarray_foreach */
63 nir_instr *instr;
64 bool partially_evaluated_path;
65
66 /* Approximate estimate of the delay between starting this instruction and
67 * its results being available.
68 *
69 * Accuracy is not too important, given that we're prepass scheduling here
70 * and just trying to reduce excess dependencies introduced by a register
71 * allocator by stretching out the live intervals of expensive
72 * instructions.
73 */
74 uint32_t delay;
75
76 /* Cost of the maximum-delay path from this node to the leaves. */
77 uint32_t max_delay;
78
79 /* scoreboard->time value when this instruction can be scheduled without
80 * any stalls expected.
81 */
82 uint32_t ready_time;
83 } nir_schedule_node;
84
85 typedef struct {
86 struct dag *dag;
87
88 nir_shader *shader;
89
90 /* Mapping from nir_register * or nir_ssa_def * to a struct set of
91 * instructions remaining to be scheduled using the register.
92 */
93 struct hash_table *remaining_uses;
94
95 /* Map from nir_instr to nir_schedule_node * */
96 struct hash_table *instr_map;
97
98 /* Set of nir_register * or nir_ssa_def * that have had any instruction
99 * scheduled on them.
100 */
101 struct set *live_values;
102
103 /* An abstract approximation of the number of nir_schedule_node->delay
104 * units since the start of the shader.
105 */
106 uint32_t time;
107
108 /* Number of channels currently used by the NIR instructions that have been
109 * scheduled.
110 */
111 int pressure;
112
113 /* Options specified by the backend */
114 const nir_schedule_options *options;
115 } nir_schedule_scoreboard;
116
117 /* When walking the instructions in reverse, we use this flag to swap
118 * before/after in add_dep().
119 */
120 enum direction { F, R };
121
122 struct nir_schedule_class_dep {
123 int klass;
124 nir_schedule_node *node;
125 struct nir_schedule_class_dep *next;
126 };
127
128 typedef struct {
129 nir_schedule_scoreboard *scoreboard;
130
131 /* Map from nir_register to nir_schedule_node * */
132 struct hash_table *reg_map;
133
134 /* Scheduler nodes for the last instruction involved in each class of dependency.
135 */
136 nir_schedule_node *load_input;
137 nir_schedule_node *store_shared;
138 nir_schedule_node *unknown_intrinsic;
139 nir_schedule_node *discard;
140 nir_schedule_node *jump;
141
142 struct nir_schedule_class_dep *class_deps;
143
144 enum direction dir;
145 } nir_deps_state;
146
147 static void *
148 _mesa_hash_table_search_data(struct hash_table *ht, void *key)
149 {
150 struct hash_entry *entry = _mesa_hash_table_search(ht, key);
151 if (!entry)
152 return NULL;
153 return entry->data;
154 }
155
156 static nir_schedule_node *
157 nir_schedule_get_node(struct hash_table *instr_map, nir_instr *instr)
158 {
159 return _mesa_hash_table_search_data(instr_map, instr);
160 }
161
162 static struct set *
163 nir_schedule_scoreboard_get_src(nir_schedule_scoreboard *scoreboard, nir_src *src)
164 {
165 if (src->is_ssa) {
166 return _mesa_hash_table_search_data(scoreboard->remaining_uses, src->ssa);
167 } else {
168 return _mesa_hash_table_search_data(scoreboard->remaining_uses,
169 src->reg.reg);
170 }
171 }
172
173 static int
174 nir_schedule_def_pressure(nir_ssa_def *def)
175 {
176 return def->num_components;
177 }
178
179 static int
180 nir_schedule_src_pressure(nir_src *src)
181 {
182 if (src->is_ssa)
183 return nir_schedule_def_pressure(src->ssa);
184 else
185 return src->reg.reg->num_components;
186 }
187
188 static int
189 nir_schedule_dest_pressure(nir_dest *dest)
190 {
191 if (dest->is_ssa)
192 return nir_schedule_def_pressure(&dest->ssa);
193 else
194 return dest->reg.reg->num_components;
195 }
196
197 /**
198 * Adds a dependency such that @after must appear in the final program after
199 * @before.
200 *
201 * We add @after as a child of @before, so that the DAG heads are the
202 * instructions with no unscheduled dependencies and we schedule top to bottom.
203 */
204 static void
205 add_dep(nir_deps_state *state,
206 nir_schedule_node *before,
207 nir_schedule_node *after)
208 {
209 if (!before || !after)
210 return;
211
212 assert(before != after);
213
214 if (state->dir == F)
215 dag_add_edge(&before->dag, &after->dag, NULL);
216 else
217 dag_add_edge(&after->dag, &before->dag, NULL);
218 }
219
220
221 static void
222 add_read_dep(nir_deps_state *state,
223 nir_schedule_node *before,
224 nir_schedule_node *after)
225 {
226 add_dep(state, before, after);
227 }
228
229 static void
230 add_write_dep(nir_deps_state *state,
231 nir_schedule_node **before,
232 nir_schedule_node *after)
233 {
234 add_dep(state, *before, after);
235 *before = after;
236 }
237
238 static bool
239 nir_schedule_reg_src_deps(nir_src *src, void *in_state)
240 {
241 nir_deps_state *state = in_state;
242
243 if (src->is_ssa)
244 return true;
245
246 struct hash_entry *entry = _mesa_hash_table_search(state->reg_map,
247 src->reg.reg);
248 if (!entry)
249 return true;
250 nir_schedule_node *dst_n = entry->data;
251
252 nir_schedule_node *src_n =
253 nir_schedule_get_node(state->scoreboard->instr_map,
254 src->parent_instr);
255
256 add_dep(state, dst_n, src_n);
257
258 return true;
259 }
260
261 static bool
262 nir_schedule_reg_dest_deps(nir_dest *dest, void *in_state)
263 {
264 nir_deps_state *state = in_state;
265
266 if (dest->is_ssa)
267 return true;
268
269 nir_schedule_node *dest_n =
270 nir_schedule_get_node(state->scoreboard->instr_map,
271 dest->reg.parent_instr);
272
273 struct hash_entry *entry = _mesa_hash_table_search(state->reg_map,
274 dest->reg.reg);
275 if (!entry) {
276 _mesa_hash_table_insert(state->reg_map, dest->reg.reg, dest_n);
277 return true;
278 }
279 nir_schedule_node **before = (nir_schedule_node **)&entry->data;
280
281 add_write_dep(state, before, dest_n);
282
283 return true;
284 }
285
286 static bool
287 nir_schedule_ssa_deps(nir_ssa_def *def, void *in_state)
288 {
289 nir_deps_state *state = in_state;
290 struct hash_table *instr_map = state->scoreboard->instr_map;
291 nir_schedule_node *def_n = nir_schedule_get_node(instr_map, def->parent_instr);
292
293 nir_foreach_use(src, def) {
294 nir_schedule_node *use_n = nir_schedule_get_node(instr_map,
295 src->parent_instr);
296
297 add_read_dep(state, def_n, use_n);
298 }
299
300 return true;
301 }
302
303 static struct nir_schedule_class_dep *
304 nir_schedule_get_class_dep(nir_deps_state *state,
305 int klass)
306 {
307 for (struct nir_schedule_class_dep *class_dep = state->class_deps;
308 class_dep != NULL;
309 class_dep = class_dep->next) {
310 if (class_dep->klass == klass)
311 return class_dep;
312 }
313
314 struct nir_schedule_class_dep *class_dep =
315 ralloc(state->reg_map, struct nir_schedule_class_dep);
316
317 class_dep->klass = klass;
318 class_dep->node = NULL;
319 class_dep->next = state->class_deps;
320
321 state->class_deps = class_dep;
322
323 return class_dep;
324 }
325
326 static void
327 nir_schedule_intrinsic_deps(nir_deps_state *state,
328 nir_intrinsic_instr *instr)
329 {
330 nir_schedule_node *n = nir_schedule_get_node(state->scoreboard->instr_map,
331 &instr->instr);
332 const nir_schedule_options *options = state->scoreboard->options;
333 nir_schedule_dependency dep;
334
335 if (options->intrinsic_cb &&
336 options->intrinsic_cb(instr, &dep, options->intrinsic_cb_data)) {
337 struct nir_schedule_class_dep *class_dep =
338 nir_schedule_get_class_dep(state, dep.klass);
339
340 switch (dep.type) {
341 case NIR_SCHEDULE_READ_DEPENDENCY:
342 add_read_dep(state, class_dep->node, n);
343 break;
344 case NIR_SCHEDULE_WRITE_DEPENDENCY:
345 add_write_dep(state, &class_dep->node, n);
346 break;
347 }
348 }
349
350 switch (instr->intrinsic) {
351 case nir_intrinsic_load_uniform:
352 case nir_intrinsic_load_ubo:
353 case nir_intrinsic_load_front_face:
354 break;
355
356 case nir_intrinsic_discard:
357 case nir_intrinsic_discard_if:
358 /* We are adding two dependencies:
359 *
360 * * An individual one that we can use to add a read_dep while handling
361 * nir_instr_type_tex
362 *
363 * * One on the unknown intrinsic set, as we want discards to be
364 * serialized in the same order relative to intervening stores or
365 * atomic accesses to SSBOs and images
366 */
367 add_write_dep(state, &state->discard, n);
368 add_write_dep(state, &state->unknown_intrinsic, n);
369 break;
370
371 case nir_intrinsic_store_output:
372 /* For some hardware and stages, output stores affect the same shared
373 * memory as input loads.
374 */
375 if ((state->scoreboard->options->stages_with_shared_io_memory &
376 (1 << state->scoreboard->shader->info.stage)))
377 add_write_dep(state, &state->load_input, n);
378
379 /* Make sure that preceding discards stay before the store_output */
380 add_read_dep(state, state->discard, n);
381
382 break;
383
384 case nir_intrinsic_load_input:
385 case nir_intrinsic_load_per_vertex_input:
386 add_read_dep(state, state->load_input, n);
387 break;
388
389 case nir_intrinsic_load_shared:
390 /* Don't move a load_shared beyond a following store_shared, as that could
391 * change the loaded value.
392 */
393 add_read_dep(state, state->store_shared, n);
394 break;
395
396 case nir_intrinsic_store_shared:
397 add_write_dep(state, &state->store_shared, n);
398 break;
399
400 case nir_intrinsic_control_barrier:
401 case nir_intrinsic_memory_barrier_shared:
402 add_write_dep(state, &state->store_shared, n);
403
404 /* Serialize against ssbos/atomics/etc. */
405 add_write_dep(state, &state->unknown_intrinsic, n);
406 break;
407
408 default:
409 /* Attempt to handle other intrinsics that we haven't individually
410 * categorized by serializing them in the same order relative to each
411 * other.
412 */
413 add_write_dep(state, &state->unknown_intrinsic, n);
414 break;
415 }
416 }
417
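/* Example (an illustrative sketch): a backend that needs its own ordering
 * class can supply a callback through nir_schedule_options.  The callback
 * fills in *dep and returns true for intrinsics that take part in a
 * backend-specific dependency class; the intrinsic choices and the
 * DEP_CLASS_TLB value below are hypothetical.
 *
 *    enum { DEP_CLASS_TLB };
 *
 *    static bool
 *    backend_intrinsic_cb(nir_intrinsic_instr *intr,
 *                         nir_schedule_dependency *dep,
 *                         void *data)
 *    {
 *       switch (intr->intrinsic) {
 *       case nir_intrinsic_load_output:
 *          // Reads back current output values: order it against writers.
 *          dep->klass = DEP_CLASS_TLB;
 *          dep->type = NIR_SCHEDULE_READ_DEPENDENCY;
 *          return true;
 *       case nir_intrinsic_store_output:
 *          // Writes new output values: keep writes (and prior reads) ordered.
 *          dep->klass = DEP_CLASS_TLB;
 *          dep->type = NIR_SCHEDULE_WRITE_DEPENDENCY;
 *          return true;
 *       default:
 *          return false;
 *       }
 *    }
 */
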
418 /**
419 * Common code for dependencies that need to be tracked both forward and
420 * backward.
421 *
422 * This is for things like "all reads of r4 have to happen between the r4
423 * writes that surround them".
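 *
 * For example (an illustrative sketch), given accesses to a NIR register r4:
 *
 *    r4 = fadd a, b      (write W1)
 *    c  = fmul r4, d     (read  R1)
 *    r4 = fneg e         (write W2)
 *
 * the forward pass makes R1 and W2 depend on W1, and the reverse pass makes
 * W2 depend on R1, so the read stays between the writes that surround it.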
424 */
425 static void
426 nir_schedule_calculate_deps(nir_deps_state *state, nir_schedule_node *n)
427 {
428 nir_instr *instr = n->instr;
429
430 /* For NIR SSA defs, we only need to do a single pass of making the uses
431 * depend on the def.
432 */
433 if (state->dir == F)
434 nir_foreach_ssa_def(instr, nir_schedule_ssa_deps, state);
435
436 /* For NIR regs, track the last writer in the scheduler state so that we
437 * can keep the writes in order and let reads get reordered only between
438 * each write.
439 */
440 nir_foreach_src(instr, nir_schedule_reg_src_deps, state);
441
442 nir_foreach_dest(instr, nir_schedule_reg_dest_deps, state);
443
444 /* Make sure any other instructions keep their positions relative to
445 * jumps.
446 */
447 if (instr->type != nir_instr_type_jump)
448 add_read_dep(state, state->jump, n);
449
450 switch (instr->type) {
451 case nir_instr_type_ssa_undef:
452 case nir_instr_type_load_const:
453 case nir_instr_type_alu:
454 case nir_instr_type_deref:
455 break;
456
457 case nir_instr_type_tex:
458 /* Don't move texture ops before a discard, as that could increase
459 * memory bandwidth for reading the discarded samples.
460 */
461 add_read_dep(state, state->discard, n);
462 break;
463
464 case nir_instr_type_jump:
465 add_write_dep(state, &state->jump, n);
466 break;
467
468 case nir_instr_type_call:
469 unreachable("Calls should have been lowered");
470 break;
471
472 case nir_instr_type_parallel_copy:
473 unreachable("Parallel copies should have been lowered");
474 break;
475
476 case nir_instr_type_phi:
477 unreachable("nir_schedule() should be called after lowering from SSA");
478 break;
479
480 case nir_instr_type_intrinsic:
481 nir_schedule_intrinsic_deps(state, nir_instr_as_intrinsic(instr));
482 break;
483 }
484 }
485
486 static void
487 calculate_forward_deps(nir_schedule_scoreboard *scoreboard, nir_block *block)
488 {
489 nir_deps_state state = {
490 .scoreboard = scoreboard,
491 .dir = F,
492 .reg_map = _mesa_pointer_hash_table_create(NULL),
493 };
494
495 nir_foreach_instr(instr, block) {
496 nir_schedule_node *node = nir_schedule_get_node(scoreboard->instr_map,
497 instr);
498 nir_schedule_calculate_deps(&state, node);
499 }
500
501 ralloc_free(state.reg_map);
502 }
503
504 static void
505 calculate_reverse_deps(nir_schedule_scoreboard *scoreboard, nir_block *block)
506 {
507 nir_deps_state state = {
508 .scoreboard = scoreboard,
509 .dir = R,
510 .reg_map = _mesa_pointer_hash_table_create(NULL),
511 };
512
513 nir_foreach_instr_reverse(instr, block) {
514 nir_schedule_node *node = nir_schedule_get_node(scoreboard->instr_map,
515 instr);
516 nir_schedule_calculate_deps(&state, node);
517 }
518
519 ralloc_free(state.reg_map);
520 }
521
522 typedef struct {
523 nir_schedule_scoreboard *scoreboard;
524 int regs_freed;
525 } nir_schedule_regs_freed_state;
526
527 static bool
528 nir_schedule_regs_freed_src_cb(nir_src *src, void *in_state)
529 {
530 nir_schedule_regs_freed_state *state = in_state;
531 nir_schedule_scoreboard *scoreboard = state->scoreboard;
532 struct set *remaining_uses = nir_schedule_scoreboard_get_src(scoreboard, src);
533
534 if (remaining_uses->entries == 1 &&
535 _mesa_set_search(remaining_uses, src->parent_instr)) {
536 state->regs_freed += nir_schedule_src_pressure(src);
537 }
538
539 return true;
540 }
541
542 static bool
543 nir_schedule_regs_freed_def_cb(nir_ssa_def *def, void *in_state)
544 {
545 nir_schedule_regs_freed_state *state = in_state;
546
547 state->regs_freed -= nir_schedule_def_pressure(def);
548
549 return true;
550 }
551
552 static bool
553 nir_schedule_regs_freed_dest_cb(nir_dest *dest, void *in_state)
554 {
555 nir_schedule_regs_freed_state *state = in_state;
556 nir_schedule_scoreboard *scoreboard = state->scoreboard;
557
558 if (dest->is_ssa)
559 return true;
560
561 nir_register *reg = dest->reg.reg;
562
563 /* Only the first def of a reg counts against register pressure. */
564 if (!_mesa_set_search(scoreboard->live_values, reg))
565 state->regs_freed -= nir_schedule_dest_pressure(dest);
566
567 return true;
568 }
569
570 static int
571 nir_schedule_regs_freed(nir_schedule_scoreboard *scoreboard, nir_schedule_node *n)
572 {
573 nir_schedule_regs_freed_state state = {
574 .scoreboard = scoreboard,
575 };
576
577 nir_foreach_src(n->instr, nir_schedule_regs_freed_src_cb, &state);
578
579 nir_foreach_ssa_def(n->instr, nir_schedule_regs_freed_def_cb, &state);
580
581 nir_foreach_dest(n->instr, nir_schedule_regs_freed_dest_cb, &state);
582
583 return state.regs_freed;
584 }
585
586 /**
587 * Chooses an instruction to schedule using the Goodman/Hsu (1988) CSP (Code
588 * Scheduling for Parallelism) heuristic.
589 *
590 * Picks the instruction on the critical path that's ready to execute without
591 * stalls, if possible; otherwise picks the instruction on the critical path.
592 */
593 static nir_schedule_node *
594 nir_schedule_choose_instruction_csp(nir_schedule_scoreboard *scoreboard)
595 {
596 nir_schedule_node *chosen = NULL;
597
598 /* Find the leader in the ready (shouldn't-stall) set with the maximum
599 * cost.
600 */
601 list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
602 if (scoreboard->time < n->ready_time)
603 continue;
604
605 if (!chosen || chosen->max_delay < n->max_delay)
606 chosen = n;
607 }
608 if (chosen) {
609 if (debug) {
610 fprintf(stderr, "chose (ready): ");
611 nir_print_instr(chosen->instr, stderr);
612 fprintf(stderr, "\n");
613 }
614
615 return chosen;
616 }
617
618 /* Otherwise, choose the leader with the maximum cost. */
619 list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
620 if (!chosen || chosen->max_delay < n->max_delay)
621 chosen = n;
622 }
623 if (debug) {
624 fprintf(stderr, "chose (leader): ");
625 nir_print_instr(chosen->instr, stderr);
626 fprintf(stderr, "\n");
627 }
628
629 return chosen;
630 }
631
632 /**
633 * Chooses an instruction to schedule using the Goodman/Hsu (1988) CSR (Code
634 * Scheduling for Register pressure) heuristic.
635 */
636 static nir_schedule_node *
637 nir_schedule_choose_instruction_csr(nir_schedule_scoreboard *scoreboard)
638 {
639 nir_schedule_node *chosen = NULL;
640
641 /* Find a ready inst with regs freed and pick the one with max cost. */
642 list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
643 if (n->ready_time > scoreboard->time)
644 continue;
645
646 int regs_freed = nir_schedule_regs_freed(scoreboard, n);
647
648 if (regs_freed > 0 && (!chosen || chosen->max_delay < n->max_delay)) {
649 chosen = n;
650 }
651 }
652 if (chosen) {
653 if (debug) {
654 fprintf(stderr, "chose (freed+ready): ");
655 nir_print_instr(chosen->instr, stderr);
656 fprintf(stderr, "\n");
657 }
658
659 return chosen;
660 }
661
662 /* Find a leader with regs freed and pick the one with max cost. */
663 list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
664 int regs_freed = nir_schedule_regs_freed(scoreboard, n);
665
666 if (regs_freed > 0 && (!chosen || chosen->max_delay < n->max_delay)) {
667 chosen = n;
668 }
669 }
670 if (chosen) {
671 if (debug) {
672 fprintf(stderr, "chose (regs freed): ");
673 nir_print_instr(chosen->instr, stderr);
674 fprintf(stderr, "\n");
675 }
676
677 return chosen;
678 }
679
680 /* Find a partially evaluated path and try to finish it off */
681 list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
682 if (n->partially_evaluated_path &&
683 (!chosen || chosen->max_delay < n->max_delay)) {
684 chosen = n;
685 }
686 }
687 if (chosen) {
688 if (debug) {
689 fprintf(stderr, "chose (partial path): ");
690 nir_print_instr(chosen->instr, stderr);
691 fprintf(stderr, "\n");
692 }
693
694 return chosen;
695 }
696
697 /* Contra the paper, pick a leader with no effect on used regs. This may
698 * open up new opportunities, as otherwise a single-operand instr consuming
699 * a value will tend to block us from ever freeing that value. This had a
700 * massive effect on reducing spilling on V3D.
701 *
702 * XXX: Should this prioritize ready?
703 */
704 list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
705 if (nir_schedule_regs_freed(scoreboard, n) != 0)
706 continue;
707
708 if (!chosen || chosen->max_delay < n->max_delay)
709 chosen = n;
710 }
711 if (chosen) {
712 if (debug) {
713 fprintf(stderr, "chose (regs no-op): ");
714 nir_print_instr(chosen->instr, stderr);
715 fprintf(stderr, "\n");
716 }
717
718 return chosen;
719 }
720
721 /* Pick the max delay of the remaining ready set. */
722 list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
723 if (n->ready_time > scoreboard->time)
724 continue;
725 if (!chosen || chosen->max_delay < n->max_delay)
726 chosen = n;
727 }
728 if (chosen) {
729 if (debug) {
730 fprintf(stderr, "chose (ready max delay): ");
731 nir_print_instr(chosen->instr, stderr);
732 fprintf(stderr, "\n");
733 }
734 return chosen;
735 }
736
737 /* Pick the max delay of the remaining leaders. */
738 list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
739 if (!chosen || chosen->max_delay < n->max_delay)
740 chosen = n;
741 }
742
743 if (debug) {
744 fprintf(stderr, "chose (max delay): ");
745 nir_print_instr(chosen->instr, stderr);
746 fprintf(stderr, "\n");
747 }
748
749 return chosen;
750 }
751
752 static void
753 dump_state(nir_schedule_scoreboard *scoreboard)
754 {
755 list_for_each_entry(nir_schedule_node, n, &scoreboard->dag->heads, dag.link) {
756 fprintf(stderr, "maxdel %5d ", n->max_delay);
757 nir_print_instr(n->instr, stderr);
758 fprintf(stderr, "\n");
759
760 util_dynarray_foreach(&n->dag.edges, struct dag_edge, edge) {
761 nir_schedule_node *child = (nir_schedule_node *)edge->child;
762
763 fprintf(stderr, " -> (%d parents) ", child->dag.parent_count);
764 nir_print_instr(child->instr, stderr);
765 fprintf(stderr, "\n");
766 }
767 }
768 }
769
770 static void
771 nir_schedule_mark_use(nir_schedule_scoreboard *scoreboard,
772 void *reg_or_def,
773 nir_instr *reg_or_def_parent,
774 int pressure)
775 {
776 /* Make the value live if it's the first time it's been used. */
777 if (!_mesa_set_search(scoreboard->live_values, reg_or_def)) {
778 _mesa_set_add(scoreboard->live_values, reg_or_def);
779 scoreboard->pressure += pressure;
780 }
781
782 /* Make the value dead if this is its last remaining use. Be careful not to
783 * decrement pressure twice when one instruction uses a value twice.
784 */
785 struct set *remaining_uses =
786 _mesa_hash_table_search_data(scoreboard->remaining_uses, reg_or_def);
787 struct set_entry *entry = _mesa_set_search(remaining_uses, reg_or_def_parent);
788 if (entry) {
789 _mesa_set_remove(remaining_uses, entry);
790
791 if (remaining_uses->entries == 0)
792 scoreboard->pressure -= pressure;
793 }
794 }
795
796 static bool
797 nir_schedule_mark_src_scheduled(nir_src *src, void *state)
798 {
799 nir_schedule_scoreboard *scoreboard = state;
800 struct set *remaining_uses = nir_schedule_scoreboard_get_src(scoreboard, src);
801
802 struct set_entry *entry = _mesa_set_search(remaining_uses,
803 src->parent_instr);
804 if (entry) {
805 /* Once we've used an SSA value in one instruction, bump the priority of
806 * the other uses so the SSA value can get fully consumed.
807 *
808 * We don't do this for registers, since it would be a hassle and it's
809 * unclear if it would help or not. Also, skip it for constants, as
810 * they're often folded as immediates into backend instructions and have
811 * many unrelated instructions all referencing the same value (0).
812 */
813 if (src->is_ssa &&
814 src->ssa->parent_instr->type != nir_instr_type_load_const) {
815 nir_foreach_use(other_src, src->ssa) {
816 if (other_src->parent_instr == src->parent_instr)
817 continue;
818
819 nir_schedule_node *n =
820 nir_schedule_get_node(scoreboard->instr_map,
821 other_src->parent_instr);
822
823 if (n && !n->partially_evaluated_path) {
824 if (debug) {
825 fprintf(stderr, " New partially evaluated path: ");
826 nir_print_instr(n->instr, stderr);
827 fprintf(stderr, "\n");
828 }
829
830 n->partially_evaluated_path = true;
831 }
832 }
833 }
834 }
835
836 nir_schedule_mark_use(scoreboard,
837 src->is_ssa ? (void *)src->ssa : (void *)src->reg.reg,
838 src->parent_instr,
839 nir_schedule_src_pressure(src));
840
841 return true;
842 }
843
844 static bool
845 nir_schedule_mark_def_scheduled(nir_ssa_def *def, void *state)
846 {
847 nir_schedule_scoreboard *scoreboard = state;
848
849 nir_schedule_mark_use(scoreboard, def, def->parent_instr,
850 nir_schedule_def_pressure(def));
851
852 return true;
853 }
854
855 static bool
856 nir_schedule_mark_dest_scheduled(nir_dest *dest, void *state)
857 {
858 nir_schedule_scoreboard *scoreboard = state;
859
860 /* SSA defs were handled in nir_schedule_mark_def_scheduled()
861 */
862 if (dest->is_ssa)
863 return true;
864
865 /* XXX: This is not actually accurate for regs -- the last use of a reg may
866 * have a live interval that extends across control flow. We should
867 * calculate the live ranges of regs, and have scheduler nodes for the CF
868 * nodes that also "use" the reg.
869 */
870 nir_schedule_mark_use(scoreboard, dest->reg.reg,
871 dest->reg.parent_instr,
872 nir_schedule_dest_pressure(dest));
873
874 return true;
875 }
876
877 static void
878 nir_schedule_mark_node_scheduled(nir_schedule_scoreboard *scoreboard,
879 nir_schedule_node *n)
880 {
881 nir_foreach_src(n->instr, nir_schedule_mark_src_scheduled, scoreboard);
882 nir_foreach_ssa_def(n->instr, nir_schedule_mark_def_scheduled, scoreboard);
883 nir_foreach_dest(n->instr, nir_schedule_mark_dest_scheduled, scoreboard);
884
885 util_dynarray_foreach(&n->dag.edges, struct dag_edge, edge) {
886 nir_schedule_node *child = (nir_schedule_node *)edge->child;
887
888 child->ready_time = MAX2(child->ready_time,
889 scoreboard->time + n->delay);
890
891 if (child->dag.parent_count == 1) {
892 if (debug) {
893 fprintf(stderr, " New DAG head: ");
894 nir_print_instr(child->instr, stderr);
895 fprintf(stderr, "\n");
896 }
897 }
898 }
899
900 dag_prune_head(scoreboard->dag, &n->dag);
901
902 scoreboard->time = MAX2(n->ready_time, scoreboard->time);
903 scoreboard->time++;
904 }
905
906 static void
907 nir_schedule_instructions(nir_schedule_scoreboard *scoreboard, nir_block *block)
908 {
909 while (!list_is_empty(&scoreboard->dag->heads)) {
910 if (debug) {
911 fprintf(stderr, "current list:\n");
912 dump_state(scoreboard);
913 }
914
915 nir_schedule_node *chosen;
916 if (scoreboard->pressure < scoreboard->options->threshold)
917 chosen = nir_schedule_choose_instruction_csp(scoreboard);
918 else
919 chosen = nir_schedule_choose_instruction_csr(scoreboard);
920
921 /* Now that we've scheduled a new instruction, some of its children may
922 * be promoted to the list of instructions ready to be scheduled.
923 */
924 nir_schedule_mark_node_scheduled(scoreboard, chosen);
925
926 /* Move the instruction to the end (so our first chosen instructions are
927 * the start of the program).
928 */
929 exec_node_remove(&chosen->instr->node);
930 exec_list_push_tail(&block->instr_list, &chosen->instr->node);
931
932 if (debug)
933 fprintf(stderr, "\n");
934 }
935 }
936
937 static uint32_t
938 nir_schedule_get_delay(nir_instr *instr)
939 {
940 switch (instr->type) {
941 case nir_instr_type_ssa_undef:
942 case nir_instr_type_load_const:
943 case nir_instr_type_alu:
944 case nir_instr_type_deref:
945 case nir_instr_type_jump:
946 case nir_instr_type_parallel_copy:
947 case nir_instr_type_call:
948 case nir_instr_type_phi:
949 return 1;
950
951 case nir_instr_type_intrinsic:
952 /* XXX: Pick a large number for UBO/SSBO/image/shared loads */
953 return 1;
954
955 case nir_instr_type_tex:
956 /* Pick some large number to try to fetch textures early and sample them
957 * late.
958 */
959 return 100;
960 }
961
962 return 0;
963 }
964
965 static void
966 nir_schedule_dag_max_delay_cb(struct dag_node *node, void *state)
967 {
968 nir_schedule_node *n = (nir_schedule_node *)node;
969 uint32_t max_delay = 0;
970
971 util_dynarray_foreach(&n->dag.edges, struct dag_edge, edge) {
972 nir_schedule_node *child = (nir_schedule_node *)edge->child;
973 max_delay = MAX2(child->max_delay, max_delay);
974 }
975
976 n->max_delay = MAX2(n->max_delay, max_delay + n->delay);
977 }
978
979 static void
980 nir_schedule_block(nir_schedule_scoreboard *scoreboard, nir_block *block)
981 {
982 void *mem_ctx = ralloc_context(NULL);
983 scoreboard->instr_map = _mesa_pointer_hash_table_create(mem_ctx);
984
985 scoreboard->dag = dag_create(mem_ctx);
986
987 nir_foreach_instr(instr, block) {
988 nir_schedule_node *n =
989 rzalloc(mem_ctx, nir_schedule_node);
990
991 n->instr = instr;
992 n->delay = nir_schedule_get_delay(instr);
993 dag_init_node(scoreboard->dag, &n->dag);
994
995 _mesa_hash_table_insert(scoreboard->instr_map, instr, n);
996 }
997
998 calculate_forward_deps(scoreboard, block);
999 calculate_reverse_deps(scoreboard, block);
1000
1001 dag_traverse_bottom_up(scoreboard->dag, nir_schedule_dag_max_delay_cb, NULL);
1002
1003 nir_schedule_instructions(scoreboard, block);
1004
1005 ralloc_free(mem_ctx);
1006 scoreboard->instr_map = NULL;
1007 }
1008
1009 static bool
1010 nir_schedule_ssa_def_init_scoreboard(nir_ssa_def *def, void *state)
1011 {
1012 nir_schedule_scoreboard *scoreboard = state;
1013 struct set *def_uses = _mesa_pointer_set_create(scoreboard);
1014
1015 _mesa_hash_table_insert(scoreboard->remaining_uses, def, def_uses);
1016
1017 _mesa_set_add(def_uses, def->parent_instr);
1018
1019 nir_foreach_use(src, def) {
1020 _mesa_set_add(def_uses, src->parent_instr);
1021 }
1022
1023 /* XXX: Handle if uses */
1024
1025 return true;
1026 }
1027
1028 static nir_schedule_scoreboard *
1029 nir_schedule_get_scoreboard(nir_shader *shader,
1030 const nir_schedule_options *options)
1031 {
1032 nir_schedule_scoreboard *scoreboard = rzalloc(NULL, nir_schedule_scoreboard);
1033
1034 scoreboard->shader = shader;
1035 scoreboard->live_values = _mesa_pointer_set_create(scoreboard);
1036 scoreboard->remaining_uses = _mesa_pointer_hash_table_create(scoreboard);
1037 scoreboard->options = options;
1038 scoreboard->pressure = 0;
1039
1040 nir_foreach_function(function, shader) {
1041 nir_foreach_register(reg, &function->impl->registers) {
1042 struct set *register_uses =
1043 _mesa_pointer_set_create(scoreboard);
1044
1045 _mesa_hash_table_insert(scoreboard->remaining_uses, reg, register_uses);
1046
1047 nir_foreach_use(src, reg) {
1048 _mesa_set_add(register_uses, src->parent_instr);
1049 }
1050
1051 /* XXX: Handle if uses */
1052
1053 nir_foreach_def(dest, reg) {
1054 _mesa_set_add(register_uses, dest->reg.parent_instr);
1055 }
1056 }
1057
1058 nir_foreach_block(block, function->impl) {
1059 nir_foreach_instr(instr, block) {
1060 nir_foreach_ssa_def(instr, nir_schedule_ssa_def_init_scoreboard,
1061 scoreboard);
1062 }
1063
1064 /* XXX: We're ignoring if uses, which may prioritize scheduling other
1065 * uses of the if src even when it doesn't help. That's not many
1066 * values, though, so meh.
1067 */
1068 }
1069 }
1070
1071 return scoreboard;
1072 }
1073
1074 static void
1075 nir_schedule_validate_uses(nir_schedule_scoreboard *scoreboard)
1076 {
1077 #ifdef NDEBUG
1078 return;
1079 #endif
1080
1081 bool any_uses = false;
1082
1083 hash_table_foreach(scoreboard->remaining_uses, entry) {
1084 struct set *remaining_uses = entry->data;
1085
1086 set_foreach(remaining_uses, instr_entry) {
1087 if (!any_uses) {
1088 fprintf(stderr, "Tracked uses remain after scheduling. "
1089 "Affected instructions: \n");
1090 any_uses = true;
1091 }
1092 nir_print_instr(instr_entry->key, stderr);
1093 fprintf(stderr, "\n");
1094 }
1095 }
1096
1097 assert(!any_uses);
1098 }
1099
1100 /**
1101 * Schedules the NIR instructions to try to decrease stalls (for example,
1102 * delaying texture reads) while managing register pressure.
1103 *
1104 * The threshold represents "number of NIR register/SSA def channels live
1105 * before switching the scheduling heuristic to reduce register pressure",
1106 * since most of our GPU architectures are scalar (extending to vector with a
1107 * flag wouldn't be hard). This number should be a bit below the number of
1108 * registers available (after accounting for any occupied by system value
1109 * payloads, for example), since the heuristic may not always be able to
1110 * free a register immediately. The amount below the limit is up to you to
1111 * tune.
1112 */
1113 void
1114 nir_schedule(nir_shader *shader,
1115 const nir_schedule_options *options)
1116 {
1117 nir_schedule_scoreboard *scoreboard = nir_schedule_get_scoreboard(shader,
1118 options);
1119
1120 if (debug) {
1121 fprintf(stderr, "NIR shader before scheduling:\n");
1122 nir_print_shader(shader, stderr);
1123 }
1124
1125 nir_foreach_function(function, shader) {
1126 if (!function->impl)
1127 continue;
1128
1129 nir_foreach_block(block, function->impl) {
1130 nir_schedule_block(scoreboard, block);
1131 }
1132 }
1133
1134 nir_schedule_validate_uses(scoreboard);
1135
1136 ralloc_free(scoreboard);
1137 }
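
/* Example (an illustrative sketch): a backend might invoke the scheduler
 * along these lines.  The threshold value, the stage mask, and the
 * backend_intrinsic_cb/backend_compile_state names are placeholders for
 * whatever the backend actually uses; only the option field names come from
 * nir_schedule_options as consumed above.
 *
 *    const nir_schedule_options schedule_options = {
 *       .threshold = 64,
 *       .stages_with_shared_io_memory = (1 << MESA_SHADER_VERTEX) |
 *                                       (1 << MESA_SHADER_GEOMETRY),
 *       .intrinsic_cb = backend_intrinsic_cb,
 *       .intrinsic_cb_data = backend_compile_state,
 *    };
 *
 *    nir_schedule(shader, &schedule_options);
 */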