/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */
#include "util/u_math.h"

#include "ir3.h"
/*
 * Instruction Scheduling:
 *
 * A recursive depth based scheduling algo. Recursively find an eligible
 * instruction to schedule from the deepest instruction (recursing through
 * its unscheduled src instructions). Normally this would result in a
 * lot of re-traversal of the same instructions, so we cache results in
 * instr->data (and clear cached results that would no longer be valid
 * after scheduling an instruction).
 *
 * There are a few special cases that need to be handled, since sched
 * is currently independent of register allocation. Usages of address
 * register (a0.x) or predicate register (p0.x) must be serialized. Ie.
 * if you have two pairs of instructions that write the same special
 * register and then read it, then those pairs cannot be interleaved.
 * To solve this, when we are in such a scheduling "critical section",
 * and we encounter a conflicting write to a special register, we try
 * to schedule any remaining instructions that use that value first.
 */
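/* For example (pseudo-asm, purely illustrative of the constraint above),
 * given two address-register write/read pairs:
 *
 *    write a0.x (A)            write a0.x (B)
 *    use a0.x   (reads A)      use a0.x   (reads B)
 *
 * the write (B) must not land between write (A) and its use, otherwise
 * the first reader would see the wrong a0.x value.  The same applies to
 * p0.x producers and consumers.
 */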
struct ir3_sched_ctx {
	struct ir3_block *block;           /* the current block */
	struct list_head depth_list;       /* depth sorted unscheduled instrs */
	struct ir3_instruction *scheduled; /* last scheduled instr XXX remove*/
	struct ir3_instruction *addr;      /* current a0.x user, if any */
	struct ir3_instruction *pred;      /* current p0.x user, if any */
	int live_values;                   /* estimate of current live values */
	bool error;
};
static bool is_sfu_or_mem(struct ir3_instruction *instr)
{
	return is_sfu(instr) || is_mem(instr);
}
static void
unuse_each_src(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr)
{
	struct ir3_instruction *src;

	foreach_ssa_src_n(src, n, instr) {
		if (__is_false_dep(instr, n))
			continue;

		if (instr->block != src->block)
			continue;

		if ((src->opc == OPC_META_FI) || (src->opc == OPC_META_FO)) {
			unuse_each_src(ctx, src);
		} else {
			debug_assert(src->use_count > 0);

			if (--src->use_count == 0) {
				ctx->live_values -= dest_regs(src);
				debug_assert(ctx->live_values >= 0);
			}
		}
	}
}
static void use_instr(struct ir3_instruction *instr);
static void
use_each_src(struct ir3_instruction *instr)
{
	struct ir3_instruction *src;

	foreach_ssa_src_n(src, n, instr) {
		if (__is_false_dep(instr, n))
			continue;

		use_instr(src);
	}
}
static void
use_instr(struct ir3_instruction *instr)
{
	if ((instr->opc == OPC_META_FI) || (instr->opc == OPC_META_FO)) {
		use_each_src(instr);
	} else {
		instr->use_count++;
	}
}
static void
update_live_values(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr)
{
	if ((instr->opc == OPC_META_FI) || (instr->opc == OPC_META_FO))
		return;

	ctx->live_values += dest_regs(instr);
	unuse_each_src(ctx, instr);
}
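/* E.g. scheduling an alu op whose srcs each see their last use here bumps
 * live_values by dest_regs() for the new dest and then drops it by
 * dest_regs() of each src whose use_count reaches zero, for a net decrease.
 * (Just an illustration of the bookkeeping above; the actual sizes come
 * from dest_regs().)
 */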
static void
update_use_count(struct ir3 *ir)
{
	list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
		list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
			instr->use_count = 0;
		}
	}

	list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
		list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
			if ((instr->opc == OPC_META_FI) || (instr->opc == OPC_META_FO))
				continue;

			use_each_src(instr);
		}
	}

	/* Shader outputs are also used:
	 */
	for (unsigned i = 0; i < ir->noutputs; i++) {
		struct ir3_instruction *out = ir->outputs[i];

		if (!out)
			continue;

		use_instr(out);
	}
}
#define NULL_INSTR ((void *)~0)
static void
clear_cache(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr)
{
	list_for_each_entry (struct ir3_instruction, instr2, &ctx->depth_list, node) {
		if ((instr2->data == instr) || (instr2->data == NULL_INSTR) || !instr)
			instr2->data = NULL;
	}
}
static void
schedule(struct ir3_sched_ctx *ctx, struct ir3_instruction *instr)
{
	debug_assert(ctx->block == instr->block);

	/* maybe there is a better way to handle this than just stuffing
	 * a nop.. ideally we'd know about this constraint in the
	 * scheduling and depth calculation..
	 */
	if (ctx->scheduled && is_sfu_or_mem(ctx->scheduled) && is_sfu_or_mem(instr))
		ir3_NOP(ctx->block);

	/* remove from depth list:
	 */
	list_delinit(&instr->node);

	if (writes_addr(instr)) {
		debug_assert(ctx->addr == NULL);
		ctx->addr = instr;
	}

	if (writes_pred(instr)) {
		debug_assert(ctx->pred == NULL);
		ctx->pred = instr;
	}

	instr->flags |= IR3_INSTR_MARK;

	list_addtail(&instr->node, &instr->block->instr_list);
	ctx->scheduled = instr;

	update_live_values(ctx, instr);

	if (writes_addr(instr) || writes_pred(instr) || is_input(instr)) {
		clear_cache(ctx, NULL);
	} else {
		/* invalidate only the necessary entries.. */
		clear_cache(ctx, instr);
	}
}
static struct ir3_instruction *
deepest(struct ir3_instruction **srcs, unsigned nsrcs)
{
	struct ir3_instruction *d = NULL;
	unsigned i = 0, id = 0;

	while ((i < nsrcs) && !(d = srcs[id = i]))
		i++;

	if (!d)
		return NULL;

	for (; i < nsrcs; i++)
		if (srcs[i] && (srcs[i]->depth > d->depth))
			d = srcs[id = i];

	srcs[id] = NULL;

	return d;
}
/**
 * @block: the block to search in, starting from end; in first pass,
 *    this will be the block the instruction would be inserted into
 *    (but has not yet, ie. it only contains already scheduled
 *    instructions). For intra-block scheduling (second pass), this
 *    would be one of the predecessor blocks.
 * @instr: the instruction to search for
 * @maxd: max distance, bail after searching this # of instruction
 *    slots, since it means the instruction we are looking for is
 *    far enough away.
 * @pred: if true, recursively search into predecessor blocks to
 *    find the worst case (shortest) distance (only possible after
 *    individual blocks are all scheduled).
 */
static unsigned
distance(struct ir3_block *block, struct ir3_instruction *instr,
		unsigned maxd, bool pred)
{
	unsigned d = 0;

	list_for_each_entry_rev (struct ir3_instruction, n, &block->instr_list, node) {
		if ((n == instr) || (d >= maxd))
			return d;
		/* NOTE: don't count branch/jump since we don't know yet if they will
		 * be eliminated later in resolve_jumps().. really should do that
		 * earlier so we don't have this constraint.
		 */
		if (is_alu(n) || (is_flow(n) && (n->opc != OPC_JUMP) && (n->opc != OPC_BR)))
			d++;
	}

	/* if coming from a predecessor block, assume it is assigned far
	 * enough away.. we'll fix up later.
	 */
	if (!pred)
		return maxd;

	if (pred && (block->data != block)) {
		/* Search into predecessor blocks, finding the one with the
		 * shortest distance, since that will be the worst case
		 */
		unsigned min = maxd - d;

		/* (ab)use block->data to prevent recursion: */
		block->data = block;

		for (unsigned i = 0; i < block->predecessors_count; i++) {
			unsigned n;

			n = distance(block->predecessors[i], instr, min, pred);

			min = MIN2(min, n);
		}

		block->data = NULL;
		d += min;
	}

	return d;
}
/* calculate delay for specified src: */
static unsigned
delay_calc_srcn(struct ir3_block *block,
		struct ir3_instruction *assigner,
		struct ir3_instruction *consumer,
		unsigned srcn, bool soft, bool pred)
{
	unsigned delay = 0;

	if (is_meta(assigner)) {
		struct ir3_instruction *src;
		foreach_ssa_src(src, assigner) {
			unsigned d;
			d = delay_calc_srcn(block, src, consumer, srcn, soft, pred);
			delay = MAX2(delay, d);
		}
	} else {
		if (soft) {
			if (is_sfu(assigner)) {
				delay = 4;
			} else {
				delay = ir3_delayslots(assigner, consumer, srcn);
			}
		} else {
			delay = ir3_delayslots(assigner, consumer, srcn);
		}
		delay -= distance(block, assigner, delay, pred);
	}

	return delay;
}
/* calculate delay for instruction (maximum of delay for all srcs): */
static unsigned
delay_calc(struct ir3_block *block, struct ir3_instruction *instr,
		bool soft, bool pred)
{
	unsigned delay = 0;
	struct ir3_instruction *src;

	foreach_ssa_src_n(src, i, instr) {
		unsigned d;
		d = delay_calc_srcn(block, src, instr, i, soft, pred);
		delay = MAX2(delay, d);
	}

	return delay;
}
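/* E.g. if an assigner needs N delay slots before a given consumer src
 * (per ir3_delayslots()) and M other instructions have already been
 * scheduled since the assigner (per distance()), only N - M slots remain
 * to be covered, either by scheduling other work or by nop's.  (N and M
 * here are just placeholders for whatever the helpers above return.)
 */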
struct ir3_sched_notes {
	/* there is at least one kill which could be scheduled, except
	 * for unscheduled bary.f's:
	 */
	bool blocked_kill;
	/* there is at least one instruction that could be scheduled,
	 * except for conflicting address/predicate register usage:
	 */
	bool addr_conflict, pred_conflict;
};
static bool is_scheduled(struct ir3_instruction *instr)
{
	return !!(instr->flags & IR3_INSTR_MARK);
}
/* could an instruction be scheduled if specified ssa src was scheduled? */
static bool
could_sched(struct ir3_instruction *instr, struct ir3_instruction *src)
{
	struct ir3_instruction *other_src;
	foreach_ssa_src(other_src, instr) {
		/* if dependency not scheduled, we aren't ready yet: */
		if ((src != other_src) && !is_scheduled(other_src)) {
			return false;
		}
	}
	return true;
}
/* Check if instruction is ok to schedule.  Make sure it is not blocked
 * by use of addr/predicate register, etc.
 */
static bool
check_instr(struct ir3_sched_ctx *ctx, struct ir3_sched_notes *notes,
		struct ir3_instruction *instr)
{
	/* For instructions that write address register we need to
	 * make sure there is at least one instruction that uses the
	 * addr value which is otherwise ready.
	 *
	 * TODO if any instructions use pred register and have other
	 * src args, we would need to do the same for writes_pred()..
	 */
	if (writes_addr(instr)) {
		struct ir3 *ir = instr->block->shader;
		bool ready = false;
		for (unsigned i = 0; (i < ir->indirects_count) && !ready; i++) {
			struct ir3_instruction *indirect = ir->indirects[i];
			if (!indirect)
				continue;
			if (indirect->address != instr)
				continue;
			ready = could_sched(indirect, instr);
		}

		/* nothing could be scheduled, so keep looking: */
		if (!ready)
			return false;
	}

	/* if this is a write to address/predicate register, and that
	 * register is currently in use, we need to defer until it is
	 * free:
	 */
	if (writes_addr(instr) && ctx->addr) {
		debug_assert(ctx->addr != instr);
		notes->addr_conflict = true;
		return false;
	}

	if (writes_pred(instr) && ctx->pred) {
		debug_assert(ctx->pred != instr);
		notes->pred_conflict = true;
		return false;
	}

	/* if the instruction is a kill, we need to ensure *every*
	 * bary.f is scheduled.  The hw seems unhappy if the thread
	 * gets killed before the end-input (ei) flag is hit.
	 *
	 * We could do this by adding each bary.f instruction as
	 * virtual ssa src for the kill instruction.  But we have
	 * fixed length instr->regs[].
	 *
	 * TODO this wouldn't be quite right if we had multiple
	 * basic blocks, if any block was conditional.  We'd need
	 * to schedule the bary.f's outside of any block which
	 * was conditional that contained a kill.. I think..
	 */
	if (is_kill(instr)) {
		struct ir3 *ir = instr->block->shader;

		for (unsigned i = 0; i < ir->baryfs_count; i++) {
			struct ir3_instruction *baryf = ir->baryfs[i];
			if (baryf->flags & IR3_INSTR_UNUSED)
				continue;
			if (!is_scheduled(baryf)) {
				notes->blocked_kill = true;
				return false;
			}
		}
	}

	return true;
}
/* Find the best instruction to schedule from specified instruction or
 * recursively its ssa sources.
 */
static struct ir3_instruction *
find_instr_recursive(struct ir3_sched_ctx *ctx, struct ir3_sched_notes *notes,
		struct ir3_instruction *instr)
{
	struct ir3_instruction *srcs[__ssa_src_cnt(instr)];
	struct ir3_instruction *src;
	unsigned nsrcs = 0;

	if (is_scheduled(instr))
		return NULL;

	/* use instr->data to cache the results of recursing up the
	 * instr src's.  Otherwise the recursive algo can scale quite
	 * badly w/ shader size.  But this takes some care to clear
	 * the cache appropriately when instructions are scheduled.
	 */
	if (instr->data) {
		if (instr->data == NULL_INSTR)
			return NULL;
		return instr->data;
	}

	/* find unscheduled srcs: */
	foreach_ssa_src(src, instr) {
		if (!is_scheduled(src) && (src->block == instr->block)) {
			debug_assert(nsrcs < ARRAY_SIZE(srcs));
			srcs[nsrcs++] = src;
		}
	}

	/* if all our src's are already scheduled: */
	if (nsrcs == 0) {
		if (check_instr(ctx, notes, instr)) {
			instr->data = instr;
			return instr;
		}
		return NULL;
	}

	while ((src = deepest(srcs, nsrcs))) {
		struct ir3_instruction *candidate;

		candidate = find_instr_recursive(ctx, notes, src);
		if (!candidate)
			continue;

		if (check_instr(ctx, notes, candidate)) {
			instr->data = candidate;
			return candidate;
		}
	}

	instr->data = NULL_INSTR;
	return NULL;
}
/* find net change to live values if instruction were scheduled: */
static int
live_effect(struct ir3_instruction *instr)
{
	struct ir3_instruction *src;
	int new_live = dest_regs(instr);
	int old_live = 0;

	foreach_ssa_src_n(src, n, instr) {
		if (__is_false_dep(instr, n))
			continue;

		if (instr->block != src->block)
			continue;

		/* for fanout/split, just pass things along to the real src: */
		if (src->opc == OPC_META_FO)
			src = ssa(src->regs[1]);

		/* for fanin/collect, if this is the last use of *each* src,
		 * then it will decrease the live values, since RA treats
		 * them as a whole:
		 */
		if (src->opc == OPC_META_FI) {
			struct ir3_instruction *src2;
			bool last_use = true;

			foreach_ssa_src(src2, src) {
				if (src2->use_count > 1) {
					last_use = false;
					break;
				}
			}

			if (last_use)
				old_live += dest_regs(src);

		} else {
			debug_assert(src->use_count > 0);

			if (src->use_count == 1) {
				old_live += dest_regs(src);
			}
		}
	}

	return new_live - old_live;
}
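/* E.g. a fanin (collect) src is allocated by RA as one contiguous vector,
 * so its whole footprint only stops being live once *every* component is
 * at its last use; a plain scalar src already stops being live when this
 * instruction is its single remaining use.
 */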
/* find instruction to schedule: */
static struct ir3_instruction *
find_eligible_instr(struct ir3_sched_ctx *ctx, struct ir3_sched_notes *notes,
		bool soft)
{
	struct ir3_instruction *best_instr = NULL;
	int best_rank = INT_MAX;      /* lower is better */
	unsigned deepest = 0;

	/* TODO we'd really rather use the list/array of block outputs.  But we
	 * don't have such a thing.  Recursing *every* instruction in the list
	 * will result in a lot of repeated traversal, since instructions will
	 * get traversed both when they appear as ssa src to a later instruction
	 * as well as where they appear in the depth_list.
	 */
	list_for_each_entry_rev (struct ir3_instruction, instr, &ctx->depth_list, node) {
		struct ir3_instruction *candidate;

		candidate = find_instr_recursive(ctx, notes, instr);
		if (!candidate)
			continue;

		if (is_meta(candidate))
			return candidate;

		deepest = MAX2(deepest, candidate->depth);
	}

	/* traverse the list a second time.. but since we cache the result of
	 * find_instr_recursive() it isn't as bad as it looks.
	 */
	list_for_each_entry_rev (struct ir3_instruction, instr, &ctx->depth_list, node) {
		struct ir3_instruction *candidate;

		candidate = find_instr_recursive(ctx, notes, instr);
		if (!candidate)
			continue;

		/* determine net change to # of live values: */
		int le = live_effect(candidate);

		/* if there is a net increase in # of live values, then apply some
		 * threshold to avoid instructions getting scheduled *too* early
		 * and increasing register pressure.
		 */
		if (le >= 1) {
			unsigned threshold;

			if (ctx->live_values > 4*4) {
				threshold = 4;
			} else {
				threshold = 6;
			}

			/* Filter out any "shallow" instructions which would otherwise
			 * tend to get scheduled too early to fill delay slots even
			 * when they are not needed for a while.  There will probably
			 * be later delay slots that they could just as easily fill.
			 *
			 * A classic case where this comes up is frag shaders that
			 * write a constant value (like 1.0f) to one of the channels
			 * of the output color(s).  Since the mov from immed has no
			 * dependencies, it would otherwise get scheduled early to
			 * fill delay slots, occupying a register until the end of
			 * the program.
			 */
			if ((deepest - candidate->depth) > threshold)
				continue;
		}

		int rank = delay_calc(ctx->block, candidate, soft, false);

		/* if too many live values, prioritize instructions that reduce the
		 * number of live values:
		 */
		if (ctx->live_values > 16*4) {
			rank = le;
		} else if (ctx->live_values > 4*4) {
			rank += le;
		}

		if (rank < best_rank) {
			best_instr = candidate;
			best_rank = rank;
		}
	}

	return best_instr;
}
/* "spill" the address register by remapping any unscheduled
 * instructions which depend on the current address register
 * to a clone of the instruction which wrote the address reg.
 */
static struct ir3_instruction *
split_addr(struct ir3_sched_ctx *ctx)
{
	struct ir3 *ir;
	struct ir3_instruction *new_addr = NULL;
	unsigned i;

	debug_assert(ctx->addr);

	ir = ctx->addr->block->shader;

	for (i = 0; i < ir->indirects_count; i++) {
		struct ir3_instruction *indirect = ir->indirects[i];

		if (!indirect)
			continue;

		/* skip instructions already scheduled: */
		if (is_scheduled(indirect))
			continue;

		/* remap remaining instructions using current addr
		 * to new addr:
		 */
		if (indirect->address == ctx->addr) {
			if (!new_addr) {
				new_addr = ir3_instr_clone(ctx->addr);
				/* original addr is scheduled, but new one isn't: */
				new_addr->flags &= ~IR3_INSTR_MARK;
			}
			ir3_instr_set_address(indirect, new_addr);
		}
	}

	/* all remaining indirects remapped to new addr: */
	ctx->addr = NULL;

	return new_addr;
}
/* "spill" the predicate register by remapping any unscheduled
 * instructions which depend on the current predicate register
 * to a clone of the instruction which wrote the predicate reg.
 */
static struct ir3_instruction *
split_pred(struct ir3_sched_ctx *ctx)
{
	struct ir3 *ir;
	struct ir3_instruction *new_pred = NULL;
	unsigned i;

	debug_assert(ctx->pred);

	ir = ctx->pred->block->shader;

	for (i = 0; i < ir->predicates_count; i++) {
		struct ir3_instruction *predicated = ir->predicates[i];

		/* skip instructions already scheduled: */
		if (is_scheduled(predicated))
			continue;

		/* remap remaining instructions using current pred
		 * to new pred:
		 *
		 * TODO is there ever a case when pred isn't first
		 * (and only) src?
		 */
		if (ssa(predicated->regs[1]) == ctx->pred) {
			if (!new_pred) {
				new_pred = ir3_instr_clone(ctx->pred);
				/* original pred is scheduled, but new one isn't: */
				new_pred->flags &= ~IR3_INSTR_MARK;
			}
			predicated->regs[1]->instr = new_pred;
		}
	}

	/* all remaining predicated remapped to new pred: */
	ctx->pred = NULL;

	return new_pred;
}
static void
sched_block(struct ir3_sched_ctx *ctx, struct ir3_block *block)
{
	struct list_head unscheduled_list;

	ctx->block = block;

	/* addr/pred writes are per-block: */
	ctx->addr = NULL;
	ctx->pred = NULL;

	/* move all instructions to the unscheduled list, and
	 * empty the block's instruction list (to which we will
	 * be appending).
	 */
	list_replace(&block->instr_list, &unscheduled_list);
	list_inithead(&block->instr_list);
	list_inithead(&ctx->depth_list);

	/* first a pre-pass to schedule all meta:input instructions
	 * (which need to appear first so that RA knows the register is
	 * occupied), and move remaining to depth sorted list:
	 */
	list_for_each_entry_safe (struct ir3_instruction, instr, &unscheduled_list, node) {
		if (instr->opc == OPC_META_INPUT) {
			schedule(ctx, instr);
		} else {
			ir3_insert_by_depth(instr, &ctx->depth_list);
		}
	}

	while (!list_empty(&ctx->depth_list)) {
		struct ir3_sched_notes notes = {0};
		struct ir3_instruction *instr;

		instr = find_eligible_instr(ctx, &notes, true);
		if (!instr)
			instr = find_eligible_instr(ctx, &notes, false);

		if (instr) {
			unsigned delay = delay_calc(ctx->block, instr, false, false);

			/* and if we run out of instructions that can be scheduled,
			 * then it is time for nop's:
			 */
			debug_assert(delay <= 6);
			while (delay > 0) {
				ir3_NOP(block);
				delay--;
			}

			schedule(ctx, instr);
		} else {
			struct ir3_instruction *new_instr = NULL;

			/* nothing available to schedule.. if we are blocked on
			 * address/predicate register conflict, then break the
			 * deadlock by cloning the instruction that wrote that
			 * reg:
			 */
			if (notes.addr_conflict) {
				new_instr = split_addr(ctx);
			} else if (notes.pred_conflict) {
				new_instr = split_pred(ctx);
			} else {
				debug_assert(0);
				ctx->error = true;
				return;
			}

			if (new_instr) {
				/* clearing current addr/pred can change what is
				 * available to schedule, so clear cache..
				 */
				clear_cache(ctx, NULL);

				ir3_insert_by_depth(new_instr, &ctx->depth_list);
				/* the original instr that wrote addr/pred may have
				 * originated from a different block:
				 */
				new_instr->block = block;
			}
		}
	}

	/* And lastly, insert branch/jump instructions to take us to
	 * the next block.  Later we'll strip back out the branches
	 * that simply jump to next instruction.
	 */
	if (block->successors[1]) {
		/* if/else, conditional branches to "then" or "else": */
		struct ir3_instruction *br;
		unsigned delay = 6;

		debug_assert(ctx->pred);
		debug_assert(block->condition);

		delay -= distance(ctx->block, ctx->pred, delay, false);

		while (delay > 0) {
			ir3_NOP(block);
			delay--;
		}

		/* create "else" branch first (since "then" block should
		 * frequently/always end up being a fall-thru):
		 */
		br = ir3_BR(block);
		br->cat0.inv = true;
		br->cat0.target = block->successors[1];

		/* NOTE: we have to hard code delay of 6 above, since
		 * we want to insert the nop's before constructing the
		 * branch.  Throw in an assert so we notice if this
		 * ever breaks on future generation:
		 */
		debug_assert(ir3_delayslots(ctx->pred, br, 0) == 6);

		br = ir3_BR(block);
		br->cat0.target = block->successors[0];

	} else if (block->successors[0]) {
		/* otherwise unconditional jump to next block: */
		struct ir3_instruction *jmp;

		jmp = ir3_JUMP(block);
		jmp->cat0.target = block->successors[0];
	}

	/* NOTE: if we kept track of the predecessors, we could do a better
	 * job w/ (jp) flags.. every node w/ > predecessor is a join point.
	 * Note that as we eliminate blocks which contain only an unconditional
	 * jump we probably need to propagate (jp) flag..
	 */
}
/* After scheduling individual blocks, we still could have cases where
 * on one (or more) paths into a block, a value produced by a previous
 * block has too few delay slots to be legal.  We can't deal with this in the
 * first pass, because loops (ie. we can't ensure all predecessor blocks
 * are already scheduled in the first pass).  All we can really do at
 * this point is stuff in extra nop's until things are legal.
 */
static void
sched_intra_block(struct ir3_sched_ctx *ctx, struct ir3_block *block)
{
	unsigned n = 0;

	ctx->block = block;

	list_for_each_entry_safe (struct ir3_instruction, instr, &block->instr_list, node) {
		unsigned delay = 0;

		for (unsigned i = 0; i < block->predecessors_count; i++) {
			unsigned d = delay_calc(block->predecessors[i], instr, false, true);
			delay = MAX2(d, delay);
		}

		while (delay > n) {
			struct ir3_instruction *nop = ir3_NOP(block);

			/* move to before instr: */
			list_delinit(&nop->node);
			list_addtail(&nop->node, &instr->node);

			n++;
		}

		/* we can bail once we hit worst case delay: */
		if (++n > 6)
			break;
	}
}
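/* E.g. if a predecessor block's last instruction produces a value that the
 * first instruction of this block consumes, and that producer still needs
 * a few delay slots, the loop above pads the top of this block with nop's
 * until the distance is covered (bailing once past the worst case of 6).
 */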
int ir3_sched(struct ir3 *ir)
{
	struct ir3_sched_ctx ctx = {0};

	ir3_clear_mark(ir);
	update_use_count(ir);

	list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
		sched_block(&ctx, block);
	}

	list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
		sched_intra_block(&ctx, block);
	}

	if (ctx.error)
		return -1;

	return 0;
}
static unsigned
get_array_id(struct ir3_instruction *instr)
{
	/* The expectation is that there is only a single array
	 * src or dst, ir3_cp should enforce this.
	 */

	for (unsigned i = 0; i < instr->regs_count; i++)
		if (instr->regs[i]->flags & IR3_REG_ARRAY)
			return instr->regs[i]->array.id;

	unreachable("this was unexpected");
}
/* does instruction 'prior' need to be scheduled before 'instr'? */
static bool
depends_on(struct ir3_instruction *instr, struct ir3_instruction *prior)
{
	/* TODO for dependencies that are related to a specific object, ie
	 * a specific SSBO/image/array, we could relax this constraint to
	 * make accesses to unrelated objects not depend on each other (at
	 * least as long as not declared coherent)
	 */
	if (((instr->barrier_class & IR3_BARRIER_EVERYTHING) && prior->barrier_class) ||
			((prior->barrier_class & IR3_BARRIER_EVERYTHING) && instr->barrier_class))
		return true;

	if (instr->barrier_class & prior->barrier_conflict) {
		if (!(instr->barrier_class & ~(IR3_BARRIER_ARRAY_R | IR3_BARRIER_ARRAY_W))) {
			/* if only array barrier, then we can further limit false-deps
			 * by considering the array-id, ie reads/writes to different
			 * arrays do not depend on each other (no aliasing)
			 */
			if (get_array_id(instr) != get_array_id(prior)) {
				return false;
			}
		}

		return true;
	}

	return false;
}
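/* E.g. an array write followed by a read of a *different* array gets no
 * false dependency (different array.id, so no aliasing), while a read and
 * a write touching the same array, or anything vs. a full barrier, does.
 */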
static void
add_barrier_deps(struct ir3_block *block, struct ir3_instruction *instr)
{
	struct list_head *prev = instr->node.prev;
	struct list_head *next = instr->node.next;

	/* add dependencies on previous instructions that must be scheduled
	 * prior to the current instruction
	 */
	while (prev != &block->instr_list) {
		struct ir3_instruction *pi =
			LIST_ENTRY(struct ir3_instruction, prev, node);

		prev = prev->prev;

		if (is_meta(pi))
			continue;

		if (instr->barrier_class == pi->barrier_class) {
			ir3_instr_add_dep(instr, pi);
			continue;
		}

		if (depends_on(instr, pi))
			ir3_instr_add_dep(instr, pi);
	}

	/* add dependencies on this instruction to following instructions
	 * that must be scheduled after the current instruction:
	 */
	while (next != &block->instr_list) {
		struct ir3_instruction *ni =
			LIST_ENTRY(struct ir3_instruction, next, node);

		next = next->next;

		if (is_meta(ni))
			continue;

		if (instr->barrier_class == ni->barrier_class) {
			ir3_instr_add_dep(ni, instr);
			continue;
		}

		if (depends_on(ni, instr))
			ir3_instr_add_dep(ni, instr);
	}
}
/* before scheduling a block, we need to add any necessary false-dependencies
 * to ensure that:
 *
 *  (1) barriers are scheduled in the right order wrt instructions related
 *      to the barrier
 *
 *  (2) reads that come before a write actually get scheduled before the
 *      write
 */
static void
calculate_deps(struct ir3_block *block)
{
	list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
		if (instr->barrier_class) {
			add_barrier_deps(block, instr);
		}
	}
}

void
ir3_sched_add_deps(struct ir3 *ir)
{
	list_for_each_entry (struct ir3_block, block, &ir->block_list, node) {
		calculate_deps(block);
	}
}