/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "util/u_math.h"

#include "ir3.h"

/*
 * Instruction Scheduling:
 *
 * Using the depth sorted list from the depth pass, attempt to recursively
 * schedule the deepest unscheduled path.  The first instruction that cannot
 * be scheduled returns the number of delay slots it requires, at which
 * point we return back up to the top and attempt to schedule by the next
 * highest depth.  After a sufficient number of instructions have been
 * scheduled, return back to the beginning of the list and start again.  If
 * you reach the end of the depth sorted list without being able to insert
 * any instruction, insert nop's.  Repeat until there are no more
 * unscheduled instructions.
 *
 * There are a few special cases that need to be handled, since sched
 * is currently independent of register allocation.  Usages of the address
 * register (a0.x) or predicate register (p0.x) must be serialized.  Ie.
 * if you have two pairs of instructions that write the same special
 * register and then read it, those pairs cannot be interleaved.
 * To solve this, when we are in such a scheduling "critical section",
 * and we encounter a conflicting write to a special register, we try
 * to schedule any remaining instructions that use that value first.
 */
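
/* For example (an illustrative sketch, not actual ir3 assembly), given two
 * producer/consumer pairs that both go through a0.x:
 *
 *    write0 -> a0.x ; use0(a0.x) ; write1 -> a0.x ; use1(a0.x)
 *
 * is a legal serialization, but an interleaving along the lines of:
 *
 *    write0 -> a0.x ; write1 -> a0.x ; use0(a0.x) ; use1(a0.x)
 *
 * would leave use0 observing write1's value, since there is only a single
 * physical a0.x register.
 */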

struct ir3_sched_ctx {
	struct ir3_instruction *scheduled; /* last scheduled instr */
	struct ir3_instruction *addr;      /* current a0.x user, if any */
	struct ir3_instruction *pred;      /* current p0.x user, if any */
	unsigned cnt;                      /* running count of scheduled instrs (incl. nops) */
	bool error;                        /* set when scheduling gets stuck */
};

static struct ir3_instruction *
deepest(struct ir3_instruction **srcs, unsigned nsrcs)
{
	struct ir3_instruction *d = NULL;
	unsigned i = 0, id = 0;

	/* skip over leading NULL entries to find the first candidate: */
	while ((i < nsrcs) && !(d = srcs[id = i]))
		i++;

	if (!d)
		return NULL;

	/* then pick the remaining src with the greatest depth: */
	for (; i < nsrcs; i++)
		if (srcs[i] && (srcs[i]->depth > d->depth))
			d = srcs[id = i];

	/* remove the winner from the array so it is not returned again: */
	srcs[id] = NULL;

	return d;
}

/* count how many slot-consuming instructions have been scheduled since
 * 'instr' (the scheduled list is in reverse order), clamped to 'maxd':
 */
static unsigned distance(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *instr, unsigned maxd)
{
	struct ir3_instruction *n = ctx->scheduled;
	unsigned d = 0;
	while (n && (n != instr) && (d < maxd)) {
		if (is_alu(n) || is_flow(n))
			d++;
		n = n->next;
	}
	return d;
}
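
/* For instance (illustrative): if three instructions have been scheduled
 * since 'instr', but one of them was a meta instruction (which consumes no
 * real slot), then distance(ctx, instr, 5) would be 2.
 */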

/* TODO maybe we want double linked list? */
static struct ir3_instruction * prev(struct ir3_instruction *instr)
{
	struct ir3_instruction *p = instr->block->head;
	while (p && (p->next != instr))
		p = p->next;
	return p;
}

static void schedule(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *instr, bool remove)
{
	struct ir3_block *block = instr->block;

	/* maybe there is a better way to handle this than just stuffing
	 * a nop.. ideally we'd know about this constraint in the
	 * scheduling and depth calculation..
	 */
	if (ctx->scheduled && is_sfu(ctx->scheduled) && is_sfu(instr))
		schedule(ctx, ir3_instr_create(block, 0, OPC_NOP), false);

	/* remove from depth list:
	 */
	if (remove) {
		struct ir3_instruction *p = prev(instr);

		/* NOTE: this can happen for inputs which are not
		 * read.. in that case there is no need to schedule
		 * the input, so just bail:
		 */
		if (instr != (p ? p->next : block->head))
			return;

		if (p)
			p->next = instr->next;
		else
			block->head = instr->next;
	}

	if (writes_addr(instr)) {
		assert(ctx->addr == NULL);
		ctx->addr = instr;
	}

	if (writes_pred(instr)) {
		assert(ctx->pred == NULL);
		ctx->pred = instr;
	}

	instr->flags |= IR3_INSTR_MARK;

	/* push onto the scheduled list (which is kept in reverse order): */
	instr->next = ctx->scheduled;
	ctx->scheduled = instr;

	ctx->cnt++;
}

/*
 * Delay-slot calculation.  Follows fanin/fanout.
 */
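
/* Note: "follows fanin/fanout" means that meta instructions, which do not
 * occupy a real slot in the final program, are transparent to the delay
 * calculation: we recurse through their sources rather than assigning them
 * delay slots of their own.
 */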

/* calculate delay for specified src: */
static unsigned delay_calc_srcn(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *assigner,
		struct ir3_instruction *consumer, unsigned srcn)
{
	unsigned delay = 0;

	if (is_meta(assigner)) {
		/* meta instrs are transparent; take the worst case over
		 * their srcs:
		 */
		struct ir3_instruction *src;
		foreach_ssa_src(src, assigner) {
			unsigned d = delay_calc_srcn(ctx, src, consumer, srcn);
			delay = MAX2(delay, d);
		}
	} else {
		delay = ir3_delayslots(assigner, consumer, srcn);
		/* subtract off whatever has already been scheduled in between: */
		delay -= distance(ctx, assigner, delay);
	}

	return delay;
}

/* calculate delay for instruction (maximum of delay for all srcs): */
static unsigned delay_calc(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *instr)
{
	unsigned i, delay = 0;
	struct ir3_instruction *src;

	foreach_ssa_src_n(src, i, instr) {
		unsigned d = delay_calc_srcn(ctx, src, instr, i);
		delay = MAX2(delay, d);
	}

	return delay;
}
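
/* A worked example (with made-up slot counts, purely illustrative): if
 * ir3_delayslots() says src0 needs 3 slots and src1 needs 6, and two
 * slot-consuming instructions have already been scheduled after each
 * assigner, then delay_calc() returns MAX2(3 - 2, 6 - 2) = 4, ie. four
 * more slots must be filled before this instruction can issue.
 */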

/* A negative return value signals that an instruction has been newly
 * SCHEDULED (or DELAYED due to address or predicate register already
 * in use), return back up to the top of the stack (to block_sched())
 */
#define SCHEDULED -1
#define DELAYED -2

static int trysched(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *instr)
{
	struct ir3_instruction *srcs[64];
	struct ir3_instruction *src;
	unsigned delay, nsrcs = 0;

	/* if already scheduled: */
	if (instr->flags & IR3_INSTR_MARK)
		return 0;

	/* figure out our src's, copy 'em out into an array for sorting: */
	foreach_ssa_src(src, instr) {
		debug_assert(nsrcs < ARRAY_SIZE(srcs));
		srcs[nsrcs++] = src;
	}

	/* for each src register in sorted order:
	 */
	while ((src = deepest(srcs, nsrcs))) {
		delay = trysched(ctx, src);
		if (delay)
			return delay;
	}

	/* all our dependencies are scheduled, figure out if
	 * we have enough delay slots to schedule ourself:
	 */
	delay = delay_calc(ctx, instr);
	if (delay)
		return delay;

	/* if the instruction is a kill, we need to ensure *every*
	 * bary.f is scheduled.  The hw seems unhappy if the thread
	 * gets killed before the end-input (ei) flag is hit.
	 *
	 * We could do this by adding each bary.f instruction as a
	 * virtual ssa src for the kill instruction.  But we have
	 * fixed length instr->regs[].
	 *
	 * TODO this wouldn't be quite right if we had multiple
	 * basic blocks, if any block was conditional.  We'd need
	 * to schedule the bary.f's outside of any block which
	 * was conditional that contained a kill.. I think..
	 */
	if (is_kill(instr)) {
		struct ir3 *ir = instr->block->shader;
		unsigned i;

		for (i = 0; i < ir->baryfs_count; i++) {
			struct ir3_instruction *baryf = ir->baryfs[i];
			if (baryf->depth == DEPTH_UNUSED)
				continue;
			delay = trysched(ctx, baryf);
			if (delay)
				return delay;
		}
	}

	/* if the instruction writes the address register, we need to ensure
	 * that the instructions which use the address register value have
	 * all their other dependencies scheduled.
	 * TODO we may possibly need to do the same thing with predicate
	 * register usage, but for now we get by without, since the
	 * predicate usage patterns are simpler:
	 */
	if (writes_addr(instr)) {
		struct ir3 *ir = instr->block->shader;
		unsigned i;

		for (i = 0; i < ir->indirects_count; i++) {
			struct ir3_instruction *indirect = ir->indirects[i];
			if (indirect->depth == DEPTH_UNUSED)
				continue;
			if (indirect->address != instr)
				continue;
			/* NOTE: avoid recursively scheduling the dependency
			 * on ourself (ie. avoid infinite recursion):
			 */
			foreach_ssa_src(src, indirect) {
				if (src == instr)
					continue;
				delay = trysched(ctx, src);
				if (delay)
					return delay;
			}
		}
	}

	/* if this is a write to the address/predicate register, and that
	 * register is currently in use, we need to defer until it is free:
	 */
	if (writes_addr(instr) && ctx->addr) {
		assert(ctx->addr != instr);
		return DELAYED;
	}
	if (writes_pred(instr) && ctx->pred) {
		assert(ctx->pred != instr);
		return DELAYED;
	}

	schedule(ctx, instr, true);
	return SCHEDULED;
}

/* reverse a singly linked list of instructions, returning the new head: */
static struct ir3_instruction * reverse(struct ir3_instruction *instr)
{
	struct ir3_instruction *reversed = NULL;
	while (instr) {
		struct ir3_instruction *next = instr->next;
		instr->next = reversed;
		reversed = instr;
		instr = next;
	}
	return reversed;
}

static bool uses_current_addr(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *instr)
{
	return instr->address && (ctx->addr == instr->address);
}

static bool uses_current_pred(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *instr)
{
	struct ir3_instruction *src;
	foreach_ssa_src(src, instr)
		if (ctx->pred == src)
			return true;
	return false;
}

/* when we encounter an instruction that writes to the address register
 * while it is in use, we delay that instruction and try to schedule all
 * other instructions using the current address register value:
 */
static int block_sched_undelayed(struct ir3_sched_ctx *ctx,
		struct ir3_block *block)
{
	struct ir3_instruction *instr = block->head;
	bool addr_in_use = false;
	bool pred_in_use = false;
	bool all_delayed = true;
	unsigned cnt = ~0, attempted = 0;

	while (instr) {
		struct ir3_instruction *next = instr->next;
		bool addr = uses_current_addr(ctx, instr);
		bool pred = uses_current_pred(ctx, instr);

		if (addr || pred) {
			int ret = trysched(ctx, instr);

			if (ret != DELAYED)
				all_delayed = false;

			if (ret == SCHEDULED)
				cnt = 0;
			else if (ret > 0)
				cnt = MIN2(cnt, ret);

			if (addr)
				addr_in_use = true;
			if (pred)
				pred_in_use = true;

			attempted++;
		}

		instr = next;
	}

	/* once nothing remains that reads the current a0.x/p0.x value,
	 * the register is free to be rewritten:
	 */
	if (!addr_in_use)
		ctx->addr = NULL;

	if (!pred_in_use)
		ctx->pred = NULL;

	/* detect if we've gotten ourselves into an impossible situation
	 * and bail if needed:
	 */
	if (all_delayed && (attempted > 0))
		ctx->error = true;

	return cnt;
}

static void block_sched(struct ir3_sched_ctx *ctx, struct ir3_block *block)
{
	struct ir3_instruction *instr;

	/* schedule all the shader inputs (meta-instr) first so that
	 * the RA step sees that the input registers contain a value
	 * from the start of the shader:
	 */
	if (!block->parent) {
		unsigned i;
		for (i = 0; i < block->ninputs; i++) {
			struct ir3_instruction *in = block->inputs[i];
			if (in)
				schedule(ctx, in, true);
		}
	}

	while ((instr = block->head) && !ctx->error) {
		/* NOTE: always grab next *before* trysched(), in case the
		 * instruction is actually scheduled (and therefore moved
		 * from depth list into scheduled list)
		 */
		struct ir3_instruction *next = instr->next;
		int cnt = trysched(ctx, instr);

		if (cnt == DELAYED)
			cnt = block_sched_undelayed(ctx, block);

		/* -1 is signal to return up stack, but to us means same as 0: */
		cnt = MAX2(0, cnt);
		cnt += ctx->cnt;
		instr = next;

		/* if deepest remaining instruction cannot be scheduled, try
		 * the increasingly more shallow instructions until needed
		 * number of delay slots is filled:
		 */
		while (instr && (cnt > ctx->cnt)) {
			next = instr->next;
			trysched(ctx, instr);
			instr = next;
		}

		/* and if we run out of instructions that can be scheduled,
		 * then it is time for nop's:
		 */
		while (cnt > ctx->cnt)
			schedule(ctx, ir3_instr_create(block, 0, OPC_NOP), false);
	}

	/* at this point, scheduled list is in reverse order, so fix that: */
	block->head = reverse(ctx->scheduled);
}

int ir3_block_sched(struct ir3_block *block)
{
	struct ir3_sched_ctx ctx = {0};
	ir3_clear_mark(block->shader);
	block_sched(&ctx, block);
	if (ctx.error)
		return -1;
	return 0;
}
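
/* Usage sketch (a hypothetical caller, not part of this file): the pass runs
 * on the depth-sorted list produced by the companion depth pass, before
 * register allocation, roughly along the lines of:
 *
 *    ir3_block_depth(block);
 *    if (ir3_block_sched(block) != 0) {
 *        // scheduling got stuck (ctx.error), fail/fall back the compile
 *    }
 *
 * where ir3_block_depth() is assumed from the depth pass.  A non-zero
 * return corresponds to the "impossible situation" detected in
 * block_sched_undelayed().
 */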