/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "ir3.h"

#include "util/u_math.h"

/*
 * Instruction Scheduling:
 *
 * Using the depth sorted list from the depth pass, attempt to recursively
 * schedule the deepest unscheduled path.  The first instruction that cannot
 * be scheduled returns the number of delay slots it still requires, at
 * which point we return back up to the top and attempt to schedule by the
 * next highest depth.  After a sufficient number of instructions have been
 * scheduled, return back to the beginning of the list and start again.  If
 * you reach the end of the depth sorted list without being able to insert
 * any instruction, insert nop's.  Repeat until there are no more
 * unscheduled instructions.
 *
 * There are a few special cases that need to be handled, since sched
 * is currently independent of register allocation.  Usages of the address
 * register (a0.x) or predicate register (p0.x) must be serialized.  Ie.
 * if you have two pairs of instructions that write the same special
 * register and then read it, those pairs cannot be interleaved.
 * To solve this, when we are in such a scheduling "critical section",
 * and we encounter a conflicting write to a special register, we try
 * to schedule any remaining instructions that use that value first.
 */
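
/* To illustrate the serialization constraint (a hypothetical instruction
 * sequence in rough hw asm syntax, purely for illustration):
 *
 *    mova a0.x, r0.y          ; pair 1: write a0.x
 *    mov  r1.x, r<a0.x>       ; pair 1: read via a0.x
 *    mova a0.x, r2.z          ; pair 2: write a0.x
 *    mov  r3.x, r<a0.x>       ; pair 2: read via a0.x
 *
 * The second mova must not be scheduled between the first pair, or the
 * first relative-addressed mov would see the wrong a0.x value.
 */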

struct ir3_sched_ctx {
	struct ir3_instruction *scheduled; /* last scheduled instr */
	struct ir3_instruction *addr;      /* current a0.x user, if any */
	struct ir3_instruction *pred;      /* current p0.x user, if any */
	unsigned cnt;                      /* count of scheduled instructions */
	bool error;                        /* set if we wedge (impossible schedule) */
};

/* pick the deepest src instruction, and mark it consumed: */
static struct ir3_instruction *
deepest(struct ir3_instruction **srcs, unsigned nsrcs)
{
	struct ir3_instruction *d = NULL;
	unsigned i = 0, id = 0;

	/* skip to the first non-NULL src: */
	while ((i < nsrcs) && !(d = srcs[id = i]))
		i++;

	if (!d)
		return NULL;

	for (; i < nsrcs; i++)
		if (srcs[i] && (srcs[i]->depth > d->depth))
			d = srcs[id = i];

	/* take the chosen src out of the array, so the next call
	 * returns the next-deepest:
	 */
	srcs[id] = NULL;

	return d;
}

/* count how many instructions (up to maxd) have been scheduled since
 * 'instr' (only alu/flow instructions count towards filling delay slots):
 */
static unsigned distance(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *instr, unsigned maxd)
{
	struct ir3_instruction *n = ctx->scheduled;
	unsigned d = 0;
	while (n && (n != instr) && (d < maxd)) {
		if (is_alu(n) || is_flow(n))
			d++;
		n = n->next;
	}
	return d;
}

/* TODO maybe we want a doubly linked list instead? */
static struct ir3_instruction * prev(struct ir3_instruction *instr)
{
	struct ir3_instruction *p = instr->block->head;
	while (p && (p->next != instr))
		p = p->next;
	return p;
}

static bool is_sfu_or_mem(struct ir3_instruction *instr)
{
	return is_sfu(instr) || is_mem(instr);
}
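
/* e.g. (a hypothetical sequence, for illustration) two back-to-back
 * sfu/mem instructions like:
 *
 *    rcp r0.x, r0.x
 *    (nop)                  ; inserted by schedule() below
 *    rsq r0.y, r0.y
 *
 * need a nop stuffed between them, which schedule() handles:
 */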

static void schedule(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *instr, bool remove)
{
	struct ir3_block *block = instr->block;

	/* maybe there is a better way to handle this than just stuffing
	 * a nop.. ideally we'd know about this constraint in the
	 * scheduling and depth calculation..
	 */
	if (ctx->scheduled && is_sfu_or_mem(ctx->scheduled) && is_sfu_or_mem(instr))
		schedule(ctx, ir3_instr_create(block, 0, OPC_NOP), false);

	/* remove from depth list:
	 */
	if (remove) {
		struct ir3_instruction *p = prev(instr);

		/* NOTE: this can happen for inputs which are not
		 * read.. in that case there is no need to schedule
		 * the input, so just bail:
		 */
		if (instr != (p ? p->next : block->head))
			return;

		if (p)
			p->next = instr->next;
		else
			block->head = instr->next;
	}

	if (writes_addr(instr)) {
		assert(ctx->addr == NULL);
		ctx->addr = instr;
	}

	if (writes_pred(instr)) {
		assert(ctx->pred == NULL);
		ctx->pred = instr;
	}

	instr->flags |= IR3_INSTR_MARK;

	/* the scheduled list is built in reverse order (newest first),
	 * and gets reversed once the whole block is scheduled:
	 */
	instr->next = ctx->scheduled;
	ctx->scheduled = instr;

	ctx->cnt++;
}

/*
 * Delay-slot calculation.  Follows fanin/fanout.
 */

/* calculate delay for specified src: */
static unsigned delay_calc_srcn(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *assigner,
		struct ir3_instruction *consumer, unsigned srcn)
{
	unsigned delay = 0;

	if (is_meta(assigner)) {
		/* meta instructions don't generate code; recurse into
		 * their srcs to find the real assigner(s):
		 */
		struct ir3_instruction *src;
		foreach_ssa_src(src, assigner) {
			unsigned d = delay_calc_srcn(ctx, src, consumer, srcn);
			delay = MAX2(delay, d);
		}
	} else {
		delay = ir3_delayslots(assigner, consumer, srcn);
		/* subtract off the slots already covered by instructions
		 * scheduled after the assigner:
		 */
		delay -= distance(ctx, assigner, delay);
	}

	return delay;
}
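
/* e.g. (illustrative numbers, not from hw docs): if ir3_delayslots()
 * says the consumer must trail its assigner by 6 slots, and distance()
 * finds 2 eligible instructions already scheduled in between, then only
 * 4 slots remain to be filled with other instructions or nop's.
 */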

/* calculate delay for instruction (maximum of delay for all srcs): */
static unsigned delay_calc(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *instr)
{
	unsigned delay = 0;
	struct ir3_instruction *src;

	foreach_ssa_src_n(src, i, instr) {
		unsigned d = delay_calc_srcn(ctx, src, instr, i);
		delay = MAX2(delay, d);
	}

	return delay;
}

enum {
	SCHEDULED = -1,
	DELAYED = -2,
};

/* A negative return value signals that an instruction has been newly
 * SCHEDULED (or DELAYED due to the address or predicate register
 * already being in use); return back up to the top of the stack (to
 * block_sched()).  A positive return value is the number of delay
 * slots still needed.
 */
static int trysched(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *instr)
{
	struct ir3_instruction *srcs[64];
	struct ir3_instruction *src;
	unsigned delay, nsrcs = 0;

	/* if already scheduled: */
	if (instr->flags & IR3_INSTR_MARK)
		return 0;

	/* figure out our src's, copy 'em out into an array for sorting: */
	foreach_ssa_src(src, instr) {
		debug_assert(nsrcs < ARRAY_SIZE(srcs));
		srcs[nsrcs++] = src;
	}

	/* for each src register in sorted order:
	 */
	while ((src = deepest(srcs, nsrcs))) {
		delay = trysched(ctx, src);
		if (delay)
			return delay;
	}

	/* all our dependents are scheduled, figure out if
	 * we have enough delay slots to schedule ourself:
	 */
	delay = delay_calc(ctx, instr);
	if (delay)
		return delay;

	/* if the instruction is a kill, we need to ensure *every*
	 * bary.f is scheduled.  The hw seems unhappy if the thread
	 * gets killed before the end-input (ei) flag is hit.
	 *
	 * We could do this by adding each bary.f instruction as a
	 * virtual ssa src for the kill instruction.  But we have
	 * fixed length instr->regs[].
	 *
	 * TODO this wouldn't be quite right if we had multiple
	 * basic blocks, if any block was conditional.  We'd need
	 * to schedule the bary.f's outside of any block which
	 * was conditional that contained a kill.. I think..
	 */
	if (is_kill(instr)) {
		struct ir3 *ir = instr->block->shader;
		unsigned i;

		for (i = 0; i < ir->baryfs_count; i++) {
			struct ir3_instruction *baryf = ir->baryfs[i];
			if (baryf->depth == DEPTH_UNUSED)
				continue;
			delay = trysched(ctx, baryf);
			if (delay)
				return delay;
		}
	}

	/* if the instruction writes the address register, we need to
	 * ensure that the instructions which use the address register
	 * value have all their other dependencies scheduled.
	 * TODO we may possibly need to do the same thing with predicate
	 * register usage, but for now we get by without, since the
	 * predicate usage patterns are simpler.
	 */
	if (writes_addr(instr)) {
		struct ir3 *ir = instr->block->shader;
		unsigned i;

		for (i = 0; i < ir->indirects_count; i++) {
			struct ir3_instruction *indirect = ir->indirects[i];
			if (indirect->depth == DEPTH_UNUSED)
				continue;
			if (indirect->address != instr)
				continue;
			/* NOTE: avoid recursively scheduling the dependency
			 * on ourself (ie. avoid infinite recursion):
			 */
			foreach_ssa_src(src, indirect) {
				if ((src == instr) || (src->address == instr))
					continue;
				delay = trysched(ctx, src);
				if (delay)
					return delay;
			}
		}
	}

	/* if this is a write to address/predicate register, and that
	 * register is currently in use, we need to defer until it is
	 * free:
	 */
	if (writes_addr(instr) && ctx->addr) {
		assert(ctx->addr != instr);
		return DELAYED;
	}
	if (writes_pred(instr) && ctx->pred) {
		assert(ctx->pred != instr);
		return DELAYED;
	}

	schedule(ctx, instr, true);
	return SCHEDULED;
}

/* reverse the (reversed) scheduled list back into program order: */
static struct ir3_instruction * reverse(struct ir3_instruction *instr)
{
	struct ir3_instruction *reversed = NULL;
	while (instr) {
		struct ir3_instruction *next = instr->next;
		instr->next = reversed;
		reversed = instr;
		instr = next;
	}
	return reversed;
}

static bool uses_current_addr(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *instr)
{
	return instr->address && (ctx->addr == instr->address);
}

static bool uses_current_pred(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *instr)
{
	struct ir3_instruction *src;
	foreach_ssa_src(src, instr)
		if (ctx->pred == src)
			return true;
	return false;
}

/* when we encounter an instruction that writes to the address register
 * while it is in use, we delay that instruction and try to schedule all
 * other instructions using the current address register:
 */
static int block_sched_undelayed(struct ir3_sched_ctx *ctx,
		struct ir3_block *block)
{
	struct ir3_instruction *instr = block->head;
	bool addr_in_use = false;
	bool pred_in_use = false;
	bool all_delayed = true;
	unsigned cnt = ~0, attempted = 0;

	while (instr) {
		struct ir3_instruction *next = instr->next;
		bool addr = uses_current_addr(ctx, instr);
		bool pred = uses_current_pred(ctx, instr);

		if (addr || pred) {
			int ret = trysched(ctx, instr);

			if (ret != DELAYED)
				all_delayed = false;

			if (ret == SCHEDULED)
				ret = 0;
			cnt = MIN2(cnt, ret);

			if (addr)
				addr_in_use = true;
			if (pred)
				pred_in_use = true;

			attempted++;
		}

		instr = next;
	}

	/* once nothing still uses the current a0.x/p0.x value, the
	 * register is free again for a new writer:
	 */
	if (!addr_in_use)
		ctx->addr = NULL;
	if (!pred_in_use)
		ctx->pred = NULL;

	/* detect if we've gotten ourselves into an impossible situation
	 * and bail if needed:
	 */
	if (all_delayed && (attempted > 0))
		ctx->error = true;

	return cnt;
}

static void block_sched(struct ir3_sched_ctx *ctx, struct ir3_block *block)
{
	struct ir3_instruction *instr;

	/* schedule all the shader inputs (meta-instr) first so that
	 * the RA step sees that the input registers contain a value
	 * from the start of the shader:
	 */
	if (!block->parent) {
		unsigned i;
		for (i = 0; i < block->ninputs; i++) {
			struct ir3_instruction *in = block->inputs[i];
			if (in)
				schedule(ctx, in, true);
		}
	}

	while ((instr = block->head) && !ctx->error) {
		/* NOTE: always grab next *before* trysched(), in case the
		 * instruction is actually scheduled (and therefore moved
		 * from depth list into scheduled list)
		 */
		struct ir3_instruction *next = instr->next;
		int cnt = trysched(ctx, instr);

		if (cnt == DELAYED)
			cnt = block_sched_undelayed(ctx, block);

		/* -1 is signal to return up stack, but to us means same as 0: */
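		/* e.g. (illustrative): if trysched() asked for 3 more delay
		 * slots and ctx->cnt (instructions scheduled so far) is 10,
		 * the target below becomes 13, and we keep scheduling
		 * shallower instructions or nop's until ctx->cnt reaches it:
		 */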
		cnt = MAX2(0, cnt);
		cnt += ctx->cnt;
		instr = next;

		/* if the deepest remaining instruction cannot be scheduled,
		 * try increasingly more shallow instructions until the needed
		 * number of delay slots is filled:
		 */
		while (instr && (cnt > ctx->cnt)) {
			trysched(ctx, instr);
			instr = instr->next;
		}

		/* and if we run out of instructions that can be scheduled,
		 * then it is time for nop's:
		 */
		while (cnt > ctx->cnt)
			schedule(ctx, ir3_instr_create(block, 0, OPC_NOP), false);
	}

	/* at this point, the scheduled list is in reverse order, so fix that: */
	block->head = reverse(ctx->scheduled);
}

int ir3_block_sched(struct ir3_block *block)
{
	struct ir3_sched_ctx ctx = {0};
	ir3_clear_mark(block->shader);
	block_sched(&ctx, block);
	if (ctx.error)
		return -1;
	return 0;
}
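
/* Typical usage (a sketch; the exact pass ordering lives in the ir3
 * compiler driver code, and the surrounding details are assumptions):
 *
 *    ir3_block_depth(block);          // depth pass: build depth sorted list
 *    if (ir3_block_sched(block) < 0)  // negative means scheduling wedged
 *        ...report error / fallback...
 *    ...register allocation, legalize, etc...
 */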