freedreno/ir3: comment + better fxn name
[mesa.git] / src / gallium / drivers / freedreno / ir3 / ir3_sched.c
/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */


#include "util/u_math.h"

#include "ir3.h"

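/* sentinel return values for trysched(); any positive return value is
 * instead a count of delay slots still required:
 */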
enum {
	SCHEDULED = -1,
	DELAYED = -2,
};

/*
 * Instruction Scheduling:
 *
 * Using the depth sorted list from the depth pass, attempt to recursively
 * schedule the deepest unscheduled path.  The first instruction that cannot
 * be scheduled returns the number of delay slots it still requires, at
 * which point we return back up to the top and attempt to schedule by the
 * next highest depth.  After a sufficient number of instructions have been
 * scheduled, return back to the beginning of the list and start again.  If
 * we reach the end of the depth sorted list without being able to insert
 * any instruction, insert nops.  Repeat until there are no more unscheduled
 * instructions.
 *
 * There are a few special cases that need to be handled, since sched
 * is currently independent of register allocation.  Uses of the address
 * register (a0.x) or predicate register (p0.x) must be serialized, ie.
 * if you have two pairs of instructions that write, then read, the same
 * special register, those pairs cannot be interleaved.  To solve this,
 * when we are in such a scheduling "critical section", and we encounter
 * a conflicting write to a special register, we try to schedule any
 * remaining instructions that use that value first.
 */

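/* For illustration (a hypothetical sequence, not taken from a real
 * shader), given two write/read pairs on a0.x, only the un-interleaved
 * ordering is valid:
 *
 *      valid:                      invalid:
 *        mova a0.x, ...              mova a0.x, ...
 *        (use a0.x)                  mova a0.x, ...   <-- clobbers the
 *        mova a0.x, ...              (use a0.x)           first write
 *        (use a0.x)                  (use a0.x)
 */
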
struct ir3_sched_ctx {
	struct ir3_instruction *scheduled; /* last scheduled instr */
	struct ir3_instruction *addr;      /* current a0.x user, if any */
	struct ir3_instruction *pred;      /* current p0.x user, if any */
	unsigned cnt;                      /* num instrs scheduled so far (incl. nops) */
	bool error;
};

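/* Pick the deepest not-yet-consumed instruction from srcs[] and NULL out
 * its slot, so that successive calls hand back the remaining srcs in
 * decreasing depth order:
 */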
static struct ir3_instruction *
deepest(struct ir3_instruction **srcs, unsigned nsrcs)
{
	struct ir3_instruction *d = NULL;
	unsigned i = 0, id = 0;

	while ((i < nsrcs) && !(d = srcs[id = i]))
		i++;

	if (!d)
		return NULL;

	for (; i < nsrcs; i++)
		if (srcs[i] && (srcs[i]->depth > d->depth))
			d = srcs[id = i];

	srcs[id] = NULL;

	return d;
}

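/* Count how many delay-slot-consuming (alu/flow) instructions have been
 * scheduled since 'instr', walking back at most 'maxd' slots:
 */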
static unsigned distance(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *instr, unsigned maxd)
{
	struct ir3_instruction *n = ctx->scheduled;
	unsigned d = 0;
	while (n && (n != instr) && (d < maxd)) {
		if (is_alu(n) || is_flow(n))
			d++;
		n = n->next;
	}
	return d;
}

/* TODO maybe we want a doubly-linked list? */
static struct ir3_instruction * prev(struct ir3_instruction *instr)
{
	struct ir3_instruction *p = instr->block->head;
	while (p && (p->next != instr))
		p = p->next;
	return p;
}

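/* Mark 'instr' as scheduled: unlink it from the depth list (when 'remove'
 * is set) and prepend it to ctx->scheduled, which is therefore built in
 * reverse program order (see reverse() below):
 */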
static void schedule(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *instr, bool remove)
{
	struct ir3_block *block = instr->block;

	/* maybe there is a better way to handle this than just stuffing
	 * a nop.. ideally we'd know about this constraint in the
	 * scheduling and depth calculation..
	 */
	if (ctx->scheduled && is_sfu(ctx->scheduled) && is_sfu(instr))
		schedule(ctx, ir3_instr_create(block, 0, OPC_NOP), false);

	/* remove from depth list:
	 */
	if (remove) {
		struct ir3_instruction *p = prev(instr);

		/* NOTE: this can happen for inputs which are not
		 * read.. in that case there is no need to schedule
		 * the input, so just bail:
		 */
		if (instr != (p ? p->next : block->head))
			return;

		if (p)
			p->next = instr->next;
		else
			block->head = instr->next;
	}

	if (writes_addr(instr)) {
		assert(ctx->addr == NULL);
		ctx->addr = instr;
	}

	if (writes_pred(instr)) {
		assert(ctx->pred == NULL);
		ctx->pred = instr;
	}

	instr->flags |= IR3_INSTR_MARK;

	instr->next = ctx->scheduled;
	ctx->scheduled = instr;

	ctx->cnt++;
}

/*
 * Delay-slot calculation.  Recurses through meta instructions
 * (fanin/fanout) to find the real assigner(s) of each src.
 */

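/* A worked example (the numbers are made up for illustration): if
 * ir3_delayslots() says the consumer must start 3 slots after its
 * assigner, and two alu instructions have already been scheduled in
 * between (distance() == 2), then only 3 - 2 = 1 more slot must be
 * filled, by another ready instruction if possible, otherwise a nop.
 */
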
/* calculate delay for specified src: */
static unsigned delay_calc_srcn(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *assigner,
		struct ir3_instruction *consumer, unsigned srcn)
{
	unsigned delay = 0;

	if (is_meta(assigner)) {
		unsigned i;
		for (i = 1; i < assigner->regs_count; i++) {
			struct ir3_register *reg = assigner->regs[i];
			if (reg->flags & IR3_REG_SSA) {
				unsigned d = delay_calc_srcn(ctx, reg->instr,
						consumer, srcn);
				delay = MAX2(delay, d);
			}
		}
	} else {
		delay = ir3_delayslots(assigner, consumer, srcn);
		delay -= distance(ctx, assigner, delay);
	}

	return delay;
}

/* calculate delay for instruction (maximum of delay for all srcs): */
static unsigned delay_calc(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *instr)
{
	unsigned i, delay = 0;

	for (i = 1; i < instr->regs_count; i++) {
		struct ir3_register *reg = instr->regs[i];
		if (reg->flags & IR3_REG_SSA) {
			unsigned d = delay_calc_srcn(ctx, reg->instr,
					instr, i - 1);
			delay = MAX2(delay, d);
		}
	}

	return delay;
}

/* Recursively try to schedule instr (deepest srcs first).  Returns
 * SCHEDULED (negative) if an instruction was newly scheduled, signalling
 * the caller to return back up to the top of the stack (to block_sched()),
 * DELAYED if it must wait for an addr/pred critical section to drain,
 * 0 if it was already scheduled, or else the number of delay slots still
 * required before it can be scheduled.
 */
static int trysched(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *instr)
{
	struct ir3_instruction *srcs[ARRAY_SIZE(instr->regs) - 1];
	struct ir3_instruction *src;
	unsigned i, delay, nsrcs = 0;

	/* if already scheduled: */
	if (instr->flags & IR3_INSTR_MARK)
		return 0;

	/* figure out our srcs: */
	for (i = 1; i < instr->regs_count; i++) {
		struct ir3_register *reg = instr->regs[i];
		if (reg->flags & IR3_REG_SSA)
			srcs[nsrcs++] = reg->instr;
	}

	/* for each src register, in depth sorted order:
	 */
	delay = 0;
	while ((src = deepest(srcs, nsrcs))) {
		delay = trysched(ctx, src);
		if (delay)
			return delay;
	}

	/* all our dependencies are scheduled, figure out if
	 * we have enough delay slots to schedule ourselves:
	 */
	delay = delay_calc(ctx, instr);
	if (delay)
		return delay;

	/* if this is a write to the address/predicate register, and that
	 * register is currently in use, we need to defer until it is
	 * free:
	 */
	if (writes_addr(instr) && ctx->addr) {
		assert(ctx->addr != instr);
		return DELAYED;
	}
	if (writes_pred(instr) && ctx->pred) {
		assert(ctx->pred != instr);
		return DELAYED;
	}

	schedule(ctx, instr, true);
	return SCHEDULED;
}

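/* schedule() builds the scheduled list by prepending, so it ends up in
 * reverse program order; flip it back in-place:
 */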
static struct ir3_instruction * reverse(struct ir3_instruction *instr)
{
	struct ir3_instruction *reversed = NULL;
	while (instr) {
		struct ir3_instruction *next = instr->next;
		instr->next = reversed;
		reversed = instr;
		instr = next;
	}
	return reversed;
}

static bool uses_current_addr(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *instr)
{
	unsigned i;
	for (i = 1; i < instr->regs_count; i++) {
		struct ir3_register *reg = instr->regs[i];
		if (reg->flags & IR3_REG_SSA) {
			if (is_addr(reg->instr)) {
				struct ir3_instruction *addr;
				addr = reg->instr->regs[1]->instr; /* the mova */
				if (ctx->addr == addr)
					return true;
			}
		}
	}
	return false;
}

static bool uses_current_pred(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *instr)
{
	unsigned i;
	for (i = 1; i < instr->regs_count; i++) {
		struct ir3_register *reg = instr->regs[i];
		if ((reg->flags & IR3_REG_SSA) && (ctx->pred == reg->instr))
			return true;
	}
	return false;
}

/* when we encounter an instruction that writes to the address (or
 * predicate) register while that register is in use, we delay the
 * write and try to schedule all the other instructions that still
 * read the current value first:
 */
static int block_sched_undelayed(struct ir3_sched_ctx *ctx,
		struct ir3_block *block)
{
	struct ir3_instruction *instr = block->head;
	bool addr_in_use = false;
	bool pred_in_use = false;
	bool all_delayed = true;
	unsigned cnt = ~0, attempted = 0;

	while (instr) {
		struct ir3_instruction *next = instr->next;
		bool addr = uses_current_addr(ctx, instr);
		bool pred = uses_current_pred(ctx, instr);

		if (addr || pred) {
			int ret = trysched(ctx, instr);

			if (ret != DELAYED)
				all_delayed = false;

			if (ret == SCHEDULED)
				cnt = 0;
			else if (ret > 0)
				cnt = MIN2(cnt, ret);
			if (addr)
				addr_in_use = true;
			if (pred)
				pred_in_use = true;

			attempted++;
		}

		instr = next;
	}

	if (!addr_in_use)
		ctx->addr = NULL;

	if (!pred_in_use)
		ctx->pred = NULL;

	/* detect if we've gotten ourselves into an impossible situation
	 * and bail if needed:
	 */
	if (all_delayed && (attempted > 0))
		ctx->error = true;

	return cnt;
}

static void block_sched(struct ir3_sched_ctx *ctx, struct ir3_block *block)
{
	struct ir3_instruction *instr;

	/* schedule all the shader inputs (meta-instr) first so that
	 * the RA step sees that the input registers contain a value
	 * from the start of the shader:
	 */
	if (!block->parent) {
		unsigned i;
		for (i = 0; i < block->ninputs; i++) {
			struct ir3_instruction *in = block->inputs[i];
			if (in)
				schedule(ctx, in, true);
		}
	}

	while ((instr = block->head) && !ctx->error) {
		/* NOTE: always grab next *before* trysched(), in case the
		 * instruction is actually scheduled (and therefore moved
		 * from depth list into scheduled list)
		 */
		struct ir3_instruction *next = instr->next;
		int cnt = trysched(ctx, instr);

		if (cnt == DELAYED)
			cnt = block_sched_undelayed(ctx, block);

		/* -1 is a signal to return up the stack, but to us it means
		 * the same as 0:
		 */
		cnt = MAX2(0, cnt);
		cnt += ctx->cnt;
		instr = next;

		/* if the deepest remaining instruction cannot be scheduled,
		 * try the increasingly more shallow instructions until the
		 * needed number of delay slots is filled:
		 */
		while (instr && (cnt > ctx->cnt)) {
			next = instr->next;
			trysched(ctx, instr);
			instr = next;
		}

		/* and if we run out of instructions that can be scheduled,
		 * then it is time for nops:
		 */
		while (cnt > ctx->cnt)
			schedule(ctx, ir3_instr_create(block, 0, OPC_NOP), false);
	}

	/* at this point, the scheduled list is in reverse order, so fix that: */
	block->head = reverse(ctx->scheduled);
}

int ir3_block_sched(struct ir3_block *block)
{
	struct ir3_sched_ctx ctx = {0};
	ir3_clear_mark(block->shader);
	block_sched(&ctx, block);
	if (ctx.error)
		return -1;
	return 0;
}
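
/* A minimal sketch of how this pass might be invoked from the compile
 * flow (the caller shown here is hypothetical, and the name of the depth
 * pass entry point is assumed; the only hard requirement is that depth
 * sorting has already run, so block->head is the depth sorted list):
 *
 *	ir3_block_depth(block);
 *	if (ir3_block_sched(block) < 0) {
 *		// scheduling deadlocked on an addr/pred critical section,
 *		// so the caller must bail (e.g. fall back to a simpler
 *		// codepath or report the compile failure)
 *	}
 */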