/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */


#include "util/u_math.h"

#include "ir3.h"

enum {
	SCHEDULED = -1,
	DELAYED = -2,
};

/*
 * Instruction Scheduling:
 *
 * Using the depth-sorted list from the depth pass, attempt to
 * recursively schedule the deepest unscheduled path.  The first
 * instruction that cannot be scheduled returns the number of delay
 * slots it still needs, at which point we return back up to the top
 * and attempt to schedule by the next highest depth.  After a
 * sufficient number of instructions have been scheduled, return back
 * to the beginning of the list and start again.  If we reach the end
 * of the depth-sorted list without being able to insert any
 * instruction, insert nops.  Repeat until there are no more
 * unscheduled instructions.
 *
 * There are a few special cases that need to be handled, since sched
 * is currently independent of register allocation.  Uses of the
 * address register (a0.x) or predicate register (p0.x) must be
 * serialized, i.e. if two pairs of instructions each write and then
 * read the same special register, the pairs cannot be interleaved.
 * To solve this, when we are in such a scheduling "critical section",
 * and we encounter a conflicting write to a special register, we try
 * to schedule any remaining instructions that use that value first.
 */
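
/*
 * For example (a hypothetical instruction sequence, for illustration
 * only), given two a0.x write/read pairs:
 *
 *    mova a0.x, ...        ; first a0.x write
 *    (instr reading a0.x)  ; user of the first value
 *    mova a0.x, ...        ; second a0.x write
 *    (instr reading a0.x)  ; user of the second value
 *
 * the second mova must not be scheduled between the first mova and its
 * user; trysched() returns DELAYED for it, and block_sched_undelayed()
 * first schedules the remaining users of the first value.
 */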

struct ir3_sched_ctx {
	struct ir3_instruction *scheduled; /* last scheduled instr */
	struct ir3_instruction *addr;      /* current a0.x user, if any */
	struct ir3_instruction *pred;      /* current p0.x user, if any */
	unsigned cnt;
	bool error;
};

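/* pick the deepest not-yet-consumed instruction remaining in srcs[],
 * clearing its slot so that repeated calls walk the srcs in decreasing
 * depth order:
 */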
static struct ir3_instruction *
deepest(struct ir3_instruction **srcs, unsigned nsrcs)
{
	struct ir3_instruction *d = NULL;
	unsigned i = 0, id = 0;

	while ((i < nsrcs) && !(d = srcs[id = i]))
		i++;

	if (!d)
		return NULL;

	for (; i < nsrcs; i++)
		if (srcs[i] && (srcs[i]->depth > d->depth))
			d = srcs[id = i];

	srcs[id] = NULL;

	return d;
}

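/* count the number of delay-slot consuming (alu/flow) instructions
 * scheduled since 'instr' was scheduled, walking back at most until
 * maxd is reached:
 */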
static unsigned distance(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *instr, unsigned maxd)
{
	struct ir3_instruction *n = ctx->scheduled;
	unsigned d = 0;
	while (n && (n != instr) && (d < maxd)) {
		if (is_alu(n) || is_flow(n))
			d++;
		n = n->next;
	}
	return d;
}

/* TODO maybe we want a doubly linked list? */
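/* find the instruction preceding 'instr' in the depth list by a linear
 * scan from the head (hence the TODO above):
 */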
static struct ir3_instruction * prev(struct ir3_instruction *instr)
{
	struct ir3_instruction *p = instr->block->head;
	while (p && (p->next != instr))
		p = p->next;
	return p;
}

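/* mark an instruction as scheduled: prepend it to the scheduled list
 * (which is therefore built in reverse order and fixed up at the end
 * of block_sched()), update the a0.x/p0.x bookkeeping, and optionally
 * unlink it from the depth list:
 */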
static void schedule(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *instr, bool remove)
{
	struct ir3_block *block = instr->block;

	/* maybe there is a better way to handle this than just stuffing
	 * a nop.. ideally we'd know about this constraint in the
	 * scheduling and depth calculation..
	 */
	if (ctx->scheduled && is_sfu(ctx->scheduled) && is_sfu(instr))
		schedule(ctx, ir3_instr_create(block, 0, OPC_NOP), false);

	/* remove from depth list:
	 */
	if (remove) {
		struct ir3_instruction *p = prev(instr);

		/* NOTE: this can happen for inputs which are not
		 * read.. in that case there is no need to schedule
		 * the input, so just bail:
		 */
		if (instr != (p ? p->next : block->head))
			return;

		if (p)
			p->next = instr->next;
		else
			block->head = instr->next;
	}

	if (writes_addr(instr)) {
		assert(ctx->addr == NULL);
		ctx->addr = instr;
	}

	if (writes_pred(instr)) {
		assert(ctx->pred == NULL);
		ctx->pred = instr;
	}

	instr->flags |= IR3_INSTR_MARK;

	instr->next = ctx->scheduled;
	ctx->scheduled = instr;

	ctx->cnt++;
}

/*
 * Delay-slot calculation.  Follows fanin/fanout.
 */

/* calculate delay for specified src: */
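/* (the assigner may be a meta instruction, eg. fanin/fanout; in that
 * case recurse through its srcs and take the worst-case delay)
 */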
static unsigned delay_calc_srcn(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *assigner,
		struct ir3_instruction *consumer, unsigned srcn)
{
	unsigned delay = 0;

	if (is_meta(assigner)) {
		unsigned i;
		for (i = 1; i < assigner->regs_count; i++) {
			struct ir3_register *reg = assigner->regs[i];
			if (reg->flags & IR3_REG_SSA) {
				unsigned d = delay_calc_srcn(ctx, reg->instr,
						consumer, srcn);
				delay = MAX2(delay, d);
			}
		}
	} else {
		delay = ir3_delayslots(assigner, consumer, srcn);
		delay -= distance(ctx, assigner, delay);
	}

	return delay;
}

/* calculate delay for instruction (maximum of delay for all srcs): */
static unsigned delay_calc(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *instr)
{
	unsigned i, delay = 0;

	for (i = 1; i < instr->regs_count; i++) {
		struct ir3_register *reg = instr->regs[i];
		if (reg->flags & IR3_REG_SSA) {
			unsigned d = delay_calc_srcn(ctx, reg->instr,
					instr, i - 1);
			delay = MAX2(delay, d);
		}
	}

	return delay;
}

/* A positive return value is the number of delay slots still needed;
 * zero means the instruction was already scheduled.  A negative return
 * value (SCHEDULED or DELAYED) signals that an instruction has been
 * newly scheduled or deferred; return back up to the top of the stack
 * (to block_sched()):
 */
static int trysched(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *instr)
{
	struct ir3_instruction *srcs[64];
	struct ir3_instruction *src;
	unsigned i, delay, nsrcs = 0;

	/* if already scheduled: */
	if (instr->flags & IR3_INSTR_MARK)
		return 0;

	debug_assert(instr->regs_count < ARRAY_SIZE(srcs));

	/* figure out our src's: */
	for (i = 1; i < instr->regs_count; i++) {
		struct ir3_register *reg = instr->regs[i];
		if (reg->flags & IR3_REG_SSA)
			srcs[nsrcs++] = reg->instr;
	}

	/* for each src register in sorted order:
	 */
	delay = 0;
	while ((src = deepest(srcs, nsrcs))) {
		delay = trysched(ctx, src);
		if (delay)
			return delay;
	}

	/* all of our srcs are scheduled, figure out if we have enough
	 * delay slots to schedule ourself:
	 */
	delay = delay_calc(ctx, instr);
	if (delay)
		return delay;

	/* if the instruction is a kill, we need to ensure *every*
	 * bary.f is scheduled.  The hw seems unhappy if the thread
	 * gets killed before the end-input (ei) flag is hit.
	 *
	 * We could do this by adding each bary.f instruction as
	 * virtual ssa src for the kill instruction.  But we have
	 * fixed length instr->regs[].
	 *
	 * TODO this wouldn't be quite right if we had multiple
	 * basic blocks, if any block was conditional.  We'd need
	 * to schedule the bary.f's outside of any conditional block
	 * which contained a kill.. I think..
	 */
	if (is_kill(instr)) {
		struct ir3 *ir = instr->block->shader;
		unsigned i;

		for (i = 0; i < ir->baryfs_count; i++) {
			if (ir->baryfs[i]->depth == DEPTH_UNUSED)
				continue;
			delay = trysched(ctx, ir->baryfs[i]);
			if (delay)
				return delay;
		}
	}

	/* if this is a write to address/predicate register, and that
	 * register is currently in use, we need to defer until it is
	 * free:
	 */
	if (writes_addr(instr) && ctx->addr) {
		assert(ctx->addr != instr);
		return DELAYED;
	}
	if (writes_pred(instr) && ctx->pred) {
		assert(ctx->pred != instr);
		return DELAYED;
	}

	schedule(ctx, instr, true);
	return SCHEDULED;
}

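/* the scheduled list is built by prepending (see schedule()), so it
 * ends up in reverse order; reverse it in place:
 */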
static struct ir3_instruction * reverse(struct ir3_instruction *instr)
{
	struct ir3_instruction *reversed = NULL;
	while (instr) {
		struct ir3_instruction *next = instr->next;
		instr->next = reversed;
		reversed = instr;
		instr = next;
	}
	return reversed;
}

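/* check if any of instr's srcs is an address-relative value whose
 * a0.x was written by the currently live address writer (ctx->addr):
 */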
static bool uses_current_addr(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *instr)
{
	unsigned i;
	for (i = 1; i < instr->regs_count; i++) {
		struct ir3_register *reg = instr->regs[i];
		if (reg->flags & IR3_REG_SSA) {
			if (is_addr(reg->instr)) {
				struct ir3_instruction *addr;
				addr = reg->instr->regs[1]->instr; /* the mova */
				if (ctx->addr == addr)
					return true;
			}
		}
	}
	return false;
}

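/* likewise, check if any of instr's srcs is the value written by the
 * currently live predicate writer (ctx->pred):
 */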
static bool uses_current_pred(struct ir3_sched_ctx *ctx,
		struct ir3_instruction *instr)
{
	unsigned i;
	for (i = 1; i < instr->regs_count; i++) {
		struct ir3_register *reg = instr->regs[i];
		if ((reg->flags & IR3_REG_SSA) && (ctx->pred == reg->instr))
			return true;
	}
	return false;
}

/* when we encounter an instruction that writes to the address (or
 * predicate) register while it is in use, we delay that instruction
 * and try to schedule all other instructions using the current
 * register value first:
 */
static int block_sched_undelayed(struct ir3_sched_ctx *ctx,
		struct ir3_block *block)
{
	struct ir3_instruction *instr = block->head;
	bool addr_in_use = false;
	bool pred_in_use = false;
	bool all_delayed = true;
	unsigned cnt = ~0, attempted = 0;

	while (instr) {
		struct ir3_instruction *next = instr->next;
		bool addr = uses_current_addr(ctx, instr);
		bool pred = uses_current_pred(ctx, instr);

		if (addr || pred) {
			int ret = trysched(ctx, instr);

			if (ret != DELAYED)
				all_delayed = false;

			if (ret == SCHEDULED)
				cnt = 0;
			else if (ret > 0)
				cnt = MIN2(cnt, ret);
			if (addr)
				addr_in_use = true;
			if (pred)
				pred_in_use = true;

			attempted++;
		}

		instr = next;
	}

	if (!addr_in_use)
		ctx->addr = NULL;

	if (!pred_in_use)
		ctx->pred = NULL;

	/* detect if we've gotten ourselves into an impossible situation
	 * and bail if needed:
	 */
	if (all_delayed && (attempted > 0))
		ctx->error = true;

	return cnt;
}

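/* schedule a single basic block: inputs first, then repeatedly try to
 * schedule the deepest remaining instruction, filling required delay
 * slots with shallower instructions or, failing that, with nops:
 */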
static void block_sched(struct ir3_sched_ctx *ctx, struct ir3_block *block)
{
	struct ir3_instruction *instr;

	/* schedule all the shader inputs (meta-instr) first so that
	 * the RA step sees that the input registers contain a value
	 * from the start of the shader:
	 */
	if (!block->parent) {
		unsigned i;
		for (i = 0; i < block->ninputs; i++) {
			struct ir3_instruction *in = block->inputs[i];
			if (in)
				schedule(ctx, in, true);
		}
	}

	while ((instr = block->head) && !ctx->error) {
		/* NOTE: always grab next *before* trysched(), in case the
		 * instruction is actually scheduled (and therefore moved
		 * from depth list into scheduled list)
		 */
		struct ir3_instruction *next = instr->next;
		int cnt = trysched(ctx, instr);

		if (cnt == DELAYED)
			cnt = block_sched_undelayed(ctx, block);

		/* -1 (SCHEDULED) is a signal to return up the stack, but to
		 * us it means the same as 0:
		 */
		cnt = MAX2(0, cnt);
		cnt += ctx->cnt;
		instr = next;

		/* if the deepest remaining instruction cannot be scheduled,
		 * try the increasingly more shallow instructions until the
		 * needed number of delay slots is filled:
		 */
		while (instr && (cnt > ctx->cnt)) {
			next = instr->next;
			trysched(ctx, instr);
			instr = next;
		}

		/* and if we run out of instructions that can be scheduled,
		 * then it is time for nops:
		 */
		while (cnt > ctx->cnt)
			schedule(ctx, ir3_instr_create(block, 0, OPC_NOP), false);
	}

	/* at this point, scheduled list is in reverse order, so fix that: */
	block->head = reverse(ctx->scheduled);
}

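/* schedule the block, returning 0 on success, or -1 if we got stuck
 * (ctx.error set by block_sched_undelayed()):
 */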
int ir3_block_sched(struct ir3_block *block)
{
	struct ir3_sched_ctx ctx = {0};
	ir3_clear_mark(block->shader);
	block_sched(&ctx, block);
	if (ctx.error)
		return -1;
	return 0;
}