/*
 * Copyright © 2010 Intel Corporation
 * Copyright © 2014-2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * @file vc4_qir_schedule.c
 *
 * The basic model of the list scheduler is to take a basic block, compute a
 * DAG of the dependencies from the bottom up, and make a list of the DAG
 * heads.  Heuristically pick a DAG head and schedule (remove) it, then put
 * all the parents that are now DAG heads into the list of things to
 * schedule.
 *
 * The goal of scheduling here, before register allocation and conversion to
 * QPU instructions, is to reduce register pressure by reordering
 * instructions so that values are consumed soon after they are produced,
 * keeping fewer temporaries live at any point.
 */
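
/* A small illustrative sketch of the model above (informal mnemonics, not
 * exact QIR dump syntax):
 *
 *     t0 = uniform           <- child of the multiply's node
 *     t1 = fmul t0, t0       <- child of the TLB write's node
 *     tlb_color = mov t1     <- DAG head: nothing later depends on it
 *
 * Scheduling pops DAG heads like the TLB write first and re-adds each chosen
 * instruction at the front of the block, so the program is rebuilt bottom-up
 * while the scheduler's notion of time advances from the end of the block
 * toward the start.
 */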

#include "vc4_qir.h"

static bool debug;

struct schedule_node {
        struct list_head link;
        struct qinst *inst;

        struct schedule_node **children;
        uint32_t child_count;
        uint32_t child_array_size;
        uint32_t parent_count;

        /* Length of the longest (latency) chain from a DAG head to this
         * instruction.
         */
        uint32_t delay;

        /* Longest time + latency_between(parent, this) of any parent of this
         * node.
         */
        uint32_t unblocked_time;
};

struct schedule_state {
        /* List of struct schedule_node *.  This starts out with all
         * instructions, and after dependency updates it's trimmed to be just
         * the DAG heads.
         */
        struct list_head worklist;

        uint32_t time;

        uint32_t *temp_writes;

        BITSET_WORD *temp_live;
};

/* When walking the instructions in reverse, we need to swap before/after in
 * add_dep().
 */
enum direction { F, R };

/**
 * Marks a dependency between two instructions: \p after must appear after
 * \p before.
 *
 * Our dependencies are tracked as a DAG.  Since we're scheduling bottom-up,
 * the latest instructions with nothing left to schedule are the DAG heads,
 * and their inputs are their children.
 */
static void
add_dep(enum direction dir,
        struct schedule_node *before,
        struct schedule_node *after)
{
        if (!before || !after)
                return;

        assert(before != after);

        if (dir == R) {
                struct schedule_node *t = before;
                before = after;
                after = t;
        }

        /* Don't add duplicate edges. */
        for (int i = 0; i < after->child_count; i++) {
                if (after->children[i] == before)
                        return;
        }

        if (after->child_array_size <= after->child_count) {
                after->child_array_size = MAX2(after->child_array_size * 2, 16);
                after->children = reralloc(after, after->children,
                                           struct schedule_node *,
                                           after->child_array_size);
        }

        after->children[after->child_count] = before;
        after->child_count++;
        before->parent_count++;
}

static void
add_write_dep(enum direction dir,
              struct schedule_node **before,
              struct schedule_node *after)
{
        add_dep(dir, *before, after);
        *before = after;
}
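
/* Hypothetical example of how the helpers above serialize accesses to a
 * shared resource: for three VPM reads A, B and C walked in the forward
 * direction, add_write_dep() is called with &state->last_vpm_read for each
 * one, so A becomes a child of B and B a child of C.  The resulting chain
 * keeps the scheduler from ever reordering VPM reads with respect to each
 * other.
 */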

struct schedule_setup_state {
        struct schedule_node **last_temp_write;
        struct schedule_node *last_sf;
        struct schedule_node *last_vary_read;
        struct schedule_node *last_vpm_read;
        struct schedule_node *last_vpm_write;
        struct schedule_node *last_tex_coord;
        struct schedule_node *last_tex_result;
        struct schedule_node *last_tlb;
        enum direction dir;

        /**
         * Texture FIFO tracking.  This is done top-to-bottom, and is used to
         * track the QOP_TEX_RESULTs and add dependencies on previous ones
         * when trying to submit texture coords with TFREQ full or new
         * texture fetches with TFRCV full (see the worked example after this
         * struct).
         */
        struct {
                struct schedule_node *node;
                int coords;
        } tex_fifo[8];
        int tfreq_count; /**< Number of texture coords outstanding. */
        int tfrcv_count; /**< Number of texture results outstanding. */
        int tex_fifo_pos;
};
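
/* Worked example of the FIFO tracking above (hypothetical shader, limits
 * taken from calculate_forward_deps()): every QOP_TEX_S/T/R/B/DIRECT issued
 * since the last QOP_TEX_RESULT bumps tfreq_count, and every QOP_TEX_S or
 * QOP_TEX_DIRECT (the ops that complete a request) bumps tfrcv_count.  A
 * ninth coordinate write while tfreq_count == 8, or a fifth request while
 * tfrcv_count == 4, gets a dependency on the oldest outstanding
 * QOP_TEX_RESULT via block_until_tex_result(), draining one FIFO entry
 * before more texturing work is queued.
 */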

static void
block_until_tex_result(struct schedule_setup_state *state, struct schedule_node *n)
{
        add_dep(state->dir, state->tex_fifo[0].node, n);

        state->tfreq_count -= state->tex_fifo[0].coords;
        state->tfrcv_count--;

        memmove(&state->tex_fifo[0],
                &state->tex_fifo[1],
                state->tex_fifo_pos * sizeof(state->tex_fifo[0]));
        state->tex_fifo_pos--;
}

/**
 * Common code for dependencies that need to be tracked both forward and
 * backward.
 *
 * This is for things like "all VPM reads have to happen in order."
 */
static void
calculate_deps(struct schedule_setup_state *state, struct schedule_node *n)
{
        struct qinst *inst = n->inst;
        enum direction dir = state->dir;

        /* Add deps for temp register and varying accesses.  Note that we
         * ignore uniform accesses, because qir_reorder_uniforms() happens
         * after this.
         */
        for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                switch (inst->src[i].file) {
                case QFILE_TEMP:
                        add_dep(dir,
                                state->last_temp_write[inst->src[i].index], n);
                        break;

                case QFILE_VARY:
                        add_write_dep(dir, &state->last_vary_read, n);
                        break;

                case QFILE_VPM:
                        add_write_dep(dir, &state->last_vpm_read, n);
                        break;

                default:
                        break;
                }
        }

        switch (inst->op) {
        case QOP_VARY_ADD_C:
                add_dep(dir, state->last_vary_read, n);
                break;

        case QOP_TEX_S:
        case QOP_TEX_T:
        case QOP_TEX_R:
        case QOP_TEX_B:
        case QOP_TEX_DIRECT:
                /* Texturing setup gets scheduled in order, because
                 * the uniforms referenced by them have to land in a
                 * specific order.
                 */
                add_write_dep(dir, &state->last_tex_coord, n);
                break;

        case QOP_TEX_RESULT:
                /* Results have to be fetched in order. */
                add_write_dep(dir, &state->last_tex_result, n);
                break;

        case QOP_TLB_COLOR_READ:
        case QOP_MS_MASK:
                add_write_dep(dir, &state->last_tlb, n);
                break;

        default:
                break;
        }

        switch (inst->dst.file) {
        case QFILE_VPM:
                add_write_dep(dir, &state->last_vpm_write, n);
                break;

        case QFILE_TEMP:
                add_write_dep(dir, &state->last_temp_write[inst->dst.index], n);
                break;

        case QFILE_TLB_COLOR_WRITE:
        case QFILE_TLB_COLOR_WRITE_MS:
        case QFILE_TLB_Z_WRITE:
        case QFILE_TLB_STENCIL_SETUP:
                add_write_dep(dir, &state->last_tlb, n);
                break;

        default:
                break;
        }

        if (qir_depends_on_flags(inst))
                add_dep(dir, state->last_sf, n);

        if (inst->sf)
                add_write_dep(dir, &state->last_sf, n);
}

static void
calculate_forward_deps(struct vc4_compile *c, void *mem_ctx,
                       struct list_head *schedule_list)
{
        struct schedule_setup_state state;

        memset(&state, 0, sizeof(state));
        state.last_temp_write = rzalloc_array(mem_ctx, struct schedule_node *,
                                              c->num_temps);
        state.dir = F;

        list_for_each_entry(struct schedule_node, n, schedule_list, link) {
                struct qinst *inst = n->inst;

                calculate_deps(&state, n);

                switch (inst->op) {
                case QOP_TEX_S:
                case QOP_TEX_T:
                case QOP_TEX_R:
                case QOP_TEX_B:
                case QOP_TEX_DIRECT:
                        /* If the texture coordinate fifo is full,
                         * block this on the last QOP_TEX_RESULT.
                         */
                        if (state.tfreq_count == 8) {
                                block_until_tex_result(&state, n);
                        }

                        /* If the texture result fifo is full, block
                         * adding any more to it until the last
                         * QOP_TEX_RESULT.
                         */
                        if (inst->op == QOP_TEX_S ||
                            inst->op == QOP_TEX_DIRECT) {
                                if (state.tfrcv_count == 4)
                                        block_until_tex_result(&state, n);
                                state.tfrcv_count++;
                        }

                        state.tex_fifo[state.tex_fifo_pos].coords++;
                        state.tfreq_count++;
                        break;

                case QOP_TEX_RESULT:
                        /* Results have to be fetched after the
                         * coordinate setup.  Note that we're assuming
                         * here that our input shader has the texture
                         * coord setup and result fetch in order,
                         * which is true initially but not of our
                         * instruction stream after this pass.
                         */
                        add_dep(state.dir, state.last_tex_coord, n);

                        state.tex_fifo[state.tex_fifo_pos].node = n;

                        state.tex_fifo_pos++;
                        memset(&state.tex_fifo[state.tex_fifo_pos], 0,
                               sizeof(state.tex_fifo[0]));
                        break;

                default:
                        assert(!qir_is_tex(inst));
                        break;
                }
        }
}

static void
calculate_reverse_deps(struct vc4_compile *c, void *mem_ctx,
                       struct list_head *schedule_list)
{
        struct schedule_setup_state state;

        memset(&state, 0, sizeof(state));
        state.dir = R;
        state.last_temp_write = rzalloc_array(mem_ctx, struct schedule_node *,
                                              c->num_temps);

        list_for_each_entry_rev(struct schedule_node, n, schedule_list, link) {
                calculate_deps(&state, n);
        }
}

static int
get_register_pressure_cost(struct schedule_state *state, struct qinst *inst)
{
        int cost = 0;

        if (inst->dst.file == QFILE_TEMP &&
            state->temp_writes[inst->dst.index] == 1)
                cost--;

        for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                if (inst->src[i].file == QFILE_TEMP &&
                    !BITSET_TEST(state->temp_live, inst->src[i].index)) {
                        cost++;
                }
        }

        return cost;
}
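
/* Worked example of the cost above (made-up temps): for "t3 = fadd t1, t2"
 * considered during bottom-up scheduling, where t3 has a single writer, t1
 * is already live (read by an instruction scheduled below) and t2 is not
 * yet live, the cost is -1 for retiring t3 + 0 for t1 + 1 for newly-live t2
 * = 0.  Lower (more negative) costs are what choose_instruction() prefers.
 */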

static bool
locks_scoreboard(struct qinst *inst)
{
        if (inst->op == QOP_TLB_COLOR_READ)
                return true;

        switch (inst->dst.file) {
        case QFILE_TLB_Z_WRITE:
        case QFILE_TLB_COLOR_WRITE:
        case QFILE_TLB_COLOR_WRITE_MS:
                return true;
        default:
                return false;
        }
}

static struct schedule_node *
choose_instruction(struct schedule_state *state)
{
        struct schedule_node *chosen = NULL;

        list_for_each_entry(struct schedule_node, n, &state->worklist, link) {
                if (!chosen) {
                        chosen = n;
                        continue;
                }

                /* Prefer scheduling things that lock the scoreboard, so that
                 * they appear late in the program and we get more parallelism
                 * between shaders on multiple QPUs hitting the same fragment.
                 */
                if (locks_scoreboard(n->inst) &&
                    !locks_scoreboard(chosen->inst)) {
                        chosen = n;
                        continue;
                } else if (!locks_scoreboard(n->inst) &&
                           locks_scoreboard(chosen->inst)) {
                        continue;
                }

                /* If we would block on the previously chosen node, but would
                 * block less on this one, then prefer it.
                 */
                if (chosen->unblocked_time > state->time &&
                    n->unblocked_time < chosen->unblocked_time) {
                        chosen = n;
                        continue;
                } else if (n->unblocked_time > state->time &&
                           n->unblocked_time > chosen->unblocked_time) {
                        continue;
                }

                /* If we can definitely reduce register pressure, do so
                 * immediately.
                 */
                int register_pressure_cost =
                        get_register_pressure_cost(state, n->inst);
                int chosen_register_pressure_cost =
                        get_register_pressure_cost(state, chosen->inst);

                if (register_pressure_cost < chosen_register_pressure_cost) {
                        chosen = n;
                        continue;
                } else if (register_pressure_cost >
                           chosen_register_pressure_cost) {
                        continue;
                }

                /* Otherwise, prefer instructions with the deepest chain to
                 * the end of the program.  This avoids the problem of
                 * "everything generates a temp, nothing finishes freeing one,
                 * guess I'll just keep emitting varying mul/adds".
                 */
                if (n->delay > chosen->delay) {
                        chosen = n;
                        continue;
                } else if (n->delay < chosen->delay) {
                        continue;
                }
        }

        return chosen;
}
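
/* To summarize the tie-breaking above, candidates are compared in priority
 * order: scoreboard-locking instructions first (so they land late in the
 * emitted program), then whichever node stalls less when the current choice
 * would stall past state->time, then lower register-pressure cost, then the
 * deeper delay chain.  If everything ties, the earlier worklist entry is
 * kept.
 */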

static void
dump_state(struct vc4_compile *c, struct schedule_state *state)
{
        uint32_t i = 0;
        list_for_each_entry(struct schedule_node, n, &state->worklist, link) {
                fprintf(stderr, "%3d: ", i++);
                qir_dump_inst(c, n->inst);
                fprintf(stderr, " (%d cost)\n",
                        get_register_pressure_cost(state, n->inst));

                for (int i = 0; i < n->child_count; i++) {
                        struct schedule_node *child = n->children[i];
                        fprintf(stderr, " - ");
                        qir_dump_inst(c, child->inst);
                        fprintf(stderr, " (%d parents)\n", child->parent_count);
                }
        }
}

/* Estimate of how many instructions we should schedule between operations.
 *
 * These aren't in real cycle counts, because we're just estimating cycle
 * times anyway.  QIR instructions will get paired up when turned into QPU
 * instructions, or extra NOP delays will have to be added due to register
 * allocation choices.
 */
static uint32_t
latency_between(struct schedule_node *before, struct schedule_node *after)
{
        if ((before->inst->op == QOP_TEX_S ||
             before->inst->op == QOP_TEX_DIRECT) &&
            after->inst->op == QOP_TEX_RESULT)
                return 100;

        return 1;
}
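
/* Reading of the estimate above: the gap between issuing texture
 * coordinates (QOP_TEX_S/QOP_TEX_DIRECT) and collecting the QOP_TEX_RESULT
 * is modeled as a hundred times the default single-unit estimate used for
 * everything else, which is meant to encourage the scheduler to fill that
 * gap with unrelated instructions rather than fetching the result
 * immediately.
 */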

/** Recursive computation of the delay member of a node. */
static void
compute_delay(struct schedule_node *n)
{
        if (!n->child_count) {
                /* The color read needs to be scheduled late, to avoid locking
                 * the scoreboard early.  This is our best tool for
                 * encouraging that.  The other scoreboard locking ops will
                 * have this happen by default, since they are generally the
                 * DAG heads or close to them.
                 */
                if (n->inst->op == QOP_TLB_COLOR_READ)
                        n->delay = 1000;
                else
                        n->delay = 1;
        } else {
                for (int i = 0; i < n->child_count; i++) {
                        if (!n->children[i]->delay)
                                compute_delay(n->children[i]);
                        n->delay = MAX2(n->delay,
                                        n->children[i]->delay +
                                        latency_between(n, n->children[i]));
                }
        }
}

static void
schedule_instructions(struct vc4_compile *c, struct schedule_state *state)
{
        if (debug) {
                fprintf(stderr, "initial deps:\n");
                dump_state(c, state);
        }

        /* Remove non-DAG heads from the list. */
        list_for_each_entry_safe(struct schedule_node, n,
                                 &state->worklist, link) {
                if (n->parent_count != 0)
                        list_del(&n->link);
        }

        state->time = 0;
        while (!list_empty(&state->worklist)) {
                struct schedule_node *chosen = choose_instruction(state);
                struct qinst *inst = chosen->inst;

                if (debug) {
                        fprintf(stderr, "current list:\n");
                        dump_state(c, state);
                        fprintf(stderr, "chose: ");
                        qir_dump_inst(c, inst);
                        fprintf(stderr, " (%d cost)\n",
                                get_register_pressure_cost(state, inst));
                }

                state->time = MAX2(state->time, chosen->unblocked_time);

                /* Schedule this instruction back onto the QIR list.  Since
                 * we're scheduling bottom-up, adding it at the head of
                 * c->instructions rebuilds the block in program order.
                 */
                list_del(&chosen->link);
                list_add(&inst->link, &c->instructions);

                /* Now that we've scheduled a new instruction, some of its
                 * children can be promoted to the list of instructions ready to
                 * be scheduled.  Update the children's unblocked time for this
                 * DAG edge as we do so.
                 */
                for (int i = chosen->child_count - 1; i >= 0; i--) {
                        struct schedule_node *child = chosen->children[i];

                        child->unblocked_time = MAX2(child->unblocked_time,
                                                     state->time +
                                                     latency_between(chosen,
                                                                     child));
                        child->parent_count--;
                        if (child->parent_count == 0)
                                list_add(&child->link, &state->worklist);
                }

                /* Update our tracking of register pressure. */
                for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                        if (inst->src[i].file == QFILE_TEMP)
                                BITSET_SET(state->temp_live, inst->src[i].index);
                }
                if (inst->dst.file == QFILE_TEMP) {
                        state->temp_writes[inst->dst.index]--;
                        if (state->temp_writes[inst->dst.index] == 0)
                                BITSET_CLEAR(state->temp_live, inst->dst.index);
                }

                state->time++;
        }
}

void
qir_schedule_instructions(struct vc4_compile *c)
{
        void *mem_ctx = ralloc_context(NULL);
        struct schedule_state state = { { 0 } };

        if (debug) {
                fprintf(stderr, "Pre-schedule instructions\n");
                qir_dump(c);
        }

        state.temp_writes = rzalloc_array(mem_ctx, uint32_t, c->num_temps);
        state.temp_live = rzalloc_array(mem_ctx, BITSET_WORD,
                                        BITSET_WORDS(c->num_temps));
        list_inithead(&state.worklist);

        /* Wrap each instruction in a scheduler structure. */
        list_for_each_entry_safe(struct qinst, inst, &c->instructions, link) {
                struct schedule_node *n = rzalloc(mem_ctx, struct schedule_node);

                n->inst = inst;
                list_del(&inst->link);
                list_addtail(&n->link, &state.worklist);

                if (inst->dst.file == QFILE_TEMP)
                        state.temp_writes[inst->dst.index]++;
        }

        /* Dependencies tracked top-to-bottom. */
        calculate_forward_deps(c, mem_ctx, &state.worklist);
        /* Dependencies tracked bottom-to-top. */
        calculate_reverse_deps(c, mem_ctx, &state.worklist);

        list_for_each_entry(struct schedule_node, n, &state.worklist, link)
                compute_delay(n);

        schedule_instructions(c, &state);

        if (debug) {
                fprintf(stderr, "Post-schedule instructions\n");
                qir_dump(c);
        }

        ralloc_free(mem_ctx);
}