/*
 * Copyright © 2010 Intel Corporation
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * @file vc4_qpu_schedule.c
 *
 * The basic model of the list scheduler is to take a basic block, compute a
 * DAG of the dependencies, and make a list of the DAG heads. Heuristically
 * pick a DAG head, then put all the children that are now DAG heads into the
 * list of things to schedule.
 *
 * The goal of scheduling here is to pack pairs of operations together in a
 * single QPU instruction.
 */
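
/*
 * Overall flow (see qpu_schedule_instructions() at the bottom of the file):
 * each queued QPU instruction is wrapped in a schedule_node, dependencies
 * are gathered in a forward and a reverse walk over the block, per-node
 * delays are computed from the resulting DAG, and instructions are then
 * picked greedily (pairing two of them into one QPU instruction whenever the
 * encodings allow it) until the list is empty.
 */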

#include "vc4_qir.h"
#include "vc4_qpu.h"
#include "util/ralloc.h"

static bool debug;

struct schedule_node_child;

struct schedule_node {
        struct list_head link;
        struct queued_qpu_inst *inst;
        struct schedule_node_child *children;
        uint32_t child_count;
        uint32_t child_array_size;
        uint32_t parent_count;

        /**
         * Minimum number of cycles from scheduling this instruction until the
         * end of the program, based on the slowest dependency chain through
         * the children.
         */
        uint32_t delay;

        /**
         * Cycles between this instruction being scheduled and when its result
         * can be consumed.
         */
        uint32_t latency;

        /**
         * Which uniform from uniform_data[] this instruction read, or -1 if
         * not reading a uniform.
         */
        int uniform;
};

struct schedule_node_child {
        struct schedule_node *node;
        bool write_after_read;
};

/* When walking the instructions in reverse, we need to swap before/after in
 * add_dep().
 */
enum direction { F, R };

struct schedule_state {
        struct schedule_node *last_r[6];
        struct schedule_node *last_ra[32];
        struct schedule_node *last_rb[32];
        struct schedule_node *last_sf;
        struct schedule_node *last_vpm_read;
        struct schedule_node *last_tmu_write;
        struct schedule_node *last_tlb;
        struct schedule_node *last_vpm;
        enum direction dir;
};

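/**
 * Adds a dependency edge so that "before" is scheduled ahead of "after".
 *
 * In the reverse pass the two arguments arrive swapped, and a read
 * dependency really describes a write-after-read hazard: the reader has to
 * issue no later than the next write of the same resource. Those edges are
 * flagged with write_after_read so the scheduler can release them as soon
 * as the reader has been picked (see mark_instruction_scheduled()).
 */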
static void
add_dep(struct schedule_state *state,
        struct schedule_node *before,
        struct schedule_node *after,
        bool write)
{
        bool write_after_read = !write && state->dir == R;

        if (!before || !after)
                return;

        assert(before != after);

        if (state->dir == R) {
                struct schedule_node *t = before;
                before = after;
                after = t;
        }

        for (int i = 0; i < before->child_count; i++) {
                if (before->children[i].node == after &&
                    (before->children[i].write_after_read == write_after_read)) {
                        return;
                }
        }

        if (before->child_array_size <= before->child_count) {
                before->child_array_size = MAX2(before->child_array_size * 2, 16);
                before->children = reralloc(before, before->children,
                                            struct schedule_node_child,
                                            before->child_array_size);
        }

        before->children[before->child_count].node = after;
        before->children[before->child_count].write_after_read =
                write_after_read;
        before->child_count++;
        after->parent_count++;
}

static void
add_read_dep(struct schedule_state *state,
             struct schedule_node *before,
             struct schedule_node *after)
{
        add_dep(state, before, after, false);
}

static void
add_write_dep(struct schedule_state *state,
              struct schedule_node **before,
              struct schedule_node *after)
{
        add_dep(state, *before, after, true);
        *before = after;
}

static bool
qpu_writes_r4(uint64_t inst)
{
        uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);

        switch (sig) {
        case QPU_SIG_COLOR_LOAD:
        case QPU_SIG_LOAD_TMU0:
        case QPU_SIG_LOAD_TMU1:
        case QPU_SIG_ALPHA_MASK_LOAD:
                return true;
        default:
                return false;
        }
}

static void
process_raddr_deps(struct schedule_state *state, struct schedule_node *n,
                   uint32_t raddr, bool is_a)
{
        switch (raddr) {
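        /* A varying read also updates r5 as a side effect, so order it like
         * a write of r5.
         */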
        case QPU_R_VARY:
                add_write_dep(state, &state->last_r[5], n);
                break;

        case QPU_R_VPM:
                add_write_dep(state, &state->last_vpm_read, n);
                break;

        case QPU_R_UNIF:
        case QPU_R_NOP:
        case QPU_R_ELEM_QPU:
        case QPU_R_XY_PIXEL_COORD:
        case QPU_R_MS_REV_FLAGS:
                break;

        default:
                if (raddr < 32) {
                        if (is_a)
                                add_read_dep(state, state->last_ra[raddr], n);
                        else
                                add_read_dep(state, state->last_rb[raddr], n);
                } else {
                        fprintf(stderr, "unknown raddr %d\n", raddr);
                        abort();
                }
                break;
        }
}

static bool
is_tmu_write(uint32_t waddr)
{
        switch (waddr) {
        case QPU_W_TMU0_S:
        case QPU_W_TMU0_T:
        case QPU_W_TMU0_R:
        case QPU_W_TMU0_B:
        case QPU_W_TMU1_S:
        case QPU_W_TMU1_T:
        case QPU_W_TMU1_R:
        case QPU_W_TMU1_B:
                return true;
        default:
                return false;
        }
}

static bool
reads_uniform(uint64_t inst)
{
        if (QPU_GET_FIELD(inst, QPU_SIG) == QPU_SIG_LOAD_IMM)
                return false;

        return (QPU_GET_FIELD(inst, QPU_RADDR_A) == QPU_R_UNIF ||
                (QPU_GET_FIELD(inst, QPU_RADDR_B) == QPU_R_UNIF &&
                 QPU_GET_FIELD(inst, QPU_SIG) != QPU_SIG_SMALL_IMM) ||
                is_tmu_write(QPU_GET_FIELD(inst, QPU_WADDR_ADD)) ||
                is_tmu_write(QPU_GET_FIELD(inst, QPU_WADDR_MUL)));
}

static void
process_mux_deps(struct schedule_state *state, struct schedule_node *n,
                 uint32_t mux)
{
        if (mux != QPU_MUX_A && mux != QPU_MUX_B)
                add_read_dep(state, state->last_r[mux], n);
}


static void
process_waddr_deps(struct schedule_state *state, struct schedule_node *n,
                   uint32_t waddr, bool is_add)
{
        uint64_t inst = n->inst->inst;
        bool is_a = is_add ^ ((inst & QPU_WS) != 0);

        if (waddr < 32) {
                if (is_a) {
                        add_write_dep(state, &state->last_ra[waddr], n);
                } else {
                        add_write_dep(state, &state->last_rb[waddr], n);
                }
        } else if (is_tmu_write(waddr)) {
                add_write_dep(state, &state->last_tmu_write, n);
        } else if (qpu_waddr_is_tlb(waddr)) {
                add_write_dep(state, &state->last_tlb, n);
        } else {
                switch (waddr) {
                case QPU_W_ACC0:
                case QPU_W_ACC1:
                case QPU_W_ACC2:
                case QPU_W_ACC3:
                case QPU_W_ACC5:
                        add_write_dep(state, &state->last_r[waddr - QPU_W_ACC0],
                                      n);
                        break;

                case QPU_W_VPM:
                        add_write_dep(state, &state->last_vpm, n);
                        break;

                case QPU_W_VPMVCD_SETUP:
                        if (is_a)
                                add_write_dep(state, &state->last_vpm_read, n);
                        else
                                add_write_dep(state, &state->last_vpm, n);
                        break;

                case QPU_W_SFU_RECIP:
                case QPU_W_SFU_RECIPSQRT:
                case QPU_W_SFU_EXP:
                case QPU_W_SFU_LOG:
                        add_write_dep(state, &state->last_r[4], n);
                        break;

                case QPU_W_TLB_STENCIL_SETUP:
                        /* This isn't a TLB operation that does things like
                         * implicitly lock the scoreboard, but it does have to
                         * appear before TLB_Z, and the TLB_STENCIL_SETUP
                         * writes have to be scheduled in the same order
                         * relative to each other.
                         */
                        add_write_dep(state, &state->last_tlb, n);
                        break;

                case QPU_W_NOP:
                        break;

                default:
                        fprintf(stderr, "Unknown waddr %d\n", waddr);
                        abort();
                }
        }
}

static void
process_cond_deps(struct schedule_state *state, struct schedule_node *n,
                  uint32_t cond)
{
        switch (cond) {
        case QPU_COND_NEVER:
        case QPU_COND_ALWAYS:
                break;
        default:
                add_read_dep(state, state->last_sf, n);
                break;
        }
}

/**
 * Common code for dependencies that need to be tracked both forward and
 * backward.
 *
 * This is for things like "all reads of r4 have to happen between the r4
 * writes that surround them".
 */
static void
calculate_deps(struct schedule_state *state, struct schedule_node *n)
{
        uint64_t inst = n->inst->inst;
        uint32_t add_op = QPU_GET_FIELD(inst, QPU_OP_ADD);
        uint32_t mul_op = QPU_GET_FIELD(inst, QPU_OP_MUL);
        uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
        uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
        uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
        uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
        uint32_t add_a = QPU_GET_FIELD(inst, QPU_ADD_A);
        uint32_t add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
        uint32_t mul_a = QPU_GET_FIELD(inst, QPU_MUL_A);
        uint32_t mul_b = QPU_GET_FIELD(inst, QPU_MUL_B);
        uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);

        if (sig != QPU_SIG_LOAD_IMM) {
                process_raddr_deps(state, n, raddr_a, true);
                if (sig != QPU_SIG_SMALL_IMM)
                        process_raddr_deps(state, n, raddr_b, false);
        }

        if (add_op != QPU_A_NOP) {
                process_mux_deps(state, n, add_a);
                process_mux_deps(state, n, add_b);
        }
        if (mul_op != QPU_M_NOP) {
                process_mux_deps(state, n, mul_a);
                process_mux_deps(state, n, mul_b);
        }

        process_waddr_deps(state, n, waddr_add, true);
        process_waddr_deps(state, n, waddr_mul, false);
        if (qpu_writes_r4(inst))
                add_write_dep(state, &state->last_r[4], n);

        switch (sig) {
        case QPU_SIG_SW_BREAKPOINT:
        case QPU_SIG_NONE:
        case QPU_SIG_THREAD_SWITCH:
        case QPU_SIG_LAST_THREAD_SWITCH:
        case QPU_SIG_SMALL_IMM:
        case QPU_SIG_LOAD_IMM:
                break;

        case QPU_SIG_LOAD_TMU0:
        case QPU_SIG_LOAD_TMU1:
                /* TMU loads are coming from a FIFO, so their order relative
                 * to the TMU register writes that queued them matters.
                 */
                add_write_dep(state, &state->last_tmu_write, n);
                break;

        case QPU_SIG_COLOR_LOAD:
                add_read_dep(state, state->last_tlb, n);
                break;

        case QPU_SIG_PROG_END:
        case QPU_SIG_WAIT_FOR_SCOREBOARD:
        case QPU_SIG_SCOREBOARD_UNLOCK:
        case QPU_SIG_COVERAGE_LOAD:
        case QPU_SIG_COLOR_LOAD_END:
        case QPU_SIG_ALPHA_MASK_LOAD:
        case QPU_SIG_BRANCH:
                fprintf(stderr, "Unhandled signal bits %d\n", sig);
                abort();
        }

        process_cond_deps(state, n, QPU_GET_FIELD(inst, QPU_COND_ADD));
        process_cond_deps(state, n, QPU_GET_FIELD(inst, QPU_COND_MUL));
        if (inst & QPU_SF)
                add_write_dep(state, &state->last_sf, n);
}

static void
calculate_forward_deps(struct vc4_compile *c, struct list_head *schedule_list)
{
        struct schedule_state state;

        memset(&state, 0, sizeof(state));
        state.dir = F;

        list_for_each_entry(struct schedule_node, node, schedule_list, link)
                calculate_deps(&state, node);
}

static void
calculate_reverse_deps(struct vc4_compile *c, struct list_head *schedule_list)
{
        struct list_head *node;
        struct schedule_state state;

        memset(&state, 0, sizeof(state));
        state.dir = R;

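        /* Walk the list from tail to head. The cast from the list_head back
         * to the schedule_node works because "link" is the first member of
         * struct schedule_node.
         */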
        for (node = schedule_list->prev; schedule_list != node; node = node->prev) {
                calculate_deps(&state, (struct schedule_node *)node);
        }
}

struct choose_scoreboard {
        int tick;
        int last_sfu_write_tick;
        uint32_t last_waddr_a, last_waddr_b;
};

static bool
reads_too_soon_after_write(struct choose_scoreboard *scoreboard, uint64_t inst)
{
        uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
        uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
        uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
        uint32_t src_muxes[] = {
                QPU_GET_FIELD(inst, QPU_ADD_A),
                QPU_GET_FIELD(inst, QPU_ADD_B),
                QPU_GET_FIELD(inst, QPU_MUL_A),
                QPU_GET_FIELD(inst, QPU_MUL_B),
        };
        for (int i = 0; i < ARRAY_SIZE(src_muxes); i++) {
                if ((src_muxes[i] == QPU_MUX_A &&
                     raddr_a < 32 &&
                     scoreboard->last_waddr_a == raddr_a) ||
                    (src_muxes[i] == QPU_MUX_B &&
                     sig != QPU_SIG_SMALL_IMM &&
                     raddr_b < 32 &&
                     scoreboard->last_waddr_b == raddr_b)) {
                        return true;
                }

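                /* The SFU takes a couple of extra instructions to deliver
                 * its result to r4, so reading r4 too soon after an SFU
                 * write would pick up a stale value.
                 */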
                if (src_muxes[i] == QPU_MUX_R4) {
                        if (scoreboard->tick -
                            scoreboard->last_sfu_write_tick <= 2) {
                                return true;
                        }
                }
        }

        return false;
}

static bool
pixel_scoreboard_too_soon(struct choose_scoreboard *scoreboard, uint64_t inst)
{
        return (scoreboard->tick < 2 && qpu_inst_is_tlb(inst));
}

static int
get_instruction_priority(uint64_t inst)
{
        uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
        uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
        uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
        uint32_t baseline_score;
        uint32_t next_score = 0;
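
        /* The resulting ranking is: TMU setup writes (3) > everything
         * else (2) > TMU result loads (1) > TLB access (0). Higher scores
         * get scheduled earlier.
         */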

        /* Schedule TLB operations as late as possible, to get more
         * parallelism between shaders.
         */
        if (qpu_inst_is_tlb(inst))
                return next_score;
        next_score++;

        /* Schedule texture read results collection late to hide latency. */
        if (sig == QPU_SIG_LOAD_TMU0 || sig == QPU_SIG_LOAD_TMU1)
                return next_score;
        next_score++;

        /* Default score for things that aren't otherwise special. */
        baseline_score = next_score;
        next_score++;

        /* Schedule texture read setup early to hide their latency better. */
        if (is_tmu_write(waddr_add) || is_tmu_write(waddr_mul))
                return next_score;
        next_score++;

        return baseline_score;
}

static struct schedule_node *
choose_instruction_to_schedule(struct choose_scoreboard *scoreboard,
                               struct list_head *schedule_list,
                               struct schedule_node *prev_inst)
{
        struct schedule_node *chosen = NULL;
        int chosen_prio = 0;

        list_for_each_entry(struct schedule_node, n, schedule_list, link) {
                uint64_t inst = n->inst->inst;

                /* "An instruction must not read from a location in physical
                 * regfile A or B that was written to by the previous
                 * instruction."
                 */
                if (reads_too_soon_after_write(scoreboard, inst))
                        continue;

                /* "A scoreboard wait must not occur in the first two
                 * instructions of a fragment shader. This is either the
                 * explicit Wait for Scoreboard signal or an implicit wait
                 * with the first tile-buffer read or write instruction."
                 */
                if (pixel_scoreboard_too_soon(scoreboard, inst))
                        continue;

                /* If we're trying to pair with another instruction, check
                 * that they're compatible.
                 */
                if (prev_inst) {
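                        /* The uniform stream is rewritten in schedule order,
                         * and each QPU instruction can only consume one
                         * uniform, so don't pair two instructions that both
                         * read one.
                         */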
                        if (prev_inst->uniform != -1 && n->uniform != -1)
                                continue;

                        inst = qpu_merge_inst(prev_inst->inst->inst, inst);
                        if (!inst)
                                continue;
                }

                int prio = get_instruction_priority(inst);

                /* Found a valid instruction. If nothing better comes along,
                 * this one works.
                 */
                if (!chosen) {
                        chosen = n;
                        chosen_prio = prio;
                        continue;
                }

                if (prio > chosen_prio) {
                        chosen = n;
                        chosen_prio = prio;
                } else if (prio < chosen_prio) {
                        continue;
                }

                if (n->delay > chosen->delay) {
                        chosen = n;
                        chosen_prio = prio;
                } else if (n->delay < chosen->delay) {
                        continue;
                }
        }

        return chosen;
}

static void
update_scoreboard_for_chosen(struct choose_scoreboard *scoreboard,
                             uint64_t inst)
{
        uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
        uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);

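        /* The WS bit swaps which physical regfile (A or B) the add and mul
         * results land in.
         */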
        if (!(inst & QPU_WS)) {
                scoreboard->last_waddr_a = waddr_add;
                scoreboard->last_waddr_b = waddr_mul;
        } else {
                scoreboard->last_waddr_b = waddr_add;
                scoreboard->last_waddr_a = waddr_mul;
        }

        if ((waddr_add >= QPU_W_SFU_RECIP && waddr_add <= QPU_W_SFU_LOG) ||
            (waddr_mul >= QPU_W_SFU_RECIP && waddr_mul <= QPU_W_SFU_LOG)) {
                scoreboard->last_sfu_write_tick = scoreboard->tick;
        }
}

static void
dump_state(struct list_head *schedule_list)
{
        uint32_t i = 0;

        list_for_each_entry(struct schedule_node, n, schedule_list, link) {
                fprintf(stderr, "%3d: ", i++);
                vc4_qpu_disasm(&n->inst->inst, 1);
                fprintf(stderr, "\n");

                for (int i = 0; i < n->child_count; i++) {
                        struct schedule_node *child = n->children[i].node;
                        if (!child)
                                continue;

                        fprintf(stderr, " - ");
                        vc4_qpu_disasm(&child->inst->inst, 1);
                        fprintf(stderr, " (%d parents, %c)\n",
                                child->parent_count,
                                n->children[i].write_after_read ? 'w' : 'r');
                }
        }
}

/** Recursive computation of the delay member of a node. */
static void
compute_delay(struct schedule_node *n)
{
        if (!n->child_count) {
                n->delay = 1;
        } else {
                for (int i = 0; i < n->child_count; i++) {
                        if (!n->children[i].node->delay)
                                compute_delay(n->children[i].node);
                        n->delay = MAX2(n->delay,
                                        n->children[i].node->delay + n->latency);
                }
        }
}

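/**
 * Removes a DAG head from its children's parent lists, adding any children
 * that thereby become DAG heads to the list of schedulable instructions.
 *
 * With war_only set, only write-after-read edges are released: the chosen
 * instruction reads its sources when it issues, so an instruction that
 * merely overwrites one of those sources may still be packed into the same
 * QPU instruction, while true dependents have to wait.
 */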
static void
mark_instruction_scheduled(struct list_head *schedule_list,
                           struct schedule_node *node,
                           bool war_only)
{
        if (!node)
                return;

        for (int i = node->child_count - 1; i >= 0; i--) {
                struct schedule_node *child =
                        node->children[i].node;

                if (!child)
                        continue;

                if (war_only && !node->children[i].write_after_read)
                        continue;

                child->parent_count--;
                if (child->parent_count == 0)
                        list_add(&child->link, schedule_list);

                node->children[i].node = NULL;
        }
}

static void
schedule_instructions(struct vc4_compile *c, struct list_head *schedule_list)
{
        struct choose_scoreboard scoreboard;

        /* We reorder the uniforms as we schedule instructions, so save the
         * old data off and replace it.
         */
        uint32_t *uniform_data = c->uniform_data;
        enum quniform_contents *uniform_contents = c->uniform_contents;
        c->uniform_contents = ralloc_array(c, enum quniform_contents,
                                           c->num_uniforms);
        c->uniform_data = ralloc_array(c, uint32_t, c->num_uniforms);
        c->uniform_array_size = c->num_uniforms;
        uint32_t next_uniform = 0;

        memset(&scoreboard, 0, sizeof(scoreboard));
        scoreboard.last_waddr_a = ~0;
        scoreboard.last_waddr_b = ~0;
        scoreboard.last_sfu_write_tick = -10;

        if (debug) {
                fprintf(stderr, "initial deps:\n");
                dump_state(schedule_list);
                fprintf(stderr, "\n");
        }

        /* Remove non-DAG heads from the list. */
        list_for_each_entry_safe(struct schedule_node, n, schedule_list, link) {
                if (n->parent_count != 0)
                        list_del(&n->link);
        }

        while (!list_empty(schedule_list)) {
                struct schedule_node *chosen =
                        choose_instruction_to_schedule(&scoreboard,
                                                       schedule_list,
                                                       NULL);
                struct schedule_node *merge = NULL;

                /* If there are no valid instructions to schedule, drop a NOP
                 * in.
                 */
                uint64_t inst = chosen ? chosen->inst->inst : qpu_NOP();

                if (debug) {
                        fprintf(stderr, "current list:\n");
                        dump_state(schedule_list);
                        fprintf(stderr, "chose: ");
                        vc4_qpu_disasm(&inst, 1);
                        fprintf(stderr, "\n");
                }

                /* Schedule this instruction onto the QPU list. Also try to
                 * find an instruction to pair with it.
                 */
                if (chosen) {
                        list_del(&chosen->link);
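                        /* Release the WAR-only children now so that they can
                         * be considered for pairing with this instruction.
                         */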
                        mark_instruction_scheduled(schedule_list, chosen, true);
                        if (chosen->uniform != -1) {
                                c->uniform_data[next_uniform] =
                                        uniform_data[chosen->uniform];
                                c->uniform_contents[next_uniform] =
                                        uniform_contents[chosen->uniform];
                                next_uniform++;
                        }

                        merge = choose_instruction_to_schedule(&scoreboard,
                                                               schedule_list,
                                                               chosen);
                        if (merge) {
                                list_del(&merge->link);
                                inst = qpu_merge_inst(inst, merge->inst->inst);
                                assert(inst != 0);
                                if (merge->uniform != -1) {
                                        c->uniform_data[next_uniform] =
                                                uniform_data[merge->uniform];
                                        c->uniform_contents[next_uniform] =
                                                uniform_contents[merge->uniform];
                                        next_uniform++;
                                }

                                if (debug) {
                                        fprintf(stderr, "merging: ");
                                        vc4_qpu_disasm(&merge->inst->inst, 1);
                                        fprintf(stderr, "\n");
                                        fprintf(stderr, "resulting in: ");
                                        vc4_qpu_disasm(&inst, 1);
                                        fprintf(stderr, "\n");
                                }
                        }
                }

                if (debug) {
                        fprintf(stderr, "\n");
                }

                qpu_serialize_one_inst(c, inst);

                update_scoreboard_for_chosen(&scoreboard, inst);

                /* Now that we've scheduled a new instruction, some of its
                 * children can be promoted to the list of instructions ready
                 * to be scheduled.
                 */
                mark_instruction_scheduled(schedule_list, chosen, false);
                mark_instruction_scheduled(schedule_list, merge, false);

                scoreboard.tick++;
        }

        assert(next_uniform == c->num_uniforms);
}

static uint32_t waddr_latency(uint32_t waddr)
{
        if (waddr < 32)
                return 2;

        /* Some huge number, really. */
        if (waddr >= QPU_W_TMU0_S && waddr <= QPU_W_TMU1_B)
                return 10;

        switch (waddr) {
        case QPU_W_SFU_RECIP:
        case QPU_W_SFU_RECIPSQRT:
        case QPU_W_SFU_EXP:
        case QPU_W_SFU_LOG:
                return 3;
        default:
                return 1;
        }
}

static uint32_t
instruction_latency(uint64_t inst)
{
        return MAX2(waddr_latency(QPU_GET_FIELD(inst, QPU_WADDR_ADD)),
                    waddr_latency(QPU_GET_FIELD(inst, QPU_WADDR_MUL)));
}

void
qpu_schedule_instructions(struct vc4_compile *c)
{
        void *mem_ctx = ralloc_context(NULL);
        struct list_head schedule_list;

        list_inithead(&schedule_list);

        if (debug) {
                fprintf(stderr, "Pre-schedule instructions\n");
                list_for_each_entry(struct queued_qpu_inst, q,
                                    &c->qpu_inst_list, link) {
                        vc4_qpu_disasm(&q->inst, 1);
                        fprintf(stderr, "\n");
                }
                fprintf(stderr, "\n");
        }

        /* Wrap each instruction in a scheduler structure. */
        uint32_t next_uniform = 0;
        while (!list_empty(&c->qpu_inst_list)) {
                struct queued_qpu_inst *inst =
                        (struct queued_qpu_inst *)c->qpu_inst_list.next;
                struct schedule_node *n = rzalloc(mem_ctx, struct schedule_node);

                n->inst = inst;
                n->latency = instruction_latency(inst->inst);

                if (reads_uniform(inst->inst)) {
                        n->uniform = next_uniform++;
                } else {
                        n->uniform = -1;
                }
                list_del(&inst->link);
                list_addtail(&n->link, &schedule_list);
        }
        assert(next_uniform == c->num_uniforms);

        calculate_forward_deps(c, &schedule_list);
        calculate_reverse_deps(c, &schedule_list);

        list_for_each_entry(struct schedule_node, n, &schedule_list, link) {
                compute_delay(n);
        }

        schedule_instructions(c, &schedule_list);

        if (debug) {
                fprintf(stderr, "Post-schedule instructions\n");
                vc4_qpu_disasm(c->qpu_insts, c->qpu_inst_count);
                fprintf(stderr, "\n");
        }

        ralloc_free(mem_ctx);
}