broadcom/vc5: Fix scheduling for a non-SFU R4 write after a dead R4 write.
[mesa.git] / src/broadcom/compiler/qpu_schedule.c
1 /*
2 * Copyright © 2010 Intel Corporation
3 * Copyright © 2014-2017 Broadcom
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 /**
26 * @file
27 *
28 * The basic model of the list scheduler is to take a basic block, compute a
29 * DAG of the dependencies, and make a list of the DAG heads. Heuristically
30 * pick a DAG head, then put all the children that are now DAG heads into the
31 * list of things to schedule.
32 *
33 * The goal of scheduling here is to pack pairs of operations together in a
34 * single QPU instruction.
35 */
36
37 #include "qpu/qpu_disasm.h"
38 #include "v3d_compiler.h"
39 #include "util/ralloc.h"
40
41 static bool debug;
42
43 struct schedule_node_child;
44
45 struct schedule_node {
46 struct list_head link;
47 struct qinst *inst;
48 struct schedule_node_child *children;
49 uint32_t child_count;
50 uint32_t child_array_size;
51 uint32_t parent_count;
52
53 /* Largest (scheduled cycle + instruction_latency()) of any parent of this node. */
54 uint32_t unblocked_time;
55
56 /**
57 * Minimum number of cycles from scheduling this instruction until the
58 * end of the program, based on the slowest dependency chain through
59 * the children.
60 */
61 uint32_t delay;
62
63 /**
64 * Cycles between this instruction being scheduled and when its result
65 * can be consumed.
66 */
67 uint32_t latency;
68 };
69
70 struct schedule_node_child {
71 struct schedule_node *node;
72 bool write_after_read;
73 };
74
75 /* When walking the instructions in reverse, we need to swap before/after in
76 * add_dep().
77 */
78 enum direction { F, R };
79
80 struct schedule_state {
81 struct schedule_node *last_r[6];
82 struct schedule_node *last_rf[64];
83 struct schedule_node *last_sf;
84 struct schedule_node *last_vpm_read;
85 struct schedule_node *last_tmu_write;
86 struct schedule_node *last_tlb;
87 struct schedule_node *last_vpm;
88 struct schedule_node *last_unif;
89 struct schedule_node *last_rtop;
90 enum direction dir;
91 /* Estimated cycle when the current instruction would start. */
92 uint32_t time;
93 };
94
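/* Records a dependency edge from "before" to "after" in the DAG.  When the
 * block is being walked in reverse (state->dir == R) the two nodes are
 * swapped so edges always point forward in program order.  Duplicate edges
 * are skipped, the child array grows on demand, and the child's
 * parent_count is bumped so we know when it becomes a DAG head.
 */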
95 static void
96 add_dep(struct schedule_state *state,
97 struct schedule_node *before,
98 struct schedule_node *after,
99 bool write)
100 {
101 bool write_after_read = !write && state->dir == R;
102
103 if (!before || !after)
104 return;
105
106 assert(before != after);
107
108 if (state->dir == R) {
109 struct schedule_node *t = before;
110 before = after;
111 after = t;
112 }
113
114 for (int i = 0; i < before->child_count; i++) {
115 if (before->children[i].node == after &&
116 (before->children[i].write_after_read == write_after_read)) {
117 return;
118 }
119 }
120
121 if (before->child_array_size <= before->child_count) {
122 before->child_array_size = MAX2(before->child_array_size * 2, 16);
123 before->children = reralloc(before, before->children,
124 struct schedule_node_child,
125 before->child_array_size);
126 }
127
128 before->children[before->child_count].node = after;
129 before->children[before->child_count].write_after_read =
130 write_after_read;
131 before->child_count++;
132 after->parent_count++;
133 }
134
135 static void
136 add_read_dep(struct schedule_state *state,
137 struct schedule_node *before,
138 struct schedule_node *after)
139 {
140 add_dep(state, before, after, false);
141 }
142
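/* Adds a dependency on the last access tracked in *before, then makes this
 * node the new "last" so that later instructions depend on this write.
 */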
143 static void
144 add_write_dep(struct schedule_state *state,
145 struct schedule_node **before,
146 struct schedule_node *after)
147 {
148 add_dep(state, *before, after, true);
149 *before = after;
150 }
151
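/* Returns true if either ALU half of the instruction writes the TLB or TLBU
 * magic register.
 */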
152 static bool
153 qpu_inst_is_tlb(const struct v3d_qpu_instr *inst)
154 {
155 if (inst->type != V3D_QPU_INSTR_TYPE_ALU)
156 return false;
157
158 if (inst->alu.add.magic_write &&
159 (inst->alu.add.waddr == V3D_QPU_WADDR_TLB ||
160 inst->alu.add.waddr == V3D_QPU_WADDR_TLBU))
161 return true;
162
163 if (inst->alu.mul.magic_write &&
164 (inst->alu.mul.waddr == V3D_QPU_WADDR_TLB ||
165 inst->alu.mul.waddr == V3D_QPU_WADDR_TLBU))
166 return true;
167
168 return false;
169 }
170
171 static void
172 process_mux_deps(struct schedule_state *state, struct schedule_node *n,
173 enum v3d_qpu_mux mux)
174 {
175 switch (mux) {
176 case V3D_QPU_MUX_A:
177 add_read_dep(state, state->last_rf[n->inst->qpu.raddr_a], n);
178 break;
179 case V3D_QPU_MUX_B:
180 add_read_dep(state, state->last_rf[n->inst->qpu.raddr_b], n);
181 break;
182 default:
183 add_read_dep(state, state->last_r[mux - V3D_QPU_MUX_R0], n);
184 break;
185 }
186 }
187
188
189 static void
190 process_waddr_deps(struct schedule_state *state, struct schedule_node *n,
191 uint32_t waddr, bool magic)
192 {
193 if (!magic) {
194 add_write_dep(state, &state->last_rf[waddr], n);
195 } else if (v3d_qpu_magic_waddr_is_tmu(waddr)) {
196 add_write_dep(state, &state->last_tmu_write, n);
197 } else if (v3d_qpu_magic_waddr_is_sfu(waddr)) {
198 /* Handled by v3d_qpu_writes_r4() check. */
199 } else {
200 switch (waddr) {
201 case V3D_QPU_WADDR_R0:
202 case V3D_QPU_WADDR_R1:
203 case V3D_QPU_WADDR_R2:
204 add_write_dep(state,
205 &state->last_r[waddr - V3D_QPU_WADDR_R0],
206 n);
207 break;
208 case V3D_QPU_WADDR_R3:
209 case V3D_QPU_WADDR_R4:
210 case V3D_QPU_WADDR_R5:
211 /* Handled by v3d_qpu_writes_r*() checks below. */
212 break;
213
214 case V3D_QPU_WADDR_VPM:
215 case V3D_QPU_WADDR_VPMU:
216 add_write_dep(state, &state->last_vpm, n);
217 break;
218
219 case V3D_QPU_WADDR_TLB:
220 case V3D_QPU_WADDR_TLBU:
221 add_write_dep(state, &state->last_tlb, n);
222 break;
223
224 case V3D_QPU_WADDR_NOP:
225 break;
226
227 default:
228 fprintf(stderr, "Unknown waddr %d\n", waddr);
229 abort();
230 }
231 }
232 }
233
234 static void
235 process_cond_deps(struct schedule_state *state, struct schedule_node *n,
236 enum v3d_qpu_cond cond)
237 {
238 if (cond != V3D_QPU_COND_NONE)
239 add_read_dep(state, state->last_sf, n);
240 }
241
242 static void
243 process_pf_deps(struct schedule_state *state, struct schedule_node *n,
244 enum v3d_qpu_pf pf)
245 {
246 if (pf != V3D_QPU_PF_NONE)
247 add_write_dep(state, &state->last_sf, n);
248 }
249
250 static void
251 process_uf_deps(struct schedule_state *state, struct schedule_node *n,
252 enum v3d_qpu_uf uf)
253 {
254 if (uf != V3D_QPU_UF_NONE)
255 add_write_dep(state, &state->last_sf, n);
256 }
257
258 /**
259 * Common code for dependencies that need to be tracked both forward and
260 * backward.
261 *
262 * This is for things like "all reads of r4 have to happen between the r4
263 * writes that surround them".
264 */
265 static void
266 calculate_deps(struct schedule_state *state, struct schedule_node *n)
267 {
268 struct qinst *qinst = n->inst;
269 struct v3d_qpu_instr *inst = &qinst->qpu;
270
271 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH) {
272 if (inst->branch.cond != V3D_QPU_BRANCH_COND_ALWAYS)
273 add_read_dep(state, state->last_sf, n);
274
275 /* XXX: BDI */
276 /* XXX: BDU */
277 /* XXX: ub */
278 /* XXX: raddr_a */
279
280 add_write_dep(state, &state->last_unif, n);
281 return;
282 }
283
284 assert(inst->type == V3D_QPU_INSTR_TYPE_ALU);
285
286 /* XXX: LOAD_IMM */
287
288 if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 0)
289 process_mux_deps(state, n, inst->alu.add.a);
290 if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 1)
291 process_mux_deps(state, n, inst->alu.add.b);
292
293 if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 0)
294 process_mux_deps(state, n, inst->alu.mul.a);
295 if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 1)
296 process_mux_deps(state, n, inst->alu.mul.b);
297
298 switch (inst->alu.add.op) {
299 case V3D_QPU_A_VPMSETUP:
300 /* Could distinguish read/write by unpacking the uniform. */
301 add_write_dep(state, &state->last_vpm, n);
302 add_write_dep(state, &state->last_vpm_read, n);
303 break;
304
305 case V3D_QPU_A_STVPMV:
306 case V3D_QPU_A_STVPMD:
307 case V3D_QPU_A_STVPMP:
308 add_write_dep(state, &state->last_vpm, n);
309 break;
310
311 case V3D_QPU_A_MSF:
312 add_read_dep(state, state->last_tlb, n);
313 break;
314
315 case V3D_QPU_A_SETMSF:
316 case V3D_QPU_A_SETREVF:
317 add_write_dep(state, &state->last_tlb, n);
318 break;
319
320 case V3D_QPU_A_FLAPUSH:
321 case V3D_QPU_A_FLBPUSH:
322 case V3D_QPU_A_VFLA:
323 case V3D_QPU_A_VFLNA:
324 case V3D_QPU_A_VFLB:
325 case V3D_QPU_A_VFLNB:
326 add_read_dep(state, state->last_sf, n);
327 break;
328
329 case V3D_QPU_A_FLBPOP:
330 add_write_dep(state, &state->last_sf, n);
331 break;
332
333 default:
334 break;
335 }
336
337 switch (inst->alu.mul.op) {
338 case V3D_QPU_M_MULTOP:
339 case V3D_QPU_M_UMUL24:
340 /* MULTOP sets rtop, and UMUL24 implicitly reads rtop and
341 * resets it to 0. We could possibly reorder umul24s relative
342 * to each other, but for now just keep all the MUL parts in
343 * order.
344 */
345 add_write_dep(state, &state->last_rtop, n);
346 break;
347 default:
348 break;
349 }
350
351 if (inst->alu.add.op != V3D_QPU_A_NOP) {
352 process_waddr_deps(state, n, inst->alu.add.waddr,
353 inst->alu.add.magic_write);
354 }
355 if (inst->alu.mul.op != V3D_QPU_M_NOP) {
356 process_waddr_deps(state, n, inst->alu.mul.waddr,
357 inst->alu.mul.magic_write);
358 }
359
360 if (v3d_qpu_writes_r3(inst))
361 add_write_dep(state, &state->last_r[3], n);
362 if (v3d_qpu_writes_r4(inst))
363 add_write_dep(state, &state->last_r[4], n);
364 if (v3d_qpu_writes_r5(inst))
365 add_write_dep(state, &state->last_r[5], n);
366
367 if (inst->sig.thrsw) {
368 /* All accumulator contents and flags are undefined after the
369 * thread switch.
370 */
371 for (int i = 0; i < ARRAY_SIZE(state->last_r); i++)
372 add_write_dep(state, &state->last_r[i], n);
373 add_write_dep(state, &state->last_sf, n);
374
375 /* Scoreboard-locking operations have to stay after the last
376 * thread switch.
377 */
378 add_write_dep(state, &state->last_tlb, n);
379
380 add_write_dep(state, &state->last_tmu_write, n);
381 }
382
383 if (inst->sig.ldtmu) {
384 /* TMU loads are coming from a FIFO, so ordering is important.
385 */
386 add_write_dep(state, &state->last_tmu_write, n);
387 }
388
389 if (inst->sig.ldtlb | inst->sig.ldtlbu)
390 add_read_dep(state, state->last_tlb, n);
391
392 if (inst->sig.ldvpm)
393 add_write_dep(state, &state->last_vpm_read, n);
394
395 /* inst->sig.ldunif or sideband uniform read */
396 if (qinst->uniform != ~0)
397 add_write_dep(state, &state->last_unif, n);
398
399 process_cond_deps(state, n, inst->flags.ac);
400 process_cond_deps(state, n, inst->flags.mc);
401 process_pf_deps(state, n, inst->flags.apf);
402 process_pf_deps(state, n, inst->flags.mpf);
403 process_uf_deps(state, n, inst->flags.auf);
404 process_uf_deps(state, n, inst->flags.muf);
405 }
406
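/* Forward walk over the block, recording the read-after-write and
 * write-after-write dependencies.  The reverse walk below adds the
 * write-after-read edges.
 */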
407 static void
408 calculate_forward_deps(struct v3d_compile *c, struct list_head *schedule_list)
409 {
410 struct schedule_state state;
411
412 memset(&state, 0, sizeof(state));
413 state.dir = F;
414
415 list_for_each_entry(struct schedule_node, node, schedule_list, link)
416 calculate_deps(&state, node);
417 }
418
419 static void
420 calculate_reverse_deps(struct v3d_compile *c, struct list_head *schedule_list)
421 {
422 struct list_head *node;
423 struct schedule_state state;
424
425 memset(&state, 0, sizeof(state));
426 state.dir = R;
427
428 for (node = schedule_list->prev; schedule_list != node; node = node->prev) {
429 calculate_deps(&state, (struct schedule_node *)node);
430 }
431 }
432
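/* Per-tick hazard state used while choosing instructions: the physical
 * register file addresses written by the previous instruction's add and mul
 * ALUs, the ticks of the last SFU write and ldvary, and whether a TLB
 * access has already locked the scoreboard.
 */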
433 struct choose_scoreboard {
434 int tick;
435 int last_sfu_write_tick;
436 int last_ldvary_tick;
437 int last_uniforms_reset_tick;
438 uint32_t last_waddr_add, last_waddr_mul;
439 bool tlb_locked;
440 };
441
442 static bool
443 mux_reads_too_soon(struct choose_scoreboard *scoreboard,
444 const struct v3d_qpu_instr *inst, enum v3d_qpu_mux mux)
445 {
446 switch (mux) {
447 case V3D_QPU_MUX_A:
448 if (scoreboard->last_waddr_add == inst->raddr_a ||
449 scoreboard->last_waddr_mul == inst->raddr_a) {
450 return true;
451 }
452 break;
453
454 case V3D_QPU_MUX_B:
455 if (scoreboard->last_waddr_add == inst->raddr_b ||
456 scoreboard->last_waddr_mul == inst->raddr_b) {
457 return true;
458 }
459 break;
460
461 case V3D_QPU_MUX_R4:
462 if (scoreboard->tick - scoreboard->last_sfu_write_tick <= 2)
463 return true;
464 break;
465
466 case V3D_QPU_MUX_R5:
467 if (scoreboard->tick - scoreboard->last_ldvary_tick <= 1)
468 return true;
469 break;
470 default:
471 break;
472 }
473
474 return false;
475 }
476
477 static bool
478 reads_too_soon_after_write(struct choose_scoreboard *scoreboard,
479 struct qinst *qinst)
480 {
481 const struct v3d_qpu_instr *inst = &qinst->qpu;
482
483 /* XXX: Branching off of raddr. */
484 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH)
485 return false;
486
487 assert(inst->type == V3D_QPU_INSTR_TYPE_ALU);
488
489 if (inst->alu.add.op != V3D_QPU_A_NOP) {
490 if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 0 &&
491 mux_reads_too_soon(scoreboard, inst, inst->alu.add.a)) {
492 return true;
493 }
494 if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 1 &&
495 mux_reads_too_soon(scoreboard, inst, inst->alu.add.b)) {
496 return true;
497 }
498 }
499
500 if (inst->alu.mul.op != V3D_QPU_M_NOP) {
501 if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 0 &&
502 mux_reads_too_soon(scoreboard, inst, inst->alu.mul.a)) {
503 return true;
504 }
505 if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 1 &&
506 mux_reads_too_soon(scoreboard, inst, inst->alu.mul.b)) {
507 return true;
508 }
509 }
510
511 /* XXX: imm */
512
513 return false;
514 }
515
516 static bool
517 writes_too_soon_after_write(struct choose_scoreboard *scoreboard,
518 struct qinst *qinst)
519 {
520 const struct v3d_qpu_instr *inst = &qinst->qpu;
521
522 /* Don't schedule any other r4 write too soon after an SFU write.
523 * This would normally be prevented by dependency tracking, but might
524 * occur if a dead SFU computation makes it to scheduling.
525 */
526 if (scoreboard->tick - scoreboard->last_sfu_write_tick < 2 &&
527 v3d_qpu_writes_r4(inst))
528 return true;
529
530 return false;
531 }
532
533 static bool
534 pixel_scoreboard_too_soon(struct choose_scoreboard *scoreboard,
535 const struct v3d_qpu_instr *inst)
536 {
537 return (scoreboard->tick == 0 && qpu_inst_is_tlb(inst));
538 }
539
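/* Higher priorities get scheduled first: TMU setup writes are most urgent
 * (to start fetches early), ordinary instructions come next, and TMU result
 * loads and TLB accesses are pushed as late as possible.
 */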
540 static int
541 get_instruction_priority(const struct v3d_qpu_instr *inst)
542 {
543 uint32_t baseline_score;
544 uint32_t next_score = 0;
545
546 /* Schedule TLB operations as late as possible, to get more
547 * parallelism between shaders.
548 */
549 if (qpu_inst_is_tlb(inst))
550 return next_score;
551 next_score++;
552
553 /* Schedule texture read results collection late to hide latency. */
554 if (inst->sig.ldtmu)
555 return next_score;
556 next_score++;
557
558 /* Default score for things that aren't otherwise special. */
559 baseline_score = next_score;
560 next_score++;
561
562 /* Schedule texture read setup early to hide their latency better. */
563 if (inst->type == V3D_QPU_INSTR_TYPE_ALU &&
564 ((inst->alu.add.magic_write &&
565 v3d_qpu_magic_waddr_is_tmu(inst->alu.add.waddr)) ||
566 (inst->alu.mul.magic_write &&
567 v3d_qpu_magic_waddr_is_tmu(inst->alu.mul.waddr)))) {
568 return next_score;
569 }
570 next_score++;
571
572 return baseline_score;
573 }
574
575 static bool
576 qpu_magic_waddr_is_periph(enum v3d_qpu_waddr waddr)
577 {
578 return (v3d_qpu_magic_waddr_is_tmu(waddr) ||
579 v3d_qpu_magic_waddr_is_sfu(waddr) ||
580 v3d_qpu_magic_waddr_is_tlb(waddr) ||
581 v3d_qpu_magic_waddr_is_vpm(waddr) ||
582 v3d_qpu_magic_waddr_is_tsy(waddr));
583 }
584
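/* Returns true if either ALU half or a signal touches a peripheral (TMU,
 * SFU, TLB, VPM or TSY), since only one peripheral may be accessed per
 * instruction.
 */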
585 static bool
586 qpu_accesses_peripheral(const struct v3d_qpu_instr *inst)
587 {
588 if (inst->type == V3D_QPU_INSTR_TYPE_ALU) {
589 if (inst->alu.add.op != V3D_QPU_A_NOP &&
590 inst->alu.add.magic_write &&
591 qpu_magic_waddr_is_periph(inst->alu.add.waddr)) {
592 return true;
593 }
594
595 if (inst->alu.add.op == V3D_QPU_A_VPMSETUP)
596 return true;
597
598 if (inst->alu.mul.op != V3D_QPU_M_NOP &&
599 inst->alu.mul.magic_write &&
600 qpu_magic_waddr_is_periph(inst->alu.mul.waddr)) {
601 return true;
602 }
603 }
604
605 return (inst->sig.ldvpm ||
606 inst->sig.ldtmu ||
607 inst->sig.ldtlb ||
608 inst->sig.ldtlbu);
609 }
610
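/* Tries to pack a and b into one QPU instruction: b's ALU op must fit in
 * whichever of the add/mul slots a leaves free, raddr_a/raddr_b must agree
 * when both instructions use them, signals are ORed together, and the
 * result must still pack into a valid encoding for this device.
 */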
611 static bool
612 qpu_merge_inst(const struct v3d_device_info *devinfo,
613 struct v3d_qpu_instr *result,
614 const struct v3d_qpu_instr *a,
615 const struct v3d_qpu_instr *b)
616 {
617 if (a->type != V3D_QPU_INSTR_TYPE_ALU ||
618 b->type != V3D_QPU_INSTR_TYPE_ALU) {
619 return false;
620 }
621
622 /* Can't do more than one peripheral access in an instruction. */
623 if (qpu_accesses_peripheral(a) && qpu_accesses_peripheral(b))
624 return false;
625
626 struct v3d_qpu_instr merge = *a;
627
628 if (b->alu.add.op != V3D_QPU_A_NOP) {
629 if (a->alu.add.op != V3D_QPU_A_NOP)
630 return false;
631 merge.alu.add = b->alu.add;
632
633 merge.flags.ac = b->flags.ac;
634 merge.flags.apf = b->flags.apf;
635 merge.flags.auf = b->flags.auf;
636 }
637
638 if (b->alu.mul.op != V3D_QPU_M_NOP) {
639 if (a->alu.mul.op != V3D_QPU_M_NOP)
640 return false;
641 merge.alu.mul = b->alu.mul;
642
643 merge.flags.mc = b->flags.mc;
644 merge.flags.mpf = b->flags.mpf;
645 merge.flags.muf = b->flags.muf;
646 }
647
648 if (v3d_qpu_uses_mux(b, V3D_QPU_MUX_A)) {
649 if (v3d_qpu_uses_mux(a, V3D_QPU_MUX_A) &&
650 a->raddr_a != b->raddr_a) {
651 return false;
652 }
653 merge.raddr_a = b->raddr_a;
654 }
655
656 if (v3d_qpu_uses_mux(b, V3D_QPU_MUX_B)) {
657 if (v3d_qpu_uses_mux(a, V3D_QPU_MUX_B) &&
658 a->raddr_b != b->raddr_b) {
659 return false;
660 }
661 merge.raddr_b = b->raddr_b;
662 }
663
664 merge.sig.thrsw |= b->sig.thrsw;
665 merge.sig.ldunif |= b->sig.ldunif;
666 merge.sig.ldtmu |= b->sig.ldtmu;
667 merge.sig.ldvary |= b->sig.ldvary;
668 merge.sig.ldvpm |= b->sig.ldvpm;
669 merge.sig.small_imm |= b->sig.small_imm;
670 merge.sig.ldtlb |= b->sig.ldtlb;
671 merge.sig.ldtlbu |= b->sig.ldtlbu;
672 merge.sig.ucb |= b->sig.ucb;
673 merge.sig.rotate |= b->sig.rotate;
674 merge.sig.wrtmuc |= b->sig.wrtmuc;
675
676 uint64_t packed;
677 bool ok = v3d_qpu_instr_pack(devinfo, &merge, &packed);
678
679 *result = merge;
680 /* No modifying the real instructions on failure. */
681 assert(ok || (a != result && b != result));
682
683 return ok;
684 }
685
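/* Picks the next DAG head to schedule, or an instruction that can be merged
 * with prev_inst when one is given.  Candidates that would trip a hardware
 * hazard are skipped; among the rest the highest priority wins, with the
 * longest critical-path delay as the tie-breaker.
 */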
686 static struct schedule_node *
687 choose_instruction_to_schedule(const struct v3d_device_info *devinfo,
688 struct choose_scoreboard *scoreboard,
689 struct list_head *schedule_list,
690 struct schedule_node *prev_inst)
691 {
692 struct schedule_node *chosen = NULL;
693 int chosen_prio = 0;
694
695 /* Don't pair up anything with a thread switch signal -- emit_thrsw()
696 * will handle pairing it along with filling the delay slots.
697 */
698 if (prev_inst) {
699 if (prev_inst->inst->qpu.sig.thrsw)
700 return NULL;
701 }
702
703 list_for_each_entry(struct schedule_node, n, schedule_list, link) {
704 const struct v3d_qpu_instr *inst = &n->inst->qpu;
705
706 /* Don't choose the branch instruction until it's the last one
707 * left. We'll move it up to fit its delay slots after we
708 * choose it.
709 */
710 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH &&
711 !list_is_singular(schedule_list)) {
712 continue;
713 }
714
715 /* "An instruction must not read from a location in physical
716 * regfile A or B that was written to by the previous
717 * instruction."
718 */
719 if (reads_too_soon_after_write(scoreboard, n->inst))
720 continue;
721
722 if (writes_too_soon_after_write(scoreboard, n->inst))
723 continue;
724
725 /* "A scoreboard wait must not occur in the first two
726 * instructions of a fragment shader. This is either the
727 * explicit Wait for Scoreboard signal or an implicit wait
728 * with the first tile-buffer read or write instruction."
729 */
730 if (pixel_scoreboard_too_soon(scoreboard, inst))
731 continue;
732
733 /* ldunif and ldvary both write r5, but ldunif does so a tick
734 * sooner. If the ldvary's r5 result wasn't read, an ldunif
735 * might otherwise get scheduled here and both would try to
736 * update r5 in the same tick.
737 */
738 if (inst->sig.ldunif &&
739 scoreboard->tick == scoreboard->last_ldvary_tick + 1) {
740 continue;
741 }
742
743 /* If we're trying to pair with another instruction, check
744 * that they're compatible.
745 */
746 if (prev_inst) {
747 /* Don't pair up a thread switch signal -- we'll
748 * handle pairing it when we pick it on its own.
749 */
750 if (inst->sig.thrsw)
751 continue;
752
753 if (prev_inst->inst->uniform != -1 &&
754 n->inst->uniform != -1)
755 continue;
756
757 /* Don't merge in something that will lock the TLB.
758 * Hopefully what we have in inst will release some
759 * other instructions, allowing us to delay the
760 * TLB-locking instruction until later.
761 */
762 if (!scoreboard->tlb_locked && qpu_inst_is_tlb(inst))
763 continue;
764
765 struct v3d_qpu_instr merged_inst;
766 if (!qpu_merge_inst(devinfo, &merged_inst,
767 &prev_inst->inst->qpu, inst)) {
768 continue;
769 }
770 }
771
772 int prio = get_instruction_priority(inst);
773
774 /* Found a valid instruction. If nothing better comes along,
775 * this one works.
776 */
777 if (!chosen) {
778 chosen = n;
779 chosen_prio = prio;
780 continue;
781 }
782
783 if (prio > chosen_prio) {
784 chosen = n;
785 chosen_prio = prio;
786 } else if (prio < chosen_prio) {
787 continue;
788 }
789
790 if (n->delay > chosen->delay) {
791 chosen = n;
792 chosen_prio = prio;
793 } else if (n->delay < chosen->delay) {
794 continue;
795 }
796 }
797
798 return chosen;
799 }
800
801 static void
802 update_scoreboard_for_magic_waddr(struct choose_scoreboard *scoreboard,
803 enum v3d_qpu_waddr waddr)
804 {
805 if (v3d_qpu_magic_waddr_is_sfu(waddr))
806 scoreboard->last_sfu_write_tick = scoreboard->tick;
807 }
808
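/* Updates the hazard tracking after an instruction has been committed to
 * the schedule: which physical regfile address each ALU wrote, the ticks of
 * the last SFU write and ldvary, and whether the TLB is now locked.
 */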
809 static void
810 update_scoreboard_for_chosen(struct choose_scoreboard *scoreboard,
811 const struct v3d_qpu_instr *inst)
812 {
813 scoreboard->last_waddr_add = ~0;
814 scoreboard->last_waddr_mul = ~0;
815
816 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH)
817 return;
818
819 assert(inst->type == V3D_QPU_INSTR_TYPE_ALU);
820
821 if (inst->alu.add.op != V3D_QPU_A_NOP) {
822 if (inst->alu.add.magic_write) {
823 update_scoreboard_for_magic_waddr(scoreboard,
824 inst->alu.add.waddr);
825 } else {
826 scoreboard->last_waddr_add = inst->alu.add.waddr;
827 }
828 }
829
830 if (inst->alu.mul.op != V3D_QPU_M_NOP) {
831 if (inst->alu.mul.magic_write) {
832 update_scoreboard_for_magic_waddr(scoreboard,
833 inst->alu.mul.waddr);
834 } else {
835 scoreboard->last_waddr_mul = inst->alu.mul.waddr;
836 }
837 }
838
839 if (inst->sig.ldvary)
840 scoreboard->last_ldvary_tick = scoreboard->tick;
841
842 if (qpu_inst_is_tlb(inst))
843 scoreboard->tlb_locked = true;
844 }
845
846 static void
847 dump_state(const struct v3d_device_info *devinfo,
848 struct list_head *schedule_list)
849 {
850 list_for_each_entry(struct schedule_node, n, schedule_list, link) {
851 fprintf(stderr, " t=%4d: ", n->unblocked_time);
852 v3d_qpu_dump(devinfo, &n->inst->qpu);
853 fprintf(stderr, "\n");
854
855 for (int i = 0; i < n->child_count; i++) {
856 struct schedule_node *child = n->children[i].node;
857 if (!child)
858 continue;
859
860 fprintf(stderr, " - ");
861 v3d_qpu_dump(devinfo, &child->inst->qpu);
862 fprintf(stderr, " (%d parents, %c)\n",
863 child->parent_count,
864 n->children[i].write_after_read ? 'w' : 'r');
865 }
866 }
867 }
868
869 static uint32_t magic_waddr_latency(enum v3d_qpu_waddr waddr,
870 const struct v3d_qpu_instr *after)
871 {
872 /* Apply some huge latency between texture fetch requests and getting
873 * their results back.
874 *
875 * FIXME: This is actually pretty bogus. If we do:
876 *
877 * mov tmu0_s, a
878 * <a bit of math>
879 * mov tmu0_s, b
880 * load_tmu0
881 * <more math>
882 * load_tmu0
883 *
884 * we count that as worse than
885 *
886 * mov tmu0_s, a
887 * mov tmu0_s, b
888 * <lots of math>
889 * load_tmu0
890 * <more math>
891 * load_tmu0
892 *
893 * because we associate the first load_tmu0 with the *second* tmu0_s.
894 */
895 if (v3d_qpu_magic_waddr_is_tmu(waddr) && after->sig.ldtmu)
896 return 100;
897
898 /* Assume that anything depending on us is consuming the SFU result. */
899 if (v3d_qpu_magic_waddr_is_sfu(waddr))
900 return 3;
901
902 return 1;
903 }
904
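/* Estimated cycles between "before" being scheduled and "after" being able
 * to consume its result, based on the magic waddrs that "before" writes.
 */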
905 static uint32_t
906 instruction_latency(struct schedule_node *before, struct schedule_node *after)
907 {
908 const struct v3d_qpu_instr *before_inst = &before->inst->qpu;
909 const struct v3d_qpu_instr *after_inst = &after->inst->qpu;
910 uint32_t latency = 1;
911
912 if (before_inst->type != V3D_QPU_INSTR_TYPE_ALU ||
913 after_inst->type != V3D_QPU_INSTR_TYPE_ALU)
914 return latency;
915
916 if (before_inst->alu.add.magic_write) {
917 latency = MAX2(latency,
918 magic_waddr_latency(before_inst->alu.add.waddr,
919 after_inst));
920 }
921
922 if (before_inst->alu.mul.magic_write) {
923 latency = MAX2(latency,
924 magic_waddr_latency(before_inst->alu.mul.waddr,
925 after_inst));
926 }
927
928 return latency;
929 }
930
931 /** Recursive computation of the delay member of a node. */
932 static void
933 compute_delay(struct schedule_node *n)
934 {
935 if (!n->child_count) {
936 n->delay = 1;
937 } else {
938 for (int i = 0; i < n->child_count; i++) {
939 if (!n->children[i].node->delay)
940 compute_delay(n->children[i].node);
941 n->delay = MAX2(n->delay,
942 n->children[i].node->delay +
943 instruction_latency(n, n->children[i].node));
944 }
945 }
946 }
947
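/* Releases the children of a node that was just scheduled at "time",
 * updating their unblocked_time and parent_count; children with no parents
 * left become DAG heads and join schedule_list.  With war_only set, only
 * write-after-read edges are released, so an instruction that overwrites a
 * register may be paired with the final read of it.
 */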
948 static void
949 mark_instruction_scheduled(struct list_head *schedule_list,
950 uint32_t time,
951 struct schedule_node *node,
952 bool war_only)
953 {
954 if (!node)
955 return;
956
957 for (int i = node->child_count - 1; i >= 0; i--) {
958 struct schedule_node *child =
959 node->children[i].node;
960
961 if (!child)
962 continue;
963
964 if (war_only && !node->children[i].write_after_read)
965 continue;
966
967 /* If the requirement is only that the node not appear before
968 * the last read of its destination, then it can be scheduled
969 * immediately after (or paired with!) the thing reading the
970 * destination.
971 */
972 uint32_t latency = 0;
973 if (!war_only) {
974 latency = instruction_latency(node,
975 node->children[i].node);
976 }
977
978 child->unblocked_time = MAX2(child->unblocked_time,
979 time + latency);
980 child->parent_count--;
981 if (child->parent_count == 0)
982 list_add(&child->link, schedule_list);
983
984 node->children[i].node = NULL;
985 }
986 }
987
988 static struct qinst *
989 vir_nop()
990 {
991 struct qreg undef = { QFILE_NULL, 0 };
992 struct qinst *qinst = vir_add_inst(V3D_QPU_A_NOP, undef, undef, undef);
993
994 return qinst;
995 }
996
997 #if 0
998 static struct qinst *
999 nop_after(struct qinst *inst)
1000 {
1001 struct qinst *q = vir_nop();
1002
1003 list_add(&q->link, &inst->link);
1004
1005 return q;
1006 }
1007
1008 /**
1009 * Emits a THRSW/LTHRSW signal in the stream, trying to move it up to pair
1010 * with another instruction.
1011 */
1012 static void
1013 emit_thrsw(struct v3d_compile *c,
1014 struct choose_scoreboard *scoreboard,
1015 const struct v3d_qpu_instr *inst)
1016 {
1017 /* There should be nothing in a thrsw inst being scheduled other than
1018 * the signal bits.
1019 */
1020 assert(inst->type == V3D_QPU_INSTR_TYPE_ALU);
1021 assert(inst->alu.add.op == V3D_QPU_A_NOP);
1022 assert(inst->alu.mul.op == V3D_QPU_M_NOP);
1023
1024 /* Try to find an earlier scheduled instruction that we can merge the
1025 * thrsw into.
1026 */
1027 int thrsw_ip = c->qpu_inst_count;
1028 for (int i = 1; i <= MIN2(c->qpu_inst_count, 3); i++) {
1029 uint64_t prev_instr = c->qpu_insts[c->qpu_inst_count - i];
1030 uint32_t prev_sig = QPU_GET_FIELD(prev_instr, QPU_SIG);
1031
1032 if (prev_sig == QPU_SIG_NONE)
1033 thrsw_ip = c->qpu_inst_count - i;
1034 }
1035
1036 if (thrsw_ip != c->qpu_inst_count) {
1037 /* Merge the thrsw into the existing instruction. */
1038 c->qpu_insts[thrsw_ip] =
1039 QPU_UPDATE_FIELD(c->qpu_insts[thrsw_ip], sig, QPU_SIG);
1040 } else {
1041 qpu_serialize_one_inst(c, inst);
1042 update_scoreboard_for_chosen(scoreboard, inst);
1043 }
1044
1045 /* Fill the delay slots. */
1046 while (c->qpu_inst_count < thrsw_ip + 3) {
1047 update_scoreboard_for_chosen(scoreboard, v3d_qpu_nop());
1048 qpu_serialize_one_inst(c, v3d_qpu_nop());
1049 }
1050 }
1051 #endif
1052
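/* Core list-scheduling loop for one block: repeatedly picks the best DAG
 * head (or emits a NOP if nothing is ready), tries to merge a second
 * instruction into it, rewrites the uniform stream to match the new order,
 * and fills branch/thrsw delay slots with NOPs.  Returns the estimated
 * cycle count.
 */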
1053 static uint32_t
1054 schedule_instructions(struct v3d_compile *c,
1055 struct choose_scoreboard *scoreboard,
1056 struct qblock *block,
1057 struct list_head *schedule_list,
1058 enum quniform_contents *orig_uniform_contents,
1059 uint32_t *orig_uniform_data,
1060 uint32_t *next_uniform)
1061 {
1062 const struct v3d_device_info *devinfo = c->devinfo;
1063 uint32_t time = 0;
1064
1065 if (debug) {
1066 fprintf(stderr, "initial deps:\n");
1067 dump_state(devinfo, schedule_list);
1068 fprintf(stderr, "\n");
1069 }
1070
1071 /* Remove non-DAG heads from the list. */
1072 list_for_each_entry_safe(struct schedule_node, n, schedule_list, link) {
1073 if (n->parent_count != 0)
1074 list_del(&n->link);
1075 }
1076
1077 while (!list_empty(schedule_list)) {
1078 struct schedule_node *chosen =
1079 choose_instruction_to_schedule(devinfo,
1080 scoreboard,
1081 schedule_list,
1082 NULL);
1083 struct schedule_node *merge = NULL;
1084
1085 /* If there are no valid instructions to schedule, drop a NOP
1086 * in.
1087 */
1088 struct qinst *qinst = chosen ? chosen->inst : vir_nop();
1089 struct v3d_qpu_instr *inst = &qinst->qpu;
1090
1091 if (debug) {
1092 fprintf(stderr, "t=%4d: current list:\n",
1093 time);
1094 dump_state(devinfo, schedule_list);
1095 fprintf(stderr, "t=%4d: chose: ", time);
1096 v3d_qpu_dump(devinfo, inst);
1097 fprintf(stderr, "\n");
1098 }
1099
1100 /* Schedule this instruction onto the QPU list. Also try to
1101 * find an instruction to pair with it.
1102 */
1103 if (chosen) {
1104 time = MAX2(chosen->unblocked_time, time);
1105 list_del(&chosen->link);
1106 mark_instruction_scheduled(schedule_list, time,
1107 chosen, true);
1108
1109 merge = choose_instruction_to_schedule(devinfo,
1110 scoreboard,
1111 schedule_list,
1112 chosen);
1113 if (merge) {
1114 time = MAX2(merge->unblocked_time, time);
1115 list_del(&merge->link);
1116 (void)qpu_merge_inst(devinfo, inst,
1117 inst, &merge->inst->qpu);
1118 if (merge->inst->uniform != -1) {
1119 chosen->inst->uniform =
1120 merge->inst->uniform;
1121 }
1122
1123 if (debug) {
1124 fprintf(stderr, "t=%4d: merging: ",
1125 time);
1126 v3d_qpu_dump(devinfo, &merge->inst->qpu);
1127 fprintf(stderr, "\n");
1128 fprintf(stderr, " result: ");
1129 v3d_qpu_dump(devinfo, inst);
1130 fprintf(stderr, "\n");
1131 }
1132 }
1133 }
1134
1135 /* Update the uniform index for the rewritten location --
1136 * branch target updating will still need to change
1137 * c->uniform_data[] using this index.
1138 */
1139 if (qinst->uniform != -1) {
1140 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH)
1141 block->branch_uniform = *next_uniform;
1142
1143 c->uniform_data[*next_uniform] =
1144 orig_uniform_data[qinst->uniform];
1145 c->uniform_contents[*next_uniform] =
1146 orig_uniform_contents[qinst->uniform];
1147 qinst->uniform = *next_uniform;
1148 (*next_uniform)++;
1149 }
1150
1151 if (debug) {
1152 fprintf(stderr, "\n");
1153 }
1154
1155 /* Now that we've scheduled a new instruction, some of its
1156 * children can be promoted to the list of instructions ready to
1157 * be scheduled. Update the children's unblocked time for this
1158 * DAG edge as we do so.
1159 */
1160 mark_instruction_scheduled(schedule_list, time, chosen, false);
1161
1162 if (merge) {
1163 mark_instruction_scheduled(schedule_list, time, merge,
1164 false);
1165
1166 /* The merged VIR instruction doesn't get re-added to the
1167 * block, so free it now.
1168 */
1169 free(merge->inst);
1170 }
1171
1172 if (0 && inst->sig.thrsw) {
1173 /* XXX emit_thrsw(c, scoreboard, qinst); */
1174 } else {
1175 c->qpu_inst_count++;
1176 list_addtail(&qinst->link, &block->instructions);
1177 update_scoreboard_for_chosen(scoreboard, inst);
1178 }
1179
1180 scoreboard->tick++;
1181 time++;
1182
1183 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH ||
1184 inst->sig.thrsw /* XXX */) {
1185 block->branch_qpu_ip = c->qpu_inst_count - 1;
1186 /* Fill the delay slots.
1187 *
1188 * We should fill these with actual instructions instead,
1189 * but that will probably need to be done after this, once
1190 * we know what the leading instructions of the successors
1191 * are (so we can handle A/B register file write latency).
1193 */
1194 /* XXX: scoreboard */
1195 int slots = (inst->type == V3D_QPU_INSTR_TYPE_BRANCH ?
1196 3 : 2);
1197 for (int i = 0; i < slots; i++) {
1198 struct qinst *nop = vir_nop();
1199 list_addtail(&nop->link, &block->instructions);
1200
1201 update_scoreboard_for_chosen(scoreboard,
1202 &nop->qpu);
1203 c->qpu_inst_count++;
1204 scoreboard->tick++;
1205 time++;
1206 }
1207 }
1208 }
1209
1210 return time;
1211 }
1212
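/* Schedules a single block: wraps each qinst in a schedule_node, builds the
 * dependency DAG and per-node delays, then runs the list scheduler over it.
 */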
1213 static uint32_t
1214 qpu_schedule_instructions_block(struct v3d_compile *c,
1215 struct choose_scoreboard *scoreboard,
1216 struct qblock *block,
1217 enum quniform_contents *orig_uniform_contents,
1218 uint32_t *orig_uniform_data,
1219 uint32_t *next_uniform)
1220 {
1221 void *mem_ctx = ralloc_context(NULL);
1222 struct list_head schedule_list;
1223
1224 list_inithead(&schedule_list);
1225
1226 /* Wrap each instruction in a scheduler structure. */
1227 while (!list_empty(&block->instructions)) {
1228 struct qinst *qinst = (struct qinst *)block->instructions.next;
1229 struct schedule_node *n =
1230 rzalloc(mem_ctx, struct schedule_node);
1231
1232 n->inst = qinst;
1233
1234 list_del(&qinst->link);
1235 list_addtail(&n->link, &schedule_list);
1236 }
1237
1238 calculate_forward_deps(c, &schedule_list);
1239 calculate_reverse_deps(c, &schedule_list);
1240
1241 list_for_each_entry(struct schedule_node, n, &schedule_list, link) {
1242 compute_delay(n);
1243 }
1244
1245 uint32_t cycles = schedule_instructions(c, scoreboard, block,
1246 &schedule_list,
1247 orig_uniform_contents,
1248 orig_uniform_data,
1249 next_uniform);
1250
1251 ralloc_free(mem_ctx);
1252
1253 return cycles;
1254 }
1255
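/* With every block's QPU instruction range and uniform range now known,
 * patches each branch's relative instruction offset and the uniform-stream
 * offset it consumes.
 */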
1256 static void
1257 qpu_set_branch_targets(struct v3d_compile *c)
1258 {
1259 vir_for_each_block(block, c) {
1260 /* The end block of the program has no branch. */
1261 if (!block->successors[0])
1262 continue;
1263
1264 /* If there was no branch instruction, then the successor
1265 * block must follow immediately after this one.
1266 */
1267 if (block->branch_qpu_ip == ~0) {
1268 assert(block->end_qpu_ip + 1 ==
1269 block->successors[0]->start_qpu_ip);
1270 continue;
1271 }
1272
1273 /* Walk back through the delay slots to find the branch
1274 * instr.
1275 */
1276 struct list_head *entry = block->instructions.prev;
1277 for (int i = 0; i < 3; i++)
1278 entry = entry->prev;
1279 struct qinst *branch = container_of(entry, branch, link);
1280 assert(branch->qpu.type == V3D_QPU_INSTR_TYPE_BRANCH);
1281
1282 /* Make sure that the if-we-don't-jump
1283 * successor was scheduled just after the
1284 * delay slots.
1285 */
1286 assert(!block->successors[1] ||
1287 block->successors[1]->start_qpu_ip ==
1288 block->branch_qpu_ip + 4);
1289
1290 branch->qpu.branch.offset =
1291 ((block->successors[0]->start_qpu_ip -
1292 (block->branch_qpu_ip + 4)) *
1293 sizeof(uint64_t));
1294
1295 /* Set up the relative offset to jump in the
1296 * uniform stream.
1297 *
1298 * Use a temporary here, because
1299 * uniform_data[inst->uniform] may be shared
1300 * between multiple instructions.
1301 */
1302 assert(c->uniform_contents[branch->uniform] == QUNIFORM_CONSTANT);
1303 c->uniform_data[branch->uniform] =
1304 (block->successors[0]->start_uniform -
1305 (block->branch_uniform + 1)) * 4;
1306 }
1307 }
1308
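/* Entry point: schedules each block in turn, rebuilding the uniform stream
 * in the new instruction order, then fixes up branch targets.  Returns the
 * estimated cycle count for the whole program.
 */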
1309 uint32_t
1310 v3d_qpu_schedule_instructions(struct v3d_compile *c)
1311 {
1312 const struct v3d_device_info *devinfo = c->devinfo;
1313
1314 /* We reorder the uniforms as we schedule instructions, so save the
1315 * old data off and replace it.
1316 */
1317 uint32_t *uniform_data = c->uniform_data;
1318 enum quniform_contents *uniform_contents = c->uniform_contents;
1319 c->uniform_contents = ralloc_array(c, enum quniform_contents,
1320 c->num_uniforms);
1321 c->uniform_data = ralloc_array(c, uint32_t, c->num_uniforms);
1322 c->uniform_array_size = c->num_uniforms;
1323 uint32_t next_uniform = 0;
1324
1325 struct choose_scoreboard scoreboard;
1326 memset(&scoreboard, 0, sizeof(scoreboard));
1327 scoreboard.last_waddr_add = ~0;
1328 scoreboard.last_waddr_mul = ~0;
1329 scoreboard.last_ldvary_tick = -10;
1330 scoreboard.last_sfu_write_tick = -10;
1331 scoreboard.last_uniforms_reset_tick = -10;
1332
1333 if (debug) {
1334 fprintf(stderr, "Pre-schedule instructions\n");
1335 vir_for_each_block(block, c) {
1336 fprintf(stderr, "BLOCK %d\n", block->index);
1337 list_for_each_entry(struct qinst, qinst,
1338 &block->instructions, link) {
1339 v3d_qpu_dump(devinfo, &qinst->qpu);
1340 fprintf(stderr, "\n");
1341 }
1342 }
1343 fprintf(stderr, "\n");
1344 }
1345
1346 uint32_t cycles = 0;
1347 vir_for_each_block(block, c) {
1348 block->start_qpu_ip = c->qpu_inst_count;
1349 block->branch_qpu_ip = ~0;
1350 block->start_uniform = next_uniform;
1351
1352 cycles += qpu_schedule_instructions_block(c,
1353 &scoreboard,
1354 block,
1355 uniform_contents,
1356 uniform_data,
1357 &next_uniform);
1358
1359 block->end_qpu_ip = c->qpu_inst_count - 1;
1360 }
1361
1362 qpu_set_branch_targets(c);
1363
1364 assert(next_uniform == c->num_uniforms);
1365
1366 return cycles;
1367 }