v3d: Drop in a bunch of notes about performance improvement opportunities.
[mesa.git] / src/broadcom/compiler/qpu_schedule.c
1 /*
2 * Copyright © 2010 Intel Corporation
3 * Copyright © 2014-2017 Broadcom
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 /**
26 * @file
27 *
28 * The basic model of the list scheduler is to take a basic block, compute a
29 * DAG of the dependencies, and make a list of the DAG heads. Heuristically
30 * pick a DAG head, then put all the children that are now DAG heads into the
31 * list of things to schedule.
32 *
33 * The goal of scheduling here is to pack pairs of operations together in a
34 * single QPU instruction.
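 *
 * Dependencies are computed in two passes over each block (forward and
 * reverse, see calculate_forward_deps() and calculate_reverse_deps()) so
 * that read-after-write, write-after-write, and write-after-read
 * orderings all end up as DAG edges.  Candidates are then picked by
 * get_instruction_priority() and, as a tie-breaker, by the critical-path
 * delay computed in compute_delay().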
35 */
36
37 #include "qpu/qpu_disasm.h"
38 #include "v3d_compiler.h"
39 #include "util/ralloc.h"
40
41 static bool debug;
42
43 struct schedule_node_child;
44
45 struct schedule_node {
46 struct list_head link;
47 struct qinst *inst;
48 struct schedule_node_child *children;
49 uint32_t child_count;
50 uint32_t child_array_size;
51 uint32_t parent_count;
52
53 /* Latest (parent scheduled cycle + instruction_latency()) among this node's parents. */
54 uint32_t unblocked_time;
55
56 /**
57 * Minimum number of cycles from scheduling this instruction until the
58 * end of the program, based on the slowest dependency chain through
59 * the children.
60 */
61 uint32_t delay;
62
63 /**
64 * Cycles between this instruction being scheduled and when its result
65 * can be consumed.
66 */
67 uint32_t latency;
68 };
69
70 struct schedule_node_child {
71 struct schedule_node *node;
72 bool write_after_read;
73 };
74
75 /* When walking the instructions in reverse, we need to swap before/after in
76 * add_dep().
77 */
78 enum direction { F, R };
79
80 struct schedule_state {
81 const struct v3d_device_info *devinfo;
82 struct schedule_node *last_r[6];
83 struct schedule_node *last_rf[64];
84 struct schedule_node *last_sf;
85 struct schedule_node *last_vpm_read;
86 struct schedule_node *last_tmu_write;
87 struct schedule_node *last_tmu_config;
88 struct schedule_node *last_tlb;
89 struct schedule_node *last_vpm;
90 struct schedule_node *last_unif;
91 struct schedule_node *last_rtop;
92 enum direction dir;
93 /* Estimated cycle when the current instruction would start. */
94 uint32_t time;
95 };
96
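/**
 * Records a dependency edge so that "after" gets scheduled after "before".
 * When walking the instructions in reverse (dir == R), the two arguments
 * arrive swapped, and read dependencies become write-after-read edges,
 * which are flagged so the scheduler knows they carry no result latency.
 * Duplicate edges are skipped.
 */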
97 static void
98 add_dep(struct schedule_state *state,
99 struct schedule_node *before,
100 struct schedule_node *after,
101 bool write)
102 {
103 bool write_after_read = !write && state->dir == R;
104
105 if (!before || !after)
106 return;
107
108 assert(before != after);
109
110 if (state->dir == R) {
111 struct schedule_node *t = before;
112 before = after;
113 after = t;
114 }
115
116 for (int i = 0; i < before->child_count; i++) {
117 if (before->children[i].node == after &&
118 (before->children[i].write_after_read == write_after_read)) {
119 return;
120 }
121 }
122
123 if (before->child_array_size <= before->child_count) {
124 before->child_array_size = MAX2(before->child_array_size * 2, 16);
125 before->children = reralloc(before, before->children,
126 struct schedule_node_child,
127 before->child_array_size);
128 }
129
130 before->children[before->child_count].node = after;
131 before->children[before->child_count].write_after_read =
132 write_after_read;
133 before->child_count++;
134 after->parent_count++;
135 }
136
137 static void
138 add_read_dep(struct schedule_state *state,
139 struct schedule_node *before,
140 struct schedule_node *after)
141 {
142 add_dep(state, before, after, false);
143 }
144
145 static void
146 add_write_dep(struct schedule_state *state,
147 struct schedule_node **before,
148 struct schedule_node *after)
149 {
150 add_dep(state, *before, after, true);
151 *before = after;
152 }
153
154 static bool
155 qpu_inst_is_tlb(const struct v3d_qpu_instr *inst)
156 {
157 if (inst->type != V3D_QPU_INSTR_TYPE_ALU)
158 return false;
159
160 if (inst->alu.add.magic_write &&
161 (inst->alu.add.waddr == V3D_QPU_WADDR_TLB ||
162 inst->alu.add.waddr == V3D_QPU_WADDR_TLBU))
163 return true;
164
165 if (inst->alu.mul.magic_write &&
166 (inst->alu.mul.waddr == V3D_QPU_WADDR_TLB ||
167 inst->alu.mul.waddr == V3D_QPU_WADDR_TLBU))
168 return true;
169
170 return false;
171 }
172
173 static void
174 process_mux_deps(struct schedule_state *state, struct schedule_node *n,
175 enum v3d_qpu_mux mux)
176 {
177 switch (mux) {
178 case V3D_QPU_MUX_A:
179 add_read_dep(state, state->last_rf[n->inst->qpu.raddr_a], n);
180 break;
181 case V3D_QPU_MUX_B:
182 add_read_dep(state, state->last_rf[n->inst->qpu.raddr_b], n);
183 break;
184 default:
185 add_read_dep(state, state->last_r[mux - V3D_QPU_MUX_R0], n);
186 break;
187 }
188 }
189
190
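/**
 * Adds write dependencies for an instruction's destination: physical
 * register file writes, the TMU (including its config registers), the VPM
 * and TLB peripherals, and the r0-r2 accumulators.  SFU and r3-r5 writes
 * are instead covered by the v3d_qpu_writes_r*() checks in
 * calculate_deps().
 */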
191 static void
192 process_waddr_deps(struct schedule_state *state, struct schedule_node *n,
193 uint32_t waddr, bool magic)
194 {
195 if (!magic) {
196 add_write_dep(state, &state->last_rf[waddr], n);
197 } else if (v3d_qpu_magic_waddr_is_tmu(waddr)) {
198 /* XXX perf: For V3D 4.x, we could reorder TMU writes other
199 * than the TMUS/TMUD/TMUA to improve scheduling flexibility.
200 */
201 add_write_dep(state, &state->last_tmu_write, n);
202 switch (waddr) {
203 case V3D_QPU_WADDR_TMUS:
204 case V3D_QPU_WADDR_TMUSCM:
205 case V3D_QPU_WADDR_TMUSF:
206 case V3D_QPU_WADDR_TMUSLOD:
207 add_write_dep(state, &state->last_tmu_config, n);
208 break;
209 default:
210 break;
211 }
212 } else if (v3d_qpu_magic_waddr_is_sfu(waddr)) {
213 /* Handled by v3d_qpu_writes_r4() check. */
214 } else {
215 switch (waddr) {
216 case V3D_QPU_WADDR_R0:
217 case V3D_QPU_WADDR_R1:
218 case V3D_QPU_WADDR_R2:
219 add_write_dep(state,
220 &state->last_r[waddr - V3D_QPU_WADDR_R0],
221 n);
222 break;
223 case V3D_QPU_WADDR_R3:
224 case V3D_QPU_WADDR_R4:
225 case V3D_QPU_WADDR_R5:
226 /* Handled by v3d_qpu_writes_r*() checks below. */
227 break;
228
229 case V3D_QPU_WADDR_VPM:
230 case V3D_QPU_WADDR_VPMU:
231 add_write_dep(state, &state->last_vpm, n);
232 break;
233
234 case V3D_QPU_WADDR_TLB:
235 case V3D_QPU_WADDR_TLBU:
236 add_write_dep(state, &state->last_tlb, n);
237 break;
238
239 case V3D_QPU_WADDR_NOP:
240 break;
241
242 default:
243 fprintf(stderr, "Unknown waddr %d\n", waddr);
244 abort();
245 }
246 }
247 }
248
249 static void
250 process_cond_deps(struct schedule_state *state, struct schedule_node *n,
251 enum v3d_qpu_cond cond)
252 {
253 if (cond != V3D_QPU_COND_NONE)
254 add_read_dep(state, state->last_sf, n);
255 }
256
257 static void
258 process_pf_deps(struct schedule_state *state, struct schedule_node *n,
259 enum v3d_qpu_pf pf)
260 {
261 if (pf != V3D_QPU_PF_NONE)
262 add_write_dep(state, &state->last_sf, n);
263 }
264
265 static void
266 process_uf_deps(struct schedule_state *state, struct schedule_node *n,
267 enum v3d_qpu_uf uf)
268 {
269 if (uf != V3D_QPU_UF_NONE)
270 add_write_dep(state, &state->last_sf, n);
271 }
272
273 /**
274 * Common code for dependencies that need to be tracked both forward and
275 * backward.
276 *
277 * This is for things like "all reads of r4 have to happen between the r4
278 * writes that surround them".
279 */
280 static void
281 calculate_deps(struct schedule_state *state, struct schedule_node *n)
282 {
283 const struct v3d_device_info *devinfo = state->devinfo;
284 struct qinst *qinst = n->inst;
285 struct v3d_qpu_instr *inst = &qinst->qpu;
286 /* If the input and output segments are shared, then all VPM reads from
287 * a location need to happen before all writes. We handle this by
288 * serializing all VPM operations for now.
289 */
290 bool separate_vpm_segment = false;
291
292 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH) {
293 if (inst->branch.cond != V3D_QPU_BRANCH_COND_ALWAYS)
294 add_read_dep(state, state->last_sf, n);
295
296 /* XXX: BDI */
297 /* XXX: BDU */
298 /* XXX: ub */
299 /* XXX: raddr_a */
300
301 add_write_dep(state, &state->last_unif, n);
302 return;
303 }
304
305 assert(inst->type == V3D_QPU_INSTR_TYPE_ALU);
306
307 /* XXX: LOAD_IMM */
308
309 if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 0)
310 process_mux_deps(state, n, inst->alu.add.a);
311 if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 1)
312 process_mux_deps(state, n, inst->alu.add.b);
313
314 if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 0)
315 process_mux_deps(state, n, inst->alu.mul.a);
316 if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 1)
317 process_mux_deps(state, n, inst->alu.mul.b);
318
319 switch (inst->alu.add.op) {
320 case V3D_QPU_A_VPMSETUP:
321 /* Could distinguish read/write by unpacking the uniform. */
322 add_write_dep(state, &state->last_vpm, n);
323 add_write_dep(state, &state->last_vpm_read, n);
324 break;
325
326 case V3D_QPU_A_STVPMV:
327 case V3D_QPU_A_STVPMD:
328 case V3D_QPU_A_STVPMP:
329 add_write_dep(state, &state->last_vpm, n);
330 break;
331
332 case V3D_QPU_A_LDVPMV_IN:
333 case V3D_QPU_A_LDVPMD_IN:
334 case V3D_QPU_A_LDVPMG_IN:
335 case V3D_QPU_A_LDVPMP:
336 if (!separate_vpm_segment)
337 add_write_dep(state, &state->last_vpm, n);
338 break;
339
340 case V3D_QPU_A_VPMWT:
341 add_read_dep(state, state->last_vpm, n);
342 break;
343
344 case V3D_QPU_A_MSF:
345 add_read_dep(state, state->last_tlb, n);
346 break;
347
348 case V3D_QPU_A_SETMSF:
349 case V3D_QPU_A_SETREVF:
350 add_write_dep(state, &state->last_tlb, n);
351 break;
352
353 case V3D_QPU_A_FLAPUSH:
354 case V3D_QPU_A_FLBPUSH:
355 case V3D_QPU_A_VFLA:
356 case V3D_QPU_A_VFLNA:
357 case V3D_QPU_A_VFLB:
358 case V3D_QPU_A_VFLNB:
359 add_read_dep(state, state->last_sf, n);
360 break;
361
362 case V3D_QPU_A_FLPOP:
363 add_write_dep(state, &state->last_sf, n);
364 break;
365
366 default:
367 break;
368 }
369
370 switch (inst->alu.mul.op) {
371 case V3D_QPU_M_MULTOP:
372 case V3D_QPU_M_UMUL24:
373 /* MULTOP sets rtop, and UMUL24 implicitly reads rtop and
374 * resets it to 0. We could possibly reorder umul24s relative
375 * to each other, but for now just keep all the MUL parts in
376 * order.
377 */
378 add_write_dep(state, &state->last_rtop, n);
379 break;
380 default:
381 break;
382 }
383
384 if (inst->alu.add.op != V3D_QPU_A_NOP) {
385 process_waddr_deps(state, n, inst->alu.add.waddr,
386 inst->alu.add.magic_write);
387 }
388 if (inst->alu.mul.op != V3D_QPU_M_NOP) {
389 process_waddr_deps(state, n, inst->alu.mul.waddr,
390 inst->alu.mul.magic_write);
391 }
392 if (v3d_qpu_sig_writes_address(devinfo, &inst->sig)) {
393 process_waddr_deps(state, n, inst->sig_addr,
394 inst->sig_magic);
395 }
396
397 if (v3d_qpu_writes_r3(devinfo, inst))
398 add_write_dep(state, &state->last_r[3], n);
399 if (v3d_qpu_writes_r4(devinfo, inst))
400 add_write_dep(state, &state->last_r[4], n);
401 if (v3d_qpu_writes_r5(devinfo, inst))
402 add_write_dep(state, &state->last_r[5], n);
403
404 if (inst->sig.thrsw) {
405 /* All accumulator contents and flags are undefined after the
406 * switch.
407 */
408 for (int i = 0; i < ARRAY_SIZE(state->last_r); i++)
409 add_write_dep(state, &state->last_r[i], n);
410 add_write_dep(state, &state->last_sf, n);
411 add_write_dep(state, &state->last_rtop, n);
412
413 /* Scoreboard-locking operations have to stay after the last
414 * thread switch.
415 */
416 add_write_dep(state, &state->last_tlb, n);
417
418 add_write_dep(state, &state->last_tmu_write, n);
419 add_write_dep(state, &state->last_tmu_config, n);
420 }
421
422 if (v3d_qpu_waits_on_tmu(inst)) {
423 /* TMU loads are coming from a FIFO, so ordering is important.
424 */
425 add_write_dep(state, &state->last_tmu_write, n);
426 }
427
428 if (inst->sig.wrtmuc)
429 add_write_dep(state, &state->last_tmu_config, n);
430
431 if (inst->sig.ldtlb | inst->sig.ldtlbu)
432 add_read_dep(state, state->last_tlb, n);
433
434 if (inst->sig.ldvpm) {
435 add_write_dep(state, &state->last_vpm_read, n);
436
437 /* At least for now, we're doing shared I/O segments, so queue
438 * all writes after all reads.
439 */
440 if (!separate_vpm_segment)
441 add_write_dep(state, &state->last_vpm, n);
442 }
443
444 /* inst->sig.ldunif or sideband uniform read */
445 if (qinst->uniform != ~0)
446 add_write_dep(state, &state->last_unif, n);
447
448 process_cond_deps(state, n, inst->flags.ac);
449 process_cond_deps(state, n, inst->flags.mc);
450 process_pf_deps(state, n, inst->flags.apf);
451 process_pf_deps(state, n, inst->flags.mpf);
452 process_uf_deps(state, n, inst->flags.auf);
453 process_uf_deps(state, n, inst->flags.muf);
454 }
455
456 static void
457 calculate_forward_deps(struct v3d_compile *c, struct list_head *schedule_list)
458 {
459 struct schedule_state state;
460
461 memset(&state, 0, sizeof(state));
462 state.devinfo = c->devinfo;
463 state.dir = F;
464
465 list_for_each_entry(struct schedule_node, node, schedule_list, link)
466 calculate_deps(&state, node);
467 }
468
469 static void
470 calculate_reverse_deps(struct v3d_compile *c, struct list_head *schedule_list)
471 {
472 struct list_head *node;
473 struct schedule_state state;
474
475 memset(&state, 0, sizeof(state));
476 state.devinfo = c->devinfo;
477 state.dir = R;
478
479 for (node = schedule_list->prev; schedule_list != node; node = node->prev) {
480 calculate_deps(&state, (struct schedule_node *)node);
481 }
482 }
483
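/**
 * Running state for the instruction-emission loop: the current tick and
 * the ticks of recent events that impose instruction spacing rules (magic
 * SFU writes feeding r4, ldvary feeding r5, thread switches), plus
 * whether a TLB access has already locked the scoreboard in this shader.
 */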
484 struct choose_scoreboard {
485 int tick;
486 int last_magic_sfu_write_tick;
487 int last_ldvary_tick;
488 int last_uniforms_reset_tick;
489 int last_thrsw_tick;
490 bool tlb_locked;
491 };
492
493 static bool
494 mux_reads_too_soon(struct choose_scoreboard *scoreboard,
495 const struct v3d_qpu_instr *inst, enum v3d_qpu_mux mux)
496 {
497 switch (mux) {
498 case V3D_QPU_MUX_R4:
499 if (scoreboard->tick - scoreboard->last_magic_sfu_write_tick <= 2)
500 return true;
501 break;
502
503 case V3D_QPU_MUX_R5:
504 if (scoreboard->tick - scoreboard->last_ldvary_tick <= 1)
505 return true;
506 break;
507 default:
508 break;
509 }
510
511 return false;
512 }
513
514 static bool
515 reads_too_soon_after_write(struct choose_scoreboard *scoreboard,
516 struct qinst *qinst)
517 {
518 const struct v3d_qpu_instr *inst = &qinst->qpu;
519
520 /* XXX: Branching off of raddr. */
521 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH)
522 return false;
523
524 assert(inst->type == V3D_QPU_INSTR_TYPE_ALU);
525
526 if (inst->alu.add.op != V3D_QPU_A_NOP) {
527 if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 0 &&
528 mux_reads_too_soon(scoreboard, inst, inst->alu.add.a)) {
529 return true;
530 }
531 if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 1 &&
532 mux_reads_too_soon(scoreboard, inst, inst->alu.add.b)) {
533 return true;
534 }
535 }
536
537 if (inst->alu.mul.op != V3D_QPU_M_NOP) {
538 if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 0 &&
539 mux_reads_too_soon(scoreboard, inst, inst->alu.mul.a)) {
540 return true;
541 }
542 if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 1 &&
543 mux_reads_too_soon(scoreboard, inst, inst->alu.mul.b)) {
544 return true;
545 }
546 }
547
548 /* XXX: imm */
549
550 return false;
551 }
552
553 static bool
554 writes_too_soon_after_write(const struct v3d_device_info *devinfo,
555 struct choose_scoreboard *scoreboard,
556 struct qinst *qinst)
557 {
558 const struct v3d_qpu_instr *inst = &qinst->qpu;
559
560 /* Don't schedule any other r4 write too soon after an SFU write.
561 * This would normally be prevented by dependency tracking, but might
562 * occur if a dead SFU computation makes it to scheduling.
563 */
564 if (scoreboard->tick - scoreboard->last_magic_sfu_write_tick < 2 &&
565 v3d_qpu_writes_r4(devinfo, inst))
566 return true;
567
568 return false;
569 }
570
571 static bool
572 pixel_scoreboard_too_soon(struct choose_scoreboard *scoreboard,
573 const struct v3d_qpu_instr *inst)
574 {
575 return (scoreboard->tick == 0 && qpu_inst_is_tlb(inst));
576 }
577
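/**
 * Returns a scheduling priority for the instruction: TLB accesses score
 * lowest (push them late), then TMU result reads, then everything else,
 * with TMU setup writes scoring highest so their latency can be hidden.
 */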
578 static int
579 get_instruction_priority(const struct v3d_qpu_instr *inst)
580 {
581 uint32_t baseline_score;
582 uint32_t next_score = 0;
583
584 /* Schedule TLB operations as late as possible, to get more
585 * parallelism between shaders.
586 */
587 if (qpu_inst_is_tlb(inst))
588 return next_score;
589 next_score++;
590
591 /* Schedule texture read results collection late to hide latency. */
592 if (v3d_qpu_waits_on_tmu(inst))
593 return next_score;
594 next_score++;
595
596 /* XXX perf: We should schedule SFU ALU ops so that the reader is 2
597 * instructions after the producer if possible, not just 1.
598 */
599
600 /* Default score for things that aren't otherwise special. */
601 baseline_score = next_score;
602 next_score++;
603
604 /* Schedule texture read setup early to hide their latency better. */
605 if (v3d_qpu_writes_tmu(inst))
606 return next_score;
607 next_score++;
608
609 return baseline_score;
610 }
611
612 static bool
613 qpu_magic_waddr_is_periph(enum v3d_qpu_waddr waddr)
614 {
615 return (v3d_qpu_magic_waddr_is_tmu(waddr) ||
616 v3d_qpu_magic_waddr_is_sfu(waddr) ||
617 v3d_qpu_magic_waddr_is_tlb(waddr) ||
618 v3d_qpu_magic_waddr_is_vpm(waddr) ||
619 v3d_qpu_magic_waddr_is_tsy(waddr));
620 }
621
622 static bool
623 qpu_accesses_peripheral(const struct v3d_qpu_instr *inst)
624 {
625 if (v3d_qpu_uses_vpm(inst))
626 return true;
627 if (v3d_qpu_uses_sfu(inst))
628 return true;
629
630 if (inst->type == V3D_QPU_INSTR_TYPE_ALU) {
631 if (inst->alu.add.op != V3D_QPU_A_NOP &&
632 inst->alu.add.magic_write &&
633 qpu_magic_waddr_is_periph(inst->alu.add.waddr)) {
634 return true;
635 }
636
637 if (inst->alu.add.op == V3D_QPU_A_TMUWT)
638 return true;
639
640 if (inst->alu.mul.op != V3D_QPU_M_NOP &&
641 inst->alu.mul.magic_write &&
642 qpu_magic_waddr_is_periph(inst->alu.mul.waddr)) {
643 return true;
644 }
645 }
646
647 return (inst->sig.ldvpm ||
648 inst->sig.ldtmu ||
649 inst->sig.ldtlb ||
650 inst->sig.ldtlbu ||
651 inst->sig.wrtmuc);
652 }
653
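/**
 * Tries to merge two ALU instructions into a single QPU instruction,
 * taking the add op, mul op, flags, and signals from whichever input
 * supplies them.  Fails if both use the same ALU, if their regfile reads
 * or signals conflict, or if the combined result doesn't pack.
 */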
654 static bool
655 qpu_merge_inst(const struct v3d_device_info *devinfo,
656 struct v3d_qpu_instr *result,
657 const struct v3d_qpu_instr *a,
658 const struct v3d_qpu_instr *b)
659 {
660 if (a->type != V3D_QPU_INSTR_TYPE_ALU ||
661 b->type != V3D_QPU_INSTR_TYPE_ALU) {
662 return false;
663 }
664
665 /* Can't do more than one peripheral access in an instruction.
666 *
667 * XXX: V3D 4.1 allows TMU read along with a VPM read or write, and
668 * WRTMUC with a TMU magic register write (other than tmuc).
669 */
670 if (qpu_accesses_peripheral(a) && qpu_accesses_peripheral(b))
671 return false;
672
673 struct v3d_qpu_instr merge = *a;
674
675 if (b->alu.add.op != V3D_QPU_A_NOP) {
676 if (a->alu.add.op != V3D_QPU_A_NOP)
677 return false;
678 merge.alu.add = b->alu.add;
679
680 merge.flags.ac = b->flags.ac;
681 merge.flags.apf = b->flags.apf;
682 merge.flags.auf = b->flags.auf;
683 }
684
685 if (b->alu.mul.op != V3D_QPU_M_NOP) {
686 if (a->alu.mul.op != V3D_QPU_M_NOP)
687 return false;
688 merge.alu.mul = b->alu.mul;
689
690 merge.flags.mc = b->flags.mc;
691 merge.flags.mpf = b->flags.mpf;
692 merge.flags.muf = b->flags.muf;
693 }
694
695 if (v3d_qpu_uses_mux(b, V3D_QPU_MUX_A)) {
696 if (v3d_qpu_uses_mux(a, V3D_QPU_MUX_A) &&
697 a->raddr_a != b->raddr_a) {
698 return false;
699 }
700 merge.raddr_a = b->raddr_a;
701 }
702
703 if (v3d_qpu_uses_mux(b, V3D_QPU_MUX_B)) {
704 if (v3d_qpu_uses_mux(a, V3D_QPU_MUX_B) &&
705 (a->raddr_b != b->raddr_b ||
706 a->sig.small_imm != b->sig.small_imm)) {
707 return false;
708 }
709 merge.raddr_b = b->raddr_b;
710 }
711
712 merge.sig.thrsw |= b->sig.thrsw;
713 merge.sig.ldunif |= b->sig.ldunif;
714 merge.sig.ldunifrf |= b->sig.ldunifrf;
715 merge.sig.ldunifa |= b->sig.ldunifa;
716 merge.sig.ldunifarf |= b->sig.ldunifarf;
717 merge.sig.ldtmu |= b->sig.ldtmu;
718 merge.sig.ldvary |= b->sig.ldvary;
719 merge.sig.ldvpm |= b->sig.ldvpm;
720 merge.sig.small_imm |= b->sig.small_imm;
721 merge.sig.ldtlb |= b->sig.ldtlb;
722 merge.sig.ldtlbu |= b->sig.ldtlbu;
723 merge.sig.ucb |= b->sig.ucb;
724 merge.sig.rotate |= b->sig.rotate;
725 merge.sig.wrtmuc |= b->sig.wrtmuc;
726
727 if (v3d_qpu_sig_writes_address(devinfo, &a->sig) &&
728 v3d_qpu_sig_writes_address(devinfo, &b->sig))
729 return false;
730 merge.sig_addr |= b->sig_addr;
731 merge.sig_magic |= b->sig_magic;
732
733 uint64_t packed;
734 bool ok = v3d_qpu_instr_pack(devinfo, &merge, &packed);
735
736 *result = merge;
737 /* No modifying the real instructions on failure. */
738 assert(ok || (a != result && b != result));
739
740 return ok;
741 }
742
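/**
 * Picks the next DAG head to schedule, or to merge with prev_inst when
 * that is non-NULL, skipping candidates that would violate an instruction
 * spacing rule and preferring higher priority and then longer
 * critical-path delay.
 */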
743 static struct schedule_node *
744 choose_instruction_to_schedule(const struct v3d_device_info *devinfo,
745 struct choose_scoreboard *scoreboard,
746 struct list_head *schedule_list,
747 struct schedule_node *prev_inst)
748 {
749 struct schedule_node *chosen = NULL;
750 int chosen_prio = 0;
751
752 /* Don't pair up anything with a thread switch signal -- emit_thrsw()
753 * will handle pairing it along with filling the delay slots.
754 */
755 if (prev_inst) {
756 if (prev_inst->inst->qpu.sig.thrsw)
757 return NULL;
758 }
759
760 list_for_each_entry(struct schedule_node, n, schedule_list, link) {
761 const struct v3d_qpu_instr *inst = &n->inst->qpu;
762
763 /* Don't choose the branch instruction until it's the last one
764 * left. We'll move it up to fit its delay slots after we
765 * choose it.
766 */
767 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH &&
768 !list_is_singular(schedule_list)) {
769 continue;
770 }
771
772 /* "An instruction must not read from a location in physical
773 * regfile A or B that was written to by the previous
774 * instruction."
775 */
776 if (reads_too_soon_after_write(scoreboard, n->inst))
777 continue;
778
779 if (writes_too_soon_after_write(devinfo, scoreboard, n->inst))
780 continue;
781
782 /* "A scoreboard wait must not occur in the first two
783 * instructions of a fragment shader. This is either the
784 * explicit Wait for Scoreboard signal or an implicit wait
785 * with the first tile-buffer read or write instruction."
786 */
787 if (pixel_scoreboard_too_soon(scoreboard, inst))
788 continue;
789
790 /* ldunif and ldvary both write r5, but ldunif does so a tick
791 * sooner. If the ldvary's r5 wasn't used, then ldunif might
792 * otherwise get scheduled so ldunif and ldvary try to update
793 * r5 in the same tick.
794 *
795 * XXX perf: To get good pipelining of a sequence of varying
796 * loads, we need to figure out how to pair the ldvary signal
797 * up to the instruction before the last r5 user in the
798 * previous ldvary sequence. Currently, it usually pairs with
799 * the last r5 user.
800 */
801 if ((inst->sig.ldunif || inst->sig.ldunifa) &&
802 scoreboard->tick == scoreboard->last_ldvary_tick + 1) {
803 continue;
804 }
805
806 /* If we're trying to pair with another instruction, check
807 * that they're compatible.
808 */
809 if (prev_inst) {
810 /* Don't pair up a thread switch signal -- we'll
811 * handle pairing it when we pick it on its own.
812 */
813 if (inst->sig.thrsw)
814 continue;
815
816 if (prev_inst->inst->uniform != -1 &&
817 n->inst->uniform != -1)
818 continue;
819
820 /* Don't merge in something that will lock the TLB.
821 * Hopefully what we have in inst will release some
822 * other instructions, allowing us to delay the
823 * TLB-locking instruction until later.
824 */
825 if (!scoreboard->tlb_locked && qpu_inst_is_tlb(inst))
826 continue;
827
828 struct v3d_qpu_instr merged_inst;
829 if (!qpu_merge_inst(devinfo, &merged_inst,
830 &prev_inst->inst->qpu, inst)) {
831 continue;
832 }
833 }
834
835 int prio = get_instruction_priority(inst);
836
837 /* Found a valid instruction. If nothing better comes along,
838 * this one works.
839 */
840 if (!chosen) {
841 chosen = n;
842 chosen_prio = prio;
843 continue;
844 }
845
846 if (prio > chosen_prio) {
847 chosen = n;
848 chosen_prio = prio;
849 } else if (prio < chosen_prio) {
850 continue;
851 }
852
853 if (n->delay > chosen->delay) {
854 chosen = n;
855 chosen_prio = prio;
856 } else if (n->delay < chosen->delay) {
857 continue;
858 }
859 }
860
861 return chosen;
862 }
863
864 static void
865 update_scoreboard_for_magic_waddr(struct choose_scoreboard *scoreboard,
866 enum v3d_qpu_waddr waddr)
867 {
868 if (v3d_qpu_magic_waddr_is_sfu(waddr))
869 scoreboard->last_magic_sfu_write_tick = scoreboard->tick;
870 }
871
872 static void
873 update_scoreboard_for_chosen(struct choose_scoreboard *scoreboard,
874 const struct v3d_qpu_instr *inst)
875 {
876 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH)
877 return;
878
879 assert(inst->type == V3D_QPU_INSTR_TYPE_ALU);
880
881 if (inst->alu.add.op != V3D_QPU_A_NOP) {
882 if (inst->alu.add.magic_write) {
883 update_scoreboard_for_magic_waddr(scoreboard,
884 inst->alu.add.waddr);
885 }
886 }
887
888 if (inst->alu.mul.op != V3D_QPU_M_NOP) {
889 if (inst->alu.mul.magic_write) {
890 update_scoreboard_for_magic_waddr(scoreboard,
891 inst->alu.mul.waddr);
892 }
893 }
894
895 if (inst->sig.ldvary)
896 scoreboard->last_ldvary_tick = scoreboard->tick;
897
898 if (qpu_inst_is_tlb(inst))
899 scoreboard->tlb_locked = true;
900 }
901
902 static void
903 dump_state(const struct v3d_device_info *devinfo,
904 struct list_head *schedule_list)
905 {
906 list_for_each_entry(struct schedule_node, n, schedule_list, link) {
907 fprintf(stderr, " t=%4d: ", n->unblocked_time);
908 v3d_qpu_dump(devinfo, &n->inst->qpu);
909 fprintf(stderr, "\n");
910
911 for (int i = 0; i < n->child_count; i++) {
912 struct schedule_node *child = n->children[i].node;
913 if (!child)
914 continue;
915
916 fprintf(stderr, " - ");
917 v3d_qpu_dump(devinfo, &child->inst->qpu);
918 fprintf(stderr, " (%d parents, %c)\n",
919 child->parent_count,
920 n->children[i].write_after_read ? 'w' : 'r');
921 }
922 }
923 }
924
925 static uint32_t magic_waddr_latency(enum v3d_qpu_waddr waddr,
926 const struct v3d_qpu_instr *after)
927 {
928 /* Apply some huge latency between texture fetch requests and getting
929 * their results back.
930 *
931 * FIXME: This is actually pretty bogus. If we do:
932 *
933 * mov tmu0_s, a
934 * <a bit of math>
935 * mov tmu0_s, b
936 * load_tmu0
937 * <more math>
938 * load_tmu0
939 *
940 * we count that as worse than
941 *
942 * mov tmu0_s, a
943 * mov tmu0_s, b
944 * <lots of math>
945 * load_tmu0
946 * <more math>
947 * load_tmu0
948 *
949 * because we associate the first load_tmu0 with the *second* tmu0_s.
950 */
951 if (v3d_qpu_magic_waddr_is_tmu(waddr) && v3d_qpu_waits_on_tmu(after))
952 return 100;
953
954 /* Assume that anything depending on us is consuming the SFU result. */
955 if (v3d_qpu_magic_waddr_is_sfu(waddr))
956 return 3;
957
958 return 1;
959 }
960
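/**
 * Estimates how many cycles after "before" issues its result can be
 * consumed by "after", based on any magic register written (TMU fetches
 * and SFU writes being the slow cases).
 */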
961 static uint32_t
962 instruction_latency(struct schedule_node *before, struct schedule_node *after)
963 {
964 const struct v3d_qpu_instr *before_inst = &before->inst->qpu;
965 const struct v3d_qpu_instr *after_inst = &after->inst->qpu;
966 uint32_t latency = 1;
967
968 if (before_inst->type != V3D_QPU_INSTR_TYPE_ALU ||
969 after_inst->type != V3D_QPU_INSTR_TYPE_ALU)
970 return latency;
971
972 if (before_inst->alu.add.magic_write) {
973 latency = MAX2(latency,
974 magic_waddr_latency(before_inst->alu.add.waddr,
975 after_inst));
976 }
977
978 if (before_inst->alu.mul.magic_write) {
979 latency = MAX2(latency,
980 magic_waddr_latency(before_inst->alu.mul.waddr,
981 after_inst));
982 }
983
984 return latency;
985 }
986
987 /** Recursive computation of the delay member of a node. */
988 static void
989 compute_delay(struct schedule_node *n)
990 {
991 if (!n->child_count) {
992 n->delay = 1;
993 } else {
994 for (int i = 0; i < n->child_count; i++) {
995 if (!n->children[i].node->delay)
996 compute_delay(n->children[i].node);
997 n->delay = MAX2(n->delay,
998 n->children[i].node->delay +
999 instruction_latency(n, n->children[i].node));
1000 }
1001 }
1002 }
1003
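/**
 * Removes the scheduled node's outgoing DAG edges: each child's
 * unblocked_time is bumped and, once its last parent is gone, the child
 * joins the list of schedulable instructions.  With war_only set, only
 * write-after-read edges are processed (with zero latency), so that the
 * writer can be scheduled right after, or even paired with, the reader.
 */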
1004 static void
1005 mark_instruction_scheduled(struct list_head *schedule_list,
1006 uint32_t time,
1007 struct schedule_node *node,
1008 bool war_only)
1009 {
1010 if (!node)
1011 return;
1012
1013 for (int i = node->child_count - 1; i >= 0; i--) {
1014 struct schedule_node *child =
1015 node->children[i].node;
1016
1017 if (!child)
1018 continue;
1019
1020 if (war_only && !node->children[i].write_after_read)
1021 continue;
1022
1023 /* If the requirement is only that the node not appear before
1024 * the last read of its destination, then it can be scheduled
1025 * immediately after (or paired with!) the thing reading the
1026 * destination.
1027 */
1028 uint32_t latency = 0;
1029 if (!war_only) {
1030 latency = instruction_latency(node,
1031 node->children[i].node);
1032 }
1033
1034 child->unblocked_time = MAX2(child->unblocked_time,
1035 time + latency);
1036 child->parent_count--;
1037 if (child->parent_count == 0)
1038 list_add(&child->link, schedule_list);
1039
1040 node->children[i].node = NULL;
1041 }
1042 }
1043
1044 static void
1045 insert_scheduled_instruction(struct v3d_compile *c,
1046 struct qblock *block,
1047 struct choose_scoreboard *scoreboard,
1048 struct qinst *inst)
1049 {
1050 list_addtail(&inst->link, &block->instructions);
1051
1052 update_scoreboard_for_chosen(scoreboard, &inst->qpu);
1053 c->qpu_inst_count++;
1054 scoreboard->tick++;
1055 }
1056
1057 static struct qinst *
1058 vir_nop()
1059 {
1060 struct qreg undef = { QFILE_NULL, 0 };
1061 struct qinst *qinst = vir_add_inst(V3D_QPU_A_NOP, undef, undef, undef);
1062
1063 return qinst;
1064 }
1065
1066 static void
1067 emit_nop(struct v3d_compile *c, struct qblock *block,
1068 struct choose_scoreboard *scoreboard)
1069 {
1070 insert_scheduled_instruction(c, block, scoreboard, vir_nop());
1071 }
1072
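/**
 * Checks the restrictions on what may go in the given delay slot (0-2) of
 * the program-end thrsw: TLB and TMUWT ops are banned from the last slot,
 * uniforms past slot 0, and VPM accesses, ldvary, physical register
 * writes, and reads of rf0-2 (which may be overwritten by the next
 * shader's setup) anywhere in the sequence.
 */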
1073 static bool
1074 qpu_instruction_valid_in_thrend_slot(struct v3d_compile *c,
1075 const struct qinst *qinst, int slot)
1076 {
1077 const struct v3d_qpu_instr *inst = &qinst->qpu;
1078
1079 /* Only TLB Z writes are prohibited in the last slot, but we don't
1080 * have those flagged so prohibit all TLB ops for now.
1081 */
1082 if (slot == 2 && qpu_inst_is_tlb(inst))
1083 return false;
1084
1085 if (slot > 0 && qinst->uniform != ~0)
1086 return false;
1087
1088 if (v3d_qpu_uses_vpm(inst))
1089 return false;
1090
1091 if (inst->sig.ldvary)
1092 return false;
1093
1094 if (inst->type == V3D_QPU_INSTR_TYPE_ALU) {
1095 /* GFXH-1625: TMUWT not allowed in the final instruction. */
1096 if (slot == 2 && inst->alu.add.op == V3D_QPU_A_TMUWT)
1097 return false;
1098
1099 /* No writing physical registers at the end. */
1100 if (!inst->alu.add.magic_write ||
1101 !inst->alu.mul.magic_write) {
1102 return false;
1103 }
1104
1105 if (c->devinfo->ver < 40 && inst->alu.add.op == V3D_QPU_A_SETMSF)
1106 return false;
1107
1108 /* RF0-2 might be overwritten during the delay slots by
1109 * fragment shader setup.
1110 */
1111 if (inst->raddr_a < 3 &&
1112 (inst->alu.add.a == V3D_QPU_MUX_A ||
1113 inst->alu.add.b == V3D_QPU_MUX_A ||
1114 inst->alu.mul.a == V3D_QPU_MUX_A ||
1115 inst->alu.mul.b == V3D_QPU_MUX_A)) {
1116 return false;
1117 }
1118
1119 if (inst->raddr_b < 3 &&
1120 !inst->sig.small_imm &&
1121 (inst->alu.add.a == V3D_QPU_MUX_B ||
1122 inst->alu.add.b == V3D_QPU_MUX_B ||
1123 inst->alu.mul.a == V3D_QPU_MUX_B ||
1124 inst->alu.mul.b == V3D_QPU_MUX_B)) {
1125 return false;
1126 }
1127 }
1128
1129 return true;
1130 }
1131
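/**
 * Checks whether a thrsw could be merged into qinst, which sits
 * instructions_in_sequence instructions back from the end of the block,
 * by validating the spacing from the previous thrsw and the instructions
 * that would become its delay slots (no SFU writes or ldvary after the
 * switch, plus the end-of-shader rules when is_thrend is set).
 */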
1132 static bool
1133 valid_thrsw_sequence(struct v3d_compile *c, struct choose_scoreboard *scoreboard,
1134 struct qinst *qinst, int instructions_in_sequence,
1135 bool is_thrend)
1136 {
1137 /* No emitting our thrsw while the previous thrsw hasn't happened yet. */
1138 if (scoreboard->last_thrsw_tick + 3 >
1139 scoreboard->tick - instructions_in_sequence) {
1140 return false;
1141 }
1142
1143 for (int slot = 0; slot < instructions_in_sequence; slot++) {
1144 /* No scheduling SFU when the result would land in the other
1145 * thread. The simulator complains for safety, though it
1146 * would only occur for dead code in our case.
1147 */
1148 if (slot > 0 &&
1149 qinst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
1150 (v3d_qpu_magic_waddr_is_sfu(qinst->qpu.alu.add.waddr) ||
1151 v3d_qpu_magic_waddr_is_sfu(qinst->qpu.alu.mul.waddr))) {
1152 return false;
1153 }
1154
1155 if (slot > 0 && qinst->qpu.sig.ldvary)
1156 return false;
1157
1158 if (is_thrend &&
1159 !qpu_instruction_valid_in_thrend_slot(c, qinst, slot)) {
1160 return false;
1161 }
1162
1163 /* Note that the list is circular, so we can only do this up
1164 * to instructions_in_sequence.
1165 */
1166 qinst = (struct qinst *)qinst->link.next;
1167 }
1168
1169 return true;
1170 }
1171
1172 /**
1173 * Emits a THRSW signal in the stream, trying to move it up to pair with
1174 * another instruction.
1175 */
1176 static int
1177 emit_thrsw(struct v3d_compile *c,
1178 struct qblock *block,
1179 struct choose_scoreboard *scoreboard,
1180 struct qinst *inst,
1181 bool is_thrend)
1182 {
1183 int time = 0;
1184
1185 /* There should be nothing in a thrsw inst being scheduled other than
1186 * the signal bits.
1187 */
1188 assert(inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU);
1189 assert(inst->qpu.alu.add.op == V3D_QPU_A_NOP);
1190 assert(inst->qpu.alu.mul.op == V3D_QPU_M_NOP);
1191
1192 /* Find how far back into previous instructions we can put the THRSW. */
1193 int slots_filled = 0;
1194 struct qinst *merge_inst = NULL;
1195 vir_for_each_inst_rev(prev_inst, block) {
1196 struct v3d_qpu_sig sig = prev_inst->qpu.sig;
1197 sig.thrsw = true;
1198 uint32_t packed_sig;
1199
1200 if (!v3d_qpu_sig_pack(c->devinfo, &sig, &packed_sig))
1201 break;
1202
1203 if (!valid_thrsw_sequence(c, scoreboard,
1204 prev_inst, slots_filled + 1,
1205 is_thrend)) {
1206 break;
1207 }
1208
1209 merge_inst = prev_inst;
1210 if (++slots_filled == 3)
1211 break;
1212 }
1213
1214 bool needs_free = false;
1215 if (merge_inst) {
1216 merge_inst->qpu.sig.thrsw = true;
1217 needs_free = true;
1218 scoreboard->last_thrsw_tick = scoreboard->tick - slots_filled;
1219 } else {
1220 scoreboard->last_thrsw_tick = scoreboard->tick;
1221 insert_scheduled_instruction(c, block, scoreboard, inst);
1222 time++;
1223 slots_filled++;
1224 merge_inst = inst;
1225 }
1226
1227 /* Insert any extra delay slot NOPs we need. */
1228 for (int i = 0; i < 3 - slots_filled; i++) {
1229 emit_nop(c, block, scoreboard);
1230 time++;
1231 }
1232
1233 /* If we're emitting the last THRSW (other than program end), then
1234 * signal that to the HW by emitting two THRSWs in a row.
1235 */
1236 if (inst->is_last_thrsw) {
1237 struct qinst *second_inst =
1238 (struct qinst *)merge_inst->link.next;
1239 second_inst->qpu.sig.thrsw = true;
1240 }
1241
1242 /* If we put our THRSW into another instruction, free up the
1243 * instruction that didn't end up scheduled into the list.
1244 */
1245 if (needs_free)
1246 free(inst);
1247
1248 return time;
1249 }
1250
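/**
 * The core list-scheduling loop for one block: pick a DAG head, try to
 * merge further instructions into the same QPU instruction, rewrite the
 * instruction's uniform stream index, and emit it (expanding thrsw and
 * branch delay slots as needed).  Returns the estimated cycle count for
 * the block.
 */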
1251 static uint32_t
1252 schedule_instructions(struct v3d_compile *c,
1253 struct choose_scoreboard *scoreboard,
1254 struct qblock *block,
1255 struct list_head *schedule_list,
1256 enum quniform_contents *orig_uniform_contents,
1257 uint32_t *orig_uniform_data,
1258 uint32_t *next_uniform)
1259 {
1260 const struct v3d_device_info *devinfo = c->devinfo;
1261 uint32_t time = 0;
1262
1263 if (debug) {
1264 fprintf(stderr, "initial deps:\n");
1265 dump_state(devinfo, schedule_list);
1266 fprintf(stderr, "\n");
1267 }
1268
1269 /* Remove non-DAG heads from the list. */
1270 list_for_each_entry_safe(struct schedule_node, n, schedule_list, link) {
1271 if (n->parent_count != 0)
1272 list_del(&n->link);
1273 }
1274
1275 while (!list_empty(schedule_list)) {
1276 struct schedule_node *chosen =
1277 choose_instruction_to_schedule(devinfo,
1278 scoreboard,
1279 schedule_list,
1280 NULL);
1281 struct schedule_node *merge = NULL;
1282
1283 /* If there are no valid instructions to schedule, drop a NOP
1284 * in.
1285 */
1286 struct qinst *qinst = chosen ? chosen->inst : vir_nop();
1287 struct v3d_qpu_instr *inst = &qinst->qpu;
1288
1289 if (debug) {
1290 fprintf(stderr, "t=%4d: current list:\n",
1291 time);
1292 dump_state(devinfo, schedule_list);
1293 fprintf(stderr, "t=%4d: chose: ", time);
1294 v3d_qpu_dump(devinfo, inst);
1295 fprintf(stderr, "\n");
1296 }
1297
1298 /* We can't mark_instruction_scheduled() the chosen inst until
1299 * we're done identifying instructions to merge, so put the
1300 * merged instructions on a list for a moment.
1301 */
1302 struct list_head merged_list;
1303 list_inithead(&merged_list);
1304
1305 /* Schedule this instruction onto the QPU list. Also try to
1306 * find an instruction to pair with it.
1307 */
1308 if (chosen) {
1309 time = MAX2(chosen->unblocked_time, time);
1310 list_del(&chosen->link);
1311 mark_instruction_scheduled(schedule_list, time,
1312 chosen, true);
1313
1314 while ((merge =
1315 choose_instruction_to_schedule(devinfo,
1316 scoreboard,
1317 schedule_list,
1318 chosen))) {
1319 time = MAX2(merge->unblocked_time, time);
1320 list_del(&merge->link);
1321 list_addtail(&merge->link, &merged_list);
1322 (void)qpu_merge_inst(devinfo, inst,
1323 inst, &merge->inst->qpu);
1324 if (merge->inst->uniform != -1) {
1325 chosen->inst->uniform =
1326 merge->inst->uniform;
1327 }
1328
1329 if (debug) {
1330 fprintf(stderr, "t=%4d: merging: ",
1331 time);
1332 v3d_qpu_dump(devinfo, &merge->inst->qpu);
1333 fprintf(stderr, "\n");
1334 fprintf(stderr, " result: ");
1335 v3d_qpu_dump(devinfo, inst);
1336 fprintf(stderr, "\n");
1337 }
1338 }
1339 }
1340
1341 /* Update the uniform index for the rewritten location --
1342 * branch target updating will still need to change
1343 * c->uniform_data[] using this index.
1344 */
1345 if (qinst->uniform != -1) {
1346 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH)
1347 block->branch_uniform = *next_uniform;
1348
1349 c->uniform_data[*next_uniform] =
1350 orig_uniform_data[qinst->uniform];
1351 c->uniform_contents[*next_uniform] =
1352 orig_uniform_contents[qinst->uniform];
1353 qinst->uniform = *next_uniform;
1354 (*next_uniform)++;
1355 }
1356
1357 if (debug) {
1358 fprintf(stderr, "\n");
1359 }
1360
1361 /* Now that we've scheduled a new instruction, some of its
1362 * children can be promoted to the list of instructions ready to
1363 * be scheduled. Update the children's unblocked time for this
1364 * DAG edge as we do so.
1365 */
1366 mark_instruction_scheduled(schedule_list, time, chosen, false);
1367 list_for_each_entry(struct schedule_node, merge, &merged_list,
1368 link) {
1369 mark_instruction_scheduled(schedule_list, time, merge,
1370 false);
1371
1372 /* The merged VIR instruction doesn't get re-added to the
1373 * block, so free it now.
1374 */
1375 free(merge->inst);
1376 }
1377
1378 if (inst->sig.thrsw) {
1379 time += emit_thrsw(c, block, scoreboard, qinst, false);
1380 } else {
1381 insert_scheduled_instruction(c, block,
1382 scoreboard, qinst);
1383
1384 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH) {
1385 block->branch_qpu_ip = c->qpu_inst_count - 1;
1386 /* Fill the delay slots.
1387 *
1388 * We should fill these with actual instructions
1389 * instead, but that will probably need to be done
1390 * after this pass, once we know what the leading
1391 * instructions of the successors are (so we can
1392 * handle A/B register file write latency).
1393 */
1394 for (int i = 0; i < 3; i++)
1395 emit_nop(c, block, scoreboard);
1396 }
1397 }
1398 }
1399
1400 return time;
1401 }
1402
1403 static uint32_t
1404 qpu_schedule_instructions_block(struct v3d_compile *c,
1405 struct choose_scoreboard *scoreboard,
1406 struct qblock *block,
1407 enum quniform_contents *orig_uniform_contents,
1408 uint32_t *orig_uniform_data,
1409 uint32_t *next_uniform)
1410 {
1411 void *mem_ctx = ralloc_context(NULL);
1412 struct list_head schedule_list;
1413
1414 list_inithead(&schedule_list);
1415
1416 /* Wrap each instruction in a scheduler structure. */
1417 while (!list_empty(&block->instructions)) {
1418 struct qinst *qinst = (struct qinst *)block->instructions.next;
1419 struct schedule_node *n =
1420 rzalloc(mem_ctx, struct schedule_node);
1421
1422 n->inst = qinst;
1423
1424 list_del(&qinst->link);
1425 list_addtail(&n->link, &schedule_list);
1426 }
1427
1428 calculate_forward_deps(c, &schedule_list);
1429 calculate_reverse_deps(c, &schedule_list);
1430
1431 list_for_each_entry(struct schedule_node, n, &schedule_list, link) {
1432 compute_delay(n);
1433 }
1434
1435 uint32_t cycles = schedule_instructions(c, scoreboard, block,
1436 &schedule_list,
1437 orig_uniform_contents,
1438 orig_uniform_data,
1439 next_uniform);
1440
1441 ralloc_free(mem_ctx);
1442
1443 return cycles;
1444 }
1445
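/**
 * With all blocks now laid out, fills in each branch instruction's
 * relative byte offset (measured from the end of its delay slots) and
 * the corresponding relative offset into the uniform stream.
 */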
1446 static void
1447 qpu_set_branch_targets(struct v3d_compile *c)
1448 {
1449 vir_for_each_block(block, c) {
1450 /* The end block of the program has no branch. */
1451 if (!block->successors[0])
1452 continue;
1453
1454 /* If there was no branch instruction, then the successor
1455 * block must follow immediately after this one.
1456 */
1457 if (block->branch_qpu_ip == ~0) {
1458 assert(block->end_qpu_ip + 1 ==
1459 block->successors[0]->start_qpu_ip);
1460 continue;
1461 }
1462
1463 /* Walk back through the delay slots to find the branch
1464 * instr.
1465 */
1466 struct list_head *entry = block->instructions.prev;
1467 for (int i = 0; i < 3; i++)
1468 entry = entry->prev;
1469 struct qinst *branch = container_of(entry, branch, link);
1470 assert(branch->qpu.type == V3D_QPU_INSTR_TYPE_BRANCH);
1471
1472 /* Make sure that the if-we-don't-jump
1473 * successor was scheduled just after the
1474 * delay slots.
1475 */
1476 assert(!block->successors[1] ||
1477 block->successors[1]->start_qpu_ip ==
1478 block->branch_qpu_ip + 4);
1479
1480 branch->qpu.branch.offset =
1481 ((block->successors[0]->start_qpu_ip -
1482 (block->branch_qpu_ip + 4)) *
1483 sizeof(uint64_t));
1484
1485 /* Set up the relative offset to jump in the
1486 * uniform stream.
1487 *
1488 * Use a temporary here, because
1489 * uniform_data[inst->uniform] may be shared
1490 * between multiple instructions.
1491 */
1492 assert(c->uniform_contents[branch->uniform] == QUNIFORM_CONSTANT);
1493 c->uniform_data[branch->uniform] =
1494 (block->successors[0]->start_uniform -
1495 (block->branch_uniform + 1)) * 4;
1496 }
1497 }
1498
1499 uint32_t
1500 v3d_qpu_schedule_instructions(struct v3d_compile *c)
1501 {
1502 const struct v3d_device_info *devinfo = c->devinfo;
1503 struct qblock *end_block = list_last_entry(&c->blocks,
1504 struct qblock, link);
1505
1506 /* We reorder the uniforms as we schedule instructions, so save the
1507 * old data off and replace it.
1508 */
1509 uint32_t *uniform_data = c->uniform_data;
1510 enum quniform_contents *uniform_contents = c->uniform_contents;
1511 c->uniform_contents = ralloc_array(c, enum quniform_contents,
1512 c->num_uniforms);
1513 c->uniform_data = ralloc_array(c, uint32_t, c->num_uniforms);
1514 c->uniform_array_size = c->num_uniforms;
1515 uint32_t next_uniform = 0;
1516
1517 struct choose_scoreboard scoreboard;
1518 memset(&scoreboard, 0, sizeof(scoreboard));
1519 scoreboard.last_ldvary_tick = -10;
1520 scoreboard.last_magic_sfu_write_tick = -10;
1521 scoreboard.last_uniforms_reset_tick = -10;
1522 scoreboard.last_thrsw_tick = -10;
1523
1524 if (debug) {
1525 fprintf(stderr, "Pre-schedule instructions\n");
1526 vir_for_each_block(block, c) {
1527 fprintf(stderr, "BLOCK %d\n", block->index);
1528 list_for_each_entry(struct qinst, qinst,
1529 &block->instructions, link) {
1530 v3d_qpu_dump(devinfo, &qinst->qpu);
1531 fprintf(stderr, "\n");
1532 }
1533 }
1534 fprintf(stderr, "\n");
1535 }
1536
1537 uint32_t cycles = 0;
1538 vir_for_each_block(block, c) {
1539 block->start_qpu_ip = c->qpu_inst_count;
1540 block->branch_qpu_ip = ~0;
1541 block->start_uniform = next_uniform;
1542
1543 cycles += qpu_schedule_instructions_block(c,
1544 &scoreboard,
1545 block,
1546 uniform_contents,
1547 uniform_data,
1548 &next_uniform);
1549
1550 block->end_qpu_ip = c->qpu_inst_count - 1;
1551 }
1552
1553 /* Emit the program-end THRSW instruction. */
1554 struct qinst *thrsw = vir_nop();
1555 thrsw->qpu.sig.thrsw = true;
1556 emit_thrsw(c, end_block, &scoreboard, thrsw, true);
1557
1558 qpu_set_branch_targets(c);
1559
1560 assert(next_uniform == c->num_uniforms);
1561
1562 return cycles;
1563 }