broadcom/vc5: Properly schedule the thread-end THRSW.
[mesa.git] / src / broadcom / compiler / qpu_schedule.c
1 /*
2 * Copyright © 2010 Intel Corporation
3 * Copyright © 2014-2017 Broadcom
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 /**
26 * @file
27 *
28 * The basic model of the list scheduler is to take a basic block, compute a
29 * DAG of the dependencies, and make a list of the DAG heads. Heuristically
30 * pick a DAG head, then put all the children that are now DAG heads into the
31 * list of things to schedule.
32 *
33 * The goal of scheduling here is to pack pairs of operations together in a
34 * single QPU instruction.
35 */
36
37 #include "qpu/qpu_disasm.h"
38 #include "v3d_compiler.h"
39 #include "util/ralloc.h"
40
41 static bool debug;
42
43 struct schedule_node_child;
44
45 struct schedule_node {
46 struct list_head link;
47 struct qinst *inst;
48 struct schedule_node_child *children;
49 uint32_t child_count;
50 uint32_t child_array_size;
51 uint32_t parent_count;
52
53 /* Latest (schedule time + instruction_latency()) over all parents of this node. */
54 uint32_t unblocked_time;
55
56 /**
57 * Minimum number of cycles from scheduling this instruction until the
58 * end of the program, based on the slowest dependency chain through
59 * the children.
60 */
61 uint32_t delay;
62
63 /**
64 * Cycles between this instruction being scheduled and when its result
65 * can be consumed.
66 */
67 uint32_t latency;
68 };
69
70 struct schedule_node_child {
71 struct schedule_node *node;
72 bool write_after_read;
73 };
74
75 /* When walking the instructions in reverse, we need to swap before/after in
76 * add_dep().
77 */
78 enum direction { F, R };
79
80 struct schedule_state {
81 const struct v3d_device_info *devinfo;
82 struct schedule_node *last_r[6];
83 struct schedule_node *last_rf[64];
84 struct schedule_node *last_sf;
85 struct schedule_node *last_vpm_read;
86 struct schedule_node *last_tmu_write;
87 struct schedule_node *last_tlb;
88 struct schedule_node *last_vpm;
89 struct schedule_node *last_unif;
90 struct schedule_node *last_rtop;
91 enum direction dir;
92 /* Estimated cycle when the current instruction would start. */
93 uint32_t time;
94 };
95
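/**
 * Records a dependency edge from @before to @after (swapped when walking
 * the block in reverse), skipping duplicate edges and growing the child
 * array as needed.
 */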
96 static void
97 add_dep(struct schedule_state *state,
98 struct schedule_node *before,
99 struct schedule_node *after,
100 bool write)
101 {
102 bool write_after_read = !write && state->dir == R;
103
104 if (!before || !after)
105 return;
106
107 assert(before != after);
108
109 if (state->dir == R) {
110 struct schedule_node *t = before;
111 before = after;
112 after = t;
113 }
114
115 for (int i = 0; i < before->child_count; i++) {
116 if (before->children[i].node == after &&
117 (before->children[i].write_after_read == write_after_read)) {
118 return;
119 }
120 }
121
122 if (before->child_array_size <= before->child_count) {
123 before->child_array_size = MAX2(before->child_array_size * 2, 16);
124 before->children = reralloc(before, before->children,
125 struct schedule_node_child,
126 before->child_array_size);
127 }
128
129 before->children[before->child_count].node = after;
130 before->children[before->child_count].write_after_read =
131 write_after_read;
132 before->child_count++;
133 after->parent_count++;
134 }
135
136 static void
137 add_read_dep(struct schedule_state *state,
138 struct schedule_node *before,
139 struct schedule_node *after)
140 {
141 add_dep(state, before, after, false);
142 }
143
144 static void
145 add_write_dep(struct schedule_state *state,
146 struct schedule_node **before,
147 struct schedule_node *after)
148 {
149 add_dep(state, *before, after, true);
150 *before = after;
151 }
152
153 static bool
154 qpu_inst_is_tlb(const struct v3d_qpu_instr *inst)
155 {
156 if (inst->type != V3D_QPU_INSTR_TYPE_ALU)
157 return false;
158
159 if (inst->alu.add.magic_write &&
160 (inst->alu.add.waddr == V3D_QPU_WADDR_TLB ||
161 inst->alu.add.waddr == V3D_QPU_WADDR_TLBU))
162 return true;
163
164 if (inst->alu.mul.magic_write &&
165 (inst->alu.mul.waddr == V3D_QPU_WADDR_TLB ||
166 inst->alu.mul.waddr == V3D_QPU_WADDR_TLBU))
167 return true;
168
169 return false;
170 }
171
172 static void
173 process_mux_deps(struct schedule_state *state, struct schedule_node *n,
174 enum v3d_qpu_mux mux)
175 {
176 switch (mux) {
177 case V3D_QPU_MUX_A:
178 add_read_dep(state, state->last_rf[n->inst->qpu.raddr_a], n);
179 break;
180 case V3D_QPU_MUX_B:
181 add_read_dep(state, state->last_rf[n->inst->qpu.raddr_b], n);
182 break;
183 default:
184 add_read_dep(state, state->last_r[mux - V3D_QPU_MUX_R0], n);
185 break;
186 }
187 }
188
189
190 static void
191 process_waddr_deps(struct schedule_state *state, struct schedule_node *n,
192 uint32_t waddr, bool magic)
193 {
194 if (!magic) {
195 add_write_dep(state, &state->last_rf[waddr], n);
196 } else if (v3d_qpu_magic_waddr_is_tmu(waddr)) {
197 add_write_dep(state, &state->last_tmu_write, n);
198 } else if (v3d_qpu_magic_waddr_is_sfu(waddr)) {
199 /* Handled by v3d_qpu_writes_r4() check. */
200 } else {
201 switch (waddr) {
202 case V3D_QPU_WADDR_R0:
203 case V3D_QPU_WADDR_R1:
204 case V3D_QPU_WADDR_R2:
205 add_write_dep(state,
206 &state->last_r[waddr - V3D_QPU_WADDR_R0],
207 n);
208 break;
209 case V3D_QPU_WADDR_R3:
210 case V3D_QPU_WADDR_R4:
211 case V3D_QPU_WADDR_R5:
212 /* Handled by v3d_qpu_writes_r*() checks below. */
213 break;
214
215 case V3D_QPU_WADDR_VPM:
216 case V3D_QPU_WADDR_VPMU:
217 add_write_dep(state, &state->last_vpm, n);
218 break;
219
220 case V3D_QPU_WADDR_TLB:
221 case V3D_QPU_WADDR_TLBU:
222 add_write_dep(state, &state->last_tlb, n);
223 break;
224
225 case V3D_QPU_WADDR_NOP:
226 break;
227
228 default:
229 fprintf(stderr, "Unknown waddr %d\n", waddr);
230 abort();
231 }
232 }
233 }
234
235 static void
236 process_cond_deps(struct schedule_state *state, struct schedule_node *n,
237 enum v3d_qpu_cond cond)
238 {
239 if (cond != V3D_QPU_COND_NONE)
240 add_read_dep(state, state->last_sf, n);
241 }
242
243 static void
244 process_pf_deps(struct schedule_state *state, struct schedule_node *n,
245 enum v3d_qpu_pf pf)
246 {
247 if (pf != V3D_QPU_PF_NONE)
248 add_write_dep(state, &state->last_sf, n);
249 }
250
251 static void
252 process_uf_deps(struct schedule_state *state, struct schedule_node *n,
253 enum v3d_qpu_uf uf)
254 {
255 if (uf != V3D_QPU_UF_NONE)
256 add_write_dep(state, &state->last_sf, n);
257 }
258
259 /**
260 * Common code for dependencies that need to be tracked both forward and
261 * backward.
262 *
263 * This is for things like "all reads of r4 have to happen between the r4
264 * writes that surround them".
265 */
266 static void
267 calculate_deps(struct schedule_state *state, struct schedule_node *n)
268 {
269 const struct v3d_device_info *devinfo = state->devinfo;
270 struct qinst *qinst = n->inst;
271 struct v3d_qpu_instr *inst = &qinst->qpu;
272
273 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH) {
274 if (inst->branch.cond != V3D_QPU_BRANCH_COND_ALWAYS)
275 add_read_dep(state, state->last_sf, n);
276
277 /* XXX: BDI */
278 /* XXX: BDU */
279 /* XXX: ub */
280 /* XXX: raddr_a */
281
282 add_write_dep(state, &state->last_unif, n);
283 return;
284 }
285
286 assert(inst->type == V3D_QPU_INSTR_TYPE_ALU);
287
288 /* XXX: LOAD_IMM */
289
290 if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 0)
291 process_mux_deps(state, n, inst->alu.add.a);
292 if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 1)
293 process_mux_deps(state, n, inst->alu.add.b);
294
295 if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 0)
296 process_mux_deps(state, n, inst->alu.mul.a);
297 if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 1)
298 process_mux_deps(state, n, inst->alu.mul.b);
299
300 switch (inst->alu.add.op) {
301 case V3D_QPU_A_VPMSETUP:
302 /* Could distinguish read/write by unpacking the uniform. */
303 add_write_dep(state, &state->last_vpm, n);
304 add_write_dep(state, &state->last_vpm_read, n);
305 break;
306
307 case V3D_QPU_A_STVPMV:
308 case V3D_QPU_A_STVPMD:
309 case V3D_QPU_A_STVPMP:
310 add_write_dep(state, &state->last_vpm, n);
311 break;
312
313 case V3D_QPU_A_VPMWT:
314 add_read_dep(state, state->last_vpm, n);
315 break;
316
317 case V3D_QPU_A_MSF:
318 add_read_dep(state, state->last_tlb, n);
319 break;
320
321 case V3D_QPU_A_SETMSF:
322 case V3D_QPU_A_SETREVF:
323 add_write_dep(state, &state->last_tlb, n);
324 break;
325
326 case V3D_QPU_A_FLAPUSH:
327 case V3D_QPU_A_FLBPUSH:
328 case V3D_QPU_A_VFLA:
329 case V3D_QPU_A_VFLNA:
330 case V3D_QPU_A_VFLB:
331 case V3D_QPU_A_VFLNB:
332 add_read_dep(state, state->last_sf, n);
333 break;
334
335 case V3D_QPU_A_FLBPOP:
336 add_write_dep(state, &state->last_sf, n);
337 break;
338
339 default:
340 break;
341 }
342
343 switch (inst->alu.mul.op) {
344 case V3D_QPU_M_MULTOP:
345 case V3D_QPU_M_UMUL24:
346 /* MULTOP sets rtop, and UMUL24 implicitly reads rtop and
347 * resets it to 0. We could possibly reorder umul24s relative
348 * to each other, but for now just keep all the MUL parts in
349 * order.
350 */
351 add_write_dep(state, &state->last_rtop, n);
352 break;
353 default:
354 break;
355 }
356
357 if (inst->alu.add.op != V3D_QPU_A_NOP) {
358 process_waddr_deps(state, n, inst->alu.add.waddr,
359 inst->alu.add.magic_write);
360 }
361 if (inst->alu.mul.op != V3D_QPU_M_NOP) {
362 process_waddr_deps(state, n, inst->alu.mul.waddr,
363 inst->alu.mul.magic_write);
364 }
365 if (v3d_qpu_sig_writes_address(devinfo, &inst->sig)) {
366 process_waddr_deps(state, n, inst->sig_addr,
367 inst->sig_magic);
368 }
369
370 if (v3d_qpu_writes_r3(devinfo, inst))
371 add_write_dep(state, &state->last_r[3], n);
372 if (v3d_qpu_writes_r4(devinfo, inst))
373 add_write_dep(state, &state->last_r[4], n);
374 if (v3d_qpu_writes_r5(devinfo, inst))
375 add_write_dep(state, &state->last_r[5], n);
376
377 if (inst->sig.thrsw) {
378 /* All accumulator contents and flags are undefined after the
379 * switch.
380 */
381 for (int i = 0; i < ARRAY_SIZE(state->last_r); i++)
382 add_write_dep(state, &state->last_r[i], n);
383 add_write_dep(state, &state->last_sf, n);
384
385 /* Scoreboard-locking operations have to stay after the last
386 * thread switch.
387 */
388 add_write_dep(state, &state->last_tlb, n);
389
390 add_write_dep(state, &state->last_tmu_write, n);
391 }
392
393 if (inst->sig.ldtmu) {
394 /* TMU loads are coming from a FIFO, so ordering is important.
395 */
396 add_write_dep(state, &state->last_tmu_write, n);
397 }
398
399 if (inst->sig.ldtlb || inst->sig.ldtlbu)
400 add_read_dep(state, state->last_tlb, n);
401
402 if (inst->sig.ldvpm)
403 add_write_dep(state, &state->last_vpm_read, n);
404
405 /* inst->sig.ldunif or sideband uniform read */
406 if (qinst->uniform != ~0)
407 add_write_dep(state, &state->last_unif, n);
408
409 process_cond_deps(state, n, inst->flags.ac);
410 process_cond_deps(state, n, inst->flags.mc);
411 process_pf_deps(state, n, inst->flags.apf);
412 process_pf_deps(state, n, inst->flags.mpf);
413 process_uf_deps(state, n, inst->flags.auf);
414 process_uf_deps(state, n, inst->flags.muf);
415 }
416
417 static void
418 calculate_forward_deps(struct v3d_compile *c, struct list_head *schedule_list)
419 {
420 struct schedule_state state;
421
422 memset(&state, 0, sizeof(state));
423 state.devinfo = c->devinfo;
424 state.dir = F;
425
426 list_for_each_entry(struct schedule_node, node, schedule_list, link)
427 calculate_deps(&state, node);
428 }
429
430 static void
431 calculate_reverse_deps(struct v3d_compile *c, struct list_head *schedule_list)
432 {
433 struct list_head *node;
434 struct schedule_state state;
435
436 memset(&state, 0, sizeof(state));
437 state.devinfo = c->devinfo;
438 state.dir = R;
439
440 for (node = schedule_list->prev; schedule_list != node; node = node->prev) {
441 calculate_deps(&state, (struct schedule_node *)node);
442 }
443 }
444
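/**
 * State tracked while emitting instructions, used to enforce the QPU's
 * hazard rules (SFU/r4 and ldvary/r5 result timing, physical regfile
 * write-then-read spacing) and to remember whether the TLB has been
 * locked by a previous access.
 */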
445 struct choose_scoreboard {
446 int tick;
447 int last_sfu_write_tick;
448 int last_ldvary_tick;
449 int last_uniforms_reset_tick;
450 uint32_t last_waddr_add, last_waddr_mul;
451 bool tlb_locked;
452 };
453
454 static bool
455 mux_reads_too_soon(struct choose_scoreboard *scoreboard,
456 const struct v3d_qpu_instr *inst, enum v3d_qpu_mux mux)
457 {
458 switch (mux) {
459 case V3D_QPU_MUX_A:
460 if (scoreboard->last_waddr_add == inst->raddr_a ||
461 scoreboard->last_waddr_mul == inst->raddr_a) {
462 return true;
463 }
464 break;
465
466 case V3D_QPU_MUX_B:
467 if (scoreboard->last_waddr_add == inst->raddr_b ||
468 scoreboard->last_waddr_mul == inst->raddr_b) {
469 return true;
470 }
471 break;
472
473 case V3D_QPU_MUX_R4:
474 if (scoreboard->tick - scoreboard->last_sfu_write_tick <= 2)
475 return true;
476 break;
477
478 case V3D_QPU_MUX_R5:
479 if (scoreboard->tick - scoreboard->last_ldvary_tick <= 1)
480 return true;
481 break;
482 default:
483 break;
484 }
485
486 return false;
487 }
488
489 static bool
490 reads_too_soon_after_write(struct choose_scoreboard *scoreboard,
491 struct qinst *qinst)
492 {
493 const struct v3d_qpu_instr *inst = &qinst->qpu;
494
495 /* XXX: Branching off of raddr. */
496 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH)
497 return false;
498
499 assert(inst->type == V3D_QPU_INSTR_TYPE_ALU);
500
501 if (inst->alu.add.op != V3D_QPU_A_NOP) {
502 if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 0 &&
503 mux_reads_too_soon(scoreboard, inst, inst->alu.add.a)) {
504 return true;
505 }
506 if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 1 &&
507 mux_reads_too_soon(scoreboard, inst, inst->alu.add.b)) {
508 return true;
509 }
510 }
511
512 if (inst->alu.mul.op != V3D_QPU_M_NOP) {
513 if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 0 &&
514 mux_reads_too_soon(scoreboard, inst, inst->alu.mul.a)) {
515 return true;
516 }
517 if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 1 &&
518 mux_reads_too_soon(scoreboard, inst, inst->alu.mul.b)) {
519 return true;
520 }
521 }
522
523 /* XXX: imm */
524
525 return false;
526 }
527
528 static bool
529 writes_too_soon_after_write(const struct v3d_device_info *devinfo,
530 struct choose_scoreboard *scoreboard,
531 struct qinst *qinst)
532 {
533 const struct v3d_qpu_instr *inst = &qinst->qpu;
534
535 /* Don't schedule any other r4 write too soon after an SFU write.
536 * This would normally be prevented by dependency tracking, but might
537 * occur if a dead SFU computation makes it to scheduling.
538 */
539 if (scoreboard->tick - scoreboard->last_sfu_write_tick < 2 &&
540 v3d_qpu_writes_r4(devinfo, inst))
541 return true;
542
543 return false;
544 }
545
546 static bool
547 pixel_scoreboard_too_soon(struct choose_scoreboard *scoreboard,
548 const struct v3d_qpu_instr *inst)
549 {
550 return (scoreboard->tick == 0 && qpu_inst_is_tlb(inst));
551 }
552
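/**
 * Returns a scheduling priority for the instruction: TLB accesses and TMU
 * result collection score lowest (so they end up late), TMU setup writes
 * score highest (so they start early), and everything else falls in
 * between.
 */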
553 static int
554 get_instruction_priority(const struct v3d_qpu_instr *inst)
555 {
556 uint32_t baseline_score;
557 uint32_t next_score = 0;
558
559 /* Schedule TLB operations as late as possible, to get more
560 * parallelism between shaders.
561 */
562 if (qpu_inst_is_tlb(inst))
563 return next_score;
564 next_score++;
565
566 /* Schedule texture read results collection late to hide latency. */
567 if (inst->sig.ldtmu)
568 return next_score;
569 next_score++;
570
571 /* Default score for things that aren't otherwise special. */
572 baseline_score = next_score;
573 next_score++;
574
575 /* Schedule texture read setup early to better hide its latency. */
576 if (inst->type == V3D_QPU_INSTR_TYPE_ALU &&
577 ((inst->alu.add.magic_write &&
578 v3d_qpu_magic_waddr_is_tmu(inst->alu.add.waddr)) ||
579 (inst->alu.mul.magic_write &&
580 v3d_qpu_magic_waddr_is_tmu(inst->alu.mul.waddr)))) {
581 return next_score;
582 }
583 next_score++;
584
585 return baseline_score;
586 }
587
588 static bool
589 qpu_magic_waddr_is_periph(enum v3d_qpu_waddr waddr)
590 {
591 return (v3d_qpu_magic_waddr_is_tmu(waddr) ||
592 v3d_qpu_magic_waddr_is_sfu(waddr) ||
593 v3d_qpu_magic_waddr_is_tlb(waddr) ||
594 v3d_qpu_magic_waddr_is_vpm(waddr) ||
595 v3d_qpu_magic_waddr_is_tsy(waddr));
596 }
597
598 static bool
599 qpu_accesses_peripheral(const struct v3d_qpu_instr *inst)
600 {
601 if (v3d_qpu_uses_vpm(inst))
602 return true;
603
604 if (inst->type == V3D_QPU_INSTR_TYPE_ALU) {
605 if (inst->alu.add.op != V3D_QPU_A_NOP &&
606 inst->alu.add.magic_write &&
607 qpu_magic_waddr_is_periph(inst->alu.add.waddr)) {
608 return true;
609 }
610
611 if (inst->alu.mul.op != V3D_QPU_M_NOP &&
612 inst->alu.mul.magic_write &&
613 qpu_magic_waddr_is_periph(inst->alu.mul.waddr)) {
614 return true;
615 }
616 }
617
618 return (inst->sig.ldvpm ||
619 inst->sig.ldtmu ||
620 inst->sig.ldtlb ||
621 inst->sig.ldtlbu ||
622 inst->sig.wrtmuc);
623 }
624
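/**
 * Tries to merge instructions @a and @b into a single QPU instruction,
 * failing if they both access a peripheral, use the same ALU, need
 * conflicting raddrs, both have an address-writing signal, or the merged
 * result can't be packed.
 */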
625 static bool
626 qpu_merge_inst(const struct v3d_device_info *devinfo,
627 struct v3d_qpu_instr *result,
628 const struct v3d_qpu_instr *a,
629 const struct v3d_qpu_instr *b)
630 {
631 if (a->type != V3D_QPU_INSTR_TYPE_ALU ||
632 b->type != V3D_QPU_INSTR_TYPE_ALU) {
633 return false;
634 }
635
636 /* Can't do more than one peripheral access in an instruction.
637 *
638 * XXX: V3D 4.1 allows TMU read along with a VPM read or write, and
639 * WRTMUC with a TMU magic register write (other than tmuc).
640 */
641 if (qpu_accesses_peripheral(a) && qpu_accesses_peripheral(b))
642 return false;
643
644 struct v3d_qpu_instr merge = *a;
645
646 if (b->alu.add.op != V3D_QPU_A_NOP) {
647 if (a->alu.add.op != V3D_QPU_A_NOP)
648 return false;
649 merge.alu.add = b->alu.add;
650
651 merge.flags.ac = b->flags.ac;
652 merge.flags.apf = b->flags.apf;
653 merge.flags.auf = b->flags.auf;
654 }
655
656 if (b->alu.mul.op != V3D_QPU_M_NOP) {
657 if (a->alu.mul.op != V3D_QPU_M_NOP)
658 return false;
659 merge.alu.mul = b->alu.mul;
660
661 merge.flags.mc = b->flags.mc;
662 merge.flags.mpf = b->flags.mpf;
663 merge.flags.muf = b->flags.muf;
664 }
665
666 if (v3d_qpu_uses_mux(b, V3D_QPU_MUX_A)) {
667 if (v3d_qpu_uses_mux(a, V3D_QPU_MUX_A) &&
668 a->raddr_a != b->raddr_a) {
669 return false;
670 }
671 merge.raddr_a = b->raddr_a;
672 }
673
674 if (v3d_qpu_uses_mux(b, V3D_QPU_MUX_B)) {
675 if (v3d_qpu_uses_mux(a, V3D_QPU_MUX_B) &&
676 a->raddr_b != b->raddr_b) {
677 return false;
678 }
679 merge.raddr_b = b->raddr_b;
680 }
681
682 merge.sig.thrsw |= b->sig.thrsw;
683 merge.sig.ldunif |= b->sig.ldunif;
684 merge.sig.ldunifrf |= b->sig.ldunifrf;
685 merge.sig.ldunifa |= b->sig.ldunifa;
686 merge.sig.ldunifarf |= b->sig.ldunifarf;
687 merge.sig.ldtmu |= b->sig.ldtmu;
688 merge.sig.ldvary |= b->sig.ldvary;
689 merge.sig.ldvpm |= b->sig.ldvpm;
690 merge.sig.small_imm |= b->sig.small_imm;
691 merge.sig.ldtlb |= b->sig.ldtlb;
692 merge.sig.ldtlbu |= b->sig.ldtlbu;
693 merge.sig.ucb |= b->sig.ucb;
694 merge.sig.rotate |= b->sig.rotate;
695 merge.sig.wrtmuc |= b->sig.wrtmuc;
696
697 if (v3d_qpu_sig_writes_address(devinfo, &a->sig) &&
698 v3d_qpu_sig_writes_address(devinfo, &b->sig))
699 return false;
700 merge.sig_addr |= b->sig_addr;
701 merge.sig_magic |= b->sig_magic;
702
703 uint64_t packed;
704 bool ok = v3d_qpu_instr_pack(devinfo, &merge, &packed);
705
706 *result = merge;
707 /* No modifying the real instructions on failure. */
708 assert(ok || (a != result && b != result));
709
710 return ok;
711 }
712
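/**
 * Picks the best DAG head to schedule next, skipping anything that would
 * break a QPU rule (or that can't merge with @prev_inst), and preferring
 * higher-priority instructions and then longer critical-path delays.
 */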
713 static struct schedule_node *
714 choose_instruction_to_schedule(const struct v3d_device_info *devinfo,
715 struct choose_scoreboard *scoreboard,
716 struct list_head *schedule_list,
717 struct schedule_node *prev_inst)
718 {
719 struct schedule_node *chosen = NULL;
720 int chosen_prio = 0;
721
722 /* Don't pair up anything with a thread switch signal -- emit_thrsw()
723 * will handle pairing it along with filling the delay slots.
724 */
725 if (prev_inst) {
726 if (prev_inst->inst->qpu.sig.thrsw)
727 return NULL;
728 }
729
730 list_for_each_entry(struct schedule_node, n, schedule_list, link) {
731 const struct v3d_qpu_instr *inst = &n->inst->qpu;
732
733 /* Don't choose the branch instruction until it's the last one
734 * left. We'll move it up to fit its delay slots after we
735 * choose it.
736 */
737 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH &&
738 !list_is_singular(schedule_list)) {
739 continue;
740 }
741
742 /* "An instruction must not read from a location in physical
743 * regfile A or B that was written to by the previous
744 * instruction."
745 */
746 if (reads_too_soon_after_write(scoreboard, n->inst))
747 continue;
748
749 if (writes_too_soon_after_write(devinfo, scoreboard, n->inst))
750 continue;
751
752 /* "A scoreboard wait must not occur in the first two
753 * instructions of a fragment shader. This is either the
754 * explicit Wait for Scoreboard signal or an implicit wait
755 * with the first tile-buffer read or write instruction."
756 */
757 if (pixel_scoreboard_too_soon(scoreboard, inst))
758 continue;
759
760 /* ldunif and ldvary both write r5, but ldunif does so a tick
761 * sooner. If the ldvary's r5 wasn't used, then ldunif might
762 * otherwise get scheduled so ldunif and ldvary try to update
763 * r5 in the same tick.
764 */
765 if ((inst->sig.ldunif || inst->sig.ldunifa) &&
766 scoreboard->tick == scoreboard->last_ldvary_tick + 1) {
767 continue;
768 }
769
770 /* If we're trying to pair with another instruction, check
771 * that they're compatible.
772 */
773 if (prev_inst) {
774 /* Don't pair up a thread switch signal -- we'll
775 * handle pairing it when we pick it on its own.
776 */
777 if (inst->sig.thrsw)
778 continue;
779
780 if (prev_inst->inst->uniform != -1 &&
781 n->inst->uniform != -1)
782 continue;
783
784 /* Don't merge in something that will lock the TLB.
785 * Hopefully what we have in inst will release some
786 * other instructions, allowing us to delay the
787 * TLB-locking instruction until later.
788 */
789 if (!scoreboard->tlb_locked && qpu_inst_is_tlb(inst))
790 continue;
791
792 struct v3d_qpu_instr merged_inst;
793 if (!qpu_merge_inst(devinfo, &merged_inst,
794 &prev_inst->inst->qpu, inst)) {
795 continue;
796 }
797 }
798
799 int prio = get_instruction_priority(inst);
800
801 /* Found a valid instruction. If nothing better comes along,
802 * this one works.
803 */
804 if (!chosen) {
805 chosen = n;
806 chosen_prio = prio;
807 continue;
808 }
809
810 if (prio > chosen_prio) {
811 chosen = n;
812 chosen_prio = prio;
813 } else if (prio < chosen_prio) {
814 continue;
815 }
816
817 if (n->delay > chosen->delay) {
818 chosen = n;
819 chosen_prio = prio;
820 } else if (n->delay < chosen->delay) {
821 continue;
822 }
823 }
824
825 return chosen;
826 }
827
828 static void
829 update_scoreboard_for_magic_waddr(struct choose_scoreboard *scoreboard,
830 enum v3d_qpu_waddr waddr)
831 {
832 if (v3d_qpu_magic_waddr_is_sfu(waddr))
833 scoreboard->last_sfu_write_tick = scoreboard->tick;
834 }
835
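/**
 * Updates the scoreboard's hazard-tracking state (last physical waddrs,
 * SFU write tick, ldvary tick, TLB lock) for the instruction just added
 * to the block.
 */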
836 static void
837 update_scoreboard_for_chosen(struct choose_scoreboard *scoreboard,
838 const struct v3d_qpu_instr *inst)
839 {
840 scoreboard->last_waddr_add = ~0;
841 scoreboard->last_waddr_mul = ~0;
842
843 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH)
844 return;
845
846 assert(inst->type == V3D_QPU_INSTR_TYPE_ALU);
847
848 if (inst->alu.add.op != V3D_QPU_A_NOP) {
849 if (inst->alu.add.magic_write) {
850 update_scoreboard_for_magic_waddr(scoreboard,
851 inst->alu.add.waddr);
852 } else {
853 scoreboard->last_waddr_add = inst->alu.add.waddr;
854 }
855 }
856
857 if (inst->alu.mul.op != V3D_QPU_M_NOP) {
858 if (inst->alu.mul.magic_write) {
859 update_scoreboard_for_magic_waddr(scoreboard,
860 inst->alu.mul.waddr);
861 } else {
862 scoreboard->last_waddr_mul = inst->alu.mul.waddr;
863 }
864 }
865
866 if (inst->sig.ldvary)
867 scoreboard->last_ldvary_tick = scoreboard->tick;
868
869 if (qpu_inst_is_tlb(inst))
870 scoreboard->tlb_locked = true;
871 }
872
873 static void
874 dump_state(const struct v3d_device_info *devinfo,
875 struct list_head *schedule_list)
876 {
877 list_for_each_entry(struct schedule_node, n, schedule_list, link) {
878 fprintf(stderr, " t=%4d: ", n->unblocked_time);
879 v3d_qpu_dump(devinfo, &n->inst->qpu);
880 fprintf(stderr, "\n");
881
882 for (int i = 0; i < n->child_count; i++) {
883 struct schedule_node *child = n->children[i].node;
884 if (!child)
885 continue;
886
887 fprintf(stderr, " - ");
888 v3d_qpu_dump(devinfo, &child->inst->qpu);
889 fprintf(stderr, " (%d parents, %c)\n",
890 child->parent_count,
891 n->children[i].write_after_read ? 'w' : 'r');
892 }
893 }
894 }
895
896 static uint32_t magic_waddr_latency(enum v3d_qpu_waddr waddr,
897 const struct v3d_qpu_instr *after)
898 {
899 /* Apply some huge latency between texture fetch requests and getting
900 * their results back.
901 *
902 * FIXME: This is actually pretty bogus. If we do:
903 *
904 * mov tmu0_s, a
905 * <a bit of math>
906 * mov tmu0_s, b
907 * load_tmu0
908 * <more math>
909 * load_tmu0
910 *
911 * we count that as worse than
912 *
913 * mov tmu0_s, a
914 * mov tmu0_s, b
915 * <lots of math>
916 * load_tmu0
917 * <more math>
918 * load_tmu0
919 *
920 * because we associate the first load_tmu0 with the *second* tmu0_s.
921 */
922 if (v3d_qpu_magic_waddr_is_tmu(waddr) && after->sig.ldtmu)
923 return 100;
924
925 /* Assume that anything depending on us is consuming the SFU result. */
926 if (v3d_qpu_magic_waddr_is_sfu(waddr))
927 return 3;
928
929 return 1;
930 }
931
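/**
 * Returns the estimated cycles between @before being scheduled and @after
 * being able to consume its result, based on the magic waddrs written by
 * @before.
 */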
932 static uint32_t
933 instruction_latency(struct schedule_node *before, struct schedule_node *after)
934 {
935 const struct v3d_qpu_instr *before_inst = &before->inst->qpu;
936 const struct v3d_qpu_instr *after_inst = &after->inst->qpu;
937 uint32_t latency = 1;
938
939 if (before_inst->type != V3D_QPU_INSTR_TYPE_ALU ||
940 after_inst->type != V3D_QPU_INSTR_TYPE_ALU)
941 return latency;
942
943 if (before_inst->alu.add.magic_write) {
944 latency = MAX2(latency,
945 magic_waddr_latency(before_inst->alu.add.waddr,
946 after_inst));
947 }
948
949 if (before_inst->alu.mul.magic_write) {
950 latency = MAX2(latency,
951 magic_waddr_latency(before_inst->alu.mul.waddr,
952 after_inst));
953 }
954
955 return latency;
956 }
957
958 /** Recursive computation of the delay member of a node. */
959 static void
960 compute_delay(struct schedule_node *n)
961 {
962 if (!n->child_count) {
963 n->delay = 1;
964 } else {
965 for (int i = 0; i < n->child_count; i++) {
966 if (!n->children[i].node->delay)
967 compute_delay(n->children[i].node);
968 n->delay = MAX2(n->delay,
969 n->children[i].node->delay +
970 instruction_latency(n, n->children[i].node));
971 }
972 }
973 }
974
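/**
 * Removes the scheduled node's outgoing DAG edges, updating each child's
 * unblocked time and adding newly unblocked children to the schedulable
 * list.  With @war_only set, only write-after-read edges are released, so
 * a writer can be scheduled alongside the last reader of its destination.
 */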
975 static void
976 mark_instruction_scheduled(struct list_head *schedule_list,
977 uint32_t time,
978 struct schedule_node *node,
979 bool war_only)
980 {
981 if (!node)
982 return;
983
984 for (int i = node->child_count - 1; i >= 0; i--) {
985 struct schedule_node *child =
986 node->children[i].node;
987
988 if (!child)
989 continue;
990
991 if (war_only && !node->children[i].write_after_read)
992 continue;
993
994 /* If the requirement is only that the node not appear before
995 * the last read of its destination, then it can be scheduled
996 * immediately after (or paired with!) the thing reading the
997 * destination.
998 */
999 uint32_t latency = 0;
1000 if (!war_only) {
1001 latency = instruction_latency(node,
1002 node->children[i].node);
1003 }
1004
1005 child->unblocked_time = MAX2(child->unblocked_time,
1006 time + latency);
1007 child->parent_count--;
1008 if (child->parent_count == 0)
1009 list_add(&child->link, schedule_list);
1010
1011 node->children[i].node = NULL;
1012 }
1013 }
1014
1015 static void
1016 insert_scheduled_instruction(struct v3d_compile *c,
1017 struct qblock *block,
1018 struct choose_scoreboard *scoreboard,
1019 struct qinst *inst)
1020 {
1021 list_addtail(&inst->link, &block->instructions);
1022
1023 update_scoreboard_for_chosen(scoreboard, &inst->qpu);
1024 c->qpu_inst_count++;
1025 scoreboard->tick++;
1026 }
1027
1028 static struct qinst *
1029 vir_nop()
1030 {
1031 struct qreg undef = { QFILE_NULL, 0 };
1032 struct qinst *qinst = vir_add_inst(V3D_QPU_A_NOP, undef, undef, undef);
1033
1034 return qinst;
1035 }
1036
1037 static void
1038 emit_nop(struct v3d_compile *c, struct qblock *block,
1039 struct choose_scoreboard *scoreboard)
1040 {
1041 insert_scheduled_instruction(c, block, scoreboard, vir_nop());
1042 }
1043
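/**
 * Checks whether an instruction may occupy the given THRSW delay slot:
 * no TLB access in the last slot, no uniform reads past the first slot,
 * no VPM or ldvary use, no physical register writes, and no reads of
 * rf0-2 (which fragment shader setup may overwrite).
 */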
1044 static bool
1045 qpu_instruction_valid_in_thrend_slot(struct v3d_compile *c,
1046 const struct qinst *qinst, int slot)
1047 {
1048 const struct v3d_qpu_instr *inst = &qinst->qpu;
1049
1050 /* Only TLB Z writes are prohibited in the last slot, but we don't
1051 * have those flagged so prohibit all TLB ops for now.
1052 */
1053 if (slot == 2 && qpu_inst_is_tlb(inst))
1054 return false;
1055
1056 if (slot > 0 && qinst->uniform != ~0)
1057 return false;
1058
1059 if (v3d_qpu_uses_vpm(inst))
1060 return false;
1061
1062 if (inst->sig.ldvary)
1063 return false;
1064
1065 if (inst->type == V3D_QPU_INSTR_TYPE_ALU) {
1066 /* No writing physical registers at the end. */
1067 if (!inst->alu.add.magic_write ||
1068 !inst->alu.mul.magic_write) {
1069 return false;
1070 }
1071
1072 if (c->devinfo->ver < 40 && inst->alu.add.op == V3D_QPU_A_SETMSF)
1073 return false;
1074
1075 /* RF0-2 might be overwritten during the delay slots by
1076 * fragment shader setup.
1077 */
1078 if (inst->raddr_a < 3 &&
1079 (inst->alu.add.a == V3D_QPU_MUX_A ||
1080 inst->alu.add.b == V3D_QPU_MUX_A ||
1081 inst->alu.mul.a == V3D_QPU_MUX_A ||
1082 inst->alu.mul.b == V3D_QPU_MUX_A)) {
1083 return false;
1084 }
1085
1086 if (inst->raddr_b < 3 &&
1087 !inst->sig.small_imm &&
1088 (inst->alu.add.a == V3D_QPU_MUX_B ||
1089 inst->alu.add.b == V3D_QPU_MUX_B ||
1090 inst->alu.mul.a == V3D_QPU_MUX_B ||
1091 inst->alu.mul.b == V3D_QPU_MUX_B)) {
1092 return false;
1093 }
1094 }
1095
1096 return true;
1097 }
1098
1099 static bool
1100 valid_thrend_sequence(struct v3d_compile *c,
1101 struct qinst *qinst, int instructions_in_sequence)
1102 {
1103 for (int slot = 0; slot < instructions_in_sequence; slot++) {
1104 if (!qpu_instruction_valid_in_thrend_slot(c, qinst, slot))
1105 return false;
1106
1107 /* Note that the list is circular, so we can only do this up
1108 * to instructions_in_sequence.
1109 */
1110 qinst = (struct qinst *)qinst->link.next;
1111 }
1112
1113 return true;
1114 }
1115
1116 /**
1117 * Emits a THRSW signal in the stream, trying to move it up to pair with
1118 * another instruction.
1119 */
1120 static int
1121 emit_thrsw(struct v3d_compile *c,
1122 struct qblock *block,
1123 struct choose_scoreboard *scoreboard,
1124 struct qinst *inst)
1125 {
1126 int time = 0;
1127
1128 /* There should be nothing in a thrsw inst being scheduled other than
1129 * the signal bits.
1130 */
1131 assert(inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU);
1132 assert(inst->qpu.alu.add.op == V3D_QPU_A_NOP);
1133 assert(inst->qpu.alu.mul.op == V3D_QPU_M_NOP);
1134
1135 /* Find how far back into previous instructions we can put the THRSW. */
1136 int slots_filled = 0;
1137 struct qinst *merge_inst = NULL;
1138 vir_for_each_inst_rev(prev_inst, block) {
1139 struct v3d_qpu_sig sig = prev_inst->qpu.sig;
1140 sig.thrsw = true;
1141 uint32_t packed_sig;
1142
1143 if (!v3d_qpu_sig_pack(c->devinfo, &sig, &packed_sig))
1144 break;
1145
1146 if (!valid_thrend_sequence(c, prev_inst, slots_filled + 1))
1147 break;
1148
1149 merge_inst = prev_inst;
1150 if (++slots_filled == 3)
1151 break;
1152 }
1153
1154 if (merge_inst) {
1155 merge_inst->qpu.sig.thrsw = true;
1156 } else {
1157 insert_scheduled_instruction(c, block, scoreboard, inst);
1158 time++;
1159 slots_filled++;
1160 }
1161
1162 /* Insert any extra delay slot NOPs we need. */
1163 for (int i = 0; i < 3 - slots_filled; i++) {
1164 emit_nop(c, block, scoreboard);
1165 time++;
1166 }
1167
1168 /* If we merged our THRSW into another instruction, free the qinst
1169 * that never made it into the block's instruction list.
1170 */
1171 if (merge_inst)
1172 free(inst);
1173
1174 return time;
1175 }
1176
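/**
 * The main list-scheduling loop for a block: repeatedly picks a DAG head
 * (or a NOP if nothing is ready), tries to merge a second instruction
 * into it, rewrites the uniform index for the new instruction order, and
 * emits NOPs for branch and thread-switch delay slots.
 */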
1177 static uint32_t
1178 schedule_instructions(struct v3d_compile *c,
1179 struct choose_scoreboard *scoreboard,
1180 struct qblock *block,
1181 struct list_head *schedule_list,
1182 enum quniform_contents *orig_uniform_contents,
1183 uint32_t *orig_uniform_data,
1184 uint32_t *next_uniform)
1185 {
1186 const struct v3d_device_info *devinfo = c->devinfo;
1187 uint32_t time = 0;
1188
1189 if (debug) {
1190 fprintf(stderr, "initial deps:\n");
1191 dump_state(devinfo, schedule_list);
1192 fprintf(stderr, "\n");
1193 }
1194
1195 /* Remove non-DAG heads from the list. */
1196 list_for_each_entry_safe(struct schedule_node, n, schedule_list, link) {
1197 if (n->parent_count != 0)
1198 list_del(&n->link);
1199 }
1200
1201 while (!list_empty(schedule_list)) {
1202 struct schedule_node *chosen =
1203 choose_instruction_to_schedule(devinfo,
1204 scoreboard,
1205 schedule_list,
1206 NULL);
1207 struct schedule_node *merge = NULL;
1208
1209 /* If there are no valid instructions to schedule, drop a NOP
1210 * in.
1211 */
1212 struct qinst *qinst = chosen ? chosen->inst : vir_nop();
1213 struct v3d_qpu_instr *inst = &qinst->qpu;
1214
1215 if (debug) {
1216 fprintf(stderr, "t=%4d: current list:\n",
1217 time);
1218 dump_state(devinfo, schedule_list);
1219 fprintf(stderr, "t=%4d: chose: ", time);
1220 v3d_qpu_dump(devinfo, inst);
1221 fprintf(stderr, "\n");
1222 }
1223
1224 /* Schedule this instruction onto the QPU list. Also try to
1225 * find an instruction to pair with it.
1226 */
1227 if (chosen) {
1228 time = MAX2(chosen->unblocked_time, time);
1229 list_del(&chosen->link);
1230 mark_instruction_scheduled(schedule_list, time,
1231 chosen, true);
1232
1233 merge = choose_instruction_to_schedule(devinfo,
1234 scoreboard,
1235 schedule_list,
1236 chosen);
1237 if (merge) {
1238 time = MAX2(merge->unblocked_time, time);
1239 list_del(&merge->link);
1240 (void)qpu_merge_inst(devinfo, inst,
1241 inst, &merge->inst->qpu);
1242 if (merge->inst->uniform != -1) {
1243 chosen->inst->uniform =
1244 merge->inst->uniform;
1245 }
1246
1247 if (debug) {
1248 fprintf(stderr, "t=%4d: merging: ",
1249 time);
1250 v3d_qpu_dump(devinfo, &merge->inst->qpu);
1251 fprintf(stderr, "\n");
1252 fprintf(stderr, " result: ");
1253 v3d_qpu_dump(devinfo, inst);
1254 fprintf(stderr, "\n");
1255 }
1256 }
1257 }
1258
1259 /* Update the uniform index for the rewritten location --
1260 * branch target updating will still need to change
1261 * c->uniform_data[] using this index.
1262 */
1263 if (qinst->uniform != -1) {
1264 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH)
1265 block->branch_uniform = *next_uniform;
1266
1267 c->uniform_data[*next_uniform] =
1268 orig_uniform_data[qinst->uniform];
1269 c->uniform_contents[*next_uniform] =
1270 orig_uniform_contents[qinst->uniform];
1271 qinst->uniform = *next_uniform;
1272 (*next_uniform)++;
1273 }
1274
1275 if (debug) {
1276 fprintf(stderr, "\n");
1277 }
1278
1279 /* Now that we've scheduled a new instruction, some of its
1280 * children can be promoted to the list of instructions ready to
1281 * be scheduled. Update the children's unblocked time for this
1282 * DAG edge as we do so.
1283 */
1284 mark_instruction_scheduled(schedule_list, time, chosen, false);
1285
1286 if (merge) {
1287 mark_instruction_scheduled(schedule_list, time, merge,
1288 false);
1289
1290 /* The merged VIR instruction doesn't get re-added to the
1291 * block, so free it now.
1292 */
1293 free(merge->inst);
1294 }
1295
1296 if (0 && inst->sig.thrsw) {
1297 /* XXX emit_thrsw(c, scoreboard, qinst); */
1298 } else {
1299 c->qpu_inst_count++;
1300 list_addtail(&qinst->link, &block->instructions);
1301 update_scoreboard_for_chosen(scoreboard, inst);
1302 }
1303
1304 scoreboard->tick++;
1305 time++;
1306
1307 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH ||
1308 inst->sig.thrsw /* XXX */) {
1309 block->branch_qpu_ip = c->qpu_inst_count - 1;
1310 /* Fill the delay slots.
1311 *
1312 * We should fill these with actual instructions,
1313 * instead, but that will probably need to be done
1314 * after this, once we know what the leading
1315 * instructions of the successors are (so we can
1316 * handle A/B register file write latency)
1317 */
1318 /* XXX: scoreboard */
1319 int slots = (inst->type == V3D_QPU_INSTR_TYPE_BRANCH ?
1320 3 : 2);
1321 for (int i = 0; i < slots; i++) {
1322 struct qinst *nop = vir_nop();
1323 list_addtail(&nop->link, &block->instructions);
1324
1325 update_scoreboard_for_chosen(scoreboard,
1326 &nop->qpu);
1327 c->qpu_inst_count++;
1328 scoreboard->tick++;
1329 time++;
1330 }
1331 }
1332 }
1333
1334 return time;
1335 }
1336
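/**
 * Schedules a single block: wraps each instruction in a schedule_node,
 * computes the forward and reverse dependency DAG and per-node delays,
 * then runs the list scheduler over it.
 */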
1337 static uint32_t
1338 qpu_schedule_instructions_block(struct v3d_compile *c,
1339 struct choose_scoreboard *scoreboard,
1340 struct qblock *block,
1341 enum quniform_contents *orig_uniform_contents,
1342 uint32_t *orig_uniform_data,
1343 uint32_t *next_uniform)
1344 {
1345 void *mem_ctx = ralloc_context(NULL);
1346 struct list_head schedule_list;
1347
1348 list_inithead(&schedule_list);
1349
1350 /* Wrap each instruction in a scheduler structure. */
1351 while (!list_empty(&block->instructions)) {
1352 struct qinst *qinst = (struct qinst *)block->instructions.next;
1353 struct schedule_node *n =
1354 rzalloc(mem_ctx, struct schedule_node);
1355
1356 n->inst = qinst;
1357
1358 list_del(&qinst->link);
1359 list_addtail(&n->link, &schedule_list);
1360 }
1361
1362 calculate_forward_deps(c, &schedule_list);
1363 calculate_reverse_deps(c, &schedule_list);
1364
1365 list_for_each_entry(struct schedule_node, n, &schedule_list, link) {
1366 compute_delay(n);
1367 }
1368
1369 uint32_t cycles = schedule_instructions(c, scoreboard, block,
1370 &schedule_list,
1371 orig_uniform_contents,
1372 orig_uniform_data,
1373 next_uniform);
1374
1375 ralloc_free(mem_ctx);
1376
1377 return cycles;
1378 }
1379
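/**
 * Patches each block's branch instruction (located just before its three
 * delay slots) with the relative QPU instruction offset and uniform
 * stream offset of its jump target.
 */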
1380 static void
1381 qpu_set_branch_targets(struct v3d_compile *c)
1382 {
1383 vir_for_each_block(block, c) {
1384 /* The end block of the program has no branch. */
1385 if (!block->successors[0])
1386 continue;
1387
1388 /* If there was no branch instruction, then the successor
1389 * block must follow immediately after this one.
1390 */
1391 if (block->branch_qpu_ip == ~0) {
1392 assert(block->end_qpu_ip + 1 ==
1393 block->successors[0]->start_qpu_ip);
1394 continue;
1395 }
1396
1397 /* Walk back through the delay slots to find the branch
1398 * instr.
1399 */
1400 struct list_head *entry = block->instructions.prev;
1401 for (int i = 0; i < 3; i++)
1402 entry = entry->prev;
1403 struct qinst *branch = container_of(entry, branch, link);
1404 assert(branch->qpu.type == V3D_QPU_INSTR_TYPE_BRANCH);
1405
1406 /* Make sure that the if-we-don't-jump
1407 * successor was scheduled just after the
1408 * delay slots.
1409 */
1410 assert(!block->successors[1] ||
1411 block->successors[1]->start_qpu_ip ==
1412 block->branch_qpu_ip + 4);
1413
1414 branch->qpu.branch.offset =
1415 ((block->successors[0]->start_qpu_ip -
1416 (block->branch_qpu_ip + 4)) *
1417 sizeof(uint64_t));
1418
1419 /* Set up the relative offset to jump in the
1420 * uniform stream.
1421 *
1422 * Use a temporary here, because
1423 * uniform_data[inst->uniform] may be shared
1424 * between multiple instructions.
1425 */
1426 assert(c->uniform_contents[branch->uniform] == QUNIFORM_CONSTANT);
1427 c->uniform_data[branch->uniform] =
1428 (block->successors[0]->start_uniform -
1429 (block->branch_uniform + 1)) * 4;
1430 }
1431 }
1432
1433 uint32_t
1434 v3d_qpu_schedule_instructions(struct v3d_compile *c)
1435 {
1436 const struct v3d_device_info *devinfo = c->devinfo;
1437 struct qblock *end_block = list_last_entry(&c->blocks,
1438 struct qblock, link);
1439
1440 /* We reorder the uniforms as we schedule instructions, so save the
1441 * old data off and replace it.
1442 */
1443 uint32_t *uniform_data = c->uniform_data;
1444 enum quniform_contents *uniform_contents = c->uniform_contents;
1445 c->uniform_contents = ralloc_array(c, enum quniform_contents,
1446 c->num_uniforms);
1447 c->uniform_data = ralloc_array(c, uint32_t, c->num_uniforms);
1448 c->uniform_array_size = c->num_uniforms;
1449 uint32_t next_uniform = 0;
1450
1451 struct choose_scoreboard scoreboard;
1452 memset(&scoreboard, 0, sizeof(scoreboard));
1453 scoreboard.last_waddr_add = ~0;
1454 scoreboard.last_waddr_mul = ~0;
1455 scoreboard.last_ldvary_tick = -10;
1456 scoreboard.last_sfu_write_tick = -10;
1457 scoreboard.last_uniforms_reset_tick = -10;
1458
1459 if (debug) {
1460 fprintf(stderr, "Pre-schedule instructions\n");
1461 vir_for_each_block(block, c) {
1462 fprintf(stderr, "BLOCK %d\n", block->index);
1463 list_for_each_entry(struct qinst, qinst,
1464 &block->instructions, link) {
1465 v3d_qpu_dump(devinfo, &qinst->qpu);
1466 fprintf(stderr, "\n");
1467 }
1468 }
1469 fprintf(stderr, "\n");
1470 }
1471
1472 uint32_t cycles = 0;
1473 vir_for_each_block(block, c) {
1474 block->start_qpu_ip = c->qpu_inst_count;
1475 block->branch_qpu_ip = ~0;
1476 block->start_uniform = next_uniform;
1477
1478 cycles += qpu_schedule_instructions_block(c,
1479 &scoreboard,
1480 block,
1481 uniform_contents,
1482 uniform_data,
1483 &next_uniform);
1484
1485 block->end_qpu_ip = c->qpu_inst_count - 1;
1486 }
1487
1488 /* Emit the program-end THRSW instruction. */
1489 struct qinst *thrsw = vir_nop();
1490 thrsw->qpu.sig.thrsw = true;
1491 emit_thrsw(c, end_block, &scoreboard, thrsw);
1492
1493 qpu_set_branch_targets(c);
1494
1495 assert(next_uniform == c->num_uniforms);
1496
1497 return cycles;
1498 }