v3d: Make sure that a thrsw doesn't split a multop from its umul24.
[mesa.git] src/broadcom/compiler/qpu_schedule.c
1 /*
2 * Copyright © 2010 Intel Corporation
3 * Copyright © 2014-2017 Broadcom
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 /**
26 * @file
27 *
28 * The basic model of the list scheduler is to take a basic block, compute a
29 * DAG of the dependencies, and make a list of the DAG heads. Heuristically
30 * pick a DAG head, then put all the children that are now DAG heads into the
31 * list of things to schedule.
32 *
33 * The goal of scheduling here is to pack pairs of operations together in a
34 * single QPU instruction.
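 *
 * Per block, calculate_forward_deps() and calculate_reverse_deps() build the
 * dependency DAG, compute_delay() estimates the critical path below each
 * node, and schedule_instructions() greedily picks ready nodes and tries to
 * pair each one with another instruction.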
35 */
36
37 #include "qpu/qpu_disasm.h"
38 #include "v3d_compiler.h"
39 #include "util/ralloc.h"
40
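/* Set to true to dump the dependency DAG and scheduling decisions to stderr. */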
41 static bool debug;
42
43 struct schedule_node_child;
44
45 struct schedule_node {
46 struct list_head link;
47 struct qinst *inst;
48 struct schedule_node_child *children;
49 uint32_t child_count;
50 uint32_t child_array_size;
51 uint32_t parent_count;
52
53 /* Maximum over this node's parents of their scheduled time plus instruction_latency(). */
54 uint32_t unblocked_time;
55
56 /**
57 * Minimum number of cycles from scheduling this instruction until the
58 * end of the program, based on the slowest dependency chain through
59 * the children.
60 */
61 uint32_t delay;
62
63 /**
64 * Cycles between this instruction being scheduled and when its result
65 * can be consumed.
66 */
67 uint32_t latency;
68 };
69
70 struct schedule_node_child {
71 struct schedule_node *node;
72 bool write_after_read;
73 };
74
75 /* When walking the instructions in reverse, we need to swap before/after in
76 * add_dep().
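 *
 * The forward walk produces the read-after-write and write-after-write
 * edges; the reverse walk (with the swap) produces the write-after-read
 * edges.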
77 */
78 enum direction { F, R };
79
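/* Per-walk dependency-tracking state: the last scheduling node to touch each
 * register, flag, or peripheral that a later instruction might conflict with.
 */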
80 struct schedule_state {
81 const struct v3d_device_info *devinfo;
82 struct schedule_node *last_r[6];
83 struct schedule_node *last_rf[64];
84 struct schedule_node *last_sf;
85 struct schedule_node *last_vpm_read;
86 struct schedule_node *last_tmu_write;
87 struct schedule_node *last_tmu_config;
88 struct schedule_node *last_tlb;
89 struct schedule_node *last_vpm;
90 struct schedule_node *last_unif;
91 struct schedule_node *last_rtop;
92 enum direction dir;
93 /* Estimated cycle when the current instruction would start. */
94 uint32_t time;
95 };
96
97 static void
98 add_dep(struct schedule_state *state,
99 struct schedule_node *before,
100 struct schedule_node *after,
101 bool write)
102 {
103 bool write_after_read = !write && state->dir == R;
104
105 if (!before || !after)
106 return;
107
108 assert(before != after);
109
110 if (state->dir == R) {
111 struct schedule_node *t = before;
112 before = after;
113 after = t;
114 }
115
116 for (int i = 0; i < before->child_count; i++) {
117 if (before->children[i].node == after &&
118 (before->children[i].write_after_read == write_after_read)) {
119 return;
120 }
121 }
122
123 if (before->child_array_size <= before->child_count) {
124 before->child_array_size = MAX2(before->child_array_size * 2, 16);
125 before->children = reralloc(before, before->children,
126 struct schedule_node_child,
127 before->child_array_size);
128 }
129
130 before->children[before->child_count].node = after;
131 before->children[before->child_count].write_after_read =
132 write_after_read;
133 before->child_count++;
134 after->parent_count++;
135 }
136
137 static void
138 add_read_dep(struct schedule_state *state,
139 struct schedule_node *before,
140 struct schedule_node *after)
141 {
142 add_dep(state, before, after, false);
143 }
144
145 static void
146 add_write_dep(struct schedule_state *state,
147 struct schedule_node **before,
148 struct schedule_node *after)
149 {
150 add_dep(state, *before, after, true);
151 *before = after;
152 }
153
154 static bool
155 qpu_inst_is_tlb(const struct v3d_qpu_instr *inst)
156 {
157 if (inst->type != V3D_QPU_INSTR_TYPE_ALU)
158 return false;
159
160 if (inst->alu.add.magic_write &&
161 (inst->alu.add.waddr == V3D_QPU_WADDR_TLB ||
162 inst->alu.add.waddr == V3D_QPU_WADDR_TLBU))
163 return true;
164
165 if (inst->alu.mul.magic_write &&
166 (inst->alu.mul.waddr == V3D_QPU_WADDR_TLB ||
167 inst->alu.mul.waddr == V3D_QPU_WADDR_TLBU))
168 return true;
169
170 return false;
171 }
172
173 static void
174 process_mux_deps(struct schedule_state *state, struct schedule_node *n,
175 enum v3d_qpu_mux mux)
176 {
177 switch (mux) {
178 case V3D_QPU_MUX_A:
179 add_read_dep(state, state->last_rf[n->inst->qpu.raddr_a], n);
180 break;
181 case V3D_QPU_MUX_B:
182 add_read_dep(state, state->last_rf[n->inst->qpu.raddr_b], n);
183 break;
184 default:
185 add_read_dep(state, state->last_r[mux - V3D_QPU_MUX_R0], n);
186 break;
187 }
188 }
189
190
191 static void
192 process_waddr_deps(struct schedule_state *state, struct schedule_node *n,
193 uint32_t waddr, bool magic)
194 {
195 if (!magic) {
196 add_write_dep(state, &state->last_rf[waddr], n);
197 } else if (v3d_qpu_magic_waddr_is_tmu(waddr)) {
198 add_write_dep(state, &state->last_tmu_write, n);
199 switch (waddr) {
200 case V3D_QPU_WADDR_TMUS:
201 case V3D_QPU_WADDR_TMUSCM:
202 case V3D_QPU_WADDR_TMUSF:
203 case V3D_QPU_WADDR_TMUSLOD:
204 add_write_dep(state, &state->last_tmu_config, n);
205 break;
206 default:
207 break;
208 }
209 } else if (v3d_qpu_magic_waddr_is_sfu(waddr)) {
210 /* Handled by v3d_qpu_writes_r4() check. */
211 } else {
212 switch (waddr) {
213 case V3D_QPU_WADDR_R0:
214 case V3D_QPU_WADDR_R1:
215 case V3D_QPU_WADDR_R2:
216 add_write_dep(state,
217 &state->last_r[waddr - V3D_QPU_WADDR_R0],
218 n);
219 break;
220 case V3D_QPU_WADDR_R3:
221 case V3D_QPU_WADDR_R4:
222 case V3D_QPU_WADDR_R5:
223 /* Handled by v3d_qpu_writes_r*() checks below. */
224 break;
225
226 case V3D_QPU_WADDR_VPM:
227 case V3D_QPU_WADDR_VPMU:
228 add_write_dep(state, &state->last_vpm, n);
229 break;
230
231 case V3D_QPU_WADDR_TLB:
232 case V3D_QPU_WADDR_TLBU:
233 add_write_dep(state, &state->last_tlb, n);
234 break;
235
236 case V3D_QPU_WADDR_NOP:
237 break;
238
239 default:
240 fprintf(stderr, "Unknown waddr %d\n", waddr);
241 abort();
242 }
243 }
244 }
245
246 static void
247 process_cond_deps(struct schedule_state *state, struct schedule_node *n,
248 enum v3d_qpu_cond cond)
249 {
250 if (cond != V3D_QPU_COND_NONE)
251 add_read_dep(state, state->last_sf, n);
252 }
253
254 static void
255 process_pf_deps(struct schedule_state *state, struct schedule_node *n,
256 enum v3d_qpu_pf pf)
257 {
258 if (pf != V3D_QPU_PF_NONE)
259 add_write_dep(state, &state->last_sf, n);
260 }
261
262 static void
263 process_uf_deps(struct schedule_state *state, struct schedule_node *n,
264 enum v3d_qpu_uf uf)
265 {
266 if (uf != V3D_QPU_UF_NONE)
267 add_write_dep(state, &state->last_sf, n);
268 }
269
270 /**
271 * Common code for dependencies that need to be tracked both forward and
272 * backward.
273 *
274 * This is for things like "all reads of r4 have to happen between the r4
275 * writes that surround them".
276 */
277 static void
278 calculate_deps(struct schedule_state *state, struct schedule_node *n)
279 {
280 const struct v3d_device_info *devinfo = state->devinfo;
281 struct qinst *qinst = n->inst;
282 struct v3d_qpu_instr *inst = &qinst->qpu;
283 /* If the input and output segments are shared, then all VPM reads of
284 * a location need to happen before all writes. We handle this by
285 * serializing all VPM operations for now.
286 */
287 bool separate_vpm_segment = false;
288
289 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH) {
290 if (inst->branch.cond != V3D_QPU_BRANCH_COND_ALWAYS)
291 add_read_dep(state, state->last_sf, n);
292
293 /* XXX: BDI */
294 /* XXX: BDU */
295 /* XXX: ub */
296 /* XXX: raddr_a */
297
298 add_write_dep(state, &state->last_unif, n);
299 return;
300 }
301
302 assert(inst->type == V3D_QPU_INSTR_TYPE_ALU);
303
304 /* XXX: LOAD_IMM */
305
306 if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 0)
307 process_mux_deps(state, n, inst->alu.add.a);
308 if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 1)
309 process_mux_deps(state, n, inst->alu.add.b);
310
311 if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 0)
312 process_mux_deps(state, n, inst->alu.mul.a);
313 if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 1)
314 process_mux_deps(state, n, inst->alu.mul.b);
315
316 switch (inst->alu.add.op) {
317 case V3D_QPU_A_VPMSETUP:
318 /* Could distinguish read/write by unpacking the uniform. */
319 add_write_dep(state, &state->last_vpm, n);
320 add_write_dep(state, &state->last_vpm_read, n);
321 break;
322
323 case V3D_QPU_A_STVPMV:
324 case V3D_QPU_A_STVPMD:
325 case V3D_QPU_A_STVPMP:
326 add_write_dep(state, &state->last_vpm, n);
327 break;
328
329 case V3D_QPU_A_LDVPMV_IN:
330 case V3D_QPU_A_LDVPMD_IN:
331 case V3D_QPU_A_LDVPMG_IN:
332 case V3D_QPU_A_LDVPMP:
333 if (!separate_vpm_segment)
334 add_write_dep(state, &state->last_vpm, n);
335 break;
336
337 case V3D_QPU_A_VPMWT:
338 add_read_dep(state, state->last_vpm, n);
339 break;
340
341 case V3D_QPU_A_MSF:
342 add_read_dep(state, state->last_tlb, n);
343 break;
344
345 case V3D_QPU_A_SETMSF:
346 case V3D_QPU_A_SETREVF:
347 add_write_dep(state, &state->last_tlb, n);
348 break;
349
350 case V3D_QPU_A_FLAPUSH:
351 case V3D_QPU_A_FLBPUSH:
352 case V3D_QPU_A_VFLA:
353 case V3D_QPU_A_VFLNA:
354 case V3D_QPU_A_VFLB:
355 case V3D_QPU_A_VFLNB:
356 add_read_dep(state, state->last_sf, n);
357 break;
358
359 case V3D_QPU_A_FLPOP:
360 add_write_dep(state, &state->last_sf, n);
361 break;
362
363 default:
364 break;
365 }
366
367 switch (inst->alu.mul.op) {
368 case V3D_QPU_M_MULTOP:
369 case V3D_QPU_M_UMUL24:
370 /* MULTOP sets rtop, and UMUL24 implicitly reads rtop and
371 * resets it to 0. We could possibly reorder umul24s relative
372 * to each other, but for now just keep all the MUL parts in
373 * order.
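 * (This also keeps a thread switch from splitting a MULTOP from its
 * UMUL24: the thrsw handling below adds a write dep on rtop, since the
 * switch leaves rtop undefined.)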
374 */
375 add_write_dep(state, &state->last_rtop, n);
376 break;
377 default:
378 break;
379 }
380
381 if (inst->alu.add.op != V3D_QPU_A_NOP) {
382 process_waddr_deps(state, n, inst->alu.add.waddr,
383 inst->alu.add.magic_write);
384 }
385 if (inst->alu.mul.op != V3D_QPU_M_NOP) {
386 process_waddr_deps(state, n, inst->alu.mul.waddr,
387 inst->alu.mul.magic_write);
388 }
389 if (v3d_qpu_sig_writes_address(devinfo, &inst->sig)) {
390 process_waddr_deps(state, n, inst->sig_addr,
391 inst->sig_magic);
392 }
393
394 if (v3d_qpu_writes_r3(devinfo, inst))
395 add_write_dep(state, &state->last_r[3], n);
396 if (v3d_qpu_writes_r4(devinfo, inst))
397 add_write_dep(state, &state->last_r[4], n);
398 if (v3d_qpu_writes_r5(devinfo, inst))
399 add_write_dep(state, &state->last_r[5], n);
400
401 if (inst->sig.thrsw) {
402 /* All accumulator contents and flags are undefined after the
403 * switch.
404 */
405 for (int i = 0; i < ARRAY_SIZE(state->last_r); i++)
406 add_write_dep(state, &state->last_r[i], n);
407 add_write_dep(state, &state->last_sf, n);
408 add_write_dep(state, &state->last_rtop, n);
409
410 /* Scoreboard-locking operations have to stay after the last
411 * thread switch.
412 */
413 add_write_dep(state, &state->last_tlb, n);
414
415 add_write_dep(state, &state->last_tmu_write, n);
416 add_write_dep(state, &state->last_tmu_config, n);
417 }
418
419 if (v3d_qpu_waits_on_tmu(inst)) {
420 /* TMU loads are coming from a FIFO, so ordering is important.
421 */
422 add_write_dep(state, &state->last_tmu_write, n);
423 }
424
425 if (inst->sig.wrtmuc)
426 add_write_dep(state, &state->last_tmu_config, n);
427
428 if (inst->sig.ldtlb | inst->sig.ldtlbu)
429 add_read_dep(state, state->last_tlb, n);
430
431 if (inst->sig.ldvpm) {
432 add_write_dep(state, &state->last_vpm_read, n);
433
434 /* At least for now, we're doing shared I/O segments, so queue
435 * all writes after all reads.
436 */
437 if (!separate_vpm_segment)
438 add_write_dep(state, &state->last_vpm, n);
439 }
440
441 /* inst->sig.ldunif or sideband uniform read */
442 if (qinst->uniform != ~0)
443 add_write_dep(state, &state->last_unif, n);
444
445 process_cond_deps(state, n, inst->flags.ac);
446 process_cond_deps(state, n, inst->flags.mc);
447 process_pf_deps(state, n, inst->flags.apf);
448 process_pf_deps(state, n, inst->flags.mpf);
449 process_uf_deps(state, n, inst->flags.auf);
450 process_uf_deps(state, n, inst->flags.muf);
451 }
452
453 static void
454 calculate_forward_deps(struct v3d_compile *c, struct list_head *schedule_list)
455 {
456 struct schedule_state state;
457
458 memset(&state, 0, sizeof(state));
459 state.devinfo = c->devinfo;
460 state.dir = F;
461
462 list_for_each_entry(struct schedule_node, node, schedule_list, link)
463 calculate_deps(&state, node);
464 }
465
466 static void
467 calculate_reverse_deps(struct v3d_compile *c, struct list_head *schedule_list)
468 {
469 struct list_head *node;
470 struct schedule_state state;
471
472 memset(&state, 0, sizeof(state));
473 state.devinfo = c->devinfo;
474 state.dir = R;
475
476 for (node = schedule_list->prev; schedule_list != node; node = node->prev) {
477 calculate_deps(&state, (struct schedule_node *)node);
478 }
479 }
480
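/* Emission-time state used to enforce instruction-placement rules: result
 * latencies for SFU writes and ldvary, thrsw spacing, and whether a TLB
 * access has locked the scoreboard yet.
 */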
481 struct choose_scoreboard {
482 int tick;
483 int last_magic_sfu_write_tick;
484 int last_ldvary_tick;
485 int last_uniforms_reset_tick;
486 int last_thrsw_tick;
487 bool tlb_locked;
488 };
489
490 static bool
491 mux_reads_too_soon(struct choose_scoreboard *scoreboard,
492 const struct v3d_qpu_instr *inst, enum v3d_qpu_mux mux)
493 {
494 switch (mux) {
495 case V3D_QPU_MUX_R4:
496 if (scoreboard->tick - scoreboard->last_magic_sfu_write_tick <= 2)
497 return true;
498 break;
499
500 case V3D_QPU_MUX_R5:
501 if (scoreboard->tick - scoreboard->last_ldvary_tick <= 1)
502 return true;
503 break;
504 default:
505 break;
506 }
507
508 return false;
509 }
510
511 static bool
512 reads_too_soon_after_write(struct choose_scoreboard *scoreboard,
513 struct qinst *qinst)
514 {
515 const struct v3d_qpu_instr *inst = &qinst->qpu;
516
517 /* XXX: Branching off of raddr. */
518 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH)
519 return false;
520
521 assert(inst->type == V3D_QPU_INSTR_TYPE_ALU);
522
523 if (inst->alu.add.op != V3D_QPU_A_NOP) {
524 if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 0 &&
525 mux_reads_too_soon(scoreboard, inst, inst->alu.add.a)) {
526 return true;
527 }
528 if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 1 &&
529 mux_reads_too_soon(scoreboard, inst, inst->alu.add.b)) {
530 return true;
531 }
532 }
533
534 if (inst->alu.mul.op != V3D_QPU_M_NOP) {
535 if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 0 &&
536 mux_reads_too_soon(scoreboard, inst, inst->alu.mul.a)) {
537 return true;
538 }
539 if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 1 &&
540 mux_reads_too_soon(scoreboard, inst, inst->alu.mul.b)) {
541 return true;
542 }
543 }
544
545 /* XXX: imm */
546
547 return false;
548 }
549
550 static bool
551 writes_too_soon_after_write(const struct v3d_device_info *devinfo,
552 struct choose_scoreboard *scoreboard,
553 struct qinst *qinst)
554 {
555 const struct v3d_qpu_instr *inst = &qinst->qpu;
556
557 /* Don't schedule any other r4 write too soon after an SFU write.
558 * This would normally be prevented by dependency tracking, but might
559 * occur if a dead SFU computation makes it to scheduling.
560 */
561 if (scoreboard->tick - scoreboard->last_magic_sfu_write_tick < 2 &&
562 v3d_qpu_writes_r4(devinfo, inst))
563 return true;
564
565 return false;
566 }
567
568 static bool
569 pixel_scoreboard_too_soon(struct choose_scoreboard *scoreboard,
570 const struct v3d_qpu_instr *inst)
571 {
572 return (scoreboard->tick == 0 && qpu_inst_is_tlb(inst));
573 }
574
575 static int
576 get_instruction_priority(const struct v3d_qpu_instr *inst)
577 {
578 uint32_t baseline_score;
579 uint32_t next_score = 0;
580
581 /* Schedule TLB operations as late as possible, to get more
582 * parallelism between shaders.
583 */
584 if (qpu_inst_is_tlb(inst))
585 return next_score;
586 next_score++;
587
588 /* Schedule texture read results collection late to hide latency. */
589 if (v3d_qpu_waits_on_tmu(inst))
590 return next_score;
591 next_score++;
592
593 /* Default score for things that aren't otherwise special. */
594 baseline_score = next_score;
595 next_score++;
596
597 /* Schedule texture read setup early to hide its latency better. */
598 if (v3d_qpu_writes_tmu(inst))
599 return next_score;
600 next_score++;
601
602 return baseline_score;
603 }
604
605 static bool
606 qpu_magic_waddr_is_periph(enum v3d_qpu_waddr waddr)
607 {
608 return (v3d_qpu_magic_waddr_is_tmu(waddr) ||
609 v3d_qpu_magic_waddr_is_sfu(waddr) ||
610 v3d_qpu_magic_waddr_is_tlb(waddr) ||
611 v3d_qpu_magic_waddr_is_vpm(waddr) ||
612 v3d_qpu_magic_waddr_is_tsy(waddr));
613 }
614
615 static bool
616 qpu_accesses_peripheral(const struct v3d_qpu_instr *inst)
617 {
618 if (v3d_qpu_uses_vpm(inst))
619 return true;
620 if (v3d_qpu_uses_sfu(inst))
621 return true;
622
623 if (inst->type == V3D_QPU_INSTR_TYPE_ALU) {
624 if (inst->alu.add.op != V3D_QPU_A_NOP &&
625 inst->alu.add.magic_write &&
626 qpu_magic_waddr_is_periph(inst->alu.add.waddr)) {
627 return true;
628 }
629
630 if (inst->alu.add.op == V3D_QPU_A_TMUWT)
631 return true;
632
633 if (inst->alu.mul.op != V3D_QPU_M_NOP &&
634 inst->alu.mul.magic_write &&
635 qpu_magic_waddr_is_periph(inst->alu.mul.waddr)) {
636 return true;
637 }
638 }
639
640 return (inst->sig.ldvpm ||
641 inst->sig.ldtmu ||
642 inst->sig.ldtlb ||
643 inst->sig.ldtlbu ||
644 inst->sig.wrtmuc);
645 }
646
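/* Tries to merge instruction "b" into "a", writing the combined instruction
 * to *result. Fails if the two need the same ALU, use conflicting raddrs,
 * would both access a peripheral, both write a signal address, or if the
 * merged instruction can't be packed into a valid QPU encoding.
 */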
647 static bool
648 qpu_merge_inst(const struct v3d_device_info *devinfo,
649 struct v3d_qpu_instr *result,
650 const struct v3d_qpu_instr *a,
651 const struct v3d_qpu_instr *b)
652 {
653 if (a->type != V3D_QPU_INSTR_TYPE_ALU ||
654 b->type != V3D_QPU_INSTR_TYPE_ALU) {
655 return false;
656 }
657
658 /* Can't do more than one peripheral access in an instruction.
659 *
660 * XXX: V3D 4.1 allows TMU read along with a VPM read or write, and
661 * WRTMUC with a TMU magic register write (other than tmuc).
662 */
663 if (qpu_accesses_peripheral(a) && qpu_accesses_peripheral(b))
664 return false;
665
666 struct v3d_qpu_instr merge = *a;
667
668 if (b->alu.add.op != V3D_QPU_A_NOP) {
669 if (a->alu.add.op != V3D_QPU_A_NOP)
670 return false;
671 merge.alu.add = b->alu.add;
672
673 merge.flags.ac = b->flags.ac;
674 merge.flags.apf = b->flags.apf;
675 merge.flags.auf = b->flags.auf;
676 }
677
678 if (b->alu.mul.op != V3D_QPU_M_NOP) {
679 if (a->alu.mul.op != V3D_QPU_M_NOP)
680 return false;
681 merge.alu.mul = b->alu.mul;
682
683 merge.flags.mc = b->flags.mc;
684 merge.flags.mpf = b->flags.mpf;
685 merge.flags.muf = b->flags.muf;
686 }
687
688 if (v3d_qpu_uses_mux(b, V3D_QPU_MUX_A)) {
689 if (v3d_qpu_uses_mux(a, V3D_QPU_MUX_A) &&
690 a->raddr_a != b->raddr_a) {
691 return false;
692 }
693 merge.raddr_a = b->raddr_a;
694 }
695
696 if (v3d_qpu_uses_mux(b, V3D_QPU_MUX_B)) {
697 if (v3d_qpu_uses_mux(a, V3D_QPU_MUX_B) &&
698 (a->raddr_b != b->raddr_b ||
699 a->sig.small_imm != b->sig.small_imm)) {
700 return false;
701 }
702 merge.raddr_b = b->raddr_b;
703 }
704
705 merge.sig.thrsw |= b->sig.thrsw;
706 merge.sig.ldunif |= b->sig.ldunif;
707 merge.sig.ldunifrf |= b->sig.ldunifrf;
708 merge.sig.ldunifa |= b->sig.ldunifa;
709 merge.sig.ldunifarf |= b->sig.ldunifarf;
710 merge.sig.ldtmu |= b->sig.ldtmu;
711 merge.sig.ldvary |= b->sig.ldvary;
712 merge.sig.ldvpm |= b->sig.ldvpm;
713 merge.sig.small_imm |= b->sig.small_imm;
714 merge.sig.ldtlb |= b->sig.ldtlb;
715 merge.sig.ldtlbu |= b->sig.ldtlbu;
716 merge.sig.ucb |= b->sig.ucb;
717 merge.sig.rotate |= b->sig.rotate;
718 merge.sig.wrtmuc |= b->sig.wrtmuc;
719
720 if (v3d_qpu_sig_writes_address(devinfo, &a->sig) &&
721 v3d_qpu_sig_writes_address(devinfo, &b->sig))
722 return false;
723 merge.sig_addr |= b->sig_addr;
724 merge.sig_magic |= b->sig_magic;
725
726 uint64_t packed;
727 bool ok = v3d_qpu_instr_pack(devinfo, &merge, &packed);
728
729 *result = merge;
730 /* No modifying the real instructions on failure. */
731 assert(ok || (a != result && b != result));
732
733 return ok;
734 }
735
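/* Picks the next DAG head to schedule. If prev_inst is non-NULL, we are
 * looking for a second instruction to pair with it, so only candidates that
 * can merge into the same QPU instruction are considered. Ties are broken
 * first by priority, then by the delay (critical path) estimate.
 */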
736 static struct schedule_node *
737 choose_instruction_to_schedule(const struct v3d_device_info *devinfo,
738 struct choose_scoreboard *scoreboard,
739 struct list_head *schedule_list,
740 struct schedule_node *prev_inst)
741 {
742 struct schedule_node *chosen = NULL;
743 int chosen_prio = 0;
744
745 /* Don't pair up anything with a thread switch signal -- emit_thrsw()
746 * will handle pairing it along with filling the delay slots.
747 */
748 if (prev_inst) {
749 if (prev_inst->inst->qpu.sig.thrsw)
750 return NULL;
751 }
752
753 list_for_each_entry(struct schedule_node, n, schedule_list, link) {
754 const struct v3d_qpu_instr *inst = &n->inst->qpu;
755
756 /* Don't choose the branch instruction until it's the last one
757 * left. We'll move it up to fit its delay slots after we
758 * choose it.
759 */
760 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH &&
761 !list_is_singular(schedule_list)) {
762 continue;
763 }
764
765 /* "An instruction must not read from a location in physical
766 * regfile A or B that was written to by the previous
767 * instruction."
768 */
769 if (reads_too_soon_after_write(scoreboard, n->inst))
770 continue;
771
772 if (writes_too_soon_after_write(devinfo, scoreboard, n->inst))
773 continue;
774
775 /* "A scoreboard wait must not occur in the first two
776 * instructions of a fragment shader. This is either the
777 * explicit Wait for Scoreboard signal or an implicit wait
778 * with the first tile-buffer read or write instruction."
779 */
780 if (pixel_scoreboard_too_soon(scoreboard, inst))
781 continue;
782
783 /* ldunif and ldvary both write r5, but ldunif does so a tick
784 * sooner. If the ldvary's r5 wasn't used, then ldunif might
785 * otherwise get scheduled so ldunif and ldvary try to update
786 * r5 in the same tick.
787 */
788 if ((inst->sig.ldunif || inst->sig.ldunifa) &&
789 scoreboard->tick == scoreboard->last_ldvary_tick + 1) {
790 continue;
791 }
792
793 /* If we're trying to pair with another instruction, check
794 * that they're compatible.
795 */
796 if (prev_inst) {
797 /* Don't pair up a thread switch signal -- we'll
798 * handle pairing it when we pick it on its own.
799 */
800 if (inst->sig.thrsw)
801 continue;
802
803 if (prev_inst->inst->uniform != -1 &&
804 n->inst->uniform != -1)
805 continue;
806
807 /* Don't merge in something that will lock the TLB.
808 * Hopefully what we have in inst will release some
809 * other instructions, allowing us to delay the
810 * TLB-locking instruction until later.
811 */
812 if (!scoreboard->tlb_locked && qpu_inst_is_tlb(inst))
813 continue;
814
815 struct v3d_qpu_instr merged_inst;
816 if (!qpu_merge_inst(devinfo, &merged_inst,
817 &prev_inst->inst->qpu, inst)) {
818 continue;
819 }
820 }
821
822 int prio = get_instruction_priority(inst);
823
824 /* Found a valid instruction. If nothing better comes along,
825 * this one works.
826 */
827 if (!chosen) {
828 chosen = n;
829 chosen_prio = prio;
830 continue;
831 }
832
833 if (prio > chosen_prio) {
834 chosen = n;
835 chosen_prio = prio;
836 } else if (prio < chosen_prio) {
837 continue;
838 }
839
840 if (n->delay > chosen->delay) {
841 chosen = n;
842 chosen_prio = prio;
843 } else if (n->delay < chosen->delay) {
844 continue;
845 }
846 }
847
848 return chosen;
849 }
850
851 static void
852 update_scoreboard_for_magic_waddr(struct choose_scoreboard *scoreboard,
853 enum v3d_qpu_waddr waddr)
854 {
855 if (v3d_qpu_magic_waddr_is_sfu(waddr))
856 scoreboard->last_magic_sfu_write_tick = scoreboard->tick;
857 }
858
859 static void
860 update_scoreboard_for_chosen(struct choose_scoreboard *scoreboard,
861 const struct v3d_qpu_instr *inst)
862 {
863 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH)
864 return;
865
866 assert(inst->type == V3D_QPU_INSTR_TYPE_ALU);
867
868 if (inst->alu.add.op != V3D_QPU_A_NOP) {
869 if (inst->alu.add.magic_write) {
870 update_scoreboard_for_magic_waddr(scoreboard,
871 inst->alu.add.waddr);
872 }
873 }
874
875 if (inst->alu.mul.op != V3D_QPU_M_NOP) {
876 if (inst->alu.mul.magic_write) {
877 update_scoreboard_for_magic_waddr(scoreboard,
878 inst->alu.mul.waddr);
879 }
880 }
881
882 if (inst->sig.ldvary)
883 scoreboard->last_ldvary_tick = scoreboard->tick;
884
885 if (qpu_inst_is_tlb(inst))
886 scoreboard->tlb_locked = true;
887 }
888
889 static void
890 dump_state(const struct v3d_device_info *devinfo,
891 struct list_head *schedule_list)
892 {
893 list_for_each_entry(struct schedule_node, n, schedule_list, link) {
894 fprintf(stderr, " t=%4d: ", n->unblocked_time);
895 v3d_qpu_dump(devinfo, &n->inst->qpu);
896 fprintf(stderr, "\n");
897
898 for (int i = 0; i < n->child_count; i++) {
899 struct schedule_node *child = n->children[i].node;
900 if (!child)
901 continue;
902
903 fprintf(stderr, " - ");
904 v3d_qpu_dump(devinfo, &child->inst->qpu);
905 fprintf(stderr, " (%d parents, %c)\n",
906 child->parent_count,
907 n->children[i].write_after_read ? 'w' : 'r');
908 }
909 }
910 }
911
912 static uint32_t magic_waddr_latency(enum v3d_qpu_waddr waddr,
913 const struct v3d_qpu_instr *after)
914 {
915 /* Apply some huge latency between texture fetch requests and getting
916 * their results back.
917 *
918 * FIXME: This is actually pretty bogus. If we do:
919 *
920 * mov tmu0_s, a
921 * <a bit of math>
922 * mov tmu0_s, b
923 * load_tmu0
924 * <more math>
925 * load_tmu0
926 *
927 * we count that as worse than
928 *
929 * mov tmu0_s, a
930 * mov tmu0_s, b
931 * <lots of math>
932 * load_tmu0
933 * <more math>
934 * load_tmu0
935 *
936 * because we associate the first load_tmu0 with the *second* tmu0_s.
937 */
938 if (v3d_qpu_magic_waddr_is_tmu(waddr) && v3d_qpu_waits_on_tmu(after))
939 return 100;
940
941 /* Assume that anything depending on us is consuming the SFU result. */
942 if (v3d_qpu_magic_waddr_is_sfu(waddr))
943 return 3;
944
945 return 1;
946 }
947
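/* Estimated cycles between "before" being scheduled and "after" being able
 * to use its result, based on the magic waddrs that "before" writes.
 */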
948 static uint32_t
949 instruction_latency(struct schedule_node *before, struct schedule_node *after)
950 {
951 const struct v3d_qpu_instr *before_inst = &before->inst->qpu;
952 const struct v3d_qpu_instr *after_inst = &after->inst->qpu;
953 uint32_t latency = 1;
954
955 if (before_inst->type != V3D_QPU_INSTR_TYPE_ALU ||
956 after_inst->type != V3D_QPU_INSTR_TYPE_ALU)
957 return latency;
958
959 if (before_inst->alu.add.magic_write) {
960 latency = MAX2(latency,
961 magic_waddr_latency(before_inst->alu.add.waddr,
962 after_inst));
963 }
964
965 if (before_inst->alu.mul.magic_write) {
966 latency = MAX2(latency,
967 magic_waddr_latency(before_inst->alu.mul.waddr,
968 after_inst));
969 }
970
971 return latency;
972 }
973
974 /** Recursive computation of the delay member of a node. */
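/* Leaves get a delay of 1; other nodes get the maximum over their children
 * of the child's delay plus the latency of the edge to that child, i.e. an
 * estimate of the critical path from this node to the end of the block.
 */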
975 static void
976 compute_delay(struct schedule_node *n)
977 {
978 if (!n->child_count) {
979 n->delay = 1;
980 } else {
981 for (int i = 0; i < n->child_count; i++) {
982 if (!n->children[i].node->delay)
983 compute_delay(n->children[i].node);
984 n->delay = MAX2(n->delay,
985 n->children[i].node->delay +
986 instruction_latency(n, n->children[i].node));
987 }
988 }
989 }
990
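/* Removes the scheduled node's outgoing DAG edges, raising each child's
 * unblocked_time and moving children whose last parent this was onto the
 * ready list. With war_only set, only write-after-read edges are processed,
 * so that such children can be scheduled right after (or merged with) the
 * instruction that was just chosen.
 */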
991 static void
992 mark_instruction_scheduled(struct list_head *schedule_list,
993 uint32_t time,
994 struct schedule_node *node,
995 bool war_only)
996 {
997 if (!node)
998 return;
999
1000 for (int i = node->child_count - 1; i >= 0; i--) {
1001 struct schedule_node *child =
1002 node->children[i].node;
1003
1004 if (!child)
1005 continue;
1006
1007 if (war_only && !node->children[i].write_after_read)
1008 continue;
1009
1010 /* If the requirement is only that the node not appear before
1011 * the last read of its destination, then it can be scheduled
1012 * immediately after (or paired with!) the thing reading the
1013 * destination.
1014 */
1015 uint32_t latency = 0;
1016 if (!war_only) {
1017 latency = instruction_latency(node,
1018 node->children[i].node);
1019 }
1020
1021 child->unblocked_time = MAX2(child->unblocked_time,
1022 time + latency);
1023 child->parent_count--;
1024 if (child->parent_count == 0)
1025 list_add(&child->link, schedule_list);
1026
1027 node->children[i].node = NULL;
1028 }
1029 }
1030
1031 static void
1032 insert_scheduled_instruction(struct v3d_compile *c,
1033 struct qblock *block,
1034 struct choose_scoreboard *scoreboard,
1035 struct qinst *inst)
1036 {
1037 list_addtail(&inst->link, &block->instructions);
1038
1039 update_scoreboard_for_chosen(scoreboard, &inst->qpu);
1040 c->qpu_inst_count++;
1041 scoreboard->tick++;
1042 }
1043
1044 static struct qinst *
1045 vir_nop()
1046 {
1047 struct qreg undef = { QFILE_NULL, 0 };
1048 struct qinst *qinst = vir_add_inst(V3D_QPU_A_NOP, undef, undef, undef);
1049
1050 return qinst;
1051 }
1052
1053 static void
1054 emit_nop(struct v3d_compile *c, struct qblock *block,
1055 struct choose_scoreboard *scoreboard)
1056 {
1057 insert_scheduled_instruction(c, block, scoreboard, vir_nop());
1058 }
1059
1060 static bool
1061 qpu_instruction_valid_in_thrend_slot(struct v3d_compile *c,
1062 const struct qinst *qinst, int slot)
1063 {
1064 const struct v3d_qpu_instr *inst = &qinst->qpu;
1065
1066 /* Only TLB Z writes are prohibited in the last slot, but we don't
1067 * have those flagged so prohibit all TLB ops for now.
1068 */
1069 if (slot == 2 && qpu_inst_is_tlb(inst))
1070 return false;
1071
1072 if (slot > 0 && qinst->uniform != ~0)
1073 return false;
1074
1075 if (v3d_qpu_uses_vpm(inst))
1076 return false;
1077
1078 if (inst->sig.ldvary)
1079 return false;
1080
1081 if (inst->type == V3D_QPU_INSTR_TYPE_ALU) {
1082 /* GFXH-1625: TMUWT not allowed in the final instruction. */
1083 if (slot == 2 && inst->alu.add.op == V3D_QPU_A_TMUWT)
1084 return false;
1085
1086 /* No writing physical registers at the end. */
1087 if (!inst->alu.add.magic_write ||
1088 !inst->alu.mul.magic_write) {
1089 return false;
1090 }
1091
1092 if (c->devinfo->ver < 40 && inst->alu.add.op == V3D_QPU_A_SETMSF)
1093 return false;
1094
1095 /* RF0-2 might be overwritten during the delay slots by
1096 * fragment shader setup.
1097 */
1098 if (inst->raddr_a < 3 &&
1099 (inst->alu.add.a == V3D_QPU_MUX_A ||
1100 inst->alu.add.b == V3D_QPU_MUX_A ||
1101 inst->alu.mul.a == V3D_QPU_MUX_A ||
1102 inst->alu.mul.b == V3D_QPU_MUX_A)) {
1103 return false;
1104 }
1105
1106 if (inst->raddr_b < 3 &&
1107 !inst->sig.small_imm &&
1108 (inst->alu.add.a == V3D_QPU_MUX_B ||
1109 inst->alu.add.b == V3D_QPU_MUX_B ||
1110 inst->alu.mul.a == V3D_QPU_MUX_B ||
1111 inst->alu.mul.b == V3D_QPU_MUX_B)) {
1112 return false;
1113 }
1114 }
1115
1116 return true;
1117 }
1118
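/* Checks whether a thrsw signal could be merged onto qinst: the previous
 * thrsw must be far enough back, and each of the instructions_in_sequence
 * instructions starting at qinst (which would land in the thread-switch
 * delay slots) must be legal in its slot.
 */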
1119 static bool
1120 valid_thrsw_sequence(struct v3d_compile *c, struct choose_scoreboard *scoreboard,
1121 struct qinst *qinst, int instructions_in_sequence,
1122 bool is_thrend)
1123 {
1124 /* No emitting our thrsw while the previous thrsw hasn't happened yet. */
1125 if (scoreboard->last_thrsw_tick + 3 >
1126 scoreboard->tick - instructions_in_sequence) {
1127 return false;
1128 }
1129
1130 for (int slot = 0; slot < instructions_in_sequence; slot++) {
1131 /* No scheduling SFU when the result would land in the other
1132 * thread. The simulator complains for safety, though it
1133 * would only occur for dead code in our case.
1134 */
1135 if (slot > 0 &&
1136 qinst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
1137 (v3d_qpu_magic_waddr_is_sfu(qinst->qpu.alu.add.waddr) ||
1138 v3d_qpu_magic_waddr_is_sfu(qinst->qpu.alu.mul.waddr))) {
1139 return false;
1140 }
1141
1142 if (slot > 0 && qinst->qpu.sig.ldvary)
1143 return false;
1144
1145 if (is_thrend &&
1146 !qpu_instruction_valid_in_thrend_slot(c, qinst, slot)) {
1147 return false;
1148 }
1149
1150 /* Note that the list is circular, so we can only do this up
1151 * to instructions_in_sequence.
1152 */
1153 qinst = (struct qinst *)qinst->link.next;
1154 }
1155
1156 return true;
1157 }
1158
1159 /**
1160 * Emits a THRSW signal in the stream, trying to move it up to pair with
1161 * another instruction.
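 *
 * Returns the number of instructions added to the block (the thrsw itself
 * when it couldn't be merged, plus any delay-slot NOPs), so the caller can
 * advance its cycle count.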
1162 */
1163 static int
1164 emit_thrsw(struct v3d_compile *c,
1165 struct qblock *block,
1166 struct choose_scoreboard *scoreboard,
1167 struct qinst *inst,
1168 bool is_thrend)
1169 {
1170 int time = 0;
1171
1172 /* There should be nothing in a thrsw inst being scheduled other than
1173 * the signal bits.
1174 */
1175 assert(inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU);
1176 assert(inst->qpu.alu.add.op == V3D_QPU_A_NOP);
1177 assert(inst->qpu.alu.mul.op == V3D_QPU_M_NOP);
1178
1179 /* Find how far back into previous instructions we can put the THRSW. */
1180 int slots_filled = 0;
1181 struct qinst *merge_inst = NULL;
1182 vir_for_each_inst_rev(prev_inst, block) {
1183 struct v3d_qpu_sig sig = prev_inst->qpu.sig;
1184 sig.thrsw = true;
1185 uint32_t packed_sig;
1186
1187 if (!v3d_qpu_sig_pack(c->devinfo, &sig, &packed_sig))
1188 break;
1189
1190 if (!valid_thrsw_sequence(c, scoreboard,
1191 prev_inst, slots_filled + 1,
1192 is_thrend)) {
1193 break;
1194 }
1195
1196 merge_inst = prev_inst;
1197 if (++slots_filled == 3)
1198 break;
1199 }
1200
1201 bool needs_free = false;
1202 if (merge_inst) {
1203 merge_inst->qpu.sig.thrsw = true;
1204 needs_free = true;
1205 scoreboard->last_thrsw_tick = scoreboard->tick - slots_filled;
1206 } else {
1207 scoreboard->last_thrsw_tick = scoreboard->tick;
1208 insert_scheduled_instruction(c, block, scoreboard, inst);
1209 time++;
1210 slots_filled++;
1211 merge_inst = inst;
1212 }
1213
1214 /* Insert any extra delay slot NOPs we need. */
1215 for (int i = 0; i < 3 - slots_filled; i++) {
1216 emit_nop(c, block, scoreboard);
1217 time++;
1218 }
1219
1220 /* If we're emitting the last THRSW (other than program end), then
1221 * signal that to the HW by emitting two THRSWs in a row.
1222 */
1223 if (inst->is_last_thrsw) {
1224 struct qinst *second_inst =
1225 (struct qinst *)merge_inst->link.next;
1226 second_inst->qpu.sig.thrsw = true;
1227 }
1228
1229 /* If we put our THRSW into another instruction, free up the
1230 * instruction that didn't end up scheduled into the list.
1231 */
1232 if (needs_free)
1233 free(inst);
1234
1235 return time;
1236 }
1237
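/* Greedy list-scheduling loop for one block: pick a ready instruction (or a
 * NOP if nothing is ready), try to merge a compatible second instruction
 * into it, rewrite its uniform index, and emit it, with special handling for
 * thrsw and branch delay slots. Returns the estimated cycle count.
 */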
1238 static uint32_t
1239 schedule_instructions(struct v3d_compile *c,
1240 struct choose_scoreboard *scoreboard,
1241 struct qblock *block,
1242 struct list_head *schedule_list,
1243 enum quniform_contents *orig_uniform_contents,
1244 uint32_t *orig_uniform_data,
1245 uint32_t *next_uniform)
1246 {
1247 const struct v3d_device_info *devinfo = c->devinfo;
1248 uint32_t time = 0;
1249
1250 if (debug) {
1251 fprintf(stderr, "initial deps:\n");
1252 dump_state(devinfo, schedule_list);
1253 fprintf(stderr, "\n");
1254 }
1255
1256 /* Remove non-DAG heads from the list. */
1257 list_for_each_entry_safe(struct schedule_node, n, schedule_list, link) {
1258 if (n->parent_count != 0)
1259 list_del(&n->link);
1260 }
1261
1262 while (!list_empty(schedule_list)) {
1263 struct schedule_node *chosen =
1264 choose_instruction_to_schedule(devinfo,
1265 scoreboard,
1266 schedule_list,
1267 NULL);
1268 struct schedule_node *merge = NULL;
1269
1270 /* If there are no valid instructions to schedule, drop a NOP
1271 * in.
1272 */
1273 struct qinst *qinst = chosen ? chosen->inst : vir_nop();
1274 struct v3d_qpu_instr *inst = &qinst->qpu;
1275
1276 if (debug) {
1277 fprintf(stderr, "t=%4d: current list:\n",
1278 time);
1279 dump_state(devinfo, schedule_list);
1280 fprintf(stderr, "t=%4d: chose: ", time);
1281 v3d_qpu_dump(devinfo, inst);
1282 fprintf(stderr, "\n");
1283 }
1284
1285 /* We can't mark_instruction_scheduled() the chosen inst until
1286 * we're done identifying instructions to merge, so put the
1287 * merged instructions on a list for a moment.
1288 */
1289 struct list_head merged_list;
1290 list_inithead(&merged_list);
1291
1292 /* Schedule this instruction onto the QPU list. Also try to
1293 * find an instruction to pair with it.
1294 */
1295 if (chosen) {
1296 time = MAX2(chosen->unblocked_time, time);
1297 list_del(&chosen->link);
1298 mark_instruction_scheduled(schedule_list, time,
1299 chosen, true);
1300
1301 while ((merge =
1302 choose_instruction_to_schedule(devinfo,
1303 scoreboard,
1304 schedule_list,
1305 chosen))) {
1306 time = MAX2(merge->unblocked_time, time);
1307 list_del(&merge->link);
1308 list_addtail(&merge->link, &merged_list);
1309 (void)qpu_merge_inst(devinfo, inst,
1310 inst, &merge->inst->qpu);
1311 if (merge->inst->uniform != -1) {
1312 chosen->inst->uniform =
1313 merge->inst->uniform;
1314 }
1315
1316 if (debug) {
1317 fprintf(stderr, "t=%4d: merging: ",
1318 time);
1319 v3d_qpu_dump(devinfo, &merge->inst->qpu);
1320 fprintf(stderr, "\n");
1321 fprintf(stderr, " result: ");
1322 v3d_qpu_dump(devinfo, inst);
1323 fprintf(stderr, "\n");
1324 }
1325 }
1326 }
1327
1328 /* Update the uniform index for the rewritten location --
1329 * branch target updating will still need to change
1330 * c->uniform_data[] using this index.
1331 */
1332 if (qinst->uniform != -1) {
1333 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH)
1334 block->branch_uniform = *next_uniform;
1335
1336 c->uniform_data[*next_uniform] =
1337 orig_uniform_data[qinst->uniform];
1338 c->uniform_contents[*next_uniform] =
1339 orig_uniform_contents[qinst->uniform];
1340 qinst->uniform = *next_uniform;
1341 (*next_uniform)++;
1342 }
1343
1344 if (debug) {
1345 fprintf(stderr, "\n");
1346 }
1347
1348 /* Now that we've scheduled a new instruction, some of its
1349 * children can be promoted to the list of instructions ready to
1350 * be scheduled. Update the children's unblocked time for this
1351 * DAG edge as we do so.
1352 */
1353 mark_instruction_scheduled(schedule_list, time, chosen, false);
1354 list_for_each_entry(struct schedule_node, merge, &merged_list,
1355 link) {
1356 mark_instruction_scheduled(schedule_list, time, merge,
1357 false);
1358
1359 /* The merged VIR instruction doesn't get re-added to the
1360 * block, so free it now.
1361 */
1362 free(merge->inst);
1363 }
1364
1365 if (inst->sig.thrsw) {
1366 time += emit_thrsw(c, block, scoreboard, qinst, false);
1367 } else {
1368 insert_scheduled_instruction(c, block,
1369 scoreboard, qinst);
1370
1371 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH) {
1372 block->branch_qpu_ip = c->qpu_inst_count - 1;
1373 /* Fill the delay slots.
1374 *
1375 * We should fill these with actual instructions,
1376 * instead, but that will probably need to be done
1377 * after this, once we know what the leading
1378 * instructions of the successors are (so we can
1379 * handle A/B register file write latency)
1380 */
1381 for (int i = 0; i < 3; i++)
1382 emit_nop(c, block, scoreboard);
1383 }
1384 }
1385 }
1386
1387 return time;
1388 }
1389
1390 static uint32_t
1391 qpu_schedule_instructions_block(struct v3d_compile *c,
1392 struct choose_scoreboard *scoreboard,
1393 struct qblock *block,
1394 enum quniform_contents *orig_uniform_contents,
1395 uint32_t *orig_uniform_data,
1396 uint32_t *next_uniform)
1397 {
1398 void *mem_ctx = ralloc_context(NULL);
1399 struct list_head schedule_list;
1400
1401 list_inithead(&schedule_list);
1402
1403 /* Wrap each instruction in a scheduler structure. */
1404 while (!list_empty(&block->instructions)) {
1405 struct qinst *qinst = (struct qinst *)block->instructions.next;
1406 struct schedule_node *n =
1407 rzalloc(mem_ctx, struct schedule_node);
1408
1409 n->inst = qinst;
1410
1411 list_del(&qinst->link);
1412 list_addtail(&n->link, &schedule_list);
1413 }
1414
1415 calculate_forward_deps(c, &schedule_list);
1416 calculate_reverse_deps(c, &schedule_list);
1417
1418 list_for_each_entry(struct schedule_node, n, &schedule_list, link) {
1419 compute_delay(n);
1420 }
1421
1422 uint32_t cycles = schedule_instructions(c, scoreboard, block,
1423 &schedule_list,
1424 orig_uniform_contents,
1425 orig_uniform_data,
1426 next_uniform);
1427
1428 ralloc_free(mem_ctx);
1429
1430 return cycles;
1431 }
1432
1433 static void
1434 qpu_set_branch_targets(struct v3d_compile *c)
1435 {
1436 vir_for_each_block(block, c) {
1437 /* The end block of the program has no branch. */
1438 if (!block->successors[0])
1439 continue;
1440
1441 /* If there was no branch instruction, then the successor
1442 * block must follow immediately after this one.
1443 */
1444 if (block->branch_qpu_ip == ~0) {
1445 assert(block->end_qpu_ip + 1 ==
1446 block->successors[0]->start_qpu_ip);
1447 continue;
1448 }
1449
1450 /* Walk back through the delay slots to find the branch
1451 * instr.
1452 */
1453 struct list_head *entry = block->instructions.prev;
1454 for (int i = 0; i < 3; i++)
1455 entry = entry->prev;
1456 struct qinst *branch = container_of(entry, branch, link);
1457 assert(branch->qpu.type == V3D_QPU_INSTR_TYPE_BRANCH);
1458
1459 /* Make sure that the if-we-don't-jump
1460 * successor was scheduled just after the
1461 * delay slots.
1462 */
1463 assert(!block->successors[1] ||
1464 block->successors[1]->start_qpu_ip ==
1465 block->branch_qpu_ip + 4);
1466
1467 branch->qpu.branch.offset =
1468 ((block->successors[0]->start_qpu_ip -
1469 (block->branch_qpu_ip + 4)) *
1470 sizeof(uint64_t));
1471
1472 /* Set up the relative offset to jump in the
1473 * uniform stream.
1474 *
1475 * Use a temporary here, because
1476 * uniform_data[inst->uniform] may be shared
1477 * between multiple instructions.
1478 */
1479 assert(c->uniform_contents[branch->uniform] == QUNIFORM_CONSTANT);
1480 c->uniform_data[branch->uniform] =
1481 (block->successors[0]->start_uniform -
1482 (block->branch_uniform + 1)) * 4;
1483 }
1484 }
1485
1486 uint32_t
1487 v3d_qpu_schedule_instructions(struct v3d_compile *c)
1488 {
1489 const struct v3d_device_info *devinfo = c->devinfo;
1490 struct qblock *end_block = list_last_entry(&c->blocks,
1491 struct qblock, link);
1492
1493 /* We reorder the uniforms as we schedule instructions, so save the
1494 * old data off and replace it.
1495 */
1496 uint32_t *uniform_data = c->uniform_data;
1497 enum quniform_contents *uniform_contents = c->uniform_contents;
1498 c->uniform_contents = ralloc_array(c, enum quniform_contents,
1499 c->num_uniforms);
1500 c->uniform_data = ralloc_array(c, uint32_t, c->num_uniforms);
1501 c->uniform_array_size = c->num_uniforms;
1502 uint32_t next_uniform = 0;
1503
1504 struct choose_scoreboard scoreboard;
1505 memset(&scoreboard, 0, sizeof(scoreboard));
1506 scoreboard.last_ldvary_tick = -10;
1507 scoreboard.last_magic_sfu_write_tick = -10;
1508 scoreboard.last_uniforms_reset_tick = -10;
1509 scoreboard.last_thrsw_tick = -10;
1510
1511 if (debug) {
1512 fprintf(stderr, "Pre-schedule instructions\n");
1513 vir_for_each_block(block, c) {
1514 fprintf(stderr, "BLOCK %d\n", block->index);
1515 list_for_each_entry(struct qinst, qinst,
1516 &block->instructions, link) {
1517 v3d_qpu_dump(devinfo, &qinst->qpu);
1518 fprintf(stderr, "\n");
1519 }
1520 }
1521 fprintf(stderr, "\n");
1522 }
1523
1524 uint32_t cycles = 0;
1525 vir_for_each_block(block, c) {
1526 block->start_qpu_ip = c->qpu_inst_count;
1527 block->branch_qpu_ip = ~0;
1528 block->start_uniform = next_uniform;
1529
1530 cycles += qpu_schedule_instructions_block(c,
1531 &scoreboard,
1532 block,
1533 uniform_contents,
1534 uniform_data,
1535 &next_uniform);
1536
1537 block->end_qpu_ip = c->qpu_inst_count - 1;
1538 }
1539
1540 /* Emit the program-end THRSW instruction. */
1541 struct qinst *thrsw = vir_nop();
1542 thrsw->qpu.sig.thrsw = true;
1543 emit_thrsw(c, end_block, &scoreboard, thrsw, true);
1544
1545 qpu_set_branch_targets(c);
1546
1547 assert(next_uniform == c->num_uniforms);
1548
1549 return cycles;
1550 }