v3d: implement simultaneous peripheral access exceptions for V3D 4.1+
[mesa.git] / src / broadcom / compiler / qpu_schedule.c
1 /*
2 * Copyright © 2010 Intel Corporation
3 * Copyright © 2014-2017 Broadcom
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 /**
26 * @file
27 *
28 * The basic model of the list scheduler is to take a basic block, compute a
29 * DAG of the dependencies, and make a list of the DAG heads. Heuristically
30 * pick a DAG head, then put all the children that are now DAG heads into the
31 * list of things to schedule.
32 *
33 * The goal of scheduling here is to pack pairs of operations together in a
34 * single QPU instruction.
35 */
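
/*
 * Rough per-block flow of the code below (simplified; the real entry points
 * are v3d_qpu_schedule_instructions() and qpu_schedule_instructions_block()):
 *
 *   1. Build the dependency DAG (calculate_forward_deps() and
 *      calculate_reverse_deps()).
 *   2. While the DAG still has heads: pick one heuristically
 *      (choose_instruction_to_schedule()), try to merge a second compatible
 *      head into the same QPU instruction (qpu_merge_inst()), then retire
 *      them so their children become new heads (mark_instruction_scheduled()).
 */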
36
37 #include "qpu/qpu_disasm.h"
38 #include "v3d_compiler.h"
39 #include "util/ralloc.h"
40 #include "util/dag.h"
41
42 static bool debug;
43
44 struct schedule_node_child;
45
46 struct schedule_node {
47 struct dag_node dag;
48 struct list_head link;
49 struct qinst *inst;
50
51 /* Longest cycles + instruction_latency() of any parent of this node. */
52 uint32_t unblocked_time;
53
54 /**
55 * Minimum number of cycles from scheduling this instruction until the
56 * end of the program, based on the slowest dependency chain through
57 * the children.
58 */
59 uint32_t delay;
60
61 /**
62 * Cycles between this instruction being scheduled and when its result
63 * can be consumed.
64 */
65 uint32_t latency;
66 };
67
68 /* When walking the instructions in reverse, we need to swap before/after in
69 * add_dep().
70 */
71 enum direction { F, R };
72
73 struct schedule_state {
74 const struct v3d_device_info *devinfo;
75 struct dag *dag;
76 struct schedule_node *last_r[6];
77 struct schedule_node *last_rf[64];
78 struct schedule_node *last_sf;
79 struct schedule_node *last_vpm_read;
80 struct schedule_node *last_tmu_write;
81 struct schedule_node *last_tmu_config;
82 struct schedule_node *last_tlb;
83 struct schedule_node *last_vpm;
84 struct schedule_node *last_unif;
85 struct schedule_node *last_rtop;
86 enum direction dir;
87 /* Estimated cycle when the current instruction would start. */
88 uint32_t time;
89 };
90
91 static void
92 add_dep(struct schedule_state *state,
93 struct schedule_node *before,
94 struct schedule_node *after,
95 bool write)
96 {
97 bool write_after_read = !write && state->dir == R;
98 void *edge_data = (void *)(uintptr_t)write_after_read;
99
100 if (!before || !after)
101 return;
102
103 assert(before != after);
104
105 if (state->dir == F)
106 dag_add_edge(&before->dag, &after->dag, edge_data);
107 else
108 dag_add_edge(&after->dag, &before->dag, edge_data);
109 }
110
111 static void
112 add_read_dep(struct schedule_state *state,
113 struct schedule_node *before,
114 struct schedule_node *after)
115 {
116 add_dep(state, before, after, false);
117 }
118
119 static void
120 add_write_dep(struct schedule_state *state,
121 struct schedule_node **before,
122 struct schedule_node *after)
123 {
124 add_dep(state, *before, after, true);
125 *before = after;
126 }
127
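/* Returns true if either the add or mul ALU writes the TLB or TLBU magic
 * register, i.e. a tile buffer access (used for scheduling priority and for
 * tracking the implicit scoreboard lock).
 */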
128 static bool
129 qpu_inst_is_tlb(const struct v3d_qpu_instr *inst)
130 {
131 if (inst->type != V3D_QPU_INSTR_TYPE_ALU)
132 return false;
133
134 if (inst->alu.add.magic_write &&
135 (inst->alu.add.waddr == V3D_QPU_WADDR_TLB ||
136 inst->alu.add.waddr == V3D_QPU_WADDR_TLBU))
137 return true;
138
139 if (inst->alu.mul.magic_write &&
140 (inst->alu.mul.waddr == V3D_QPU_WADDR_TLB ||
141 inst->alu.mul.waddr == V3D_QPU_WADDR_TLBU))
142 return true;
143
144 return false;
145 }
146
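/* Adds a read dependency for one input mux of an ALU op: regfile A/B muxes
 * depend on the last write to the corresponding raddr (unless mux B is a
 * small immediate), and accumulator muxes depend on the last write to r0-r5.
 */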
147 static void
148 process_mux_deps(struct schedule_state *state, struct schedule_node *n,
149 enum v3d_qpu_mux mux)
150 {
151 switch (mux) {
152 case V3D_QPU_MUX_A:
153 add_read_dep(state, state->last_rf[n->inst->qpu.raddr_a], n);
154 break;
155 case V3D_QPU_MUX_B:
156 if (!n->inst->qpu.sig.small_imm) {
157 add_read_dep(state,
158 state->last_rf[n->inst->qpu.raddr_b], n);
159 }
160 break;
161 default:
162 add_read_dep(state, state->last_r[mux - V3D_QPU_MUX_R0], n);
163 break;
164 }
165 }
166
167
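/* Adds write dependencies for an instruction's destination: non-magic writes
 * serialize on the register file entry, TMU destinations serialize on
 * last_tmu_write (the TMUS* destinations also on last_tmu_config), VPM and
 * TLB destinations serialize on their trackers, SFU and r3-r5 writes are
 * covered by the v3d_qpu_writes_r*() checks in calculate_deps(), and TSY
 * sync writes are ordered against TMU accesses for barrier().
 */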
168 static void
169 process_waddr_deps(struct schedule_state *state, struct schedule_node *n,
170 uint32_t waddr, bool magic)
171 {
172 if (!magic) {
173 add_write_dep(state, &state->last_rf[waddr], n);
174 } else if (v3d_qpu_magic_waddr_is_tmu(waddr)) {
175 /* XXX perf: For V3D 4.x, we could reorder TMU writes other
176 * than the TMUS/TMUD/TMUA to improve scheduling flexibility.
177 */
178 add_write_dep(state, &state->last_tmu_write, n);
179 switch (waddr) {
180 case V3D_QPU_WADDR_TMUS:
181 case V3D_QPU_WADDR_TMUSCM:
182 case V3D_QPU_WADDR_TMUSF:
183 case V3D_QPU_WADDR_TMUSLOD:
184 add_write_dep(state, &state->last_tmu_config, n);
185 break;
186 default:
187 break;
188 }
189 } else if (v3d_qpu_magic_waddr_is_sfu(waddr)) {
190 /* Handled by v3d_qpu_writes_r4() check. */
191 } else {
192 switch (waddr) {
193 case V3D_QPU_WADDR_R0:
194 case V3D_QPU_WADDR_R1:
195 case V3D_QPU_WADDR_R2:
196 add_write_dep(state,
197 &state->last_r[waddr - V3D_QPU_WADDR_R0],
198 n);
199 break;
200 case V3D_QPU_WADDR_R3:
201 case V3D_QPU_WADDR_R4:
202 case V3D_QPU_WADDR_R5:
203 /* Handled by v3d_qpu_writes_r*() checks below. */
204 break;
205
206 case V3D_QPU_WADDR_VPM:
207 case V3D_QPU_WADDR_VPMU:
208 add_write_dep(state, &state->last_vpm, n);
209 break;
210
211 case V3D_QPU_WADDR_TLB:
212 case V3D_QPU_WADDR_TLBU:
213 add_write_dep(state, &state->last_tlb, n);
214 break;
215
216 case V3D_QPU_WADDR_SYNC:
217 case V3D_QPU_WADDR_SYNCB:
218 case V3D_QPU_WADDR_SYNCU:
219 /* For CS barrier(): Sync against any other memory
220 * accesses. There doesn't appear to be any need for
221 * barriers to affect ALU operations.
222 */
223 add_write_dep(state, &state->last_tmu_write, n);
224 break;
225
226 case V3D_QPU_WADDR_NOP:
227 break;
228
229 default:
230 fprintf(stderr, "Unknown waddr %d\n", waddr);
231 abort();
232 }
233 }
234 }
235
236 /**
237 * Common code for dependencies that need to be tracked both forward and
238 * backward.
239 *
240 * This is for things like "all reads of r4 have to happen between the r4
241 * writes that surround them".
242 */
243 static void
244 calculate_deps(struct schedule_state *state, struct schedule_node *n)
245 {
246 const struct v3d_device_info *devinfo = state->devinfo;
247 struct qinst *qinst = n->inst;
248 struct v3d_qpu_instr *inst = &qinst->qpu;
249 /* If the input and output segments are shared, then all VPM reads to
250 * a location need to happen before all writes. We handle this by
251 * serializing all VPM operations for now.
252 */
253 bool separate_vpm_segment = false;
254
255 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH) {
256 if (inst->branch.cond != V3D_QPU_BRANCH_COND_ALWAYS)
257 add_read_dep(state, state->last_sf, n);
258
259 /* XXX: BDI */
260 /* XXX: BDU */
261 /* XXX: ub */
262 /* XXX: raddr_a */
263
264 add_write_dep(state, &state->last_unif, n);
265 return;
266 }
267
268 assert(inst->type == V3D_QPU_INSTR_TYPE_ALU);
269
270 /* XXX: LOAD_IMM */
271
272 if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 0)
273 process_mux_deps(state, n, inst->alu.add.a);
274 if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 1)
275 process_mux_deps(state, n, inst->alu.add.b);
276
277 if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 0)
278 process_mux_deps(state, n, inst->alu.mul.a);
279 if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 1)
280 process_mux_deps(state, n, inst->alu.mul.b);
281
282 switch (inst->alu.add.op) {
283 case V3D_QPU_A_VPMSETUP:
284 /* Could distinguish read/write by unpacking the uniform. */
285 add_write_dep(state, &state->last_vpm, n);
286 add_write_dep(state, &state->last_vpm_read, n);
287 break;
288
289 case V3D_QPU_A_STVPMV:
290 case V3D_QPU_A_STVPMD:
291 case V3D_QPU_A_STVPMP:
292 add_write_dep(state, &state->last_vpm, n);
293 break;
294
295 case V3D_QPU_A_LDVPMV_IN:
296 case V3D_QPU_A_LDVPMD_IN:
297 case V3D_QPU_A_LDVPMG_IN:
298 case V3D_QPU_A_LDVPMP:
299 if (!separate_vpm_segment)
300 add_write_dep(state, &state->last_vpm, n);
301 break;
302
303 case V3D_QPU_A_VPMWT:
304 add_read_dep(state, state->last_vpm, n);
305 break;
306
307 case V3D_QPU_A_MSF:
308 add_read_dep(state, state->last_tlb, n);
309 break;
310
311 case V3D_QPU_A_SETMSF:
312 case V3D_QPU_A_SETREVF:
313 add_write_dep(state, &state->last_tlb, n);
314 break;
315
316 default:
317 break;
318 }
319
320 switch (inst->alu.mul.op) {
321 case V3D_QPU_M_MULTOP:
322 case V3D_QPU_M_UMUL24:
323 /* MULTOP sets rtop, and UMUL24 implicitly reads rtop and
324 * resets it to 0. We could possibly reorder umul24s relative
325 * to each other, but for now just keep all the MUL parts in
326 * order.
327 */
328 add_write_dep(state, &state->last_rtop, n);
329 break;
330 default:
331 break;
332 }
333
334 if (inst->alu.add.op != V3D_QPU_A_NOP) {
335 process_waddr_deps(state, n, inst->alu.add.waddr,
336 inst->alu.add.magic_write);
337 }
338 if (inst->alu.mul.op != V3D_QPU_M_NOP) {
339 process_waddr_deps(state, n, inst->alu.mul.waddr,
340 inst->alu.mul.magic_write);
341 }
342 if (v3d_qpu_sig_writes_address(devinfo, &inst->sig)) {
343 process_waddr_deps(state, n, inst->sig_addr,
344 inst->sig_magic);
345 }
346
347 if (v3d_qpu_writes_r3(devinfo, inst))
348 add_write_dep(state, &state->last_r[3], n);
349 if (v3d_qpu_writes_r4(devinfo, inst))
350 add_write_dep(state, &state->last_r[4], n);
351 if (v3d_qpu_writes_r5(devinfo, inst))
352 add_write_dep(state, &state->last_r[5], n);
353
354 if (inst->sig.thrsw) {
355 /* All accumulator contents and flags are undefined after the
356 * switch.
357 */
358 for (int i = 0; i < ARRAY_SIZE(state->last_r); i++)
359 add_write_dep(state, &state->last_r[i], n);
360 add_write_dep(state, &state->last_sf, n);
361 add_write_dep(state, &state->last_rtop, n);
362
363 /* Scoreboard-locking operations have to stay after the last
364 * thread switch.
365 */
366 add_write_dep(state, &state->last_tlb, n);
367
368 add_write_dep(state, &state->last_tmu_write, n);
369 add_write_dep(state, &state->last_tmu_config, n);
370 }
371
372 if (v3d_qpu_waits_on_tmu(inst)) {
373 /* TMU loads are coming from a FIFO, so ordering is important.
374 */
375 add_write_dep(state, &state->last_tmu_write, n);
376 }
377
378 if (inst->sig.wrtmuc)
379 add_write_dep(state, &state->last_tmu_config, n);
380
381 if (inst->sig.ldtlb || inst->sig.ldtlbu)
382 add_read_dep(state, state->last_tlb, n);
383
384 if (inst->sig.ldvpm) {
385 add_write_dep(state, &state->last_vpm_read, n);
386
387 /* At least for now, we're doing shared I/O segments, so queue
388 * all writes after all reads.
389 */
390 if (!separate_vpm_segment)
391 add_write_dep(state, &state->last_vpm, n);
392 }
393
394 /* inst->sig.ldunif or sideband uniform read */
395 if (vir_has_uniform(qinst))
396 add_write_dep(state, &state->last_unif, n);
397
398 if (v3d_qpu_reads_flags(inst))
399 add_read_dep(state, state->last_sf, n);
400 if (v3d_qpu_writes_flags(inst))
401 add_write_dep(state, &state->last_sf, n);
402 }
403
404 static void
405 calculate_forward_deps(struct v3d_compile *c, struct dag *dag,
406 struct list_head *schedule_list)
407 {
408 struct schedule_state state;
409
410 memset(&state, 0, sizeof(state));
411 state.dag = dag;
412 state.devinfo = c->devinfo;
413 state.dir = F;
414
415 list_for_each_entry(struct schedule_node, node, schedule_list, link)
416 calculate_deps(&state, node);
417 }
418
419 static void
420 calculate_reverse_deps(struct v3d_compile *c, struct dag *dag,
421 struct list_head *schedule_list)
422 {
423 struct schedule_state state;
424
425 memset(&state, 0, sizeof(state));
426 state.dag = dag;
427 state.devinfo = c->devinfo;
428 state.dir = R;
429
430 list_for_each_entry_rev(struct schedule_node, node, schedule_list,
431 link) {
432 calculate_deps(&state, (struct schedule_node *)node);
433 }
434 }
435
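/* Scheduling state that persists while emitting instructions for a block:
 * the current tick plus the ticks of recent hazard-relevant events (magic
 * SFU writes, ldvary, thrsw), and whether a TLB access has already taken the
 * scoreboard lock.
 */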
436 struct choose_scoreboard {
437 struct dag *dag;
438 int tick;
439 int last_magic_sfu_write_tick;
440 int last_ldvary_tick;
441 int last_uniforms_reset_tick;
442 int last_thrsw_tick;
443 bool tlb_locked;
444 };
445
446 static bool
447 mux_reads_too_soon(struct choose_scoreboard *scoreboard,
448 const struct v3d_qpu_instr *inst, enum v3d_qpu_mux mux)
449 {
450 switch (mux) {
451 case V3D_QPU_MUX_R4:
452 if (scoreboard->tick - scoreboard->last_magic_sfu_write_tick <= 2)
453 return true;
454 break;
455
456 case V3D_QPU_MUX_R5:
457 if (scoreboard->tick - scoreboard->last_ldvary_tick <= 1)
458 return true;
459 break;
460 default:
461 break;
462 }
463
464 return false;
465 }
466
467 static bool
468 reads_too_soon_after_write(struct choose_scoreboard *scoreboard,
469 struct qinst *qinst)
470 {
471 const struct v3d_qpu_instr *inst = &qinst->qpu;
472
473 /* XXX: Branching off of raddr. */
474 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH)
475 return false;
476
477 assert(inst->type == V3D_QPU_INSTR_TYPE_ALU);
478
479 if (inst->alu.add.op != V3D_QPU_A_NOP) {
480 if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 0 &&
481 mux_reads_too_soon(scoreboard, inst, inst->alu.add.a)) {
482 return true;
483 }
484 if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 1 &&
485 mux_reads_too_soon(scoreboard, inst, inst->alu.add.b)) {
486 return true;
487 }
488 }
489
490 if (inst->alu.mul.op != V3D_QPU_M_NOP) {
491 if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 0 &&
492 mux_reads_too_soon(scoreboard, inst, inst->alu.mul.a)) {
493 return true;
494 }
495 if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 1 &&
496 mux_reads_too_soon(scoreboard, inst, inst->alu.mul.b)) {
497 return true;
498 }
499 }
500
501 /* XXX: imm */
502
503 return false;
504 }
505
506 static bool
507 writes_too_soon_after_write(const struct v3d_device_info *devinfo,
508 struct choose_scoreboard *scoreboard,
509 struct qinst *qinst)
510 {
511 const struct v3d_qpu_instr *inst = &qinst->qpu;
512
513 /* Don't schedule any other r4 write too soon after an SFU write.
514 * This would normally be prevented by dependency tracking, but might
515 * occur if a dead SFU computation makes it to scheduling.
516 */
517 if (scoreboard->tick - scoreboard->last_magic_sfu_write_tick < 2 &&
518 v3d_qpu_writes_r4(devinfo, inst))
519 return true;
520
521 return false;
522 }
523
524 static bool
525 pixel_scoreboard_too_soon(struct choose_scoreboard *scoreboard,
526 const struct v3d_qpu_instr *inst)
527 {
528 return (scoreboard->tick == 0 && qpu_inst_is_tlb(inst));
529 }
530
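/* Returns a score used by choose_instruction_to_schedule(); higher scores
 * are picked first. TLB accesses and TMU result reads score lowest (so they
 * are emitted late), TMU setup writes score highest (so their latency can be
 * hidden), and everything else gets the baseline score in between.
 */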
531 static int
532 get_instruction_priority(const struct v3d_qpu_instr *inst)
533 {
534 uint32_t baseline_score;
535 uint32_t next_score = 0;
536
537 /* Schedule TLB operations as late as possible, to get more
538 * parallelism between shaders.
539 */
540 if (qpu_inst_is_tlb(inst))
541 return next_score;
542 next_score++;
543
544 /* Schedule texture read results collection late to hide latency. */
545 if (v3d_qpu_waits_on_tmu(inst))
546 return next_score;
547 next_score++;
548
549 /* XXX perf: We should schedule SFU ALU ops so that the reader is 2
550 * instructions after the producer if possible, not just 1.
551 */
552
553 /* Default score for things that aren't otherwise special. */
554 baseline_score = next_score;
555 next_score++;
556
557 /* Schedule texture read setup early to hide its latency better. */
558 if (v3d_qpu_writes_tmu(inst))
559 return next_score;
560 next_score++;
561
562 return baseline_score;
563 }
564
565 static bool
566 qpu_magic_waddr_is_periph(enum v3d_qpu_waddr waddr)
567 {
568 return (v3d_qpu_magic_waddr_is_tmu(waddr) ||
569 v3d_qpu_magic_waddr_is_sfu(waddr) ||
570 v3d_qpu_magic_waddr_is_tlb(waddr) ||
571 v3d_qpu_magic_waddr_is_vpm(waddr) ||
572 v3d_qpu_magic_waddr_is_tsy(waddr));
573 }
574
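/* Returns true if the instruction touches any peripheral (VPM, SFU, TMU,
 * TLB or TSY), either through a magic write address or through one of the
 * load/store signals.
 */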
575 static bool
576 qpu_accesses_peripheral(const struct v3d_qpu_instr *inst)
577 {
578 if (v3d_qpu_uses_vpm(inst))
579 return true;
580 if (v3d_qpu_uses_sfu(inst))
581 return true;
582
583 if (inst->type == V3D_QPU_INSTR_TYPE_ALU) {
584 if (inst->alu.add.op != V3D_QPU_A_NOP &&
585 inst->alu.add.magic_write &&
586 qpu_magic_waddr_is_periph(inst->alu.add.waddr)) {
587 return true;
588 }
589
590 if (inst->alu.add.op == V3D_QPU_A_TMUWT)
591 return true;
592
593 if (inst->alu.mul.op != V3D_QPU_M_NOP &&
594 inst->alu.mul.magic_write &&
595 qpu_magic_waddr_is_periph(inst->alu.mul.waddr)) {
596 return true;
597 }
598 }
599
600 return (inst->sig.ldvpm ||
601 inst->sig.ldtmu ||
602 inst->sig.ldtlb ||
603 inst->sig.ldtlbu ||
604 inst->sig.wrtmuc);
605 }
606
607 static bool
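/* Decides whether two halves of a candidate merged instruction may both
 * access peripherals. In general only one peripheral access per instruction
 * is allowed; V3D 4.1+ adds the exceptions handled below (for instance, an
 * instruction carrying an ldtmu signal can also do a VPM access such as
 * stvpmv, and a wrtmuc signal can pair with a write to a TMU magic register
 * other than TMUC, e.g. TMUD).
 */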
608 qpu_compatible_peripheral_access(const struct v3d_device_info *devinfo,
609 const struct v3d_qpu_instr *a,
610 const struct v3d_qpu_instr *b)
611 {
612 const bool a_uses_peripheral = qpu_accesses_peripheral(a);
613 const bool b_uses_peripheral = qpu_accesses_peripheral(b);
614
615 /* We can always do one peripheral access per instruction. */
616 if (!a_uses_peripheral || !b_uses_peripheral)
617 return true;
618
619 if (devinfo->ver < 41)
620 return false;
621
622 /* V3D 4.1 and later allow TMU read along with a VPM read or write, and
623 * WRTMUC with a TMU magic register write (other than tmuc).
624 */
625 if ((a->sig.ldtmu && v3d_qpu_uses_vpm(b)) ||
626 (b->sig.ldtmu && v3d_qpu_uses_vpm(a))) {
627 return true;
628 }
629
630 if ((a->sig.wrtmuc && v3d_qpu_writes_tmu_not_tmuc(b)) ||
631 (b->sig.wrtmuc && v3d_qpu_writes_tmu_not_tmuc(a))) {
632 return true;
633 }
634
635 return false;
636 }
637
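/* Tries to combine ALU instructions "a" and "b" into a single QPU
 * instruction in *result: each of the add and mul ALUs may be used by at
 * most one of them, their raddr/small-immediate usage must agree, only one
 * may write a signal address, and the merged result must still pack into a
 * valid encoding.
 */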
638 static bool
639 qpu_merge_inst(const struct v3d_device_info *devinfo,
640 struct v3d_qpu_instr *result,
641 const struct v3d_qpu_instr *a,
642 const struct v3d_qpu_instr *b)
643 {
644 if (a->type != V3D_QPU_INSTR_TYPE_ALU ||
645 b->type != V3D_QPU_INSTR_TYPE_ALU) {
646 return false;
647 }
648
649 if (!qpu_compatible_peripheral_access(devinfo, a, b))
650 return false;
651
652 struct v3d_qpu_instr merge = *a;
653
654 if (b->alu.add.op != V3D_QPU_A_NOP) {
655 if (a->alu.add.op != V3D_QPU_A_NOP)
656 return false;
657 merge.alu.add = b->alu.add;
658
659 merge.flags.ac = b->flags.ac;
660 merge.flags.apf = b->flags.apf;
661 merge.flags.auf = b->flags.auf;
662 }
663
664 if (b->alu.mul.op != V3D_QPU_M_NOP) {
665 if (a->alu.mul.op != V3D_QPU_M_NOP)
666 return false;
667 merge.alu.mul = b->alu.mul;
668
669 merge.flags.mc = b->flags.mc;
670 merge.flags.mpf = b->flags.mpf;
671 merge.flags.muf = b->flags.muf;
672 }
673
674 if (v3d_qpu_uses_mux(b, V3D_QPU_MUX_A)) {
675 if (v3d_qpu_uses_mux(a, V3D_QPU_MUX_A) &&
676 a->raddr_a != b->raddr_a) {
677 return false;
678 }
679 merge.raddr_a = b->raddr_a;
680 }
681
682 if (v3d_qpu_uses_mux(b, V3D_QPU_MUX_B)) {
683 if (v3d_qpu_uses_mux(a, V3D_QPU_MUX_B) &&
684 (a->raddr_b != b->raddr_b ||
685 a->sig.small_imm != b->sig.small_imm)) {
686 return false;
687 }
688 merge.raddr_b = b->raddr_b;
689 }
690
691 merge.sig.thrsw |= b->sig.thrsw;
692 merge.sig.ldunif |= b->sig.ldunif;
693 merge.sig.ldunifrf |= b->sig.ldunifrf;
694 merge.sig.ldunifa |= b->sig.ldunifa;
695 merge.sig.ldunifarf |= b->sig.ldunifarf;
696 merge.sig.ldtmu |= b->sig.ldtmu;
697 merge.sig.ldvary |= b->sig.ldvary;
698 merge.sig.ldvpm |= b->sig.ldvpm;
699 merge.sig.small_imm |= b->sig.small_imm;
700 merge.sig.ldtlb |= b->sig.ldtlb;
701 merge.sig.ldtlbu |= b->sig.ldtlbu;
702 merge.sig.ucb |= b->sig.ucb;
703 merge.sig.rotate |= b->sig.rotate;
704 merge.sig.wrtmuc |= b->sig.wrtmuc;
705
706 if (v3d_qpu_sig_writes_address(devinfo, &a->sig) &&
707 v3d_qpu_sig_writes_address(devinfo, &b->sig))
708 return false;
709 merge.sig_addr |= b->sig_addr;
710 merge.sig_magic |= b->sig_magic;
711
712 uint64_t packed;
713 bool ok = v3d_qpu_instr_pack(devinfo, &merge, &packed);
714
715 *result = merge;
716 /* No modifying the real instructions on failure. */
717 assert(ok || (a != result && b != result));
718
719 return ok;
720 }
721
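/* Picks the next DAG head to emit or, when prev_inst is non-NULL, to merge
 * with prev_inst. Candidates that would break the hazard rules below are
 * skipped; among the rest, the highest get_instruction_priority() score
 * wins, with ties broken by the longest remaining dependency chain (delay).
 */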
722 static struct schedule_node *
723 choose_instruction_to_schedule(const struct v3d_device_info *devinfo,
724 struct choose_scoreboard *scoreboard,
725 struct schedule_node *prev_inst)
726 {
727 struct schedule_node *chosen = NULL;
728 int chosen_prio = 0;
729
730 /* Don't pair up anything with a thread switch signal -- emit_thrsw()
731 * will handle pairing it along with filling the delay slots.
732 */
733 if (prev_inst) {
734 if (prev_inst->inst->qpu.sig.thrsw)
735 return NULL;
736 }
737
738 list_for_each_entry(struct schedule_node, n, &scoreboard->dag->heads,
739 dag.link) {
740 const struct v3d_qpu_instr *inst = &n->inst->qpu;
741
742 /* Don't choose the branch instruction until it's the last one
743 * left. We'll move it up to fit its delay slots after we
744 * choose it.
745 */
746 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH &&
747 !list_is_singular(&scoreboard->dag->heads)) {
748 continue;
749 }
750
751 /* "An instruction must not read from a location in physical
752 * regfile A or B that was written to by the previous
753 * instruction."
754 */
755 if (reads_too_soon_after_write(scoreboard, n->inst))
756 continue;
757
758 if (writes_too_soon_after_write(devinfo, scoreboard, n->inst))
759 continue;
760
761 /* "A scoreboard wait must not occur in the first two
762 * instructions of a fragment shader. This is either the
763 * explicit Wait for Scoreboard signal or an implicit wait
764 * with the first tile-buffer read or write instruction."
765 */
766 if (pixel_scoreboard_too_soon(scoreboard, inst))
767 continue;
768
769 /* ldunif and ldvary both write r5, but ldunif does so a tick
770 * sooner. If the ldvary's r5 wasn't used, then ldunif might
771 * otherwise get scheduled so ldunif and ldvary try to update
772 * r5 in the same tick.
773 *
774 * XXX perf: To get good pipelining of a sequence of varying
775 * loads, we need to figure out how to pair the ldvary signal
776 * up to the instruction before the last r5 user in the
777 * previous ldvary sequence. Currently, it usually pairs with
778 * the last r5 user.
779 */
780 if ((inst->sig.ldunif || inst->sig.ldunifa) &&
781 scoreboard->tick == scoreboard->last_ldvary_tick + 1) {
782 continue;
783 }
784
785 /* If we're trying to pair with another instruction, check
786 * that they're compatible.
787 */
788 if (prev_inst) {
789 /* Don't pair up a thread switch signal -- we'll
790 * handle pairing it when we pick it on its own.
791 */
792 if (inst->sig.thrsw)
793 continue;
794
795 if (prev_inst->inst->uniform != -1 &&
796 n->inst->uniform != -1)
797 continue;
798
799 /* Don't merge in something that will lock the TLB.
800 * Hopefully what we have in inst will release some
801 * other instructions, allowing us to delay the
802 * TLB-locking instruction until later.
803 */
804 if (!scoreboard->tlb_locked && qpu_inst_is_tlb(inst))
805 continue;
806
807 struct v3d_qpu_instr merged_inst;
808 if (!qpu_merge_inst(devinfo, &merged_inst,
809 &prev_inst->inst->qpu, inst)) {
810 continue;
811 }
812 }
813
814 int prio = get_instruction_priority(inst);
815
816 /* Found a valid instruction. If nothing better comes along,
817 * this one works.
818 */
819 if (!chosen) {
820 chosen = n;
821 chosen_prio = prio;
822 continue;
823 }
824
825 if (prio > chosen_prio) {
826 chosen = n;
827 chosen_prio = prio;
828 } else if (prio < chosen_prio) {
829 continue;
830 }
831
832 if (n->delay > chosen->delay) {
833 chosen = n;
834 chosen_prio = prio;
835 } else if (n->delay < chosen->delay) {
836 continue;
837 }
838 }
839
840 return chosen;
841 }
842
843 static void
844 update_scoreboard_for_magic_waddr(struct choose_scoreboard *scoreboard,
845 enum v3d_qpu_waddr waddr)
846 {
847 if (v3d_qpu_magic_waddr_is_sfu(waddr))
848 scoreboard->last_magic_sfu_write_tick = scoreboard->tick;
849 }
850
851 static void
852 update_scoreboard_for_chosen(struct choose_scoreboard *scoreboard,
853 const struct v3d_qpu_instr *inst)
854 {
855 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH)
856 return;
857
858 assert(inst->type == V3D_QPU_INSTR_TYPE_ALU);
859
860 if (inst->alu.add.op != V3D_QPU_A_NOP) {
861 if (inst->alu.add.magic_write) {
862 update_scoreboard_for_magic_waddr(scoreboard,
863 inst->alu.add.waddr);
864 }
865 }
866
867 if (inst->alu.mul.op != V3D_QPU_M_NOP) {
868 if (inst->alu.mul.magic_write) {
869 update_scoreboard_for_magic_waddr(scoreboard,
870 inst->alu.mul.waddr);
871 }
872 }
873
874 if (inst->sig.ldvary)
875 scoreboard->last_ldvary_tick = scoreboard->tick;
876
877 if (qpu_inst_is_tlb(inst))
878 scoreboard->tlb_locked = true;
879 }
880
881 static void
882 dump_state(const struct v3d_device_info *devinfo, struct dag *dag)
883 {
884 list_for_each_entry(struct schedule_node, n, &dag->heads, dag.link) {
885 fprintf(stderr, " t=%4d: ", n->unblocked_time);
886 v3d_qpu_dump(devinfo, &n->inst->qpu);
887 fprintf(stderr, "\n");
888
889 util_dynarray_foreach(&n->dag.edges, struct dag_edge, edge) {
890 struct schedule_node *child =
891 (struct schedule_node *)edge->child;
892 if (!child)
893 continue;
894
895 fprintf(stderr, " - ");
896 v3d_qpu_dump(devinfo, &child->inst->qpu);
897 fprintf(stderr, " (%d parents, %c)\n",
898 child->dag.parent_count,
899 edge->data ? 'w' : 'r');
900 }
901 }
902 }
903
904 static uint32_t magic_waddr_latency(enum v3d_qpu_waddr waddr,
905 const struct v3d_qpu_instr *after)
906 {
907 /* Apply some huge latency between texture fetch requests and getting
908 * their results back.
909 *
910 * FIXME: This is actually pretty bogus. If we do:
911 *
912 * mov tmu0_s, a
913 * <a bit of math>
914 * mov tmu0_s, b
915 * load_tmu0
916 * <more math>
917 * load_tmu0
918 *
919 * we count that as worse than
920 *
921 * mov tmu0_s, a
922 * mov tmu0_s, b
923 * <lots of math>
924 * load_tmu0
925 * <more math>
926 * load_tmu0
927 *
928 * because we associate the first load_tmu0 with the *second* tmu0_s.
929 */
930 if (v3d_qpu_magic_waddr_is_tmu(waddr) && v3d_qpu_waits_on_tmu(after))
931 return 100;
932
933 /* Assume that anything depending on us is consuming the SFU result. */
934 if (v3d_qpu_magic_waddr_is_sfu(waddr))
935 return 3;
936
937 return 1;
938 }
939
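/* Estimated cycles from "before" issuing until "after" can consume its
 * result; used as the edge weight for compute_delay() and for the
 * unblocked_time bookkeeping in mark_instruction_scheduled().
 */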
940 static uint32_t
941 instruction_latency(struct schedule_node *before, struct schedule_node *after)
942 {
943 const struct v3d_qpu_instr *before_inst = &before->inst->qpu;
944 const struct v3d_qpu_instr *after_inst = &after->inst->qpu;
945 uint32_t latency = 1;
946
947 if (before_inst->type != V3D_QPU_INSTR_TYPE_ALU ||
948 after_inst->type != V3D_QPU_INSTR_TYPE_ALU)
949 return latency;
950
951 if (before_inst->alu.add.magic_write) {
952 latency = MAX2(latency,
953 magic_waddr_latency(before_inst->alu.add.waddr,
954 after_inst));
955 }
956
957 if (before_inst->alu.mul.magic_write) {
958 latency = MAX2(latency,
959 magic_waddr_latency(before_inst->alu.mul.waddr,
960 after_inst));
961 }
962
963 return latency;
964 }
965
966 /** Computes the delay member of a node, walking the DAG bottom-up. */
967 static void
968 compute_delay(struct dag_node *node, void *state)
969 {
970 struct schedule_node *n = (struct schedule_node *)node;
971
972 n->delay = 1;
973
974 util_dynarray_foreach(&n->dag.edges, struct dag_edge, edge) {
975 struct schedule_node *child =
976 (struct schedule_node *)edge->child;
977
978 n->delay = MAX2(n->delay, (child->delay +
979 instruction_latency(n, child)));
980 }
981 }
982
983 /* Removes a DAG head, removing only the WAR edges. (dag_prune_head()
984 * should be called on it later to finish pruning the other edges).
985 */
986 static void
987 pre_remove_head(struct dag *dag, struct schedule_node *n)
988 {
989 list_delinit(&n->dag.link);
990
991 util_dynarray_foreach(&n->dag.edges, struct dag_edge, edge) {
992 if (edge->data)
993 dag_remove_edge(dag, edge);
994 }
995 }
996
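/* Retires a scheduled node: bumps each child's unblocked_time by this node's
 * latency and prunes the node from the DAG so newly unblocked children
 * appear as heads.
 */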
997 static void
998 mark_instruction_scheduled(struct dag *dag,
999 uint32_t time,
1000 struct schedule_node *node)
1001 {
1002 if (!node)
1003 return;
1004
1005 util_dynarray_foreach(&node->dag.edges, struct dag_edge, edge) {
1006 struct schedule_node *child =
1007 (struct schedule_node *)edge->child;
1008
1009 if (!child)
1010 continue;
1011
1012 uint32_t latency = instruction_latency(node, child);
1013
1014 child->unblocked_time = MAX2(child->unblocked_time,
1015 time + latency);
1016 }
1017 dag_prune_head(dag, &node->dag);
1018 }
1019
1020 static void
1021 insert_scheduled_instruction(struct v3d_compile *c,
1022 struct qblock *block,
1023 struct choose_scoreboard *scoreboard,
1024 struct qinst *inst)
1025 {
1026 list_addtail(&inst->link, &block->instructions);
1027
1028 update_scoreboard_for_chosen(scoreboard, &inst->qpu);
1029 c->qpu_inst_count++;
1030 scoreboard->tick++;
1031 }
1032
1033 static struct qinst *
1034 vir_nop()
1035 {
1036 struct qreg undef = vir_nop_reg();
1037 struct qinst *qinst = vir_add_inst(V3D_QPU_A_NOP, undef, undef, undef);
1038
1039 return qinst;
1040 }
1041
1042 static void
1043 emit_nop(struct v3d_compile *c, struct qblock *block,
1044 struct choose_scoreboard *scoreboard)
1045 {
1046 insert_scheduled_instruction(c, block, scoreboard, vir_nop());
1047 }
1048
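/* Restrictions on what may occupy the three delay slots after the
 * program-end THRSW; "slot" runs 0..2, with slot 2 being the final
 * instruction of the shader.
 */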
1049 static bool
1050 qpu_instruction_valid_in_thrend_slot(struct v3d_compile *c,
1051 const struct qinst *qinst, int slot)
1052 {
1053 const struct v3d_qpu_instr *inst = &qinst->qpu;
1054
1055 /* Only TLB Z writes are prohibited in the last slot, but we don't
1056 * have those flagged so prohibit all TLB ops for now.
1057 */
1058 if (slot == 2 && qpu_inst_is_tlb(inst))
1059 return false;
1060
1061 if (slot > 0 && qinst->uniform != ~0)
1062 return false;
1063
1064 if (v3d_qpu_uses_vpm(inst))
1065 return false;
1066
1067 if (inst->sig.ldvary)
1068 return false;
1069
1070 if (inst->type == V3D_QPU_INSTR_TYPE_ALU) {
1071 /* GFXH-1625: TMUWT not allowed in the final instruction. */
1072 if (slot == 2 && inst->alu.add.op == V3D_QPU_A_TMUWT)
1073 return false;
1074
1075 /* No writing physical registers at the end. */
1076 if (!inst->alu.add.magic_write ||
1077 !inst->alu.mul.magic_write) {
1078 return false;
1079 }
1080
1081 if (c->devinfo->ver < 40 && inst->alu.add.op == V3D_QPU_A_SETMSF)
1082 return false;
1083
1084 /* RF0-2 might be overwritten during the delay slots by
1085 * fragment shader setup.
1086 */
1087 if (inst->raddr_a < 3 &&
1088 (inst->alu.add.a == V3D_QPU_MUX_A ||
1089 inst->alu.add.b == V3D_QPU_MUX_A ||
1090 inst->alu.mul.a == V3D_QPU_MUX_A ||
1091 inst->alu.mul.b == V3D_QPU_MUX_A)) {
1092 return false;
1093 }
1094
1095 if (inst->raddr_b < 3 &&
1096 !inst->sig.small_imm &&
1097 (inst->alu.add.a == V3D_QPU_MUX_B ||
1098 inst->alu.add.b == V3D_QPU_MUX_B ||
1099 inst->alu.mul.a == V3D_QPU_MUX_B ||
1100 inst->alu.mul.b == V3D_QPU_MUX_B)) {
1101 return false;
1102 }
1103 }
1104
1105 return true;
1106 }
1107
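/* Checks whether a thrsw signal could be merged onto "qinst", which sits
 * "instructions_in_sequence" instructions before the current end of the
 * block, without violating the spacing from the previous thrsw, the SFU and
 * ldvary delay-slot restrictions, or (when this is the program-end thrsw)
 * the thrend slot rules.
 */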
1108 static bool
1109 valid_thrsw_sequence(struct v3d_compile *c, struct choose_scoreboard *scoreboard,
1110 struct qinst *qinst, int instructions_in_sequence,
1111 bool is_thrend)
1112 {
1113 /* No emitting our thrsw while the previous thrsw hasn't happened yet. */
1114 if (scoreboard->last_thrsw_tick + 3 >
1115 scoreboard->tick - instructions_in_sequence) {
1116 return false;
1117 }
1118
1119 for (int slot = 0; slot < instructions_in_sequence; slot++) {
1120 /* No scheduling SFU when the result would land in the other
1121 * thread. The simulator complains for safety, though it
1122 * would only occur for dead code in our case.
1123 */
1124 if (slot > 0 &&
1125 qinst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
1126 (v3d_qpu_magic_waddr_is_sfu(qinst->qpu.alu.add.waddr) ||
1127 v3d_qpu_magic_waddr_is_sfu(qinst->qpu.alu.mul.waddr))) {
1128 return false;
1129 }
1130
1131 if (slot > 0 && qinst->qpu.sig.ldvary)
1132 return false;
1133
1134 if (is_thrend &&
1135 !qpu_instruction_valid_in_thrend_slot(c, qinst, slot)) {
1136 return false;
1137 }
1138
1139 /* Note that the list is circular, so we can only do this up
1140 * to instructions_in_sequence.
1141 */
1142 qinst = (struct qinst *)qinst->link.next;
1143 }
1144
1145 return true;
1146 }
1147
1148 /**
1149 * Emits a THRSW signal in the stream, trying to move it up to pair with
1150 * another instruction.
1151 */
1152 static int
1153 emit_thrsw(struct v3d_compile *c,
1154 struct qblock *block,
1155 struct choose_scoreboard *scoreboard,
1156 struct qinst *inst,
1157 bool is_thrend)
1158 {
1159 int time = 0;
1160
1161 /* There should be nothing in a thrsw inst being scheduled other than
1162 * the signal bits.
1163 */
1164 assert(inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU);
1165 assert(inst->qpu.alu.add.op == V3D_QPU_A_NOP);
1166 assert(inst->qpu.alu.mul.op == V3D_QPU_M_NOP);
1167
1168 /* Find how far back into previous instructions we can put the THRSW. */
1169 int slots_filled = 0;
1170 struct qinst *merge_inst = NULL;
1171 vir_for_each_inst_rev(prev_inst, block) {
1172 struct v3d_qpu_sig sig = prev_inst->qpu.sig;
1173 sig.thrsw = true;
1174 uint32_t packed_sig;
1175
1176 if (!v3d_qpu_sig_pack(c->devinfo, &sig, &packed_sig))
1177 break;
1178
1179 if (!valid_thrsw_sequence(c, scoreboard,
1180 prev_inst, slots_filled + 1,
1181 is_thrend)) {
1182 break;
1183 }
1184
1185 merge_inst = prev_inst;
1186 if (++slots_filled == 3)
1187 break;
1188 }
1189
1190 bool needs_free = false;
1191 if (merge_inst) {
1192 merge_inst->qpu.sig.thrsw = true;
1193 needs_free = true;
1194 scoreboard->last_thrsw_tick = scoreboard->tick - slots_filled;
1195 } else {
1196 scoreboard->last_thrsw_tick = scoreboard->tick;
1197 insert_scheduled_instruction(c, block, scoreboard, inst);
1198 time++;
1199 slots_filled++;
1200 merge_inst = inst;
1201 }
1202
1203 /* Insert any extra delay slot NOPs we need. */
1204 for (int i = 0; i < 3 - slots_filled; i++) {
1205 emit_nop(c, block, scoreboard);
1206 time++;
1207 }
1208
1209 /* If we're emitting the last THRSW (other than program end), then
1210 * signal that to the HW by emitting two THRSWs in a row.
1211 */
1212 if (inst->is_last_thrsw) {
1213 struct qinst *second_inst =
1214 (struct qinst *)merge_inst->link.next;
1215 second_inst->qpu.sig.thrsw = true;
1216 }
1217
1218 /* If we put our THRSW into another instruction, free up the
1219 * instruction that didn't end up scheduled into the list.
1220 */
1221 if (needs_free)
1222 free(inst);
1223
1224 return time;
1225 }
1226
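/* Core scheduling loop for one block: repeatedly picks a DAG head (or emits
 * a NOP when nothing is ready), merges in compatible instructions when
 * possible, rewrites the uniform stream for the new instruction order, and
 * expands thrsw and branch delay slots. Returns the estimated cycle count.
 */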
1227 static uint32_t
1228 schedule_instructions(struct v3d_compile *c,
1229 struct choose_scoreboard *scoreboard,
1230 struct qblock *block,
1231 enum quniform_contents *orig_uniform_contents,
1232 uint32_t *orig_uniform_data,
1233 uint32_t *next_uniform)
1234 {
1235 const struct v3d_device_info *devinfo = c->devinfo;
1236 uint32_t time = 0;
1237
1238 while (!list_empty(&scoreboard->dag->heads)) {
1239 struct schedule_node *chosen =
1240 choose_instruction_to_schedule(devinfo,
1241 scoreboard,
1242 NULL);
1243 struct schedule_node *merge = NULL;
1244
1245 /* If there are no valid instructions to schedule, drop a NOP
1246 * in.
1247 */
1248 struct qinst *qinst = chosen ? chosen->inst : vir_nop();
1249 struct v3d_qpu_instr *inst = &qinst->qpu;
1250
1251 if (debug) {
1252 fprintf(stderr, "t=%4d: current list:\n",
1253 time);
1254 dump_state(devinfo, scoreboard->dag);
1255 fprintf(stderr, "t=%4d: chose: ", time);
1256 v3d_qpu_dump(devinfo, inst);
1257 fprintf(stderr, "\n");
1258 }
1259
1260 /* We can't mark_instruction_scheduled() the chosen inst until
1261 * we're done identifying instructions to merge, so put the
1262 * merged instructions on a list for a moment.
1263 */
1264 struct list_head merged_list;
1265 list_inithead(&merged_list);
1266
1267 /* Schedule this instruction onto the QPU list. Also try to
1268 * find an instruction to pair with it.
1269 */
1270 if (chosen) {
1271 time = MAX2(chosen->unblocked_time, time);
1272 pre_remove_head(scoreboard->dag, chosen);
1273
1274 while ((merge =
1275 choose_instruction_to_schedule(devinfo,
1276 scoreboard,
1277 chosen))) {
1278 time = MAX2(merge->unblocked_time, time);
1279 pre_remove_head(scoreboard->dag, merge);
1280 list_addtail(&merge->link, &merged_list);
1281 (void)qpu_merge_inst(devinfo, inst,
1282 inst, &merge->inst->qpu);
1283 if (merge->inst->uniform != -1) {
1284 chosen->inst->uniform =
1285 merge->inst->uniform;
1286 }
1287
1288 if (debug) {
1289 fprintf(stderr, "t=%4d: merging: ",
1290 time);
1291 v3d_qpu_dump(devinfo, &merge->inst->qpu);
1292 fprintf(stderr, "\n");
1293 fprintf(stderr, " result: ");
1294 v3d_qpu_dump(devinfo, inst);
1295 fprintf(stderr, "\n");
1296 }
1297 }
1298 }
1299
1300 /* Update the uniform index for the rewritten location --
1301 * branch target updating will still need to change
1302 * c->uniform_data[] using this index.
1303 */
1304 if (qinst->uniform != -1) {
1305 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH)
1306 block->branch_uniform = *next_uniform;
1307
1308 c->uniform_data[*next_uniform] =
1309 orig_uniform_data[qinst->uniform];
1310 c->uniform_contents[*next_uniform] =
1311 orig_uniform_contents[qinst->uniform];
1312 qinst->uniform = *next_uniform;
1313 (*next_uniform)++;
1314 }
1315
1316 if (debug) {
1317 fprintf(stderr, "\n");
1318 }
1319
1320 /* Now that we've scheduled a new instruction, some of its
1321 * children can be promoted to the list of instructions ready to
1322 * be scheduled. Update the children's unblocked time for this
1323 * DAG edge as we do so.
1324 */
1325 mark_instruction_scheduled(scoreboard->dag, time, chosen);
1326 list_for_each_entry(struct schedule_node, merge, &merged_list,
1327 link) {
1328 mark_instruction_scheduled(scoreboard->dag, time, merge);
1329
1330 /* The merged VIR instruction doesn't get re-added to the
1331 * block, so free it now.
1332 */
1333 free(merge->inst);
1334 }
1335
1336 if (inst->sig.thrsw) {
1337 time += emit_thrsw(c, block, scoreboard, qinst, false);
1338 } else {
1339 insert_scheduled_instruction(c, block,
1340 scoreboard, qinst);
1341
1342 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH) {
1343 block->branch_qpu_ip = c->qpu_inst_count - 1;
1344 /* Fill the delay slots.
1345 *
1346 * We should fill these with actual instructions
1347 * instead, but that will probably need to be done
1348 * after this, once we know what the leading
1349 * instructions of the successors are (so we can
1350 * handle A/B register file write latency).
1351 */
1352 for (int i = 0; i < 3; i++)
1353 emit_nop(c, block, scoreboard);
1354 }
1355 }
1356 }
1357
1358 return time;
1359 }
1360
1361 static uint32_t
1362 qpu_schedule_instructions_block(struct v3d_compile *c,
1363 struct choose_scoreboard *scoreboard,
1364 struct qblock *block,
1365 enum quniform_contents *orig_uniform_contents,
1366 uint32_t *orig_uniform_data,
1367 uint32_t *next_uniform)
1368 {
1369 void *mem_ctx = ralloc_context(NULL);
1370 scoreboard->dag = dag_create(mem_ctx);
1371 struct list_head setup_list;
1372
1373 list_inithead(&setup_list);
1374
1375 /* Wrap each instruction in a scheduler structure. */
1376 while (!list_empty(&block->instructions)) {
1377 struct qinst *qinst = (struct qinst *)block->instructions.next;
1378 struct schedule_node *n =
1379 rzalloc(mem_ctx, struct schedule_node);
1380
1381 dag_init_node(scoreboard->dag, &n->dag);
1382 n->inst = qinst;
1383
1384 list_del(&qinst->link);
1385 list_addtail(&n->link, &setup_list);
1386 }
1387
1388 calculate_forward_deps(c, scoreboard->dag, &setup_list);
1389 calculate_reverse_deps(c, scoreboard->dag, &setup_list);
1390
1391 dag_traverse_bottom_up(scoreboard->dag, compute_delay, NULL);
1392
1393 uint32_t cycles = schedule_instructions(c, scoreboard, block,
1394 orig_uniform_contents,
1395 orig_uniform_data,
1396 next_uniform);
1397
1398 ralloc_free(mem_ctx);
1399 scoreboard->dag = NULL;
1400
1401 return cycles;
1402 }
1403
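/* With the final QPU instruction positions and uniform indices known,
 * patches each branch instruction's offset and the relative uniform-stream
 * offset of its target block.
 */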
1404 static void
1405 qpu_set_branch_targets(struct v3d_compile *c)
1406 {
1407 vir_for_each_block(block, c) {
1408 /* The end block of the program has no branch. */
1409 if (!block->successors[0])
1410 continue;
1411
1412 /* If there was no branch instruction, then the successor
1413 * block must follow immediately after this one.
1414 */
1415 if (block->branch_qpu_ip == ~0) {
1416 assert(block->end_qpu_ip + 1 ==
1417 block->successors[0]->start_qpu_ip);
1418 continue;
1419 }
1420
1421 /* Walk back through the delay slots to find the branch
1422 * instr.
1423 */
1424 struct list_head *entry = block->instructions.prev;
1425 for (int i = 0; i < 3; i++)
1426 entry = entry->prev;
1427 struct qinst *branch = container_of(entry, branch, link);
1428 assert(branch->qpu.type == V3D_QPU_INSTR_TYPE_BRANCH);
1429
1430 /* Make sure that the if-we-don't-jump
1431 * successor was scheduled just after the
1432 * delay slots.
1433 */
1434 assert(!block->successors[1] ||
1435 block->successors[1]->start_qpu_ip ==
1436 block->branch_qpu_ip + 4);
1437
1438 branch->qpu.branch.offset =
1439 ((block->successors[0]->start_qpu_ip -
1440 (block->branch_qpu_ip + 4)) *
1441 sizeof(uint64_t));
1442
1443 /* Set up the relative offset to jump in the
1444 * uniform stream.
1445 *
1446 * Use a temporary here, because
1447 * uniform_data[inst->uniform] may be shared
1448 * between multiple instructions.
1449 */
1450 assert(c->uniform_contents[branch->uniform] == QUNIFORM_CONSTANT);
1451 c->uniform_data[branch->uniform] =
1452 (block->successors[0]->start_uniform -
1453 (block->branch_uniform + 1)) * 4;
1454 }
1455 }
1456
1457 uint32_t
1458 v3d_qpu_schedule_instructions(struct v3d_compile *c)
1459 {
1460 const struct v3d_device_info *devinfo = c->devinfo;
1461 struct qblock *end_block = list_last_entry(&c->blocks,
1462 struct qblock, link);
1463
1464 /* We reorder the uniforms as we schedule instructions, so save the
1465 * old data off and replace it.
1466 */
1467 uint32_t *uniform_data = c->uniform_data;
1468 enum quniform_contents *uniform_contents = c->uniform_contents;
1469 c->uniform_contents = ralloc_array(c, enum quniform_contents,
1470 c->num_uniforms);
1471 c->uniform_data = ralloc_array(c, uint32_t, c->num_uniforms);
1472 c->uniform_array_size = c->num_uniforms;
1473 uint32_t next_uniform = 0;
1474
1475 struct choose_scoreboard scoreboard;
1476 memset(&scoreboard, 0, sizeof(scoreboard));
1477 scoreboard.last_ldvary_tick = -10;
1478 scoreboard.last_magic_sfu_write_tick = -10;
1479 scoreboard.last_uniforms_reset_tick = -10;
1480 scoreboard.last_thrsw_tick = -10;
1481
1482 if (debug) {
1483 fprintf(stderr, "Pre-schedule instructions\n");
1484 vir_for_each_block(block, c) {
1485 fprintf(stderr, "BLOCK %d\n", block->index);
1486 list_for_each_entry(struct qinst, qinst,
1487 &block->instructions, link) {
1488 v3d_qpu_dump(devinfo, &qinst->qpu);
1489 fprintf(stderr, "\n");
1490 }
1491 }
1492 fprintf(stderr, "\n");
1493 }
1494
1495 uint32_t cycles = 0;
1496 vir_for_each_block(block, c) {
1497 block->start_qpu_ip = c->qpu_inst_count;
1498 block->branch_qpu_ip = ~0;
1499 block->start_uniform = next_uniform;
1500
1501 cycles += qpu_schedule_instructions_block(c,
1502 &scoreboard,
1503 block,
1504 uniform_contents,
1505 uniform_data,
1506 &next_uniform);
1507
1508 block->end_qpu_ip = c->qpu_inst_count - 1;
1509 }
1510
1511 /* Emit the program-end THRSW instruction. */
1512 struct qinst *thrsw = vir_nop();
1513 thrsw->qpu.sig.thrsw = true;
1514 emit_thrsw(c, end_block, &scoreboard, thrsw, true);
1515
1516 qpu_set_branch_targets(c);
1517
1518 assert(next_uniform == c->num_uniforms);
1519
1520 return cycles;
1521 }