v3d: Move "does this instruction have flags" from sched to generic helpers.
[mesa.git] src/broadcom/compiler/qpu_schedule.c
1 /*
2 * Copyright © 2010 Intel Corporation
3 * Copyright © 2014-2017 Broadcom
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 */
24
25 /**
26 * @file
27 *
28 * The basic model of the list scheduler is to take a basic block, compute a
29 * DAG of the dependencies, and make a list of the DAG heads. Heuristically
30 * pick a DAG head, then put all the children that are now DAG heads into the
31 * list of things to schedule.
32 *
33 * The goal of scheduling here is to pack pairs of operations together in a
34 * single QPU instruction.
35 */
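
/* For orientation, a rough sketch of the flow that schedule_instructions()
 * below implements, with merging, uniform rewriting and thread switches
 * left out:
 *
 *     while (!list_empty(schedule_list)) {
 *             n = choose_instruction_to_schedule(...);  (heuristic DAG-head pick)
 *             emit n, or a NOP if nothing is ready yet;
 *             mark_instruction_scheduled(...);          (children whose last
 *                                                        parent was just emitted
 *                                                        become new DAG heads)
 *     }
 */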
36
37 #include "qpu/qpu_disasm.h"
38 #include "v3d_compiler.h"
39 #include "util/ralloc.h"
40
41 static bool debug;
42
43 struct schedule_node_child;
44
45 struct schedule_node {
46 struct list_head link;
47 struct qinst *inst;
48 struct schedule_node_child *children;
49 uint32_t child_count;
50 uint32_t child_array_size;
51 uint32_t parent_count;
52
53 /* Longest cycles + instruction_latency() of any parent of this node. */
54 uint32_t unblocked_time;
55
56 /**
57 * Minimum number of cycles from scheduling this instruction until the
58 * end of the program, based on the slowest dependency chain through
59 * the children.
60 */
61 uint32_t delay;
62
63 /**
64 * Cycles between this instruction being scheduled and when its result
65 * can be consumed.
66 */
67 uint32_t latency;
68 };
69
70 struct schedule_node_child {
71 struct schedule_node *node;
72 bool write_after_read;
73 };
74
75 /* When walking the instructions in reverse, we need to swap before/after in
76 * add_dep().
77 */
78 enum direction { F, R };
79
80 struct schedule_state {
81 const struct v3d_device_info *devinfo;
82 struct schedule_node *last_r[6];
83 struct schedule_node *last_rf[64];
84 struct schedule_node *last_sf;
85 struct schedule_node *last_vpm_read;
86 struct schedule_node *last_tmu_write;
87 struct schedule_node *last_tmu_config;
88 struct schedule_node *last_tlb;
89 struct schedule_node *last_vpm;
90 struct schedule_node *last_unif;
91 struct schedule_node *last_rtop;
92 enum direction dir;
93 /* Estimated cycle when the current instruction would start. */
94 uint32_t time;
95 };
96
97 static void
98 add_dep(struct schedule_state *state,
99 struct schedule_node *before,
100 struct schedule_node *after,
101 bool write)
102 {
103 bool write_after_read = !write && state->dir == R;
104
105 if (!before || !after)
106 return;
107
108 assert(before != after);
109
110 if (state->dir == R) {
111 struct schedule_node *t = before;
112 before = after;
113 after = t;
114 }
115
116 for (int i = 0; i < before->child_count; i++) {
117 if (before->children[i].node == after &&
118 (before->children[i].write_after_read == write_after_read)) {
119 return;
120 }
121 }
122
123 if (before->child_array_size <= before->child_count) {
124 before->child_array_size = MAX2(before->child_array_size * 2, 16);
125 before->children = reralloc(before, before->children,
126 struct schedule_node_child,
127 before->child_array_size);
128 }
129
130 before->children[before->child_count].node = after;
131 before->children[before->child_count].write_after_read =
132 write_after_read;
133 before->child_count++;
134 after->parent_count++;
135 }
136
137 static void
138 add_read_dep(struct schedule_state *state,
139 struct schedule_node *before,
140 struct schedule_node *after)
141 {
142 add_dep(state, before, after, false);
143 }
144
145 static void
146 add_write_dep(struct schedule_state *state,
147 struct schedule_node **before,
148 struct schedule_node *after)
149 {
150 add_dep(state, *before, after, true);
151 *before = after;
152 }
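
/* Worked example (schematic, not real v3d syntax) of how these helpers build
 * the DAG: for
 *
 *     add  rf3, rf1, rf2      ; writes rf3
 *     fmul rf4, rf3, rf3      ; reads rf3
 *     add  rf3, rf5, rf6      ; writes rf3 again
 *
 * the forward pass records the write->read and write->write edges through
 * state->last_rf[3], while the reverse pass records the read->write edge,
 * which add_dep() flags as write_after_read so the second write only has to
 * stay after the read rather than a full latency away.
 */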
153
154 static bool
155 qpu_inst_is_tlb(const struct v3d_qpu_instr *inst)
156 {
157 if (inst->type != V3D_QPU_INSTR_TYPE_ALU)
158 return false;
159
160 if (inst->alu.add.magic_write &&
161 (inst->alu.add.waddr == V3D_QPU_WADDR_TLB ||
162 inst->alu.add.waddr == V3D_QPU_WADDR_TLBU))
163 return true;
164
165 if (inst->alu.mul.magic_write &&
166 (inst->alu.mul.waddr == V3D_QPU_WADDR_TLB ||
167 inst->alu.mul.waddr == V3D_QPU_WADDR_TLBU))
168 return true;
169
170 return false;
171 }
172
173 static void
174 process_mux_deps(struct schedule_state *state, struct schedule_node *n,
175 enum v3d_qpu_mux mux)
176 {
177 switch (mux) {
178 case V3D_QPU_MUX_A:
179 add_read_dep(state, state->last_rf[n->inst->qpu.raddr_a], n);
180 break;
181 case V3D_QPU_MUX_B:
182 add_read_dep(state, state->last_rf[n->inst->qpu.raddr_b], n);
183 break;
184 default:
185 add_read_dep(state, state->last_r[mux - V3D_QPU_MUX_R0], n);
186 break;
187 }
188 }
189
190
191 static void
192 process_waddr_deps(struct schedule_state *state, struct schedule_node *n,
193 uint32_t waddr, bool magic)
194 {
195 if (!magic) {
196 add_write_dep(state, &state->last_rf[waddr], n);
197 } else if (v3d_qpu_magic_waddr_is_tmu(waddr)) {
198 /* XXX perf: For V3D 4.x, we could reorder TMU writes other
199 * than the TMUS/TMUD/TMUA to improve scheduling flexibility.
200 */
201 add_write_dep(state, &state->last_tmu_write, n);
202 switch (waddr) {
203 case V3D_QPU_WADDR_TMUS:
204 case V3D_QPU_WADDR_TMUSCM:
205 case V3D_QPU_WADDR_TMUSF:
206 case V3D_QPU_WADDR_TMUSLOD:
207 add_write_dep(state, &state->last_tmu_config, n);
208 break;
209 default:
210 break;
211 }
212 } else if (v3d_qpu_magic_waddr_is_sfu(waddr)) {
213 /* Handled by v3d_qpu_writes_r4() check. */
214 } else {
215 switch (waddr) {
216 case V3D_QPU_WADDR_R0:
217 case V3D_QPU_WADDR_R1:
218 case V3D_QPU_WADDR_R2:
219 add_write_dep(state,
220 &state->last_r[waddr - V3D_QPU_WADDR_R0],
221 n);
222 break;
223 case V3D_QPU_WADDR_R3:
224 case V3D_QPU_WADDR_R4:
225 case V3D_QPU_WADDR_R5:
226 /* Handled by v3d_qpu_writes_r*() checks below. */
227 break;
228
229 case V3D_QPU_WADDR_VPM:
230 case V3D_QPU_WADDR_VPMU:
231 add_write_dep(state, &state->last_vpm, n);
232 break;
233
234 case V3D_QPU_WADDR_TLB:
235 case V3D_QPU_WADDR_TLBU:
236 add_write_dep(state, &state->last_tlb, n);
237 break;
238
239 case V3D_QPU_WADDR_NOP:
240 break;
241
242 default:
243 fprintf(stderr, "Unknown waddr %d\n", waddr);
244 abort();
245 }
246 }
247 }
248
249 /**
250 * Common code for dependencies that need to be tracked both forward and
251 * backward.
252 *
253 * This is for things like "all reads of r4 have to happen between the r4
254 * writes that surround them".
255 */
256 static void
257 calculate_deps(struct schedule_state *state, struct schedule_node *n)
258 {
259 const struct v3d_device_info *devinfo = state->devinfo;
260 struct qinst *qinst = n->inst;
261 struct v3d_qpu_instr *inst = &qinst->qpu;
262 /* If the input and output segments are shared, then all VPM reads to
263 * a location need to happen before all writes. We handle this by
264 * serializing all VPM operations for now.
265 */
266 bool separate_vpm_segment = false;
267
268 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH) {
269 if (inst->branch.cond != V3D_QPU_BRANCH_COND_ALWAYS)
270 add_read_dep(state, state->last_sf, n);
271
272 /* XXX: BDI */
273 /* XXX: BDU */
274 /* XXX: ub */
275 /* XXX: raddr_a */
276
277 add_write_dep(state, &state->last_unif, n);
278 return;
279 }
280
281 assert(inst->type == V3D_QPU_INSTR_TYPE_ALU);
282
283 /* XXX: LOAD_IMM */
284
285 if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 0)
286 process_mux_deps(state, n, inst->alu.add.a);
287 if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 1)
288 process_mux_deps(state, n, inst->alu.add.b);
289
290 if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 0)
291 process_mux_deps(state, n, inst->alu.mul.a);
292 if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 1)
293 process_mux_deps(state, n, inst->alu.mul.b);
294
295 switch (inst->alu.add.op) {
296 case V3D_QPU_A_VPMSETUP:
297 /* Could distinguish read/write by unpacking the uniform. */
298 add_write_dep(state, &state->last_vpm, n);
299 add_write_dep(state, &state->last_vpm_read, n);
300 break;
301
302 case V3D_QPU_A_STVPMV:
303 case V3D_QPU_A_STVPMD:
304 case V3D_QPU_A_STVPMP:
305 add_write_dep(state, &state->last_vpm, n);
306 break;
307
308 case V3D_QPU_A_LDVPMV_IN:
309 case V3D_QPU_A_LDVPMD_IN:
310 case V3D_QPU_A_LDVPMG_IN:
311 case V3D_QPU_A_LDVPMP:
312 if (!separate_vpm_segment)
313 add_write_dep(state, &state->last_vpm, n);
314 break;
315
316 case V3D_QPU_A_VPMWT:
317 add_read_dep(state, state->last_vpm, n);
318 break;
319
320 case V3D_QPU_A_MSF:
321 add_read_dep(state, state->last_tlb, n);
322 break;
323
324 case V3D_QPU_A_SETMSF:
325 case V3D_QPU_A_SETREVF:
326 add_write_dep(state, &state->last_tlb, n);
327 break;
328
329 default:
330 break;
331 }
332
333 switch (inst->alu.mul.op) {
334 case V3D_QPU_M_MULTOP:
335 case V3D_QPU_M_UMUL24:
336 /* MULTOP sets rtop, and UMUL24 implicitly reads rtop and
337 * resets it to 0. We could possibly reorder umul24s relative
338 * to each other, but for now just keep all the MUL parts in
339 * order.
340 */
341 add_write_dep(state, &state->last_rtop, n);
342 break;
343 default:
344 break;
345 }
346
347 if (inst->alu.add.op != V3D_QPU_A_NOP) {
348 process_waddr_deps(state, n, inst->alu.add.waddr,
349 inst->alu.add.magic_write);
350 }
351 if (inst->alu.mul.op != V3D_QPU_M_NOP) {
352 process_waddr_deps(state, n, inst->alu.mul.waddr,
353 inst->alu.mul.magic_write);
354 }
355 if (v3d_qpu_sig_writes_address(devinfo, &inst->sig)) {
356 process_waddr_deps(state, n, inst->sig_addr,
357 inst->sig_magic);
358 }
359
360 if (v3d_qpu_writes_r3(devinfo, inst))
361 add_write_dep(state, &state->last_r[3], n);
362 if (v3d_qpu_writes_r4(devinfo, inst))
363 add_write_dep(state, &state->last_r[4], n);
364 if (v3d_qpu_writes_r5(devinfo, inst))
365 add_write_dep(state, &state->last_r[5], n);
366
367 if (inst->sig.thrsw) {
368 /* All accumulator contents and flags are undefined after the
369 * switch.
370 */
371 for (int i = 0; i < ARRAY_SIZE(state->last_r); i++)
372 add_write_dep(state, &state->last_r[i], n);
373 add_write_dep(state, &state->last_sf, n);
374 add_write_dep(state, &state->last_rtop, n);
375
376 /* Scoreboard-locking operations have to stay after the last
377 * thread switch.
378 */
379 add_write_dep(state, &state->last_tlb, n);
380
381 add_write_dep(state, &state->last_tmu_write, n);
382 add_write_dep(state, &state->last_tmu_config, n);
383 }
384
385 if (v3d_qpu_waits_on_tmu(inst)) {
386 /* TMU loads are coming from a FIFO, so ordering is important.
387 */
388 add_write_dep(state, &state->last_tmu_write, n);
389 }
390
391 if (inst->sig.wrtmuc)
392 add_write_dep(state, &state->last_tmu_config, n);
393
394 if (inst->sig.ldtlb | inst->sig.ldtlbu)
395 add_read_dep(state, state->last_tlb, n);
396
397 if (inst->sig.ldvpm) {
398 add_write_dep(state, &state->last_vpm_read, n);
399
400 /* At least for now, we're doing shared I/O segments, so queue
401 * all writes after all reads.
402 */
403 if (!separate_vpm_segment)
404 add_write_dep(state, &state->last_vpm, n);
405 }
406
407 /* inst->sig.ldunif or sideband uniform read */
408 if (qinst->uniform != ~0)
409 add_write_dep(state, &state->last_unif, n);
410
411 if (v3d_qpu_reads_flags(inst))
412 add_read_dep(state, state->last_sf, n);
413 if (v3d_qpu_writes_flags(inst))
414 add_write_dep(state, &state->last_sf, n);
415 }
416
417 static void
418 calculate_forward_deps(struct v3d_compile *c, struct list_head *schedule_list)
419 {
420 struct schedule_state state;
421
422 memset(&state, 0, sizeof(state));
423 state.devinfo = c->devinfo;
424 state.dir = F;
425
426 list_for_each_entry(struct schedule_node, node, schedule_list, link)
427 calculate_deps(&state, node);
428 }
429
430 static void
431 calculate_reverse_deps(struct v3d_compile *c, struct list_head *schedule_list)
432 {
433 struct list_head *node;
434 struct schedule_state state;
435
436 memset(&state, 0, sizeof(state));
437 state.devinfo = c->devinfo;
438 state.dir = R;
439
440 for (node = schedule_list->prev; schedule_list != node; node = node->prev) {
441 calculate_deps(&state, (struct schedule_node *)node);
442 }
443 }
444
445 struct choose_scoreboard {
446 int tick;
447 int last_magic_sfu_write_tick;
448 int last_ldvary_tick;
449 int last_uniforms_reset_tick;
450 int last_thrsw_tick;
451 bool tlb_locked;
452 };
453
454 static bool
455 mux_reads_too_soon(struct choose_scoreboard *scoreboard,
456 const struct v3d_qpu_instr *inst, enum v3d_qpu_mux mux)
457 {
458 switch (mux) {
459 case V3D_QPU_MUX_R4:
460 if (scoreboard->tick - scoreboard->last_magic_sfu_write_tick <= 2)
461 return true;
462 break;
463
464 case V3D_QPU_MUX_R5:
465 if (scoreboard->tick - scoreboard->last_ldvary_tick <= 1)
466 return true;
467 break;
468 default:
469 break;
470 }
471
472 return false;
473 }
474
475 static bool
476 reads_too_soon_after_write(struct choose_scoreboard *scoreboard,
477 struct qinst *qinst)
478 {
479 const struct v3d_qpu_instr *inst = &qinst->qpu;
480
481 /* XXX: Branching off of raddr. */
482 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH)
483 return false;
484
485 assert(inst->type == V3D_QPU_INSTR_TYPE_ALU);
486
487 if (inst->alu.add.op != V3D_QPU_A_NOP) {
488 if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 0 &&
489 mux_reads_too_soon(scoreboard, inst, inst->alu.add.a)) {
490 return true;
491 }
492 if (v3d_qpu_add_op_num_src(inst->alu.add.op) > 1 &&
493 mux_reads_too_soon(scoreboard, inst, inst->alu.add.b)) {
494 return true;
495 }
496 }
497
498 if (inst->alu.mul.op != V3D_QPU_M_NOP) {
499 if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 0 &&
500 mux_reads_too_soon(scoreboard, inst, inst->alu.mul.a)) {
501 return true;
502 }
503 if (v3d_qpu_mul_op_num_src(inst->alu.mul.op) > 1 &&
504 mux_reads_too_soon(scoreboard, inst, inst->alu.mul.b)) {
505 return true;
506 }
507 }
508
509 /* XXX: imm */
510
511 return false;
512 }
513
514 static bool
515 writes_too_soon_after_write(const struct v3d_device_info *devinfo,
516 struct choose_scoreboard *scoreboard,
517 struct qinst *qinst)
518 {
519 const struct v3d_qpu_instr *inst = &qinst->qpu;
520
521 /* Don't schedule any other r4 write too soon after an SFU write.
522 * This would normally be prevented by dependency tracking, but might
523 * occur if a dead SFU computation makes it to scheduling.
524 */
525 if (scoreboard->tick - scoreboard->last_magic_sfu_write_tick < 2 &&
526 v3d_qpu_writes_r4(devinfo, inst))
527 return true;
528
529 return false;
530 }
531
532 static bool
533 pixel_scoreboard_too_soon(struct choose_scoreboard *scoreboard,
534 const struct v3d_qpu_instr *inst)
535 {
536 return (scoreboard->tick == 0 && qpu_inst_is_tlb(inst));
537 }
538
539 static int
540 get_instruction_priority(const struct v3d_qpu_instr *inst)
541 {
542 uint32_t baseline_score;
543 uint32_t next_score = 0;
544
545 /* Schedule TLB operations as late as possible, to get more
546 * parallelism between shaders.
547 */
548 if (qpu_inst_is_tlb(inst))
549 return next_score;
550 next_score++;
551
552 /* Schedule texture read results collection late to hide latency. */
553 if (v3d_qpu_waits_on_tmu(inst))
554 return next_score;
555 next_score++;
556
557 /* XXX perf: We should schedule SFU ALU ops so that the reader is 2
558 * instructions after the producer if possible, not just 1.
559 */
560
561 /* Default score for things that aren't otherwise special. */
562 baseline_score = next_score;
563 next_score++;
564
565 /* Schedule texture read setup early to hide their latency better. */
566 if (v3d_qpu_writes_tmu(inst))
567 return next_score;
568 next_score++;
569
570 return baseline_score;
571 }
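
/* For reference, the scores the function above currently produces: TLB
 * access = 0, TMU result collection = 1, everything else = 2 (baseline),
 * TMU write setup = 3.  choose_instruction_to_schedule() prefers the
 * highest score among ready instructions, so TMU setup tends to land early
 * and TLB work late.
 */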
572
573 static bool
574 qpu_magic_waddr_is_periph(enum v3d_qpu_waddr waddr)
575 {
576 return (v3d_qpu_magic_waddr_is_tmu(waddr) ||
577 v3d_qpu_magic_waddr_is_sfu(waddr) ||
578 v3d_qpu_magic_waddr_is_tlb(waddr) ||
579 v3d_qpu_magic_waddr_is_vpm(waddr) ||
580 v3d_qpu_magic_waddr_is_tsy(waddr));
581 }
582
583 static bool
584 qpu_accesses_peripheral(const struct v3d_qpu_instr *inst)
585 {
586 if (v3d_qpu_uses_vpm(inst))
587 return true;
588 if (v3d_qpu_uses_sfu(inst))
589 return true;
590
591 if (inst->type == V3D_QPU_INSTR_TYPE_ALU) {
592 if (inst->alu.add.op != V3D_QPU_A_NOP &&
593 inst->alu.add.magic_write &&
594 qpu_magic_waddr_is_periph(inst->alu.add.waddr)) {
595 return true;
596 }
597
598 if (inst->alu.add.op == V3D_QPU_A_TMUWT)
599 return true;
600
601 if (inst->alu.mul.op != V3D_QPU_M_NOP &&
602 inst->alu.mul.magic_write &&
603 qpu_magic_waddr_is_periph(inst->alu.mul.waddr)) {
604 return true;
605 }
606 }
607
608 return (inst->sig.ldvpm ||
609 inst->sig.ldtmu ||
610 inst->sig.ldtlb ||
611 inst->sig.ldtlbu ||
612 inst->sig.wrtmuc);
613 }
614
615 static bool
616 qpu_merge_inst(const struct v3d_device_info *devinfo,
617 struct v3d_qpu_instr *result,
618 const struct v3d_qpu_instr *a,
619 const struct v3d_qpu_instr *b)
620 {
621 if (a->type != V3D_QPU_INSTR_TYPE_ALU ||
622 b->type != V3D_QPU_INSTR_TYPE_ALU) {
623 return false;
624 }
625
626 /* Can't do more than one peripheral access in an instruction.
627 *
628 * XXX: V3D 4.1 allows TMU read along with a VPM read or write, and
629 * WRTMUC with a TMU magic register write (other than tmuc).
630 */
631 if (qpu_accesses_peripheral(a) && qpu_accesses_peripheral(b))
632 return false;
633
634 struct v3d_qpu_instr merge = *a;
635
636 if (b->alu.add.op != V3D_QPU_A_NOP) {
637 if (a->alu.add.op != V3D_QPU_A_NOP)
638 return false;
639 merge.alu.add = b->alu.add;
640
641 merge.flags.ac = b->flags.ac;
642 merge.flags.apf = b->flags.apf;
643 merge.flags.auf = b->flags.auf;
644 }
645
646 if (b->alu.mul.op != V3D_QPU_M_NOP) {
647 if (a->alu.mul.op != V3D_QPU_M_NOP)
648 return false;
649 merge.alu.mul = b->alu.mul;
650
651 merge.flags.mc = b->flags.mc;
652 merge.flags.mpf = b->flags.mpf;
653 merge.flags.muf = b->flags.muf;
654 }
655
656 if (v3d_qpu_uses_mux(b, V3D_QPU_MUX_A)) {
657 if (v3d_qpu_uses_mux(a, V3D_QPU_MUX_A) &&
658 a->raddr_a != b->raddr_a) {
659 return false;
660 }
661 merge.raddr_a = b->raddr_a;
662 }
663
664 if (v3d_qpu_uses_mux(b, V3D_QPU_MUX_B)) {
665 if (v3d_qpu_uses_mux(a, V3D_QPU_MUX_B) &&
666 (a->raddr_b != b->raddr_b ||
667 a->sig.small_imm != b->sig.small_imm)) {
668 return false;
669 }
670 merge.raddr_b = b->raddr_b;
671 }
672
673 merge.sig.thrsw |= b->sig.thrsw;
674 merge.sig.ldunif |= b->sig.ldunif;
675 merge.sig.ldunifrf |= b->sig.ldunifrf;
676 merge.sig.ldunifa |= b->sig.ldunifa;
677 merge.sig.ldunifarf |= b->sig.ldunifarf;
678 merge.sig.ldtmu |= b->sig.ldtmu;
679 merge.sig.ldvary |= b->sig.ldvary;
680 merge.sig.ldvpm |= b->sig.ldvpm;
681 merge.sig.small_imm |= b->sig.small_imm;
682 merge.sig.ldtlb |= b->sig.ldtlb;
683 merge.sig.ldtlbu |= b->sig.ldtlbu;
684 merge.sig.ucb |= b->sig.ucb;
685 merge.sig.rotate |= b->sig.rotate;
686 merge.sig.wrtmuc |= b->sig.wrtmuc;
687
688 if (v3d_qpu_sig_writes_address(devinfo, &a->sig) &&
689 v3d_qpu_sig_writes_address(devinfo, &b->sig))
690 return false;
691 merge.sig_addr |= b->sig_addr;
692 merge.sig_magic |= b->sig_magic;
693
694 uint64_t packed;
695 bool ok = v3d_qpu_instr_pack(devinfo, &merge, &packed);
696
697 *result = merge;
698 /* No modifying the real instructions on failure. */
699 assert(ok || (a != result && b != result));
700
701 return ok;
702 }
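
/* Example of what a successful merge looks like: an instruction that only
 * uses the add ALU and one that only uses the mul ALU can be packed into a
 * single QPU instruction, each bringing its own condition/flags fields
 * along, provided any regfile reads agree on raddr_a/raddr_b, at most one
 * of the two carries a signal write address, and the combined result still
 * packs with v3d_qpu_instr_pack().  Two add-ALU ops (or two mul-ALU ops)
 * never merge.
 */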
703
704 static struct schedule_node *
705 choose_instruction_to_schedule(const struct v3d_device_info *devinfo,
706 struct choose_scoreboard *scoreboard,
707 struct list_head *schedule_list,
708 struct schedule_node *prev_inst)
709 {
710 struct schedule_node *chosen = NULL;
711 int chosen_prio = 0;
712
713 /* Don't pair up anything with a thread switch signal -- emit_thrsw()
714 * will handle pairing it along with filling the delay slots.
715 */
716 if (prev_inst) {
717 if (prev_inst->inst->qpu.sig.thrsw)
718 return NULL;
719 }
720
721 list_for_each_entry(struct schedule_node, n, schedule_list, link) {
722 const struct v3d_qpu_instr *inst = &n->inst->qpu;
723
724 /* Don't choose the branch instruction until it's the last one
725 * left. We'll move it up to fit its delay slots after we
726 * choose it.
727 */
728 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH &&
729 !list_is_singular(schedule_list)) {
730 continue;
731 }
732
733 /* "An instruction must not read from a location in physical
734 * regfile A or B that was written to by the previous
735 * instruction."
736 */
737 if (reads_too_soon_after_write(scoreboard, n->inst))
738 continue;
739
740 if (writes_too_soon_after_write(devinfo, scoreboard, n->inst))
741 continue;
742
743 /* "A scoreboard wait must not occur in the first two
744 * instructions of a fragment shader. This is either the
745 * explicit Wait for Scoreboard signal or an implicit wait
746 * with the first tile-buffer read or write instruction."
747 */
748 if (pixel_scoreboard_too_soon(scoreboard, inst))
749 continue;
750
751 /* ldunif and ldvary both write r5, but ldunif does so a tick
752 * sooner. If the ldvary's r5 wasn't used, then ldunif might
753 * otherwise get scheduled so ldunif and ldvary try to update
754 * r5 in the same tick.
755 *
756 * XXX perf: To get good pipelining of a sequence of varying
757 * loads, we need to figure out how to pair the ldvary signal
758 * up to the instruction before the last r5 user in the
759 * previous ldvary sequence. Currently, it usually pairs with
760 * the last r5 user.
761 */
762 if ((inst->sig.ldunif || inst->sig.ldunifa) &&
763 scoreboard->tick == scoreboard->last_ldvary_tick + 1) {
764 continue;
765 }
766
767 /* If we're trying to pair with another instruction, check
768 * that they're compatible.
769 */
770 if (prev_inst) {
771 /* Don't pair up a thread switch signal -- we'll
772 * handle pairing it when we pick it on its own.
773 */
774 if (inst->sig.thrsw)
775 continue;
776
777 if (prev_inst->inst->uniform != -1 &&
778 n->inst->uniform != -1)
779 continue;
780
781 /* Don't merge in something that will lock the TLB.
782 * Hopefully what we have in inst will release some
783 * other instructions, allowing us to delay the
784 * TLB-locking instruction until later.
785 */
786 if (!scoreboard->tlb_locked && qpu_inst_is_tlb(inst))
787 continue;
788
789 struct v3d_qpu_instr merged_inst;
790 if (!qpu_merge_inst(devinfo, &merged_inst,
791 &prev_inst->inst->qpu, inst)) {
792 continue;
793 }
794 }
795
796 int prio = get_instruction_priority(inst);
797
798 /* Found a valid instruction. If nothing better comes along,
799 * this one works.
800 */
801 if (!chosen) {
802 chosen = n;
803 chosen_prio = prio;
804 continue;
805 }
806
807 if (prio > chosen_prio) {
808 chosen = n;
809 chosen_prio = prio;
810 } else if (prio < chosen_prio) {
811 continue;
812 }
813
814 if (n->delay > chosen->delay) {
815 chosen = n;
816 chosen_prio = prio;
817 } else if (n->delay < chosen->delay) {
818 continue;
819 }
820 }
821
822 return chosen;
823 }
824
825 static void
826 update_scoreboard_for_magic_waddr(struct choose_scoreboard *scoreboard,
827 enum v3d_qpu_waddr waddr)
828 {
829 if (v3d_qpu_magic_waddr_is_sfu(waddr))
830 scoreboard->last_magic_sfu_write_tick = scoreboard->tick;
831 }
832
833 static void
834 update_scoreboard_for_chosen(struct choose_scoreboard *scoreboard,
835 const struct v3d_qpu_instr *inst)
836 {
837 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH)
838 return;
839
840 assert(inst->type == V3D_QPU_INSTR_TYPE_ALU);
841
842 if (inst->alu.add.op != V3D_QPU_A_NOP) {
843 if (inst->alu.add.magic_write) {
844 update_scoreboard_for_magic_waddr(scoreboard,
845 inst->alu.add.waddr);
846 }
847 }
848
849 if (inst->alu.mul.op != V3D_QPU_M_NOP) {
850 if (inst->alu.mul.magic_write) {
851 update_scoreboard_for_magic_waddr(scoreboard,
852 inst->alu.mul.waddr);
853 }
854 }
855
856 if (inst->sig.ldvary)
857 scoreboard->last_ldvary_tick = scoreboard->tick;
858
859 if (qpu_inst_is_tlb(inst))
860 scoreboard->tlb_locked = true;
861 }
862
863 static void
864 dump_state(const struct v3d_device_info *devinfo,
865 struct list_head *schedule_list)
866 {
867 list_for_each_entry(struct schedule_node, n, schedule_list, link) {
868 fprintf(stderr, " t=%4d: ", n->unblocked_time);
869 v3d_qpu_dump(devinfo, &n->inst->qpu);
870 fprintf(stderr, "\n");
871
872 for (int i = 0; i < n->child_count; i++) {
873 struct schedule_node *child = n->children[i].node;
874 if (!child)
875 continue;
876
877 fprintf(stderr, " - ");
878 v3d_qpu_dump(devinfo, &child->inst->qpu);
879 fprintf(stderr, " (%d parents, %c)\n",
880 child->parent_count,
881 n->children[i].write_after_read ? 'w' : 'r');
882 }
883 }
884 }
885
886 static uint32_t magic_waddr_latency(enum v3d_qpu_waddr waddr,
887 const struct v3d_qpu_instr *after)
888 {
889 /* Apply some huge latency between texture fetch requests and getting
890 * their results back.
891 *
892 * FIXME: This is actually pretty bogus. If we do:
893 *
894 * mov tmu0_s, a
895 * <a bit of math>
896 * mov tmu0_s, b
897 * load_tmu0
898 * <more math>
899 * load_tmu0
900 *
901 * we count that as worse than
902 *
903 * mov tmu0_s, a
904 * mov tmu0_s, b
905 * <lots of math>
906 * load_tmu0
907 * <more math>
908 * load_tmu0
909 *
910 * because we associate the first load_tmu0 with the *second* tmu0_s.
911 */
912 if (v3d_qpu_magic_waddr_is_tmu(waddr) && v3d_qpu_waits_on_tmu(after))
913 return 100;
914
915 /* Assume that anything depending on us is consuming the SFU result. */
916 if (v3d_qpu_magic_waddr_is_sfu(waddr))
917 return 3;
918
919 return 1;
920 }
921
922 static uint32_t
923 instruction_latency(struct schedule_node *before, struct schedule_node *after)
924 {
925 const struct v3d_qpu_instr *before_inst = &before->inst->qpu;
926 const struct v3d_qpu_instr *after_inst = &after->inst->qpu;
927 uint32_t latency = 1;
928
929 if (before_inst->type != V3D_QPU_INSTR_TYPE_ALU ||
930 after_inst->type != V3D_QPU_INSTR_TYPE_ALU)
931 return latency;
932
933 if (before_inst->alu.add.magic_write) {
934 latency = MAX2(latency,
935 magic_waddr_latency(before_inst->alu.add.waddr,
936 after_inst));
937 }
938
939 if (before_inst->alu.mul.magic_write) {
940 latency = MAX2(latency,
941 magic_waddr_latency(before_inst->alu.mul.waddr,
942 after_inst));
943 }
944
945 return latency;
946 }
947
948 /** Recursive computation of the delay member of a node. */
949 static void
950 compute_delay(struct schedule_node *n)
951 {
952 if (!n->child_count) {
953 n->delay = 1;
954 } else {
955 for (int i = 0; i < n->child_count; i++) {
956 if (!n->children[i].node->delay)
957 compute_delay(n->children[i].node);
958 n->delay = MAX2(n->delay,
959 n->children[i].node->delay +
960 instruction_latency(n, n->children[i].node));
961 }
962 }
963 }
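
/* Worked example (schematic): for a chain "SFU recip -> fmul that reads the
 * r4 result -> leaf", the leaf gets delay 1, the fmul gets 1 + 1 = 2, and
 * the recip gets 2 + 3 = 5, since magic_waddr_latency() charges 3 cycles
 * for an SFU write feeding its consumer.  The larger delay then wins the
 * tie-break in choose_instruction_to_schedule().
 */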
964
965 static void
966 mark_instruction_scheduled(struct list_head *schedule_list,
967 uint32_t time,
968 struct schedule_node *node,
969 bool war_only)
970 {
971 if (!node)
972 return;
973
974 for (int i = node->child_count - 1; i >= 0; i--) {
975 struct schedule_node *child =
976 node->children[i].node;
977
978 if (!child)
979 continue;
980
981 if (war_only && !node->children[i].write_after_read)
982 continue;
983
984 /* If the requirement is only that the node not appear before
985 * the last read of its destination, then it can be scheduled
986 * immediately after (or paired with!) the thing reading the
987 * destination.
988 */
989 uint32_t latency = 0;
990 if (!war_only) {
991 latency = instruction_latency(node,
992 node->children[i].node);
993 }
994
995 child->unblocked_time = MAX2(child->unblocked_time,
996 time + latency);
997 child->parent_count--;
998 if (child->parent_count == 0)
999 list_add(&child->link, schedule_list);
1000
1001 node->children[i].node = NULL;
1002 }
1003 }
1004
1005 static void
1006 insert_scheduled_instruction(struct v3d_compile *c,
1007 struct qblock *block,
1008 struct choose_scoreboard *scoreboard,
1009 struct qinst *inst)
1010 {
1011 list_addtail(&inst->link, &block->instructions);
1012
1013 update_scoreboard_for_chosen(scoreboard, &inst->qpu);
1014 c->qpu_inst_count++;
1015 scoreboard->tick++;
1016 }
1017
1018 static struct qinst *
1019 vir_nop()
1020 {
1021 struct qreg undef = { QFILE_NULL, 0 };
1022 struct qinst *qinst = vir_add_inst(V3D_QPU_A_NOP, undef, undef, undef);
1023
1024 return qinst;
1025 }
1026
1027 static void
1028 emit_nop(struct v3d_compile *c, struct qblock *block,
1029 struct choose_scoreboard *scoreboard)
1030 {
1031 insert_scheduled_instruction(c, block, scoreboard, vir_nop());
1032 }
1033
1034 static bool
1035 qpu_instruction_valid_in_thrend_slot(struct v3d_compile *c,
1036 const struct qinst *qinst, int slot)
1037 {
1038 const struct v3d_qpu_instr *inst = &qinst->qpu;
1039
1040 /* Only TLB Z writes are prohibited in the last slot, but we don't
1041 * have those flagged so prohibit all TLB ops for now.
1042 */
1043 if (slot == 2 && qpu_inst_is_tlb(inst))
1044 return false;
1045
1046 if (slot > 0 && qinst->uniform != ~0)
1047 return false;
1048
1049 if (v3d_qpu_uses_vpm(inst))
1050 return false;
1051
1052 if (inst->sig.ldvary)
1053 return false;
1054
1055 if (inst->type == V3D_QPU_INSTR_TYPE_ALU) {
1056 /* GFXH-1625: TMUWT not allowed in the final instruction. */
1057 if (slot == 2 && inst->alu.add.op == V3D_QPU_A_TMUWT)
1058 return false;
1059
1060 /* No writing physical registers at the end. */
1061 if (!inst->alu.add.magic_write ||
1062 !inst->alu.mul.magic_write) {
1063 return false;
1064 }
1065
1066 if (c->devinfo->ver < 40 && inst->alu.add.op == V3D_QPU_A_SETMSF)
1067 return false;
1068
1069 /* RF0-2 might be overwritten during the delay slots by
1070 * fragment shader setup.
1071 */
1072 if (inst->raddr_a < 3 &&
1073 (inst->alu.add.a == V3D_QPU_MUX_A ||
1074 inst->alu.add.b == V3D_QPU_MUX_A ||
1075 inst->alu.mul.a == V3D_QPU_MUX_A ||
1076 inst->alu.mul.b == V3D_QPU_MUX_A)) {
1077 return false;
1078 }
1079
1080 if (inst->raddr_b < 3 &&
1081 !inst->sig.small_imm &&
1082 (inst->alu.add.a == V3D_QPU_MUX_B ||
1083 inst->alu.add.b == V3D_QPU_MUX_B ||
1084 inst->alu.mul.a == V3D_QPU_MUX_B ||
1085 inst->alu.mul.b == V3D_QPU_MUX_B)) {
1086 return false;
1087 }
1088 }
1089
1090 return true;
1091 }
1092
1093 static bool
1094 valid_thrsw_sequence(struct v3d_compile *c, struct choose_scoreboard *scoreboard,
1095 struct qinst *qinst, int instructions_in_sequence,
1096 bool is_thrend)
1097 {
1098 /* No emitting our thrsw while the previous thrsw hasn't happened yet. */
1099 if (scoreboard->last_thrsw_tick + 3 >
1100 scoreboard->tick - instructions_in_sequence) {
1101 return false;
1102 }
1103
1104 for (int slot = 0; slot < instructions_in_sequence; slot++) {
1105 /* No scheduling SFU when the result would land in the other
1106 * thread. The simulator complains for safety, though it
1107 * would only occur for dead code in our case.
1108 */
1109 if (slot > 0 &&
1110 qinst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
1111 (v3d_qpu_magic_waddr_is_sfu(qinst->qpu.alu.add.waddr) ||
1112 v3d_qpu_magic_waddr_is_sfu(qinst->qpu.alu.mul.waddr))) {
1113 return false;
1114 }
1115
1116 if (slot > 0 && qinst->qpu.sig.ldvary)
1117 return false;
1118
1119 if (is_thrend &&
1120 !qpu_instruction_valid_in_thrend_slot(c, qinst, slot)) {
1121 return false;
1122 }
1123
1124 /* Note that the list is circular, so we can only do this up
1125 * to instructions_in_sequence.
1126 */
1127 qinst = (struct qinst *)qinst->link.next;
1128 }
1129
1130 return true;
1131 }
1132
1133 /**
1134 * Emits a THRSW signal in the stream, trying to move it up to pair with
1135 * another instruction.
1136 */
1137 static int
1138 emit_thrsw(struct v3d_compile *c,
1139 struct qblock *block,
1140 struct choose_scoreboard *scoreboard,
1141 struct qinst *inst,
1142 bool is_thrend)
1143 {
1144 int time = 0;
1145
1146 /* There should be nothing in a thrsw inst being scheduled other than
1147 * the signal bits.
1148 */
1149 assert(inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU);
1150 assert(inst->qpu.alu.add.op == V3D_QPU_A_NOP);
1151 assert(inst->qpu.alu.mul.op == V3D_QPU_M_NOP);
1152
1153 /* Find how far back into previous instructions we can put the THRSW. */
1154 int slots_filled = 0;
1155 struct qinst *merge_inst = NULL;
1156 vir_for_each_inst_rev(prev_inst, block) {
1157 struct v3d_qpu_sig sig = prev_inst->qpu.sig;
1158 sig.thrsw = true;
1159 uint32_t packed_sig;
1160
1161 if (!v3d_qpu_sig_pack(c->devinfo, &sig, &packed_sig))
1162 break;
1163
1164 if (!valid_thrsw_sequence(c, scoreboard,
1165 prev_inst, slots_filled + 1,
1166 is_thrend)) {
1167 break;
1168 }
1169
1170 merge_inst = prev_inst;
1171 if (++slots_filled == 3)
1172 break;
1173 }
1174
1175 bool needs_free = false;
1176 if (merge_inst) {
1177 merge_inst->qpu.sig.thrsw = true;
1178 needs_free = true;
1179 scoreboard->last_thrsw_tick = scoreboard->tick - slots_filled;
1180 } else {
1181 scoreboard->last_thrsw_tick = scoreboard->tick;
1182 insert_scheduled_instruction(c, block, scoreboard, inst);
1183 time++;
1184 slots_filled++;
1185 merge_inst = inst;
1186 }
1187
1188 /* Insert any extra delay slot NOPs we need. */
1189 for (int i = 0; i < 3 - slots_filled; i++) {
1190 emit_nop(c, block, scoreboard);
1191 time++;
1192 }
1193
1194 /* If we're emitting the last THRSW (other than program end), then
1195 * signal that to the HW by emitting two THRSWs in a row.
1196 */
1197 if (inst->is_last_thrsw) {
1198 struct qinst *second_inst =
1199 (struct qinst *)merge_inst->link.next;
1200 second_inst->qpu.sig.thrsw = true;
1201 }
1202
1203 /* If we put our THRSW into another instruction, free up the
1204 * instruction that didn't end up scheduled into the list.
1205 */
1206 if (needs_free)
1207 free(inst);
1208
1209 return time;
1210 }
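
/* Placement example for the walk above: if the three instructions already
 * emitted before the THRSW all pass valid_thrsw_sequence(), the signal is
 * folded onto the earliest of them and no padding is needed.  If none of
 * them qualify, the standalone THRSW NOP is emitted in place and, since it
 * counts as one slot filled, two more NOPs follow to cover the remaining
 * delay slots.
 */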
1211
1212 static uint32_t
1213 schedule_instructions(struct v3d_compile *c,
1214 struct choose_scoreboard *scoreboard,
1215 struct qblock *block,
1216 struct list_head *schedule_list,
1217 enum quniform_contents *orig_uniform_contents,
1218 uint32_t *orig_uniform_data,
1219 uint32_t *next_uniform)
1220 {
1221 const struct v3d_device_info *devinfo = c->devinfo;
1222 uint32_t time = 0;
1223
1224 if (debug) {
1225 fprintf(stderr, "initial deps:\n");
1226 dump_state(devinfo, schedule_list);
1227 fprintf(stderr, "\n");
1228 }
1229
1230 /* Remove non-DAG heads from the list. */
1231 list_for_each_entry_safe(struct schedule_node, n, schedule_list, link) {
1232 if (n->parent_count != 0)
1233 list_del(&n->link);
1234 }
1235
1236 while (!list_empty(schedule_list)) {
1237 struct schedule_node *chosen =
1238 choose_instruction_to_schedule(devinfo,
1239 scoreboard,
1240 schedule_list,
1241 NULL);
1242 struct schedule_node *merge = NULL;
1243
1244 /* If there are no valid instructions to schedule, drop a NOP
1245 * in.
1246 */
1247 struct qinst *qinst = chosen ? chosen->inst : vir_nop();
1248 struct v3d_qpu_instr *inst = &qinst->qpu;
1249
1250 if (debug) {
1251 fprintf(stderr, "t=%4d: current list:\n",
1252 time);
1253 dump_state(devinfo, schedule_list);
1254 fprintf(stderr, "t=%4d: chose: ", time);
1255 v3d_qpu_dump(devinfo, inst);
1256 fprintf(stderr, "\n");
1257 }
1258
1259 /* We can't mark_instruction_scheduled() the chosen inst until
1260 * we're done identifying instructions to merge, so put the
1261 * merged instructions on a list for a moment.
1262 */
1263 struct list_head merged_list;
1264 list_inithead(&merged_list);
1265
1266 /* Schedule this instruction onto the QPU list. Also try to
1267 * find an instruction to pair with it.
1268 */
1269 if (chosen) {
1270 time = MAX2(chosen->unblocked_time, time);
1271 list_del(&chosen->link);
1272 mark_instruction_scheduled(schedule_list, time,
1273 chosen, true);
1274
1275 while ((merge =
1276 choose_instruction_to_schedule(devinfo,
1277 scoreboard,
1278 schedule_list,
1279 chosen))) {
1280 time = MAX2(merge->unblocked_time, time);
1281 list_del(&merge->link);
1282 list_addtail(&merge->link, &merged_list);
1283 (void)qpu_merge_inst(devinfo, inst,
1284 inst, &merge->inst->qpu);
1285 if (merge->inst->uniform != -1) {
1286 chosen->inst->uniform =
1287 merge->inst->uniform;
1288 }
1289
1290 if (debug) {
1291 fprintf(stderr, "t=%4d: merging: ",
1292 time);
1293 v3d_qpu_dump(devinfo, &merge->inst->qpu);
1294 fprintf(stderr, "\n");
1295 fprintf(stderr, " result: ");
1296 v3d_qpu_dump(devinfo, inst);
1297 fprintf(stderr, "\n");
1298 }
1299 }
1300 }
1301
1302 /* Update the uniform index for the rewritten location --
1303 * branch target updating will still need to change
1304 * c->uniform_data[] using this index.
1305 */
1306 if (qinst->uniform != -1) {
1307 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH)
1308 block->branch_uniform = *next_uniform;
1309
1310 c->uniform_data[*next_uniform] =
1311 orig_uniform_data[qinst->uniform];
1312 c->uniform_contents[*next_uniform] =
1313 orig_uniform_contents[qinst->uniform];
1314 qinst->uniform = *next_uniform;
1315 (*next_uniform)++;
1316 }
1317
1318 if (debug) {
1319 fprintf(stderr, "\n");
1320 }
1321
1322 /* Now that we've scheduled a new instruction, some of its
1323 * children can be promoted to the list of instructions ready to
1324 * be scheduled. Update the children's unblocked time for this
1325 * DAG edge as we do so.
1326 */
1327 mark_instruction_scheduled(schedule_list, time, chosen, false);
1328 list_for_each_entry(struct schedule_node, merge, &merged_list,
1329 link) {
1330 mark_instruction_scheduled(schedule_list, time, merge,
1331 false);
1332
1333 /* The merged VIR instruction doesn't get re-added to the
1334 * block, so free it now.
1335 */
1336 free(merge->inst);
1337 }
1338
1339 if (inst->sig.thrsw) {
1340 time += emit_thrsw(c, block, scoreboard, qinst, false);
1341 } else {
1342 insert_scheduled_instruction(c, block,
1343 scoreboard, qinst);
1344
1345 if (inst->type == V3D_QPU_INSTR_TYPE_BRANCH) {
1346 block->branch_qpu_ip = c->qpu_inst_count - 1;
1347 /* Fill the delay slots.
1348 *
1349 * We should fill these with actual instructions
1350 * instead, but that will probably need to be done
1351 * after this, once we know what the leading
1352 * instructions of the successors are (so we can
1353 * handle A/B register file write latency).
1354 */
1355 for (int i = 0; i < 3; i++)
1356 emit_nop(c, block, scoreboard);
1357 }
1358 }
1359 }
1360
1361 return time;
1362 }
1363
1364 static uint32_t
1365 qpu_schedule_instructions_block(struct v3d_compile *c,
1366 struct choose_scoreboard *scoreboard,
1367 struct qblock *block,
1368 enum quniform_contents *orig_uniform_contents,
1369 uint32_t *orig_uniform_data,
1370 uint32_t *next_uniform)
1371 {
1372 void *mem_ctx = ralloc_context(NULL);
1373 struct list_head schedule_list;
1374
1375 list_inithead(&schedule_list);
1376
1377 /* Wrap each instruction in a scheduler structure. */
1378 while (!list_empty(&block->instructions)) {
1379 struct qinst *qinst = (struct qinst *)block->instructions.next;
1380 struct schedule_node *n =
1381 rzalloc(mem_ctx, struct schedule_node);
1382
1383 n->inst = qinst;
1384
1385 list_del(&qinst->link);
1386 list_addtail(&n->link, &schedule_list);
1387 }
1388
1389 calculate_forward_deps(c, &schedule_list);
1390 calculate_reverse_deps(c, &schedule_list);
1391
1392 list_for_each_entry(struct schedule_node, n, &schedule_list, link) {
1393 compute_delay(n);
1394 }
1395
1396 uint32_t cycles = schedule_instructions(c, scoreboard, block,
1397 &schedule_list,
1398 orig_uniform_contents,
1399 orig_uniform_data,
1400 next_uniform);
1401
1402 ralloc_free(mem_ctx);
1403
1404 return cycles;
1405 }
1406
1407 static void
1408 qpu_set_branch_targets(struct v3d_compile *c)
1409 {
1410 vir_for_each_block(block, c) {
1411 /* The end block of the program has no branch. */
1412 if (!block->successors[0])
1413 continue;
1414
1415 /* If there was no branch instruction, then the successor
1416 * block must follow immediately after this one.
1417 */
1418 if (block->branch_qpu_ip == ~0) {
1419 assert(block->end_qpu_ip + 1 ==
1420 block->successors[0]->start_qpu_ip);
1421 continue;
1422 }
1423
1424 /* Walk back through the delay slots to find the branch
1425 * instr.
1426 */
1427 struct list_head *entry = block->instructions.prev;
1428 for (int i = 0; i < 3; i++)
1429 entry = entry->prev;
1430 struct qinst *branch = container_of(entry, branch, link);
1431 assert(branch->qpu.type == V3D_QPU_INSTR_TYPE_BRANCH);
1432
1433 /* Make sure that the if-we-don't-jump
1434 * successor was scheduled just after the
1435 * delay slots.
1436 */
1437 assert(!block->successors[1] ||
1438 block->successors[1]->start_qpu_ip ==
1439 block->branch_qpu_ip + 4);
1440
1441 branch->qpu.branch.offset =
1442 ((block->successors[0]->start_qpu_ip -
1443 (block->branch_qpu_ip + 4)) *
1444 sizeof(uint64_t));
1445
1446 /* Set up the relative offset to jump in the
1447 * uniform stream.
1448 *
1449 * Use a temporary here, because
1450 * uniform_data[inst->uniform] may be shared
1451 * between multiple instructions.
1452 */
1453 assert(c->uniform_contents[branch->uniform] == QUNIFORM_CONSTANT);
1454 c->uniform_data[branch->uniform] =
1455 (block->successors[0]->start_uniform -
1456 (block->branch_uniform + 1)) * 4;
1457 }
1458 }
1459
1460 uint32_t
1461 v3d_qpu_schedule_instructions(struct v3d_compile *c)
1462 {
1463 const struct v3d_device_info *devinfo = c->devinfo;
1464 struct qblock *end_block = list_last_entry(&c->blocks,
1465 struct qblock, link);
1466
1467 /* We reorder the uniforms as we schedule instructions, so save the
1468 * old data off and replace it.
1469 */
1470 uint32_t *uniform_data = c->uniform_data;
1471 enum quniform_contents *uniform_contents = c->uniform_contents;
1472 c->uniform_contents = ralloc_array(c, enum quniform_contents,
1473 c->num_uniforms);
1474 c->uniform_data = ralloc_array(c, uint32_t, c->num_uniforms);
1475 c->uniform_array_size = c->num_uniforms;
1476 uint32_t next_uniform = 0;
1477
1478 struct choose_scoreboard scoreboard;
1479 memset(&scoreboard, 0, sizeof(scoreboard));
1480 scoreboard.last_ldvary_tick = -10;
1481 scoreboard.last_magic_sfu_write_tick = -10;
1482 scoreboard.last_uniforms_reset_tick = -10;
1483 scoreboard.last_thrsw_tick = -10;
1484
1485 if (debug) {
1486 fprintf(stderr, "Pre-schedule instructions\n");
1487 vir_for_each_block(block, c) {
1488 fprintf(stderr, "BLOCK %d\n", block->index);
1489 list_for_each_entry(struct qinst, qinst,
1490 &block->instructions, link) {
1491 v3d_qpu_dump(devinfo, &qinst->qpu);
1492 fprintf(stderr, "\n");
1493 }
1494 }
1495 fprintf(stderr, "\n");
1496 }
1497
1498 uint32_t cycles = 0;
1499 vir_for_each_block(block, c) {
1500 block->start_qpu_ip = c->qpu_inst_count;
1501 block->branch_qpu_ip = ~0;
1502 block->start_uniform = next_uniform;
1503
1504 cycles += qpu_schedule_instructions_block(c,
1505 &scoreboard,
1506 block,
1507 uniform_contents,
1508 uniform_data,
1509 &next_uniform);
1510
1511 block->end_qpu_ip = c->qpu_inst_count - 1;
1512 }
1513
1514 /* Emit the program-end THRSW instruction. */
1515 struct qinst *thrsw = vir_nop();
1516 thrsw->qpu.sig.thrsw = true;
1517 emit_thrsw(c, end_block, &scoreboard, thrsw, true);
1518
1519 qpu_set_branch_targets(c);
1520
1521 assert(next_uniform == c->num_uniforms);
1522
1523 return cycles;
1524 }