src/gallium/drivers/vc4/vc4_qpu_emit.c
/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <inttypes.h>

#include "vc4_context.h"
#include "vc4_qir.h"
#include "vc4_qpu.h"

static void
vc4_dump_program(struct qcompile *c)
{
        fprintf(stderr, "%s:\n", qir_get_stage_name(c->stage));

        for (int i = 0; i < c->qpu_inst_count; i++) {
                fprintf(stderr, "0x%016"PRIx64" ", c->qpu_insts[i]);
                vc4_qpu_disasm(&c->qpu_insts[i], 1);
                fprintf(stderr, "\n");
        }
}

struct queued_qpu_inst {
        struct simple_node link;
        uint64_t inst;
};
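
/* Instructions are first queued on c->qpu_inst_list and only flushed to the
 * final program array by serialize_insts(), which gives us a chance to
 * insert the NOP padding the hardware's scheduling rules require.
 */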

static void
queue(struct qcompile *c, uint64_t inst)
{
        struct queued_qpu_inst *q = calloc(1, sizeof(*q));
        q->inst = inst;
        insert_at_tail(&c->qpu_inst_list, &q->link);
}

static uint64_t *
last_inst(struct qcompile *c)
{
        struct queued_qpu_inst *q =
                (struct queued_qpu_inst *)last_elem(&c->qpu_inst_list);
        return &q->inst;
}

/**
 * This is used to resolve the fact that we might register-allocate two
 * different operands of an instruction to the same physical register file
 * even though instructions have only one field for the register file source
 * address.
 *
 * In that case, we need to move one to a temporary that can be used in the
 * instruction, instead.
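 *
 * For example, if src0 is ra1 and src1 is ra2, only one A-file read address
 * fits in the instruction, so src1 is first copied through the r3
 * accumulator (which is kept out of the register allocator for this
 * purpose).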
 */
static void
fixup_raddr_conflict(struct qcompile *c,
                     struct qpu_reg src0, struct qpu_reg *src1)
{
        if ((src0.mux == QPU_MUX_A || src0.mux == QPU_MUX_B) &&
            (src1->mux == QPU_MUX_A || src1->mux == QPU_MUX_B) &&
            src0.addr != src1->addr) {
                queue(c, qpu_inst(qpu_a_MOV(qpu_r3(), *src1),
                                  qpu_m_NOP()));
                *src1 = qpu_r3();
        }
}

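/* Appends one 64-bit instruction to the final program array, growing the
 * array as needed.
 */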
static void
serialize_one_inst(struct qcompile *c, uint64_t inst)
{
        if (c->qpu_inst_count >= c->qpu_inst_size) {
                c->qpu_inst_size = MAX2(16, c->qpu_inst_size * 2);
                c->qpu_insts = realloc(c->qpu_insts,
                                       c->qpu_inst_size * sizeof(uint64_t));
        }
        c->qpu_insts[c->qpu_inst_count++] = inst;
}

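/* Flushes the queued instructions to the final program, inserting NOPs to
 * satisfy the hardware's hazard rules: the regfile write-then-read delay,
 * the two-instruction r4 latency after an SFU lookup, and the restrictions
 * on scoreboard-wait placement in fragment shaders.
 */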
static void
serialize_insts(struct qcompile *c)
{
        int last_sfu_write = -10;
        bool scoreboard_wait_emitted = false;

        while (!is_empty_list(&c->qpu_inst_list)) {
                struct queued_qpu_inst *q =
                        (struct queued_qpu_inst *)first_elem(&c->qpu_inst_list);
                uint32_t last_waddr_a = QPU_W_NOP, last_waddr_b = QPU_W_NOP;
                uint32_t raddr_a = QPU_GET_FIELD(q->inst, QPU_RADDR_A);
                uint32_t raddr_b = QPU_GET_FIELD(q->inst, QPU_RADDR_B);

                if (c->qpu_inst_count > 0) {
                        uint64_t last_inst = c->qpu_insts[c->qpu_inst_count -
                                                          1];
                        uint32_t last_waddr_add = QPU_GET_FIELD(last_inst,
                                                                QPU_WADDR_ADD);
                        uint32_t last_waddr_mul = QPU_GET_FIELD(last_inst,
                                                                QPU_WADDR_MUL);

                        if (last_inst & QPU_WS) {
                                last_waddr_a = last_waddr_mul;
                                last_waddr_b = last_waddr_add;
                        } else {
                                last_waddr_a = last_waddr_add;
                                last_waddr_b = last_waddr_mul;
                        }
                }

                uint32_t src_muxes[] = {
                        QPU_GET_FIELD(q->inst, QPU_ADD_A),
                        QPU_GET_FIELD(q->inst, QPU_ADD_B),
                        QPU_GET_FIELD(q->inst, QPU_MUL_A),
                        QPU_GET_FIELD(q->inst, QPU_MUL_B),
                };

                /* "An instruction must not read from a location in physical
                 * regfile A or B that was written to by the previous
                 * instruction."
                 */
                bool needs_raddr_vs_waddr_nop = false;
                bool reads_r4 = false;
                for (int i = 0; i < ARRAY_SIZE(src_muxes); i++) {
                        if ((raddr_a < 32 &&
                             src_muxes[i] == QPU_MUX_A &&
                             last_waddr_a == raddr_a) ||
                            (raddr_b < 32 &&
                             src_muxes[i] == QPU_MUX_B &&
                             last_waddr_b == raddr_b)) {
                                needs_raddr_vs_waddr_nop = true;
                        }
                        if (src_muxes[i] == QPU_MUX_R4)
                                reads_r4 = true;
                }

                if (needs_raddr_vs_waddr_nop) {
                        serialize_one_inst(c, qpu_inst(qpu_a_NOP(),
                                                       qpu_m_NOP()));
                }

                /* "After an SFU lookup instruction, accumulator r4 must not
                 * be read in the following two instructions. Any other
                 * instruction that results in r4 being written (that is, TMU
                 * read, TLB read, SFU lookup) cannot occur in the two
                 * instructions following an SFU lookup."
                 */
                if (reads_r4) {
                        while (c->qpu_inst_count - last_sfu_write < 3) {
                                serialize_one_inst(c, qpu_inst(qpu_a_NOP(),
                                                               qpu_m_NOP()));
                        }
                }

                uint32_t waddr_a = QPU_GET_FIELD(q->inst, QPU_WADDR_ADD);
                uint32_t waddr_m = QPU_GET_FIELD(q->inst, QPU_WADDR_MUL);
                if ((waddr_a >= QPU_W_SFU_RECIP && waddr_a <= QPU_W_SFU_LOG) ||
                    (waddr_m >= QPU_W_SFU_RECIP && waddr_m <= QPU_W_SFU_LOG)) {
                        last_sfu_write = c->qpu_inst_count;
                }

                /* "A scoreboard wait must not occur in the first two
                 * instructions of a fragment shader. This is either the
                 * explicit Wait for Scoreboard signal or an implicit wait
                 * with the first tile-buffer read or write instruction."
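                 *
                 * We also can't put the wait on an instruction that already
                 * carries another signal, so pad with NOPs until the last
                 * serialized instruction's signal field is free.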
                 */
                if (!scoreboard_wait_emitted &&
                    (waddr_a == QPU_W_TLB_Z || waddr_m == QPU_W_TLB_Z ||
                     waddr_a == QPU_W_TLB_COLOR_MS ||
                     waddr_m == QPU_W_TLB_COLOR_MS ||
                     waddr_a == QPU_W_TLB_COLOR_ALL ||
                     waddr_m == QPU_W_TLB_COLOR_ALL ||
                     QPU_GET_FIELD(q->inst, QPU_SIG) == QPU_SIG_COLOR_LOAD)) {
                        while (c->qpu_inst_count < 3 ||
                               QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                                             QPU_SIG) != QPU_SIG_NONE) {
                                serialize_one_inst(c, qpu_inst(qpu_a_NOP(),
                                                               qpu_m_NOP()));
                        }
                        c->qpu_insts[c->qpu_inst_count - 1] =
                                qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                                            QPU_SIG_WAIT_FOR_SCOREBOARD);
                        scoreboard_wait_emitted = true;
                }

                serialize_one_inst(c, q->inst);

                remove_from_list(&q->link);
                free(q);
        }
}

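/* Translates the QIR instruction list to QPU instructions, using a simple
 * greedy register allocator: each temp gets the first free accumulator or
 * regfile entry when first written, and the register is released once the
 * temp's (precounted) last use has been emitted.
 */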
void
vc4_generate_code(struct qcompile *c)
{
        struct qpu_reg allocate_to_qpu_reg[3 + 32 + 32];
        bool reg_in_use[ARRAY_SIZE(allocate_to_qpu_reg)];
        int *reg_allocated = calloc(c->num_temps, sizeof(*reg_allocated));
        int *reg_uses_remaining =
                calloc(c->num_temps, sizeof(*reg_uses_remaining));
        bool discard = false;

        for (int i = 0; i < ARRAY_SIZE(reg_in_use); i++)
                reg_in_use[i] = false;
        for (int i = 0; i < c->num_temps; i++)
                reg_allocated[i] = -1;
        for (int i = 0; i < 3; i++)
                allocate_to_qpu_reg[i] = qpu_rn(i);
        for (int i = 0; i < 32; i++)
                allocate_to_qpu_reg[i + 3] = qpu_ra(i);
        for (int i = 0; i < 32; i++)
                allocate_to_qpu_reg[i + 3 + 32] = qpu_rb(i);
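
        /* Indices 0..2 of the map are accumulators r0-r2 (r3 is kept free
         * as a temporary and r4/r5 are special), 3..34 are ra0-31, and
         * 35..66 are rb0-31.
         */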

        make_empty_list(&c->qpu_inst_list);

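        /* Count the remaining uses of each temp up front so the allocator
         * can free a register at its last use, and pin the Z/W payload
         * register if the shader will read it.
         */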
        struct simple_node *node;
        foreach(node, &c->instructions) {
                struct qinst *qinst = (struct qinst *)node;

                if (qinst->dst.file == QFILE_TEMP)
                        reg_uses_remaining[qinst->dst.index]++;
                for (int i = 0; i < qir_get_op_nsrc(qinst->op); i++) {
                        if (qinst->src[i].file == QFILE_TEMP)
                                reg_uses_remaining[qinst->src[i].index]++;
                }
                if (qinst->op == QOP_TLB_PASSTHROUGH_Z_WRITE ||
                    qinst->op == QOP_FRAG_Z)
                        reg_in_use[3 + 32 + QPU_R_FRAG_PAYLOAD_ZW] = true;
        }

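        /* For vertex and coordinate shaders, program the VPM read/write
         * setup registers before any VPM access.  The magic values encode a
         * VPM setup word (32-bit horizontal access, with the count of input
         * vectors folded into the read setup); see the VideoCore IV
         * documentation for the field layout.
         */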
        switch (c->stage) {
        case QSTAGE_VERT:
        case QSTAGE_COORD:
                queue(c, qpu_load_imm_ui(qpu_vrsetup(),
                                         (0x00001a00 +
                                          0x00100000 * c->num_inputs)));
                queue(c, qpu_load_imm_ui(qpu_vwsetup(), 0x00001a00));
                break;
        case QSTAGE_FRAG:
                break;
        }

        foreach(node, &c->instructions) {
                struct qinst *qinst = (struct qinst *)node;

#if 0
                fprintf(stderr, "translating qinst to qpu: ");
                qir_dump_inst(qinst);
                fprintf(stderr, "\n");
#endif

                static const struct {
                        uint32_t op;
                        bool is_mul;
                } translate[] = {
#define A(name) [QOP_##name] = {QPU_A_##name, false}
#define M(name) [QOP_##name] = {QPU_M_##name, true}
                        A(FADD),
                        A(FSUB),
                        A(FMIN),
                        A(FMAX),
                        A(FMINABS),
                        A(FMAXABS),
                        A(FTOI),
                        A(ITOF),

                        M(FMUL),
                };

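                /* Condition codes tested after "src[0] - src[1]" sets the
                 * flags: Z is set on equality and N on a negative result,
                 * so e.g. SLT maps to "negative set".
                 */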
                static const uint32_t compareflags[] = {
                        [QOP_SEQ - QOP_SEQ] = QPU_COND_ZS,
                        [QOP_SNE - QOP_SEQ] = QPU_COND_ZC,
                        [QOP_SLT - QOP_SEQ] = QPU_COND_NS,
                        [QOP_SGE - QOP_SEQ] = QPU_COND_NC,
                };

                struct qpu_reg src[4];
                for (int i = 0; i < qir_get_op_nsrc(qinst->op); i++) {
                        int index = qinst->src[i].index;
                        switch (qinst->src[i].file) {
                        case QFILE_NULL:
                                src[i] = qpu_rn(0);
                                break;
                        case QFILE_TEMP:
                                if (reg_allocated[index] == -1) {
                                        fprintf(stderr, "undefined reg use: ");
                                        qir_dump_inst(qinst);
                                        fprintf(stderr, "\n");

                                        src[i] = qpu_rn(0);
                                } else {
                                        src[i] = allocate_to_qpu_reg[reg_allocated[index]];
                                        reg_uses_remaining[index]--;
                                        if (reg_uses_remaining[index] == 0)
                                                reg_in_use[reg_allocated[index]] = false;
                                }
                                break;
                        case QFILE_UNIF:
                                src[i] = qpu_unif();
                                break;
                        case QFILE_VARY:
                                src[i] = qpu_vary();
                                break;
                        }
                }

                struct qpu_reg dst;
                switch (qinst->dst.file) {
                case QFILE_NULL:
                        dst = qpu_ra(QPU_W_NOP);
                        break;

                case QFILE_TEMP:
                        if (reg_allocated[qinst->dst.index] == -1) {
                                int alloc;
                                for (alloc = 0;
                                     alloc < ARRAY_SIZE(reg_in_use);
                                     alloc++) {
                                        /* The pack flags require an A-file register. */
                                        if (qinst->op == QOP_PACK_SCALED &&
                                            allocate_to_qpu_reg[alloc].mux != QPU_MUX_A) {
                                                continue;
                                        }

                                        if (!reg_in_use[alloc])
                                                break;
                                }
                                assert(alloc != ARRAY_SIZE(reg_in_use) && "need better reg alloc");
                                reg_in_use[alloc] = true;
                                reg_allocated[qinst->dst.index] = alloc;
                        }

                        dst = allocate_to_qpu_reg[reg_allocated[qinst->dst.index]];

                        reg_uses_remaining[qinst->dst.index]--;
                        if (reg_uses_remaining[qinst->dst.index] == 0) {
                                reg_in_use[reg_allocated[qinst->dst.index]] =
                                        false;
                        }
                        break;

                case QFILE_VARY:
                case QFILE_UNIF:
                        assert(!"not reached");
                        break;
                }

                switch (qinst->op) {
                case QOP_MOV:
                        /* Skip emitting the MOV if it's a no-op. */
                        if (dst.mux == QPU_MUX_A || dst.mux == QPU_MUX_B ||
                            dst.mux != src[0].mux || dst.addr != src[0].addr) {
                                queue(c, qpu_inst(qpu_a_MOV(dst, src[0]),
                                                  qpu_m_NOP()));
                        }
                        break;

                case QOP_CMP:
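                        /* Implements a select: set the flags from src[0],
                         * then write src[1] where src[0] was negative and
                         * src[2] elsewhere.
                         */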
                        queue(c, qpu_inst(qpu_a_MOV(qpu_ra(QPU_W_NOP),
                                                    src[0]),
                                          qpu_m_NOP()));
                        *last_inst(c) |= QPU_SF;

                        if (dst.mux <= QPU_MUX_R3) {
                                fixup_raddr_conflict(c, src[1], &src[2]);
                                queue(c, qpu_inst(qpu_a_MOV(dst, src[1]),
                                                  qpu_m_MOV(dst, src[2])));
                                *last_inst(c) = qpu_set_cond_add(*last_inst(c),
                                                                 QPU_COND_NS);
                                *last_inst(c) = qpu_set_cond_mul(*last_inst(c),
                                                                 QPU_COND_NC);
                        } else {
                                if (dst.mux == src[1].mux &&
                                    dst.addr == src[1].addr) {
                                        queue(c, qpu_inst(qpu_a_MOV(dst, src[1]),
                                                          qpu_m_NOP()));

                                        queue(c, qpu_inst(qpu_a_MOV(dst, src[2]),
                                                          qpu_m_NOP()));
                                        *last_inst(c) = qpu_set_cond_add(*last_inst(c),
                                                                         QPU_COND_NC);
                                } else {
                                        queue(c, qpu_inst(qpu_a_MOV(dst, src[2]),
                                                          qpu_m_NOP()));

                                        queue(c, qpu_inst(qpu_a_MOV(dst, src[1]),
                                                          qpu_m_NOP()));
                                        *last_inst(c) = qpu_set_cond_add(*last_inst(c),
                                                                         QPU_COND_NS);
                                }
                        }
                        break;

                case QOP_SEQ:
                case QOP_SNE:
                case QOP_SGE:
                case QOP_SLT:
                        fixup_raddr_conflict(c, src[0], &src[1]);
                        queue(c, qpu_inst(qpu_a_SUB(qpu_ra(QPU_W_NOP),
                                                    src[0], src[1]),
                                          qpu_m_NOP()));
                        *last_inst(c) |= QPU_SF;

                        queue(c, qpu_load_imm_f(dst, 0.0));
                        queue(c, qpu_load_imm_f(dst, 1.0));
                        *last_inst(c) = qpu_set_cond_add(*last_inst(c),
                                                         compareflags[qinst->op - QOP_SEQ]);

                        break;

                case QOP_VPM_WRITE:
                        queue(c, qpu_inst(qpu_a_MOV(qpu_ra(QPU_W_VPM), src[0]),
                                          qpu_m_NOP()));
                        break;

                case QOP_VPM_READ:
                        queue(c, qpu_inst(qpu_a_MOV(dst, qpu_ra(QPU_R_VPM)),
                                          qpu_m_NOP()));
                        break;

                case QOP_RCP:
                case QOP_RSQ:
                case QOP_EXP2:
                case QOP_LOG2:
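                        /* Writing a value to an SFU register starts the
                         * lookup; the result appears in r4 two instructions
                         * later (the latency NOPs are added by
                         * serialize_insts()).
                         */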
                        switch (qinst->op) {
                        case QOP_RCP:
                                queue(c, qpu_inst(qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIP),
                                                            src[0]),
                                                  qpu_m_NOP()));
                                break;
                        case QOP_RSQ:
                                queue(c, qpu_inst(qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIPSQRT),
                                                            src[0]),
                                                  qpu_m_NOP()));
                                break;
                        case QOP_EXP2:
                                queue(c, qpu_inst(qpu_a_MOV(qpu_rb(QPU_W_SFU_EXP),
                                                            src[0]),
                                                  qpu_m_NOP()));
                                break;
                        case QOP_LOG2:
                                queue(c, qpu_inst(qpu_a_MOV(qpu_rb(QPU_W_SFU_LOG),
                                                            src[0]),
                                                  qpu_m_NOP()));
                                break;
                        default:
                                abort();
                        }

                        queue(c, qpu_inst(qpu_a_MOV(dst, qpu_r4()),
                                          qpu_m_NOP()));

                        break;

                case QOP_PACK_COLORS:
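                        /* Uses the MUL-pipeline pack modes to convert each
                         * float channel to 8 bits in byte i of r3,
                         * accumulating the packed RGBA8 color there.
                         */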
                        for (int i = 0; i < 4; i++) {
                                queue(c, qpu_inst(qpu_a_NOP(),
                                                  qpu_m_MOV(qpu_r3(), src[i])));
                                *last_inst(c) |= QPU_PM;
                                *last_inst(c) |= QPU_SET_FIELD(QPU_PACK_MUL_8A + i,
                                                               QPU_PACK);
                        }

                        queue(c, qpu_inst(qpu_a_MOV(dst, qpu_r3()),
                                          qpu_m_NOP()));

                        break;

                case QOP_FRAG_X:
                        queue(c, qpu_inst(qpu_a_ITOF(dst,
                                                     qpu_ra(QPU_R_XY_PIXEL_COORD)),
                                          qpu_m_NOP()));
                        break;

                case QOP_FRAG_Y:
                        queue(c, qpu_inst(qpu_a_ITOF(dst,
                                                     qpu_rb(QPU_R_XY_PIXEL_COORD)),
                                          qpu_m_NOP()));
                        break;

                case QOP_FRAG_Z:
                        queue(c, qpu_inst(qpu_a_ITOF(dst,
                                                     qpu_rb(QPU_R_FRAG_PAYLOAD_ZW)),
                                          qpu_m_NOP()));
                        break;

                case QOP_FRAG_RCP_W:
                        queue(c, qpu_inst(qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIP),
                                                    qpu_ra(QPU_R_FRAG_PAYLOAD_ZW)),
                                          qpu_m_NOP()));

                        queue(c, qpu_inst(qpu_a_MOV(dst, qpu_r4()),
                                          qpu_m_NOP()));
                        break;

                case QOP_TLB_DISCARD_SETUP:
                        discard = true;
                        queue(c, qpu_inst(qpu_a_MOV(src[0], src[0]),
                                          qpu_m_NOP()));
                        *last_inst(c) |= QPU_SF;
                        break;

                case QOP_TLB_PASSTHROUGH_Z_WRITE:
                        queue(c, qpu_inst(qpu_a_MOV(qpu_ra(QPU_W_TLB_Z),
                                                    qpu_rb(QPU_R_FRAG_PAYLOAD_ZW)),
                                          qpu_m_NOP()));
                        if (discard) {
                                *last_inst(c) = qpu_set_cond_add(*last_inst(c),
                                                                 QPU_COND_ZS);
                        }
                        break;

                case QOP_TLB_COLOR_READ:
                        queue(c, qpu_inst(qpu_a_NOP(), qpu_m_NOP()));
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_COLOR_LOAD);

                        break;

                case QOP_TLB_COLOR_WRITE:
                        queue(c, qpu_inst(qpu_a_MOV(qpu_tlbc(),
                                                    src[0]),
                                          qpu_m_NOP()));
                        if (discard) {
                                *last_inst(c) = qpu_set_cond_add(*last_inst(c),
                                                                 QPU_COND_ZS);
                        }
                        break;

                case QOP_VARY_ADD_C:
                        queue(c, qpu_inst(qpu_a_FADD(dst,
                                                     src[0], qpu_r5()),
                                          qpu_m_NOP()));
                        break;

                case QOP_PACK_SCALED: {
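                        /* Packs two values into the 16-bit halves of an
                         * A-file register using the regfile-A pack modes.
                         * If dst aliases src[1], write the 16B half first
                         * so the pack doesn't clobber src[1] before it's
                         * read.
                         */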
                        uint64_t a = (qpu_inst(qpu_a_MOV(dst, src[0]),
                                               qpu_m_NOP()) |
                                      QPU_SET_FIELD(QPU_PACK_A_16A,
                                                    QPU_PACK));
                        uint64_t b = (qpu_inst(qpu_a_MOV(dst, src[1]),
                                               qpu_m_NOP()) |
                                      QPU_SET_FIELD(QPU_PACK_A_16B,
                                                    QPU_PACK));

                        if (dst.mux == src[1].mux && dst.addr == src[1].addr) {
                                queue(c, b);
                                queue(c, a);
                        } else {
                                queue(c, a);
                                queue(c, b);
                        }
                        break;
                }

                case QOP_TEX_S:
                case QOP_TEX_T:
                case QOP_TEX_R:
                case QOP_TEX_B:
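                        /* Writes the coordinate to the matching TMU0
                         * register; the S write is what actually fires off
                         * the texture fetch.
                         */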
                        queue(c, qpu_inst(qpu_a_MOV(qpu_rb(QPU_W_TMU0_S +
                                                           (qinst->op -
                                                            QOP_TEX_S)),
                                                    src[0]),
                                          qpu_m_NOP()));
                        break;

                case QOP_TEX_RESULT:
                        queue(c, qpu_inst(qpu_a_NOP(), qpu_m_NOP()));
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_LOAD_TMU0);

                        break;

                case QOP_R4_UNPACK_A:
                case QOP_R4_UNPACK_B:
                case QOP_R4_UNPACK_C:
                case QOP_R4_UNPACK_D:
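                        /* Unpacks one 8-bit byte of r4 (e.g. a TLB color
                         * load or TMU result) to a float, selected by which
                         * of the A-D ops this is.
                         */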
                        queue(c, qpu_inst(qpu_a_MOV(dst, qpu_r4()),
                                          qpu_m_NOP()));
                        *last_inst(c) |= QPU_PM;
                        *last_inst(c) |= QPU_SET_FIELD(QPU_UNPACK_R4_8A +
                                                       (qinst->op -
                                                        QOP_R4_UNPACK_A),
                                                       QPU_UNPACK);

                        break;

                default:
                        assert(qinst->op < ARRAY_SIZE(translate));
                        assert(translate[qinst->op].op != 0); /* NOPs */

                        /* If we have only one source, put it in the second
                         * argument slot as well so that we don't take up
                         * another raddr just to get unused data.
                         */
                        if (qir_get_op_nsrc(qinst->op) == 1)
                                src[1] = src[0];

                        fixup_raddr_conflict(c, src[0], &src[1]);

                        if (translate[qinst->op].is_mul) {
                                queue(c, qpu_inst(qpu_a_NOP(),
                                                  qpu_m_alu2(translate[qinst->op].op,
                                                             dst,
                                                             src[0], src[1])));
                        } else {
                                queue(c, qpu_inst(qpu_a_alu2(translate[qinst->op].op,
                                                             dst,
                                                             src[0], src[1]),
                                                  qpu_m_NOP()));
                        }
                        break;
                }
        }

        serialize_insts(c);

        /* thread end can't have VPM write */
        if (QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_ADD) == QPU_W_VPM ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_MUL) == QPU_W_VPM) {
                serialize_one_inst(c, qpu_inst(qpu_a_NOP(), qpu_m_NOP()));
        }

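        /* The PROG_END signal is followed by two delay-slot instructions
         * before the thread actually ends, so append two NOPs after setting
         * it.
         */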
        c->qpu_insts[c->qpu_inst_count - 1] =
                qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                            QPU_SIG_PROG_END);
        serialize_one_inst(c, qpu_inst(qpu_a_NOP(), qpu_m_NOP()));
        serialize_one_inst(c, qpu_inst(qpu_a_NOP(), qpu_m_NOP()));

        switch (c->stage) {
        case QSTAGE_VERT:
        case QSTAGE_COORD:
                break;
        case QSTAGE_FRAG:
                c->qpu_insts[c->qpu_inst_count - 1] =
                        qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                                    QPU_SIG_SCOREBOARD_UNLOCK);
                break;
        }

        if (vc4_debug & VC4_DEBUG_QPU)
                vc4_dump_program(c);

        vc4_qpu_validate(c->qpu_insts, c->qpu_inst_count);
}