vc4: Avoid using undefined values when there's no color write.
src/gallium/drivers/vc4/vc4_qpu_emit.c
/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <inttypes.h>

#include "vc4_context.h"
#include "vc4_qir.h"
#include "vc4_qpu.h"

static void
vc4_dump_program(struct qcompile *c)
{
        fprintf(stderr, "%s:\n", qir_get_stage_name(c->stage));

        for (int i = 0; i < c->qpu_inst_count; i++) {
                fprintf(stderr, "0x%016"PRIx64" ", c->qpu_insts[i]);
                vc4_qpu_disasm(&c->qpu_insts[i], 1);
                fprintf(stderr, "\n");
        }
}

struct queued_qpu_inst {
        struct simple_node link;
        uint64_t inst;
};

static void
queue(struct qcompile *c, uint64_t inst)
{
        struct queued_qpu_inst *q = calloc(1, sizeof(*q));
        q->inst = inst;
        insert_at_tail(&c->qpu_inst_list, &q->link);
}

static uint64_t *
last_inst(struct qcompile *c)
{
        struct queued_qpu_inst *q =
                (struct queued_qpu_inst *)last_elem(&c->qpu_inst_list);
        return &q->inst;
}

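/* Typical usage of the queue()/last_inst() pair: the emit paths below queue
 * a complete instruction and then patch extra bits into it in place, e.g.:
 *
 *     queue(c, qpu_inst(qpu_a_MOV(dst, src), qpu_m_NOP()));
 *     *last_inst(c) |= QPU_SF;    (make the MOV update the condition flags)
 */
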
/**
 * This is used to resolve the fact that we might register-allocate two
 * different operands of an instruction to the same physical register file
 * even though instructions have only one field for the register file source
 * address.
 *
 * In that case, we need to move one to a temporary that can be used in the
 * instruction, instead.
 */
static void
fixup_raddr_conflict(struct qcompile *c,
                     struct qpu_reg src0, struct qpu_reg *src1)
{
        if ((src0.mux == QPU_MUX_A || src0.mux == QPU_MUX_B) &&
            (src1->mux == QPU_MUX_A || src1->mux == QPU_MUX_B) &&
            src0.addr != src1->addr) {
                queue(c, qpu_inst(qpu_a_MOV(qpu_r3(), *src1),
                                  qpu_m_NOP()));
                *src1 = qpu_r3();
        }
}

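/* Example: if src0 is ra1 and src1 is ra2, both operands would need the
 * single raddr_a field.  The MOV above copies src1 into accumulator r3
 * (which needs no raddr), so the final instruction can read ra1 and r3.
 */
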
static void
serialize_one_inst(struct qcompile *c, uint64_t inst)
{
        if (c->qpu_inst_count >= c->qpu_inst_size) {
                c->qpu_inst_size = MAX2(16, c->qpu_inst_size * 2);
                c->qpu_insts = realloc(c->qpu_insts,
                                       c->qpu_inst_size * sizeof(uint64_t));
        }
        c->qpu_insts[c->qpu_inst_count++] = inst;
}

static void
serialize_insts(struct qcompile *c)
{
        int last_sfu_write = -10;
        bool scoreboard_wait_emitted = false;

        while (!is_empty_list(&c->qpu_inst_list)) {
                struct queued_qpu_inst *q =
                        (struct queued_qpu_inst *)first_elem(&c->qpu_inst_list);
                uint32_t last_waddr_a = QPU_W_NOP, last_waddr_b = QPU_W_NOP;
                uint32_t raddr_a = QPU_GET_FIELD(q->inst, QPU_RADDR_A);
                uint32_t raddr_b = QPU_GET_FIELD(q->inst, QPU_RADDR_B);

                if (c->qpu_inst_count > 0) {
                        uint64_t last_inst = c->qpu_insts[c->qpu_inst_count -
                                                          1];
                        uint32_t last_waddr_add = QPU_GET_FIELD(last_inst,
                                                                QPU_WADDR_ADD);
                        uint32_t last_waddr_mul = QPU_GET_FIELD(last_inst,
                                                                QPU_WADDR_MUL);

                        if (last_inst & QPU_WS) {
                                last_waddr_a = last_waddr_mul;
                                last_waddr_b = last_waddr_add;
                        } else {
                                last_waddr_a = last_waddr_add;
                                last_waddr_b = last_waddr_mul;
                        }
                }

                uint32_t src_muxes[] = {
                        QPU_GET_FIELD(q->inst, QPU_ADD_A),
                        QPU_GET_FIELD(q->inst, QPU_ADD_B),
                        QPU_GET_FIELD(q->inst, QPU_MUL_A),
                        QPU_GET_FIELD(q->inst, QPU_MUL_B),
                };

                /* "An instruction must not read from a location in physical
                 * regfile A or B that was written to by the previous
                 * instruction."
                 */
                bool needs_raddr_vs_waddr_nop = false;
                bool reads_r4 = false;
                for (int i = 0; i < ARRAY_SIZE(src_muxes); i++) {
                        if ((raddr_a < 32 &&
                             src_muxes[i] == QPU_MUX_A &&
                             last_waddr_a == raddr_a) ||
                            (raddr_b < 32 &&
                             src_muxes[i] == QPU_MUX_B &&
                             last_waddr_b == raddr_b)) {
                                needs_raddr_vs_waddr_nop = true;
                        }
                        if (src_muxes[i] == QPU_MUX_R4)
                                reads_r4 = true;
                }

                if (needs_raddr_vs_waddr_nop) {
                        serialize_one_inst(c, qpu_inst(qpu_a_NOP(),
                                                       qpu_m_NOP()));
                }

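                /* Example of the hazard handled above: if the previous
                 * instruction wrote ra3 and this one reads ra3 through the
                 * A mux, a NOP is inserted between them.  Accumulator reads
                 * (mux r0-r5) are unaffected, since the rule only covers
                 * the physical register files.
                 */
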
                /* "After an SFU lookup instruction, accumulator r4 must not
                 * be read in the following two instructions. Any other
                 * instruction that results in r4 being written (that is, TMU
                 * read, TLB read, SFU lookup) cannot occur in the two
                 * instructions following an SFU lookup."
                 */
                if (reads_r4) {
                        while (c->qpu_inst_count - last_sfu_write < 3) {
                                serialize_one_inst(c, qpu_inst(qpu_a_NOP(),
                                                               qpu_m_NOP()));
                        }
                }

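                /* Example: with the SFU write serialized at index i, the
                 * earliest instruction allowed to read r4 is index i + 3,
                 * so NOPs are emitted until qpu_inst_count reaches that
                 * point.
                 */
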
                uint32_t waddr_a = QPU_GET_FIELD(q->inst, QPU_WADDR_ADD);
                uint32_t waddr_m = QPU_GET_FIELD(q->inst, QPU_WADDR_MUL);
                if ((waddr_a >= QPU_W_SFU_RECIP && waddr_a <= QPU_W_SFU_LOG) ||
                    (waddr_m >= QPU_W_SFU_RECIP && waddr_m <= QPU_W_SFU_LOG)) {
                        last_sfu_write = c->qpu_inst_count;
                }

                /* "A scoreboard wait must not occur in the first two
                 * instructions of a fragment shader. This is either the
                 * explicit Wait for Scoreboard signal or an implicit wait
                 * with the first tile-buffer read or write instruction."
                 */
                if (!scoreboard_wait_emitted &&
                    (waddr_a == QPU_W_TLB_Z || waddr_m == QPU_W_TLB_Z ||
                     waddr_a == QPU_W_TLB_COLOR_MS ||
                     waddr_m == QPU_W_TLB_COLOR_MS ||
                     waddr_a == QPU_W_TLB_COLOR_ALL ||
                     waddr_m == QPU_W_TLB_COLOR_ALL ||
                     QPU_GET_FIELD(q->inst, QPU_SIG) == QPU_SIG_COLOR_LOAD)) {
                        while (c->qpu_inst_count < 3 ||
                               QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                                             QPU_SIG) != QPU_SIG_NONE) {
                                serialize_one_inst(c, qpu_inst(qpu_a_NOP(),
                                                               qpu_m_NOP()));
                        }
                        c->qpu_insts[c->qpu_inst_count - 1] =
                                qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                                            QPU_SIG_WAIT_FOR_SCOREBOARD);
                        scoreboard_wait_emitted = true;
                }

                serialize_one_inst(c, q->inst);

                remove_from_list(&q->link);
                free(q);
        }
}

void
vc4_generate_code(struct qcompile *c)
{
        struct qpu_reg allocate_to_qpu_reg[3 + 32 + 32];
        bool reg_in_use[ARRAY_SIZE(allocate_to_qpu_reg)];
        int *reg_allocated = calloc(c->num_temps, sizeof(*reg_allocated));
        int *reg_uses_remaining =
                calloc(c->num_temps, sizeof(*reg_uses_remaining));

        for (int i = 0; i < ARRAY_SIZE(reg_in_use); i++)
                reg_in_use[i] = false;
        for (int i = 0; i < c->num_temps; i++)
                reg_allocated[i] = -1;
        for (int i = 0; i < 3; i++)
                allocate_to_qpu_reg[i] = qpu_rn(i);
        for (int i = 0; i < 32; i++)
                allocate_to_qpu_reg[i + 3] = qpu_ra(i);
        for (int i = 0; i < 32; i++)
                allocate_to_qpu_reg[i + 3 + 32] = qpu_rb(i);

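        /* The allocation map above is laid out as:
         *   [0..2]   accumulators r0-r2
         *   [3..34]  physical regfile A, ra0-ra31
         *   [35..66] physical regfile B, rb0-rb31
         * r3 is kept out of the map as a scratch register (see
         * fixup_raddr_conflict() and the pack ops), while r4 and r5 are
         * written by hardware (SFU/TMU/TLB results, varying loads) rather
         * than by the allocator.
         */
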
        make_empty_list(&c->qpu_inst_list);

        struct simple_node *node;
        foreach(node, &c->instructions) {
                struct qinst *qinst = (struct qinst *)node;

                if (qinst->dst.file == QFILE_TEMP)
                        reg_uses_remaining[qinst->dst.index]++;
                for (int i = 0; i < qir_get_op_nsrc(qinst->op); i++) {
                        if (qinst->src[i].file == QFILE_TEMP)
                                reg_uses_remaining[qinst->src[i].index]++;
                }
                if (qinst->op == QOP_TLB_PASSTHROUGH_Z_WRITE ||
                    qinst->op == QOP_FRAG_Z)
                        reg_in_use[3 + 32 + QPU_R_FRAG_PAYLOAD_ZW] = true;
        }

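        /* Note: these magic VPM setup words appear to follow the VideoCore
         * IV generic block read/write setup encoding: 0x00001a00 selects a
         * horizontal, 32-bit-wide, stride-1 access starting at VPM address
         * 0, and the 0x00100000 multiplier packs c->num_inputs into the
         * vector count field of the read setup.
         */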
        switch (c->stage) {
        case QSTAGE_VERT:
        case QSTAGE_COORD:
                queue(c, qpu_load_imm_ui(qpu_vrsetup(),
                                         (0x00001a00 +
                                          0x00100000 * c->num_inputs)));
                queue(c, qpu_load_imm_ui(qpu_vwsetup(), 0x00001a00));
                break;
        case QSTAGE_FRAG:
                break;
        }

        foreach(node, &c->instructions) {
                struct qinst *qinst = (struct qinst *)node;

#if 0
                fprintf(stderr, "translating qinst to qpu: ");
                qir_dump_inst(qinst);
                fprintf(stderr, "\n");
#endif

                static const struct {
                        uint32_t op;
                        bool is_mul;
                } translate[] = {
#define A(name) [QOP_##name] = {QPU_A_##name, false}
#define M(name) [QOP_##name] = {QPU_M_##name, true}
                        A(FADD),
                        A(FSUB),
                        A(FMIN),
                        A(FMAX),
                        A(FMINABS),
                        A(FMAXABS),
                        A(FTOI),
                        A(ITOF),

                        M(FMUL),
                };

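                /* Condition codes used to turn the flags set by a SUB (with
                 * QPU_SF) into the SEQ/SNE/SLT/SGE results below: dst is
                 * loaded with 0.0 and then conditionally overwritten with
                 * 1.0.
                 */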
                static const uint32_t compareflags[] = {
                        [QOP_SEQ - QOP_SEQ] = QPU_COND_ZS,
                        [QOP_SNE - QOP_SEQ] = QPU_COND_ZC,
                        [QOP_SLT - QOP_SEQ] = QPU_COND_NS,
                        [QOP_SGE - QOP_SEQ] = QPU_COND_NC,
                };

                struct qpu_reg src[4];
                for (int i = 0; i < qir_get_op_nsrc(qinst->op); i++) {
                        int index = qinst->src[i].index;
                        switch (qinst->src[i].file) {
                        case QFILE_NULL:
                                src[i] = qpu_rn(0);
                                break;
                        case QFILE_TEMP:
                                if (reg_allocated[index] == -1) {
                                        fprintf(stderr, "undefined reg use: ");
                                        qir_dump_inst(qinst);
                                        fprintf(stderr, "\n");

                                        src[i] = qpu_rn(0);
                                } else {
                                        src[i] = allocate_to_qpu_reg[reg_allocated[index]];
                                        reg_uses_remaining[index]--;
                                        if (reg_uses_remaining[index] == 0)
                                                reg_in_use[reg_allocated[index]] = false;
                                }
                                break;
                        case QFILE_UNIF:
                                src[i] = qpu_unif();
                                break;
                        case QFILE_VARY:
                                src[i] = qpu_vary();
                                break;
                        }
                }

                struct qpu_reg dst;
                switch (qinst->dst.file) {
                case QFILE_NULL:
                        dst = qpu_ra(QPU_W_NOP);
                        break;

                case QFILE_TEMP:
                        if (reg_allocated[qinst->dst.index] == -1) {
                                int alloc;
                                for (alloc = 0;
                                     alloc < ARRAY_SIZE(reg_in_use);
                                     alloc++) {
                                        /* The pack flags require an A-file register. */
                                        if (qinst->op == QOP_PACK_SCALED &&
                                            allocate_to_qpu_reg[alloc].mux != QPU_MUX_A) {
                                                continue;
                                        }

                                        if (!reg_in_use[alloc])
                                                break;
                                }
                                assert(alloc != ARRAY_SIZE(reg_in_use) && "need better reg alloc");
                                reg_in_use[alloc] = true;
                                reg_allocated[qinst->dst.index] = alloc;
                        }

                        dst = allocate_to_qpu_reg[reg_allocated[qinst->dst.index]];

                        reg_uses_remaining[qinst->dst.index]--;
                        if (reg_uses_remaining[qinst->dst.index] == 0) {
                                reg_in_use[reg_allocated[qinst->dst.index]] =
                                        false;
                        }
                        break;

                case QFILE_VARY:
                case QFILE_UNIF:
                        assert(!"not reached");
                        break;
                }

                switch (qinst->op) {
                case QOP_MOV:
                        /* Skip emitting the MOV if it's a no-op. */
                        if (dst.mux == QPU_MUX_A || dst.mux == QPU_MUX_B ||
                            dst.mux != src[0].mux || dst.addr != src[0].addr) {
                                queue(c, qpu_inst(qpu_a_MOV(dst, src[0]),
                                                  qpu_m_NOP()));
                        }
                        break;

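                /* QOP_CMP is a select on the sign of src[0]: the first MOV
                 * exists only to set the condition flags, then dst is
                 * written with src[1] where the negative flag is set
                 * (src[0] < 0) and with src[2] where it is clear.
                 */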
                case QOP_CMP:
                        queue(c, qpu_inst(qpu_a_MOV(qpu_ra(QPU_W_NOP),
                                                    src[0]),
                                          qpu_m_NOP()));
                        *last_inst(c) |= QPU_SF;

                        if (dst.mux <= QPU_MUX_R3) {
                                fixup_raddr_conflict(c, src[1], &src[2]);
                                queue(c, qpu_inst(qpu_a_MOV(dst, src[1]),
                                                  qpu_m_MOV(dst, src[2])));
                                *last_inst(c) = ((*last_inst(c) & ~(QPU_COND_ADD_MASK |
                                                                    QPU_COND_MUL_MASK))
                                                 | QPU_SET_FIELD(QPU_COND_NS,
                                                                 QPU_COND_ADD)
                                                 | QPU_SET_FIELD(QPU_COND_NC,
                                                                 QPU_COND_MUL));
                        } else {
                                if (dst.mux == src[1].mux &&
                                    dst.addr == src[1].addr) {
                                        queue(c, qpu_inst(qpu_a_MOV(dst, src[1]),
                                                          qpu_m_NOP()));

                                        queue(c, qpu_inst(qpu_a_MOV(dst, src[2]),
                                                          qpu_m_NOP()));
                                        *last_inst(c) = ((*last_inst(c) & ~(QPU_COND_ADD_MASK))
                                                         | QPU_SET_FIELD(QPU_COND_NC,
                                                                         QPU_COND_ADD));
                                } else {
                                        queue(c, qpu_inst(qpu_a_MOV(dst, src[2]),
                                                          qpu_m_NOP()));

                                        queue(c, qpu_inst(qpu_a_MOV(dst, src[1]),
                                                          qpu_m_NOP()));
                                        *last_inst(c) = ((*last_inst(c) & ~(QPU_COND_ADD_MASK))
                                                         | QPU_SET_FIELD(QPU_COND_NS,
                                                                         QPU_COND_ADD));
                                }
                        }
                        break;

                case QOP_SEQ:
                case QOP_SNE:
                case QOP_SGE:
                case QOP_SLT:
                        fixup_raddr_conflict(c, src[0], &src[1]);
                        queue(c, qpu_inst(qpu_a_SUB(qpu_ra(QPU_W_NOP),
                                                    src[0], src[1]),
                                          qpu_m_NOP()));
                        *last_inst(c) |= QPU_SF;

                        queue(c, qpu_load_imm_f(dst, 0.0));
                        queue(c, qpu_load_imm_f(dst, 1.0));
                        *last_inst(c) = ((*last_inst(c) & ~QPU_COND_ADD_MASK)
                                         | QPU_SET_FIELD(compareflags[qinst->op - QOP_SEQ],
                                                         QPU_COND_ADD));

                        break;

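                /* VPM reads and writes go through the FIFOs configured by
                 * the vr_setup/vw_setup writes emitted at the top of this
                 * function for vertex and coordinate shaders.
                 */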
                case QOP_VPM_WRITE:
                        queue(c, qpu_inst(qpu_a_MOV(qpu_ra(QPU_W_VPM), src[0]),
                                          qpu_m_NOP()));
                        break;

                case QOP_VPM_READ:
                        queue(c, qpu_inst(qpu_a_MOV(dst, qpu_ra(QPU_R_VPM)),
                                          qpu_m_NOP()));
                        break;

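                /* The SFU ops work by writing the operand to a special
                 * function register; the result then shows up in r4 two
                 * instructions later.  The MOV from r4 below relies on
                 * serialize_insts() padding out that latency with NOPs.
                 */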
                case QOP_RCP:
                case QOP_RSQ:
                case QOP_EXP2:
                case QOP_LOG2:
                        switch (qinst->op) {
                        case QOP_RCP:
                                queue(c, qpu_inst(qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIP),
                                                            src[0]),
                                                  qpu_m_NOP()));
                                break;
                        case QOP_RSQ:
                                queue(c, qpu_inst(qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIPSQRT),
                                                            src[0]),
                                                  qpu_m_NOP()));
                                break;
                        case QOP_EXP2:
                                queue(c, qpu_inst(qpu_a_MOV(qpu_rb(QPU_W_SFU_EXP),
                                                            src[0]),
                                                  qpu_m_NOP()));
                                break;
                        case QOP_LOG2:
                                queue(c, qpu_inst(qpu_a_MOV(qpu_rb(QPU_W_SFU_LOG),
                                                            src[0]),
                                                  qpu_m_NOP()));
                                break;
                        default:
                                abort();
                        }

                        queue(c, qpu_inst(qpu_a_MOV(dst, qpu_r4()),
                                          qpu_m_NOP()));

                        break;

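                /* QOP_PACK_COLORS assembles a 32-bit color in r3 one byte
                 * at a time using the MUL-unit 8a-8d pack modes (QPU_PM
                 * routes the pack through the MUL output), then moves the
                 * packed word into dst.
                 */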
                case QOP_PACK_COLORS:
                        for (int i = 0; i < 4; i++) {
                                queue(c, qpu_inst(qpu_a_NOP(),
                                                  qpu_m_MOV(qpu_r3(), src[i])));
                                *last_inst(c) |= QPU_PM;
                                *last_inst(c) |= QPU_SET_FIELD(QPU_PACK_MUL_8A + i,
                                                               QPU_PACK);
                        }

                        queue(c, qpu_inst(qpu_a_MOV(dst, qpu_r3()),
                                          qpu_m_NOP()));

                        break;

                case QOP_FRAG_X:
                        queue(c, qpu_inst(qpu_a_ITOF(dst,
                                                     qpu_ra(QPU_R_XY_PIXEL_COORD)),
                                          qpu_m_NOP()));
                        break;

                case QOP_FRAG_Y:
                        queue(c, qpu_inst(qpu_a_ITOF(dst,
                                                     qpu_rb(QPU_R_XY_PIXEL_COORD)),
                                          qpu_m_NOP()));
                        break;

                case QOP_FRAG_Z:
                        queue(c, qpu_inst(qpu_a_ITOF(dst,
                                                     qpu_rb(QPU_R_FRAG_PAYLOAD_ZW)),
                                          qpu_m_NOP()));
                        break;

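                /* 1/W is computed by feeding the W fragment payload (the
                 * A-file half of the Z/W payload register) through the SFU
                 * reciprocal unit; the r4 read below again depends on
                 * serialize_insts() inserting the SFU latency NOPs.
                 */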
                case QOP_FRAG_RCP_W:
                        queue(c, qpu_inst(qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIP),
                                                    qpu_ra(QPU_R_FRAG_PAYLOAD_ZW)),
                                          qpu_m_NOP()));

                        queue(c, qpu_inst(qpu_a_MOV(dst, qpu_r4()),
                                          qpu_m_NOP()));
                        break;

                case QOP_TLB_PASSTHROUGH_Z_WRITE:
                        queue(c, qpu_inst(qpu_a_MOV(qpu_ra(QPU_W_TLB_Z),
                                                    qpu_rb(QPU_R_FRAG_PAYLOAD_ZW)),
                                          qpu_m_NOP()));
                        break;

                case QOP_TLB_COLOR_READ:
                        queue(c, qpu_inst(qpu_a_NOP(), qpu_m_NOP()));
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_COLOR_LOAD);

                        break;

                case QOP_TLB_COLOR_WRITE:
                        queue(c, qpu_inst(qpu_a_MOV(qpu_tlbc(),
                                                    src[0]),
                                          qpu_m_NOP()));
                        break;

                case QOP_VARY_ADD_C:
                        queue(c, qpu_inst(qpu_a_FADD(dst,
                                                     src[0], qpu_r5()),
                                          qpu_m_NOP()));
                        break;

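                /* QOP_PACK_SCALED packs two 16-bit values into the A-file
                 * dst using the 16a/16b pack modes.  If dst aliases src[1],
                 * the 16b half is written first so the 16a write doesn't
                 * clobber src[1] before it is read.
                 */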
                case QOP_PACK_SCALED: {
                        uint64_t a = (qpu_inst(qpu_a_MOV(dst, src[0]),
                                               qpu_m_NOP()) |
                                      QPU_SET_FIELD(QPU_PACK_A_16A,
                                                    QPU_PACK));
                        uint64_t b = (qpu_inst(qpu_a_MOV(dst, src[1]),
                                               qpu_m_NOP()) |
                                      QPU_SET_FIELD(QPU_PACK_A_16B,
                                                    QPU_PACK));

                        if (dst.mux == src[1].mux && dst.addr == src[1].addr) {
                                queue(c, b);
                                queue(c, a);
                        } else {
                                queue(c, a);
                                queue(c, b);
                        }
                        break;
                }

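                /* Texture coordinates are written to consecutive TMU
                 * registers (s, t, r, b), mirroring the
                 * QOP_TEX_S..QOP_TEX_B enum order; per the hardware spec,
                 * the write to the S register is what actually fires off
                 * the lookup.
                 */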
                case QOP_TEX_S:
                case QOP_TEX_T:
                case QOP_TEX_R:
                case QOP_TEX_B:
                        queue(c, qpu_inst(qpu_a_MOV(qpu_rb(QPU_W_TMU0_S +
                                                           (qinst->op -
                                                            QOP_TEX_S)),
                                                    src[0]),
                                          qpu_m_NOP()));
                        break;

                case QOP_TEX_RESULT:
                        queue(c, qpu_inst(qpu_a_NOP(), qpu_m_NOP()));
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_LOAD_TMU0);

                        break;

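                /* TMU results land in r4; these ops copy it to dst while
                 * selecting one byte with the r4 8a-8d unpack modes (QPU_PM
                 * routes the unpack through the r4 path).
                 */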
                case QOP_R4_UNPACK_A:
                case QOP_R4_UNPACK_B:
                case QOP_R4_UNPACK_C:
                case QOP_R4_UNPACK_D:
                        queue(c, qpu_inst(qpu_a_MOV(dst, qpu_r4()),
                                          qpu_m_NOP()));
                        *last_inst(c) |= QPU_PM;
                        *last_inst(c) |= QPU_SET_FIELD(QPU_UNPACK_R4_8A +
                                                       (qinst->op -
                                                        QOP_R4_UNPACK_A),
                                                       QPU_UNPACK);

                        break;

                default:
                        assert(qinst->op < ARRAY_SIZE(translate));
                        assert(translate[qinst->op].op != 0); /* NOPs */

                        /* If we have only one source, put it in the second
                         * argument slot as well so that we don't take up
                         * another raddr just to get unused data.
                         */
                        if (qir_get_op_nsrc(qinst->op) == 1)
                                src[1] = src[0];

                        fixup_raddr_conflict(c, src[0], &src[1]);

                        if (translate[qinst->op].is_mul) {
                                queue(c, qpu_inst(qpu_a_NOP(),
                                                  qpu_m_alu2(translate[qinst->op].op,
                                                             dst,
                                                             src[0], src[1])));
                        } else {
                                queue(c, qpu_inst(qpu_a_alu2(translate[qinst->op].op,
                                                             dst,
                                                             src[0], src[1]),
                                                  qpu_m_NOP()));
                        }
                        break;
                }
        }

        serialize_insts(c);

        /* thread end can't have VPM write */
        if (QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_ADD) == QPU_W_VPM ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_MUL) == QPU_W_VPM) {
                serialize_one_inst(c, qpu_inst(qpu_a_NOP(), qpu_m_NOP()));
        }

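        /* The thread-end signal has two delay slots, so two NOPs are
         * emitted after the instruction tagged with PROG_END; for fragment
         * shaders the last of them also carries the scoreboard unlock set
         * below.
         */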
        c->qpu_insts[c->qpu_inst_count - 1] =
                qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                            QPU_SIG_PROG_END);
        serialize_one_inst(c, qpu_inst(qpu_a_NOP(), qpu_m_NOP()));
        serialize_one_inst(c, qpu_inst(qpu_a_NOP(), qpu_m_NOP()));

        switch (c->stage) {
        case QSTAGE_VERT:
        case QSTAGE_COORD:
                break;
        case QSTAGE_FRAG:
                c->qpu_insts[c->qpu_inst_count - 1] =
                        qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                                    QPU_SIG_SCOREBOARD_UNLOCK);
                break;
        }

        if (vc4_debug & VC4_DEBUG_QPU)
                vc4_dump_program(c);

        vc4_qpu_validate(c->qpu_insts, c->qpu_inst_count);
}