src/gallium/drivers/vc4/vc4_qpu_emit.c
/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <stdio.h>
#include <inttypes.h>

#include "vc4_context.h"
#include "vc4_qir.h"
#include "vc4_qpu.h"

static void
vc4_dump_program(struct qcompile *c)
{
        fprintf(stderr, "%s:\n", qir_get_stage_name(c->stage));

        for (int i = 0; i < c->qpu_inst_count; i++) {
                fprintf(stderr, "0x%016"PRIx64" ", c->qpu_insts[i]);
                vc4_qpu_disasm(&c->qpu_insts[i], 1);
                fprintf(stderr, "\n");
        }
}

struct queued_qpu_inst {
        struct simple_node link;
        uint64_t inst;
};

static void
queue(struct qcompile *c, uint64_t inst)
{
        struct queued_qpu_inst *q = calloc(1, sizeof(*q));
        q->inst = inst;
        insert_at_tail(&c->qpu_inst_list, &q->link);
}

static uint64_t *
last_inst(struct qcompile *c)
{
        struct queued_qpu_inst *q =
                (struct queued_qpu_inst *)last_elem(&c->qpu_inst_list);
        return &q->inst;
}

/**
 * This is used to resolve the fact that we might register-allocate two
 * different operands of an instruction to the same physical register file,
 * even though the instruction encoding has only one raddr field per
 * register file.
 *
 * In that case, we need to move one of the operands to a temporary that can
 * be used in the instruction, instead.
 */
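/* For example (with hypothetical register allocation), an instruction like
 *
 *     fadd r0, ra1, ra2
 *
 * is not encodable, since raddr_a can only name one A-file register.  The
 * fixup rewrites it as:
 *
 *     mov r3, ra2
 *     fadd r0, ra1, r3
 *
 * Note that the check below is conservative: it also fires for an A-file
 * source paired with a B-file source at a different address, which would
 * actually have been encodable.
 */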
static void
fixup_raddr_conflict(struct qcompile *c,
                     struct qpu_reg src0, struct qpu_reg *src1)
{
        if ((src0.mux == QPU_MUX_A || src0.mux == QPU_MUX_B) &&
            (src1->mux == QPU_MUX_A || src1->mux == QPU_MUX_B) &&
            src0.addr != src1->addr) {
                queue(c, qpu_inst(qpu_a_MOV(qpu_r3(), *src1),
                                  qpu_m_NOP()));
                *src1 = qpu_r3();
        }
}

static void
serialize_one_inst(struct qcompile *c, uint64_t inst)
{
        if (c->qpu_inst_count >= c->qpu_inst_size) {
                c->qpu_inst_size = MAX2(16, c->qpu_inst_size * 2);
                c->qpu_insts = realloc(c->qpu_insts,
                                       c->qpu_inst_size * sizeof(uint64_t));
        }
        c->qpu_insts[c->qpu_inst_count++] = inst;
}

static void
serialize_insts(struct qcompile *c)
{
        int last_sfu_write = -10;

        while (!is_empty_list(&c->qpu_inst_list)) {
                struct queued_qpu_inst *q =
                        (struct queued_qpu_inst *)first_elem(&c->qpu_inst_list);
                uint32_t last_waddr_a = QPU_W_NOP, last_waddr_b = QPU_W_NOP;
                uint32_t raddr_a = QPU_GET_FIELD(q->inst, QPU_RADDR_A);
                uint32_t raddr_b = QPU_GET_FIELD(q->inst, QPU_RADDR_B);

                if (c->qpu_inst_count > 0) {
                        uint64_t last_inst = c->qpu_insts[c->qpu_inst_count -
                                                          1];
                        uint32_t last_waddr_add = QPU_GET_FIELD(last_inst,
                                                                QPU_WADDR_ADD);
                        uint32_t last_waddr_mul = QPU_GET_FIELD(last_inst,
                                                                QPU_WADDR_MUL);

                        if (last_inst & QPU_WS) {
                                last_waddr_a = last_waddr_mul;
                                last_waddr_b = last_waddr_add;
                        } else {
                                last_waddr_a = last_waddr_add;
                                last_waddr_b = last_waddr_mul;
                        }
                }

                uint32_t src_muxes[] = {
                        QPU_GET_FIELD(q->inst, QPU_ADD_A),
                        QPU_GET_FIELD(q->inst, QPU_ADD_B),
                        QPU_GET_FIELD(q->inst, QPU_MUL_A),
                        QPU_GET_FIELD(q->inst, QPU_MUL_B),
                };

                /* "An instruction must not read from a location in physical
                 *  regfile A or B that was written to by the previous
                 *  instruction."
                 */
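                /* For example (hypothetical allocation), the sequence
                 *
                 *     mov ra3, r0
                 *     fadd r1, ra3, r2
                 *
                 * is illegal, so a NOP has to be scheduled between the
                 * write and the read of ra3.
                 */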
                bool needs_raddr_vs_waddr_nop = false;
                bool reads_r4 = false;
                for (int i = 0; i < ARRAY_SIZE(src_muxes); i++) {
                        if ((raddr_a < 32 &&
                             src_muxes[i] == QPU_MUX_A &&
                             last_waddr_a == raddr_a) ||
                            (raddr_b < 32 &&
                             src_muxes[i] == QPU_MUX_B &&
                             last_waddr_b == raddr_b)) {
                                needs_raddr_vs_waddr_nop = true;
                        }
                        if (src_muxes[i] == QPU_MUX_R4)
                                reads_r4 = true;
                }

                if (needs_raddr_vs_waddr_nop) {
                        serialize_one_inst(c, qpu_inst(qpu_a_NOP(),
                                                       qpu_m_NOP()));
                }

                /* "After an SFU lookup instruction, accumulator r4 must not
                 *  be read in the following two instructions. Any other
                 *  instruction that results in r4 being written (that is, TMU
                 *  read, TLB read, SFU lookup) cannot occur in the two
                 *  instructions following an SFU lookup."
                 */
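                /* In other words, an SFU result needs two delay slots
                 * before r4 can be consumed:
                 *
                 *     mov sfu_recip, r0
                 *     nop
                 *     nop
                 *     mov r1, r4      ; earliest legal read of the result
                 */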
                if (reads_r4) {
                        while (c->qpu_inst_count - last_sfu_write < 3) {
                                serialize_one_inst(c, qpu_inst(qpu_a_NOP(),
                                                               qpu_m_NOP()));
                        }
                }

                uint32_t waddr_a = QPU_GET_FIELD(q->inst, QPU_WADDR_ADD);
                uint32_t waddr_m = QPU_GET_FIELD(q->inst, QPU_WADDR_MUL);
                if ((waddr_a >= QPU_W_SFU_RECIP && waddr_a <= QPU_W_SFU_LOG) ||
                    (waddr_m >= QPU_W_SFU_RECIP && waddr_m <= QPU_W_SFU_LOG)) {
                        last_sfu_write = c->qpu_inst_count;
                }

                serialize_one_inst(c, q->inst);

                remove_from_list(&q->link);
                free(q);
        }
}

void
vc4_generate_code(struct qcompile *c)
{
        struct qpu_reg allocate_to_qpu_reg[3 + 32 + 32];
        bool reg_in_use[ARRAY_SIZE(allocate_to_qpu_reg)];
        int *reg_allocated = calloc(c->num_temps, sizeof(*reg_allocated));
        int *reg_uses_remaining =
                calloc(c->num_temps, sizeof(*reg_uses_remaining));

        for (int i = 0; i < ARRAY_SIZE(reg_in_use); i++)
                reg_in_use[i] = false;
        for (int i = 0; i < c->num_temps; i++)
                reg_allocated[i] = -1;
        for (int i = 0; i < 3; i++)
                allocate_to_qpu_reg[i] = qpu_rn(i);
        for (int i = 0; i < 32; i++)
                allocate_to_qpu_reg[i + 3] = qpu_ra(i);
        for (int i = 0; i < 32; i++)
                allocate_to_qpu_reg[i + 3 + 32] = qpu_rb(i);
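
        /* The allocation map just built is: indices 0..2 are accumulators
         * r0-r2, 3..34 are regfile A (ra0-ra31), and 35..66 are regfile B
         * (rb0-rb31).  r3 is deliberately left out so it can be used as a
         * temporary by fixup_raddr_conflict() and QOP_PACK_COLORS, and
         * r4/r5 are written by hardware (SFU/TMU/TLB results and varying
         * parameters, respectively).
         */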

        make_empty_list(&c->qpu_inst_list);

        struct simple_node *node;
        foreach(node, &c->instructions) {
                struct qinst *qinst = (struct qinst *)node;

                if (qinst->dst.file == QFILE_TEMP)
                        reg_uses_remaining[qinst->dst.index]++;
                for (int i = 0; i < qir_get_op_nsrc(qinst->op); i++) {
                        if (qinst->src[i].file == QFILE_TEMP)
                                reg_uses_remaining[qinst->src[i].index]++;
                }
                if (qinst->op == QOP_TLB_PASSTHROUGH_Z_WRITE ||
                    qinst->op == QOP_FRAG_Z)
                        reg_in_use[3 + 32 + QPU_R_FRAG_PAYLOAD_ZW] = true;
        }

        switch (c->stage) {
        case QSTAGE_VERT:
        case QSTAGE_COORD:
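                /* Program the VPM for reading the vertex attributes and
                 * writing the shaded vertex output.  A best-effort decode
                 * of the magic setup words, going by the VideoCore IV VPM
                 * setup encoding: 0x00001a00 selects horizontal 32-bit
                 * vectors with a stride of 1 starting at VPM address 0,
                 * and each input increments the read setup's NUM field
                 * (bit 20).
                 */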
                queue(c, qpu_load_imm_ui(qpu_vrsetup(),
                                         (0x00001a00 +
                                          0x00100000 * c->num_inputs)));
                queue(c, qpu_load_imm_ui(qpu_vwsetup(), 0x00001a00));
                break;
        case QSTAGE_FRAG:
                break;
        }

        foreach(node, &c->instructions) {
                struct qinst *qinst = (struct qinst *)node;

#if 0
                fprintf(stderr, "translating qinst to qpu: ");
                qir_dump_inst(qinst);
                fprintf(stderr, "\n");
#endif

                static const struct {
                        uint32_t op;
                        bool is_mul;
                } translate[] = {
#define A(name) [QOP_##name] = {QPU_A_##name, false}
#define M(name) [QOP_##name] = {QPU_M_##name, true}
                        A(FADD),
                        A(FSUB),
                        A(FMIN),
                        A(FMAX),
                        A(FMINABS),
                        A(FMAXABS),
                        A(FTOI),
                        A(ITOF),

                        M(FMUL),
                };

                static const uint32_t compareflags[] = {
                        [QOP_SEQ - QOP_SEQ] = QPU_COND_ZS,
                        [QOP_SNE - QOP_SEQ] = QPU_COND_ZC,
                        [QOP_SLT - QOP_SEQ] = QPU_COND_NS,
                        [QOP_SGE - QOP_SEQ] = QPU_COND_NC,
                };

                struct qpu_reg src[4];
                for (int i = 0; i < qir_get_op_nsrc(qinst->op); i++) {
                        int index = qinst->src[i].index;
                        switch (qinst->src[i].file) {
                        case QFILE_NULL:
                                src[i] = qpu_rn(0);
                                break;
                        case QFILE_TEMP:
                                if (reg_allocated[index] == -1) {
                                        fprintf(stderr, "undefined reg use: ");
                                        qir_dump_inst(qinst);
                                        fprintf(stderr, "\n");

                                        src[i] = qpu_rn(0);
                                } else {
                                        src[i] = allocate_to_qpu_reg[reg_allocated[index]];
                                        reg_uses_remaining[index]--;
                                        if (reg_uses_remaining[index] == 0)
                                                reg_in_use[reg_allocated[index]] = false;
                                }
                                break;
                        case QFILE_UNIF:
                                src[i] = qpu_unif();
                                break;
                        case QFILE_VARY:
                                src[i] = qpu_vary();
                                break;
                        }
                }

                struct qpu_reg dst;
                switch (qinst->dst.file) {
                case QFILE_NULL:
                        dst = qpu_ra(QPU_W_NOP);
                        break;

                case QFILE_TEMP:
                        if (reg_allocated[qinst->dst.index] == -1) {
                                int alloc;
                                for (alloc = 0;
                                     alloc < ARRAY_SIZE(reg_in_use);
                                     alloc++) {
                                        /* The pack flags require an A-file register. */
                                        if (qinst->op == QOP_PACK_SCALED &&
                                            allocate_to_qpu_reg[alloc].mux != QPU_MUX_A) {
                                                continue;
                                        }

                                        if (!reg_in_use[alloc])
                                                break;
                                }
                                assert(alloc != ARRAY_SIZE(reg_in_use) && "need better reg alloc");
                                reg_in_use[alloc] = true;
                                reg_allocated[qinst->dst.index] = alloc;
                        }

                        dst = allocate_to_qpu_reg[reg_allocated[qinst->dst.index]];

                        reg_uses_remaining[qinst->dst.index]--;
                        if (reg_uses_remaining[qinst->dst.index] == 0) {
                                reg_in_use[reg_allocated[qinst->dst.index]] =
                                        false;
                        }
                        break;

                case QFILE_VARY:
                case QFILE_UNIF:
                        assert(!"not reached");
                        break;
                }

                switch (qinst->op) {
                case QOP_MOV:
                        /* Skip emitting the MOV if it's a no-op. */
                        if (dst.mux == QPU_MUX_A || dst.mux == QPU_MUX_B ||
                            dst.mux != src[0].mux || dst.addr != src[0].addr) {
                                queue(c, qpu_inst(qpu_a_MOV(dst, src[0]),
                                                  qpu_m_NOP()));
                        }
                        break;

                case QOP_CMP:
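                        /* CMP is lowered as a select on the sign of
                         * src[0]: the MOV to the NOP register exists only
                         * to set the condition flags (QPU_SF), then src[1]
                         * is written where the negative flag is set and
                         * src[2] where it is clear.
                         */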
                        queue(c, qpu_inst(qpu_a_MOV(qpu_ra(QPU_W_NOP),
                                                    src[0]),
                                          qpu_m_NOP()));
                        *last_inst(c) |= QPU_SF;

                        if (dst.mux <= QPU_MUX_R3) {
                                fixup_raddr_conflict(c, src[1], &src[2]);
                                queue(c, qpu_inst(qpu_a_MOV(dst, src[1]),
                                                  qpu_m_MOV(dst, src[2])));
                                *last_inst(c) = ((*last_inst(c) & ~(QPU_COND_ADD_MASK |
                                                                    QPU_COND_MUL_MASK))
                                                 | QPU_SET_FIELD(QPU_COND_NS,
                                                                 QPU_COND_ADD)
                                                 | QPU_SET_FIELD(QPU_COND_NC,
                                                                 QPU_COND_MUL));
                        } else {
                                if (dst.mux == src[1].mux &&
                                    dst.addr == src[1].addr) {
                                        queue(c, qpu_inst(qpu_a_MOV(dst, src[1]),
                                                          qpu_m_NOP()));

                                        queue(c, qpu_inst(qpu_a_MOV(dst, src[2]),
                                                          qpu_m_NOP()));
                                        *last_inst(c) = ((*last_inst(c) & ~(QPU_COND_ADD_MASK))
                                                         | QPU_SET_FIELD(QPU_COND_NC,
                                                                         QPU_COND_ADD));
                                } else {
                                        queue(c, qpu_inst(qpu_a_MOV(dst, src[2]),
                                                          qpu_m_NOP()));

                                        queue(c, qpu_inst(qpu_a_MOV(dst, src[1]),
                                                          qpu_m_NOP()));
                                        *last_inst(c) = ((*last_inst(c) & ~(QPU_COND_ADD_MASK))
                                                         | QPU_SET_FIELD(QPU_COND_NS,
                                                                         QPU_COND_ADD));
                                }
                        }
                        break;

                case QOP_SEQ:
                case QOP_SNE:
                case QOP_SGE:
                case QOP_SLT:
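                        /* The SET-on-compare ops all lower the same way:
                         * subtract to set the flags, write 0.0
                         * unconditionally, then overwrite with 1.0 under
                         * the matching condition code from compareflags[].
                         */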
                        fixup_raddr_conflict(c, src[0], &src[1]);
                        queue(c, qpu_inst(qpu_a_SUB(qpu_ra(QPU_W_NOP),
                                                    src[0], src[1]),
                                          qpu_m_NOP()));
                        *last_inst(c) |= QPU_SF;

                        queue(c, qpu_load_imm_f(dst, 0.0));
                        queue(c, qpu_load_imm_f(dst, 1.0));
                        *last_inst(c) = ((*last_inst(c) & ~QPU_COND_ADD_MASK)
                                         | QPU_SET_FIELD(compareflags[qinst->op - QOP_SEQ],
                                                         QPU_COND_ADD));

                        break;

                case QOP_VPM_WRITE:
                        queue(c, qpu_inst(qpu_a_MOV(qpu_ra(QPU_W_VPM), src[0]),
                                          qpu_m_NOP()));
                        break;

                case QOP_VPM_READ:
                        queue(c, qpu_inst(qpu_a_MOV(dst, qpu_ra(QPU_R_VPM)),
                                          qpu_m_NOP()));
                        break;

                case QOP_RCP:
                case QOP_RSQ:
                case QOP_EXP2:
                case QOP_LOG2:
                        switch (qinst->op) {
                        case QOP_RCP:
                                queue(c, qpu_inst(qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIP),
                                                            src[0]),
                                                  qpu_m_NOP()));
                                break;
                        case QOP_RSQ:
                                queue(c, qpu_inst(qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIPSQRT),
                                                            src[0]),
                                                  qpu_m_NOP()));
                                break;
                        case QOP_EXP2:
                                queue(c, qpu_inst(qpu_a_MOV(qpu_rb(QPU_W_SFU_EXP),
                                                            src[0]),
                                                  qpu_m_NOP()));
                                break;
                        case QOP_LOG2:
                                queue(c, qpu_inst(qpu_a_MOV(qpu_rb(QPU_W_SFU_LOG),
                                                            src[0]),
                                                  qpu_m_NOP()));
                                break;
                        default:
                                abort();
                        }

                        queue(c, qpu_inst(qpu_a_MOV(dst, qpu_r4()),
                                          qpu_m_NOP()));

                        break;

                case QOP_PACK_COLORS:
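                        /* Build the 8888 color in r3 one byte at a time:
                         * with QPU_PM set, the MUL pipe's pack unit
                         * converts each float channel to an 8-bit unorm
                         * and merges it into byte lane i
                         * (QPU_PACK_MUL_8A + i).
                         */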
                        for (int i = 0; i < 4; i++) {
                                queue(c, qpu_inst(qpu_a_NOP(),
                                                  qpu_m_MOV(qpu_r3(), src[i])));
                                *last_inst(c) |= QPU_PM;
                                *last_inst(c) |= QPU_SET_FIELD(QPU_PACK_MUL_8A + i,
                                                               QPU_PACK);
                        }

                        queue(c, qpu_inst(qpu_a_MOV(dst, qpu_r3()),
                                          qpu_m_NOP()));

                        break;

                case QOP_FRAG_X:
                        queue(c, qpu_inst(qpu_a_ITOF(dst,
                                                     qpu_ra(QPU_R_XY_PIXEL_COORD)),
                                          qpu_m_NOP()));
                        break;

                case QOP_FRAG_Y:
                        queue(c, qpu_inst(qpu_a_ITOF(dst,
                                                     qpu_rb(QPU_R_XY_PIXEL_COORD)),
                                          qpu_m_NOP()));
                        break;

                case QOP_FRAG_Z:
                        queue(c, qpu_inst(qpu_a_ITOF(dst,
                                                     qpu_rb(QPU_R_FRAG_PAYLOAD_ZW)),
                                          qpu_m_NOP()));
                        break;

                case QOP_FRAG_RCP_W:
                        queue(c, qpu_inst(qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIP),
                                                    qpu_ra(QPU_R_FRAG_PAYLOAD_ZW)),
                                          qpu_m_NOP()));

                        queue(c, qpu_inst(qpu_a_MOV(dst, qpu_r4()),
                                          qpu_m_NOP()));
                        break;

                case QOP_TLB_PASSTHROUGH_Z_WRITE:
                        queue(c, qpu_inst(qpu_a_MOV(qpu_ra(QPU_W_TLB_Z),
                                                    qpu_rb(QPU_R_FRAG_PAYLOAD_ZW)),
                                          qpu_m_NOP()));
                        break;

                case QOP_TLB_COLOR_READ:
                        queue(c, qpu_inst(qpu_a_NOP(), qpu_m_NOP()));
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_COLOR_LOAD);

                        break;

                case QOP_TLB_COLOR_WRITE:
                        queue(c, qpu_inst(qpu_a_MOV(qpu_tlbc(),
                                                    src[0]),
                                          qpu_m_NOP()));
                        break;

                case QOP_VARY_ADD_C:
                        queue(c, qpu_inst(qpu_a_FADD(dst,
                                                     src[0], qpu_r5()),
                                          qpu_m_NOP()));
                        break;

                case QOP_PACK_SCALED: {
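                        /* Pack the two sources into the 16-bit halves of
                         * dst with the regfile-A pack unit (16A is the low
                         * half, 16B the high half); this is why
                         * PACK_SCALED was restricted to an A-file register
                         * above.  If dst aliases src[1], the instruction
                         * that reads src[1] has to be queued first, before
                         * the other half's pack partially overwrites it.
                         */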
                        uint64_t a = (qpu_inst(qpu_a_MOV(dst, src[0]),
                                               qpu_m_NOP()) |
                                      QPU_SET_FIELD(QPU_PACK_A_16A,
                                                    QPU_PACK));
                        uint64_t b = (qpu_inst(qpu_a_MOV(dst, src[1]),
                                               qpu_m_NOP()) |
                                      QPU_SET_FIELD(QPU_PACK_A_16B,
                                                    QPU_PACK));

                        if (dst.mux == src[1].mux && dst.addr == src[1].addr) {
                                queue(c, b);
                                queue(c, a);
                        } else {
                                queue(c, a);
                                queue(c, b);
                        }
                        break;
                }

                case QOP_TEX_S:
                case QOP_TEX_T:
                case QOP_TEX_R:
                case QOP_TEX_B:
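                        /* This relies on QOP_TEX_S..QOP_TEX_B being
                         * declared in the same order as the consecutive
                         * TMU0 S/T/R/B write addresses, so the target is
                         * just an offset from QPU_W_TMU0_S.
                         */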
                        queue(c, qpu_inst(qpu_a_MOV(qpu_rb(QPU_W_TMU0_S +
                                                           (qinst->op -
                                                            QOP_TEX_S)),
                                                    src[0]),
                                          qpu_m_NOP()));
                        break;

                case QOP_TEX_RESULT:
                        queue(c, qpu_inst(qpu_a_NOP(), qpu_m_NOP()));
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_LOAD_TMU0);

                        break;

                case QOP_R4_UNPACK_A:
                case QOP_R4_UNPACK_B:
                case QOP_R4_UNPACK_C:
                case QOP_R4_UNPACK_D:
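                        /* With QPU_PM set, the unpack unit operates on r4:
                         * UNPACK_R4_8A..8D each extract one byte lane of
                         * the TMU/TLB result as a normalized float.
                         */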
                        queue(c, qpu_inst(qpu_a_MOV(dst, qpu_r4()),
                                          qpu_m_NOP()));
                        *last_inst(c) |= QPU_PM;
                        *last_inst(c) |= QPU_SET_FIELD(QPU_UNPACK_R4_8A +
                                                       (qinst->op -
                                                        QOP_R4_UNPACK_A),
                                                       QPU_UNPACK);

                        break;

                default:
                        assert(qinst->op < ARRAY_SIZE(translate));
                        /* A zero entry is QPU_A_NOP, meaning the op has no
                         * entry in the translation table above.
                         */
                        assert(translate[qinst->op].op != 0);

                        /* If we have only one source, put it in the second
                         * argument slot as well so that we don't take up
                         * another raddr just to get unused data.
                         */
                        if (qir_get_op_nsrc(qinst->op) == 1)
                                src[1] = src[0];

                        fixup_raddr_conflict(c, src[0], &src[1]);

                        if (translate[qinst->op].is_mul) {
                                queue(c, qpu_inst(qpu_a_NOP(),
                                                  qpu_m_alu2(translate[qinst->op].op,
                                                             dst,
                                                             src[0], src[1])));
                        } else {
                                queue(c, qpu_inst(qpu_a_alu2(translate[qinst->op].op,
                                                             dst,
                                                             src[0], src[1]),
                                                  qpu_m_NOP()));
                        }
                        break;
                }
        }

        serialize_insts(c);

        /* thread end can't have VPM write */
        if (QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_ADD) == QPU_W_VPM ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_MUL) == QPU_W_VPM) {
                serialize_one_inst(c, qpu_inst(qpu_a_NOP(), qpu_m_NOP()));
        }
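
        /* Set the thread-end signal on the last instruction and pad with
         * two NOPs: the two instructions following thread end still
         * execute as delay slots before the QPU releases the thread.
         */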
        c->qpu_insts[c->qpu_inst_count - 1] =
                qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                            QPU_SIG_PROG_END);
        serialize_one_inst(c, qpu_inst(qpu_a_NOP(), qpu_m_NOP()));
        serialize_one_inst(c, qpu_inst(qpu_a_NOP(), qpu_m_NOP()));

        switch (c->stage) {
        case QSTAGE_VERT:
        case QSTAGE_COORD:
                break;
        case QSTAGE_FRAG:
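                /* A fragment shader has to take the TLB scoreboard before
                 * its first TLB access and release it when finished.  The
                 * wait signal goes on instruction 2 because it is not
                 * allowed within the first two instructions of the
                 * program.
                 */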
                c->qpu_insts[2] = qpu_set_sig(c->qpu_insts[2],
                                              QPU_SIG_WAIT_FOR_SCOREBOARD);
                c->qpu_insts[c->qpu_inst_count - 1] =
                        qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                                    QPU_SIG_SCOREBOARD_UNLOCK);
                break;
        }

        if (vc4_debug & VC4_DEBUG_QPU)
                vc4_dump_program(c);

        vc4_qpu_validate(c->qpu_insts, c->qpu_inst_count);
}