vc4: Stop being so clever in CMP handling.
[mesa.git] / src/gallium/drivers/vc4/vc4_qpu_emit.c
/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <inttypes.h>

#include "vc4_context.h"
#include "vc4_qir.h"
#include "vc4_qpu.h"
static void
vc4_dump_program(struct qcompile *c)
{
        fprintf(stderr, "%s:\n", qir_get_stage_name(c->stage));

        for (int i = 0; i < c->qpu_inst_count; i++) {
                fprintf(stderr, "0x%016"PRIx64" ", c->qpu_insts[i]);
                vc4_qpu_disasm(&c->qpu_insts[i], 1);
                fprintf(stderr, "\n");
        }
}

struct queued_qpu_inst {
        struct simple_node link;
        uint64_t inst;
};

static void
queue(struct qcompile *c, uint64_t inst)
{
        struct queued_qpu_inst *q = calloc(1, sizeof(*q));
        q->inst = inst;
        insert_at_tail(&c->qpu_inst_list, &q->link);
}

static uint64_t *
last_inst(struct qcompile *c)
{
        struct queued_qpu_inst *q =
                (struct queued_qpu_inst *)last_elem(&c->qpu_inst_list);
        return &q->inst;
}

/**
 * This is used to resolve the fact that we might register-allocate two
 * different operands of an instruction to the same physical register file
 * even though instructions have only one field for the register file source
 * address.
 *
 * In that case, we need to move one to a temporary that can be used in the
 * instruction, instead.
 */
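/*
 * For example (illustrative operands): an ALU op reading both ra1 and ra2
 * can't be encoded, because there is only one raddr_a field per
 * instruction, so we emit "mov r3, ra2" first and read r3 instead.  Note
 * that the check below is conservative: it also fires when the two operands
 * live in different register files, which merely costs an extra MOV.
 */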
static void
fixup_raddr_conflict(struct qcompile *c,
                     struct qpu_reg src0, struct qpu_reg *src1)
{
        if ((src0.mux == QPU_MUX_A || src0.mux == QPU_MUX_B) &&
            (src1->mux == QPU_MUX_A || src1->mux == QPU_MUX_B) &&
            src0.addr != src1->addr) {
                queue(c, qpu_a_MOV(qpu_r3(), *src1));
                *src1 = qpu_r3();
        }
}

static void
serialize_one_inst(struct qcompile *c, uint64_t inst)
{
        if (c->qpu_inst_count >= c->qpu_inst_size) {
                c->qpu_inst_size = MAX2(16, c->qpu_inst_size * 2);
                c->qpu_insts = realloc(c->qpu_insts,
                                       c->qpu_inst_size * sizeof(uint64_t));
        }
        c->qpu_insts[c->qpu_inst_count++] = inst;
}

static void
serialize_insts(struct qcompile *c)
{
        int last_sfu_write = -10;
        bool scoreboard_wait_emitted = false;

        while (!is_empty_list(&c->qpu_inst_list)) {
                struct queued_qpu_inst *q =
                        (struct queued_qpu_inst *)first_elem(&c->qpu_inst_list);
                uint32_t last_waddr_a = QPU_W_NOP, last_waddr_b = QPU_W_NOP;
                uint32_t raddr_a = QPU_GET_FIELD(q->inst, QPU_RADDR_A);
                uint32_t raddr_b = QPU_GET_FIELD(q->inst, QPU_RADDR_B);

                if (c->qpu_inst_count > 0) {
                        uint64_t last_inst = c->qpu_insts[c->qpu_inst_count -
                                                          1];
                        uint32_t last_waddr_add = QPU_GET_FIELD(last_inst,
                                                                QPU_WADDR_ADD);
                        uint32_t last_waddr_mul = QPU_GET_FIELD(last_inst,
                                                                QPU_WADDR_MUL);

                        if (last_inst & QPU_WS) {
                                last_waddr_a = last_waddr_mul;
                                last_waddr_b = last_waddr_add;
                        } else {
                                last_waddr_a = last_waddr_add;
                                last_waddr_b = last_waddr_mul;
                        }
                }

                uint32_t src_muxes[] = {
                        QPU_GET_FIELD(q->inst, QPU_ADD_A),
                        QPU_GET_FIELD(q->inst, QPU_ADD_B),
                        QPU_GET_FIELD(q->inst, QPU_MUL_A),
                        QPU_GET_FIELD(q->inst, QPU_MUL_B),
                };

                /* "An instruction must not read from a location in physical
                 * regfile A or B that was written to by the previous
                 * instruction."
                 */
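                /* For example (illustrative): if the previous instruction
                 * wrote ra3 and this one reads ra3, a NOP has to be
                 * scheduled in between, which is what the check below
                 * arranges.
                 */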
                bool needs_raddr_vs_waddr_nop = false;
                bool reads_r4 = false;
                for (int i = 0; i < ARRAY_SIZE(src_muxes); i++) {
                        if ((raddr_a < 32 &&
                             src_muxes[i] == QPU_MUX_A &&
                             last_waddr_a == raddr_a) ||
                            (raddr_b < 32 &&
                             src_muxes[i] == QPU_MUX_B &&
                             last_waddr_b == raddr_b)) {
                                needs_raddr_vs_waddr_nop = true;
                        }
                        if (src_muxes[i] == QPU_MUX_R4)
                                reads_r4 = true;
                }

                if (needs_raddr_vs_waddr_nop) {
                        serialize_one_inst(c, qpu_NOP());
                }

                /* "After an SFU lookup instruction, accumulator r4 must not
                 * be read in the following two instructions. Any other
                 * instruction that results in r4 being written (that is, TMU
                 * read, TLB read, SFU lookup) cannot occur in the two
                 * instructions following an SFU lookup."
                 */
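                /* last_sfu_write starts at -10, so the distance check below
                 * never fires before the first SFU write; after one, NOPs
                 * are inserted until at least two instructions separate the
                 * SFU write from this r4 read.
                 */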
                if (reads_r4) {
                        while (c->qpu_inst_count - last_sfu_write < 3) {
                                serialize_one_inst(c, qpu_NOP());
                        }
                }

                uint32_t waddr_a = QPU_GET_FIELD(q->inst, QPU_WADDR_ADD);
                uint32_t waddr_m = QPU_GET_FIELD(q->inst, QPU_WADDR_MUL);
                if ((waddr_a >= QPU_W_SFU_RECIP && waddr_a <= QPU_W_SFU_LOG) ||
                    (waddr_m >= QPU_W_SFU_RECIP && waddr_m <= QPU_W_SFU_LOG)) {
                        last_sfu_write = c->qpu_inst_count;
                }

                /* "A scoreboard wait must not occur in the first two
                 * instructions of a fragment shader. This is either the
                 * explicit Wait for Scoreboard signal or an implicit wait
                 * with the first tile-buffer read or write instruction."
                 */
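                /* To honor that, the first TLB access gets padded with NOPs
                 * until at least three instructions precede it, and the
                 * explicit wait signal is placed on the nearest preceding
                 * instruction with a free signal slot.
                 */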
                if (!scoreboard_wait_emitted &&
                    (waddr_a == QPU_W_TLB_Z || waddr_m == QPU_W_TLB_Z ||
                     waddr_a == QPU_W_TLB_COLOR_MS ||
                     waddr_m == QPU_W_TLB_COLOR_MS ||
                     waddr_a == QPU_W_TLB_COLOR_ALL ||
                     waddr_m == QPU_W_TLB_COLOR_ALL ||
                     QPU_GET_FIELD(q->inst, QPU_SIG) == QPU_SIG_COLOR_LOAD)) {
                        while (c->qpu_inst_count < 3 ||
                               QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                                             QPU_SIG) != QPU_SIG_NONE) {
                                serialize_one_inst(c, qpu_NOP());
                        }
                        c->qpu_insts[c->qpu_inst_count - 1] =
                                qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                                            QPU_SIG_WAIT_FOR_SCOREBOARD);
                        scoreboard_wait_emitted = true;
                }

                serialize_one_inst(c, q->inst);

                remove_from_list(&q->link);
                free(q);
        }
}

void
vc4_generate_code(struct qcompile *c)
{
        struct qpu_reg allocate_to_qpu_reg[3 + 32 + 32];
        bool reg_in_use[ARRAY_SIZE(allocate_to_qpu_reg)];
        int *reg_allocated = calloc(c->num_temps, sizeof(*reg_allocated));
        int *reg_uses_remaining =
                calloc(c->num_temps, sizeof(*reg_uses_remaining));
        bool discard = false;

        for (int i = 0; i < ARRAY_SIZE(reg_in_use); i++)
                reg_in_use[i] = false;
        for (int i = 0; i < c->num_temps; i++)
                reg_allocated[i] = -1;
        for (int i = 0; i < 3; i++)
                allocate_to_qpu_reg[i] = qpu_rn(i);
        for (int i = 0; i < 32; i++)
                allocate_to_qpu_reg[i + 3] = qpu_ra(i);
        for (int i = 0; i < 32; i++)
                allocate_to_qpu_reg[i + 3 + 32] = qpu_rb(i);

        make_empty_list(&c->qpu_inst_list);

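        /* Count the references to each temporary up front, so that its
         * register can be returned to the pool right after the last one,
         * and pin the Z/W payload register if the fragment Z value will be
         * needed.
         */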
        struct simple_node *node;
        foreach(node, &c->instructions) {
                struct qinst *qinst = (struct qinst *)node;

                if (qinst->dst.file == QFILE_TEMP)
                        reg_uses_remaining[qinst->dst.index]++;
                for (int i = 0; i < qir_get_op_nsrc(qinst->op); i++) {
                        if (qinst->src[i].file == QFILE_TEMP)
                                reg_uses_remaining[qinst->src[i].index]++;
                }
                if (qinst->op == QOP_TLB_PASSTHROUGH_Z_WRITE ||
                    qinst->op == QOP_FRAG_Z)
                        reg_in_use[3 + 32 + QPU_R_FRAG_PAYLOAD_ZW] = true;
        }

        switch (c->stage) {
        case QSTAGE_VERT:
        case QSTAGE_COORD:
                queue(c, qpu_load_imm_ui(qpu_vrsetup(),
                                         (0x00001a00 +
                                          0x00100000 * c->num_inputs)));
                queue(c, qpu_load_imm_ui(qpu_vwsetup(), 0x00001a00));
                break;
        case QSTAGE_FRAG:
                break;
        }

        foreach(node, &c->instructions) {
                struct qinst *qinst = (struct qinst *)node;

#if 0
                fprintf(stderr, "translating qinst to qpu: ");
                qir_dump_inst(qinst);
                fprintf(stderr, "\n");
#endif

                static const struct {
                        uint32_t op;
                        bool is_mul;
                } translate[] = {
#define A(name) [QOP_##name] = {QPU_A_##name, false}
#define M(name) [QOP_##name] = {QPU_M_##name, true}
                        A(FADD),
                        A(FSUB),
                        A(FMIN),
                        A(FMAX),
                        A(FMINABS),
                        A(FMAXABS),
                        A(FTOI),
                        A(ITOF),

                        M(FMUL),
                };

                static const uint32_t compareflags[] = {
                        [QOP_SEQ - QOP_SEQ] = QPU_COND_ZS,
                        [QOP_SNE - QOP_SEQ] = QPU_COND_ZC,
                        [QOP_SLT - QOP_SEQ] = QPU_COND_NS,
                        [QOP_SGE - QOP_SEQ] = QPU_COND_NC,
                };

                struct qpu_reg src[4];
                for (int i = 0; i < qir_get_op_nsrc(qinst->op); i++) {
                        int index = qinst->src[i].index;
                        switch (qinst->src[i].file) {
                        case QFILE_NULL:
                                src[i] = qpu_rn(0);
                                break;
                        case QFILE_TEMP:
                                if (reg_allocated[index] == -1) {
                                        fprintf(stderr, "undefined reg use: ");
                                        qir_dump_inst(qinst);
                                        fprintf(stderr, "\n");

                                        src[i] = qpu_rn(0);
                                } else {
                                        src[i] = allocate_to_qpu_reg[reg_allocated[index]];
                                        reg_uses_remaining[index]--;
                                        if (reg_uses_remaining[index] == 0)
                                                reg_in_use[reg_allocated[index]] = false;
                                }
                                break;
                        case QFILE_UNIF:
                                src[i] = qpu_unif();
                                break;
                        case QFILE_VARY:
                                src[i] = qpu_vary();
                                break;
                        }
                }

                struct qpu_reg dst;
                switch (qinst->dst.file) {
                case QFILE_NULL:
                        dst = qpu_ra(QPU_W_NOP);
                        break;

                case QFILE_TEMP:
                        if (reg_allocated[qinst->dst.index] == -1) {
                                int alloc;
                                for (alloc = 0;
                                     alloc < ARRAY_SIZE(reg_in_use);
                                     alloc++) {
                                        /* The pack flags require an A-file register. */
                                        if (qinst->op == QOP_PACK_SCALED &&
                                            allocate_to_qpu_reg[alloc].mux != QPU_MUX_A) {
                                                continue;
                                        }

                                        if (!reg_in_use[alloc])
                                                break;
                                }
                                assert(alloc != ARRAY_SIZE(reg_in_use) && "need better reg alloc");
                                reg_in_use[alloc] = true;
                                reg_allocated[qinst->dst.index] = alloc;
                        }

                        dst = allocate_to_qpu_reg[reg_allocated[qinst->dst.index]];

                        reg_uses_remaining[qinst->dst.index]--;
                        if (reg_uses_remaining[qinst->dst.index] == 0) {
                                reg_in_use[reg_allocated[qinst->dst.index]] =
                                        false;
                        }
                        break;

                case QFILE_VARY:
                case QFILE_UNIF:
                        assert(!"not reached");
                        break;
                }

                switch (qinst->op) {
                case QOP_MOV:
                        /* Skip emitting the MOV if it's a no-op. */
                        if (dst.mux == QPU_MUX_A || dst.mux == QPU_MUX_B ||
                            dst.mux != src[0].mux || dst.addr != src[0].addr) {
                                queue(c, qpu_a_MOV(dst, src[0]));
                        }
                        break;

                case QOP_CMP:
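                        /* Implements dst = (src[0] < 0) ? src[1] : src[2]:
                         * set the flags from src[0], then write src[1] on
                         * negative-set and src[2] on negative-clear.
                         */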
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_NOP), src[0]));
                        *last_inst(c) |= QPU_SF;

                        queue(c, qpu_a_MOV(dst, src[1]));
                        *last_inst(c) = qpu_set_cond_add(*last_inst(c),
                                                         QPU_COND_NS);

                        queue(c, qpu_a_MOV(dst, src[2]));
                        *last_inst(c) = qpu_set_cond_add(*last_inst(c),
                                                         QPU_COND_NC);
                        break;

                case QOP_SEQ:
                case QOP_SNE:
                case QOP_SGE:
                case QOP_SLT:
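                        /* Set the flags from src[0] - src[1], load 0.0
                         * unconditionally, then conditionally overwrite it
                         * with 1.0 using the comparison's condition code.
                         */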
                        fixup_raddr_conflict(c, src[0], &src[1]);
                        queue(c, qpu_a_FSUB(qpu_ra(QPU_W_NOP), src[0], src[1]));
                        *last_inst(c) |= QPU_SF;

                        queue(c, qpu_load_imm_f(dst, 0.0));
                        queue(c, qpu_load_imm_f(dst, 1.0));
                        *last_inst(c) = qpu_set_cond_add(*last_inst(c),
                                                         compareflags[qinst->op - QOP_SEQ]);

                        break;

                case QOP_VPM_WRITE:
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_VPM), src[0]));
                        break;

                case QOP_VPM_READ:
                        queue(c, qpu_a_MOV(dst, qpu_ra(QPU_R_VPM)));
                        break;

                case QOP_RCP:
                case QOP_RSQ:
                case QOP_EXP2:
                case QOP_LOG2:
                        switch (qinst->op) {
                        case QOP_RCP:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIP),
                                                   src[0]));
                                break;
                        case QOP_RSQ:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIPSQRT),
                                                   src[0]));
                                break;
                        case QOP_EXP2:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_EXP),
                                                   src[0]));
                                break;
                        case QOP_LOG2:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_LOG),
                                                   src[0]));
                                break;
                        default:
                                abort();
                        }

                        queue(c, qpu_a_MOV(dst, qpu_r4()));

                        break;

                case QOP_PACK_COLORS:
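                        /* Pack the four sources into the four byte lanes of
                         * r3 using the MUL-unit 8-bit pack modes, then copy
                         * the packed word to dst.
                         */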
                        for (int i = 0; i < 4; i++) {
                                queue(c, qpu_m_MOV(qpu_r3(), src[i]));
                                *last_inst(c) |= QPU_PM;
                                *last_inst(c) |= QPU_SET_FIELD(QPU_PACK_MUL_8A + i,
                                                               QPU_PACK);
                        }

                        queue(c, qpu_a_MOV(dst, qpu_r3()));

                        break;

                case QOP_FRAG_X:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_ra(QPU_R_XY_PIXEL_COORD)));
                        break;

                case QOP_FRAG_Y:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_rb(QPU_R_XY_PIXEL_COORD)));
                        break;

                case QOP_FRAG_Z:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_rb(QPU_R_FRAG_PAYLOAD_ZW)));
                        break;

                case QOP_FRAG_RCP_W:
                        queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIP),
                                           qpu_ra(QPU_R_FRAG_PAYLOAD_ZW)));

                        queue(c, qpu_a_MOV(dst, qpu_r4()));
                        break;

                case QOP_TLB_DISCARD_SETUP:
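                        /* Set the Z flags from the discard condition; the
                         * TLB writes below are then made conditional on ZS,
                         * so they only land for pixels whose discard value
                         * is zero.
                         */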
                        discard = true;
                        queue(c, qpu_a_MOV(src[0], src[0]));
                        *last_inst(c) |= QPU_SF;
                        break;

                case QOP_TLB_PASSTHROUGH_Z_WRITE:
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_TLB_Z),
                                           qpu_rb(QPU_R_FRAG_PAYLOAD_ZW)));
                        if (discard) {
                                *last_inst(c) = qpu_set_cond_add(*last_inst(c),
                                                                 QPU_COND_ZS);
                        }
                        break;

                case QOP_TLB_COLOR_READ:
                        queue(c, qpu_NOP());
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_COLOR_LOAD);

                        break;

                case QOP_TLB_COLOR_WRITE:
                        queue(c, qpu_a_MOV(qpu_tlbc(), src[0]));
                        if (discard) {
                                *last_inst(c) = qpu_set_cond_add(*last_inst(c),
                                                                 QPU_COND_ZS);
                        }
                        break;

                case QOP_VARY_ADD_C:
                        queue(c, qpu_a_FADD(dst, src[0], qpu_r5()));
                        break;

                case QOP_PACK_SCALED: {
                        uint64_t a = (qpu_a_MOV(dst, src[0]) |
                                      QPU_SET_FIELD(QPU_PACK_A_16A,
                                                    QPU_PACK));
                        uint64_t b = (qpu_a_MOV(dst, src[1]) |
                                      QPU_SET_FIELD(QPU_PACK_A_16B,
                                                    QPU_PACK));

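                        /* If dst aliases src[1], emit the 16b half first so
                         * that src[1] is read before the 16a write clobbers
                         * it.
                         */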
                        if (dst.mux == src[1].mux && dst.addr == src[1].addr) {
                                queue(c, b);
                                queue(c, a);
                        } else {
                                queue(c, a);
                                queue(c, b);
                        }
                        break;
                }

                case QOP_TEX_S:
                case QOP_TEX_T:
                case QOP_TEX_R:
                case QOP_TEX_B:
                        queue(c, qpu_a_MOV(qpu_rb(QPU_W_TMU0_S +
                                                  (qinst->op - QOP_TEX_S)),
                                           src[0]));
                        break;

                case QOP_TEX_RESULT:
                        queue(c, qpu_NOP());
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_LOAD_TMU0);

                        break;

                case QOP_R4_UNPACK_A:
                case QOP_R4_UNPACK_B:
                case QOP_R4_UNPACK_C:
                case QOP_R4_UNPACK_D:
                        queue(c, qpu_a_MOV(dst, qpu_r4()));
                        *last_inst(c) |= QPU_PM;
                        *last_inst(c) |= QPU_SET_FIELD(QPU_UNPACK_R4_8A +
                                                       (qinst->op -
                                                        QOP_R4_UNPACK_A),
                                                       QPU_UNPACK);

                        break;

                default:
                        assert(qinst->op < ARRAY_SIZE(translate));
                        assert(translate[qinst->op].op != 0); /* NOPs */

                        /* If we have only one source, put it in the second
                         * argument slot as well so that we don't take up
                         * another raddr just to get unused data.
                         */
                        if (qir_get_op_nsrc(qinst->op) == 1)
                                src[1] = src[0];

                        fixup_raddr_conflict(c, src[0], &src[1]);

                        if (translate[qinst->op].is_mul) {
                                queue(c, qpu_m_alu2(translate[qinst->op].op,
                                                    dst,
                                                    src[0], src[1]));
                        } else {
                                queue(c, qpu_a_alu2(translate[qinst->op].op,
                                                    dst,
                                                    src[0], src[1]));
                        }
                        break;
                }
        }

        serialize_insts(c);

        /* thread end can't have VPM write */
        if (QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_ADD) == QPU_W_VPM ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_MUL) == QPU_W_VPM) {
                serialize_one_inst(c, qpu_NOP());
        }

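        /* Thread end takes effect two instructions after the PROG_END
         * signal, so pad with two NOPs to fill the delay slots.
         */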
        c->qpu_insts[c->qpu_inst_count - 1] =
                qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                            QPU_SIG_PROG_END);
        serialize_one_inst(c, qpu_NOP());
        serialize_one_inst(c, qpu_NOP());

        switch (c->stage) {
        case QSTAGE_VERT:
        case QSTAGE_COORD:
                break;
        case QSTAGE_FRAG:
                c->qpu_insts[c->qpu_inst_count - 1] =
                        qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                                    QPU_SIG_SCOREBOARD_UNLOCK);
                break;
        }

        free(reg_allocated);
        free(reg_uses_remaining);

        if (vc4_debug & VC4_DEBUG_QPU)
                vc4_dump_program(c);

        vc4_qpu_validate(c->qpu_insts, c->qpu_inst_count);
}