/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <inttypes.h>

#include "vc4_context.h"
#include "util/ralloc.h"
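/* Lowering of the compiler's QIR instruction list to native QPU
 * instructions: each qinst is translated into one or more 64-bit QPU
 * instructions, which are then scheduled and patched so that the
 * end-of-thread sequence stays legal.
 */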
static void
vc4_dump_program(struct vc4_compile *c)
{
        fprintf(stderr, "%s prog %d/%d QPU:\n",
                qir_get_stage_name(c->stage),
                c->program_id, c->variant_id);

        for (int i = 0; i < c->qpu_inst_count; i++) {
                fprintf(stderr, "0x%016"PRIx64" ", c->qpu_insts[i]);
                vc4_qpu_disasm(&c->qpu_insts[i], 1);
                fprintf(stderr, "\n");
        }
}
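/* Appends one raw 64-bit QPU instruction to the compile's pending list. */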
static void
queue(struct vc4_compile *c, uint64_t inst)
{
        struct queued_qpu_inst *q = rzalloc(c, struct queued_qpu_inst);
        q->inst = inst;
        list_addtail(&q->link, &c->qpu_inst_list);
}
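/* Returns a pointer to the most recently queued instruction, so callers can
 * OR extra fields (conditions, signals, pack/unpack bits) into it.
 */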
static uint64_t *
last_inst(struct vc4_compile *c)
{
        struct queued_qpu_inst *q =
                (struct queued_qpu_inst *)c->qpu_inst_list.prev;
        return &q->inst;
}
static void
set_last_cond_add(struct vc4_compile *c, uint32_t cond)
{
        *last_inst(c) = qpu_set_cond_add(*last_inst(c), cond);
}
/**
 * Some special registers can be read from either file, which lets us resolve
 * raddr conflicts without extra MOVs.
 */
static bool
swap_file(struct qpu_reg *src)
{
        switch (src->addr) {
        case QPU_R_UNIF:
        case QPU_R_VARY:
                if (src->mux == QPU_MUX_SMALL_IMM) {
                        return false;
                } else {
                        if (src->mux == QPU_MUX_A)
                                src->mux = QPU_MUX_B;
                        else
                                src->mux = QPU_MUX_A;
                        return true;
                }

        default:
                return false;
        }
}
/**
 * This is used to resolve the fact that we might register-allocate two
 * different operands of an instruction to the same physical register file
 * even though instructions have only one field for the register file source
 * address.
 *
 * In that case, we need to move one to a temporary that can be used in the
 * instruction, instead.  We reserve ra31/rb31 for this purpose.
 */
static void
fixup_raddr_conflict(struct vc4_compile *c,
                     struct qpu_reg dst,
                     struct qpu_reg *src0, struct qpu_reg *src1)
{
        uint32_t mux0 = src0->mux == QPU_MUX_SMALL_IMM ? QPU_MUX_B : src0->mux;
        uint32_t mux1 = src1->mux == QPU_MUX_SMALL_IMM ? QPU_MUX_B : src1->mux;

        if (mux0 <= QPU_MUX_R5 ||
            mux0 != mux1 ||
            (src0->addr == src1->addr &&
             src0->mux == src1->mux)) {
                return;
        }

        if (swap_file(src0) || swap_file(src1))
                return;

        if (mux0 == QPU_MUX_A) {
                queue(c, qpu_a_MOV(qpu_rb(31), *src0));
                *src0 = qpu_rb(31);
        } else {
                queue(c, qpu_a_MOV(qpu_ra(31), *src0));
                *src0 = qpu_ra(31);
        }
}
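/* Top-level code generation: allocate physical registers, walk the QIR
 * instruction list emitting QPU instructions, then schedule them and fix up
 * the end-of-thread sequence.
 */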
void
vc4_generate_code(struct vc4_context *vc4, struct vc4_compile *c)
{
        struct qpu_reg *temp_registers = vc4_register_allocate(vc4, c);
        bool discard = false;
        uint32_t inputs_remaining = c->num_inputs;
        uint32_t vpm_read_fifo_count = 0;
        uint32_t vpm_read_offset = 0;
        int last_vpm_read_index = -1;
        /* Map from the QIR ops enum order to QPU unpack bits. */
        static const uint32_t unpack_map[] = {
                QPU_UNPACK_8A,
                QPU_UNPACK_8B,
                QPU_UNPACK_8C,
                QPU_UNPACK_8D,
                QPU_UNPACK_16A_TO_F32,
                QPU_UNPACK_16B_TO_F32,
        };

        list_inithead(&c->qpu_inst_list);
        switch (c->stage) {
        case QSTAGE_VERT:
        case QSTAGE_COORD:
                /* There's a 4-entry FIFO for VPMVCD reads, each of which can
                 * load up to 16 dwords (4 vec4s) per vertex.
                 */
                while (inputs_remaining) {
                        uint32_t num_entries = MIN2(inputs_remaining, 16);
                        queue(c, qpu_load_imm_ui(qpu_vrsetup(),
                                                 vpm_read_offset |
                                                 0x00001a00 |
                                                 ((num_entries & 0xf) << 20)));
                        inputs_remaining -= num_entries;
                        vpm_read_offset += num_entries;
                        vpm_read_fifo_count++;
                }
                assert(vpm_read_fifo_count <= 4);

                queue(c, qpu_load_imm_ui(qpu_vwsetup(), 0x00001a00));
                break;
        case QSTAGE_FRAG:
                break;
        }
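        /* Translate the QIR instruction list, one qinst at a time.  Most ALU
         * ops map to a single QPU instruction via the translate[] table
         * below; the special cases are handled explicitly in the switch.
         */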
        list_for_each_entry(struct qinst, qinst, &c->instructions, link) {
#if 0
                fprintf(stderr, "translating qinst to qpu: ");
                qir_dump_inst(qinst);
                fprintf(stderr, "\n");
#endif
                static const struct {
                        uint32_t op;
                        bool is_mul;
                } translate[] = {
#define A(name) [QOP_##name] = {QPU_A_##name, false}
#define M(name) [QOP_##name] = {QPU_M_##name, true}
                        A(FADD),
                        A(FSUB),
                        A(FMIN),
                        A(FMAX),
                        A(FMINABS),
                        A(FMAXABS),
                        A(FTOI),
                        A(ITOF),
                        A(ADD),
                        A(SUB),
                        A(SHR),
                        A(ASR),
                        A(SHL),
                        A(MIN),
                        A(MAX),
                        A(AND),
                        A(OR),
                        A(XOR),
                        A(NOT),

                        M(FMUL),
                        M(MUL24),
                };

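                /* Gather the instruction's operands, mapping each QIR source
                 * file to a concrete QPU register or small immediate.
                 */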
                struct qpu_reg src[4];
                for (int i = 0; i < qir_get_op_nsrc(qinst->op); i++) {
                        int index = qinst->src[i].index;
                        switch (qinst->src[i].file) {
                        case QFILE_TEMP:
                                src[i] = temp_registers[index];
                                break;
                        case QFILE_SMALL_IMM:
                                src[i].mux = QPU_MUX_SMALL_IMM;
                                src[i].addr = qpu_encode_small_immediate(qinst->src[i].index);
                                /* This should only have returned a valid
                                 * small immediate field, not ~0 for failure.
                                 */
                                assert(src[i].addr <= 47);
                                break;
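                        /* VPM reads pop from a FIFO, so they must be issued
                         * in the order the inputs were laid out; the asserts
                         * below check that source indices never go backward.
                         */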
                        case QFILE_VPM:
                                assert((int)qinst->src[i].index >=
                                       last_vpm_read_index);
                                (void)last_vpm_read_index;
                                last_vpm_read_index = qinst->src[i].index;
                                src[i] = qpu_ra(QPU_R_VPM);
                                break;
                        }
                }

                struct qpu_reg dst;
                switch (qinst->dst.file) {
                case QFILE_NULL:
                        dst = qpu_ra(QPU_W_NOP);
                        break;
                case QFILE_TEMP:
                        dst = temp_registers[qinst->dst.index];
                        break;
                case QFILE_VPM:
                        dst = qpu_ra(QPU_W_VPM);
                        break;
                case QFILE_VARY:
                case QFILE_UNIF:
                case QFILE_SMALL_IMM:
                        assert(!"not reached");
                        break;
                }

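                /* Translate the opcode itself.  Most ALU ops fall through to
                 * the default case and the translate[] table; the cases below
                 * need special sequences or fixed QPU registers.
                 */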
                switch (qinst->op) {
                case QOP_MOV:
                        /* Skip emitting the MOV if it's a no-op. */
                        if (dst.mux == QPU_MUX_A || dst.mux == QPU_MUX_B ||
                            dst.mux != src[0].mux || dst.addr != src[0].addr) {
                                queue(c, qpu_a_MOV(dst, src[0]));
                        }
                        break;

                case QOP_SEL_X_0_ZS:
                case QOP_SEL_X_0_ZC:
                case QOP_SEL_X_0_NS:
                case QOP_SEL_X_0_NC:
                        queue(c, qpu_a_MOV(dst, src[0]));
                        set_last_cond_add(c, qinst->op - QOP_SEL_X_0_ZS +
                                          QPU_COND_ZS);

                        queue(c, qpu_a_XOR(dst, qpu_r0(), qpu_r0()));
                        set_last_cond_add(c, ((qinst->op - QOP_SEL_X_0_ZS) ^
                                              1) + QPU_COND_ZS);
                        break;

                case QOP_SEL_X_Y_ZS:
                case QOP_SEL_X_Y_ZC:
                case QOP_SEL_X_Y_NS:
                case QOP_SEL_X_Y_NC:
                        queue(c, qpu_a_MOV(dst, src[0]));
                        set_last_cond_add(c, qinst->op - QOP_SEL_X_Y_ZS +
                                          QPU_COND_ZS);

                        queue(c, qpu_a_MOV(dst, src[1]));
                        set_last_cond_add(c, ((qinst->op - QOP_SEL_X_Y_ZS) ^
                                              1) + QPU_COND_ZS);
                        break;
                case QOP_RCP:
                case QOP_RSQ:
                case QOP_EXP2:
                case QOP_LOG2:
                        switch (qinst->op) {
                        case QOP_RCP:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIP),
                                                   src[0]));
                                break;
                        case QOP_RSQ:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIPSQRT),
                                                   src[0]));
                                break;
                        case QOP_EXP2:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_EXP),
                                                   src[0]));
                                break;
                        case QOP_LOG2:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_LOG),
                                                   src[0]));
                                break;
                        }

                        if (dst.mux != QPU_MUX_R4)
                                queue(c, qpu_a_MOV(dst, qpu_r4()));

                        break;
                case QOP_PACK_8888_F:
                        queue(c, qpu_m_MOV(dst, src[0]));
                        *last_inst(c) |= QPU_PM;
                        *last_inst(c) |= QPU_SET_FIELD(QPU_PACK_MUL_8888,
                                                       QPU_PACK);
                        break;

                case QOP_PACK_8A_F:
                case QOP_PACK_8B_F:
                case QOP_PACK_8C_F:
                case QOP_PACK_8D_F:
                        queue(c,
                              qpu_m_MOV(dst, src[0]) |
                              QPU_PM |
                              QPU_SET_FIELD(QPU_PACK_MUL_8A +
                                            qinst->op - QOP_PACK_8A_F,
                                            QPU_PACK));
                        break;

                case QOP_FRAG_X:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_ra(QPU_R_XY_PIXEL_COORD)));
                        break;

                case QOP_FRAG_Y:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_rb(QPU_R_XY_PIXEL_COORD)));
                        break;

                case QOP_FRAG_REV_FLAG:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_rb(QPU_R_MS_REV_FLAGS)));
                        break;
                case QOP_FRAG_Z:
                case QOP_FRAG_W:
                        /* QOP_FRAG_Z/W don't emit instructions, just allocate
                         * the register to the Z/W payload.
                         */
                        break;

                case QOP_TLB_DISCARD_SETUP:
                        discard = true;
                        queue(c, qpu_a_MOV(src[0], src[0]));
                        *last_inst(c) |= QPU_SF;
                        break;

                case QOP_TLB_STENCIL_SETUP:
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_TLB_STENCIL_SETUP), src[0]));
                        break;

                case QOP_TLB_Z_WRITE:
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_TLB_Z), src[0]));
                        if (discard) {
                                set_last_cond_add(c, QPU_COND_ZS);
                        }
                        break;
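                /* TLB color access: reads are requested with a signal and the
                 * data lands in r4; writes go through the tlbc write address,
                 * predicated on the discard flags when needed.
                 */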
                case QOP_TLB_COLOR_READ:
                        queue(c, qpu_NOP());
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_COLOR_LOAD);

                        if (dst.mux != QPU_MUX_R4)
                                queue(c, qpu_a_MOV(dst, qpu_r4()));
                        break;

                case QOP_TLB_COLOR_WRITE:
                        queue(c, qpu_a_MOV(qpu_tlbc(), src[0]));
                        if (discard) {
                                set_last_cond_add(c, QPU_COND_ZS);
                        }
                        break;

                case QOP_VARY_ADD_C:
                        queue(c, qpu_a_FADD(dst, src[0], qpu_r5()));
                        break;

                case QOP_PACK_16A_I:
                case QOP_PACK_16B_I:
                        queue(c,
                              qpu_a_MOV(dst, src[0]) |
                              QPU_SET_FIELD(qinst->op == QOP_PACK_16A_I ?
                                            QPU_PACK_A_16A : QPU_PACK_A_16B,
                                            QPU_PACK));
                        break;

                case QOP_TEX_S:
                case QOP_TEX_T:
                case QOP_TEX_R:
                case QOP_TEX_B:
                        queue(c, qpu_a_MOV(qpu_rb(QPU_W_TMU0_S +
                                                  (qinst->op - QOP_TEX_S)),
                                           src[0]));
                        break;

                case QOP_TEX_DIRECT:
                        fixup_raddr_conflict(c, dst, &src[0], &src[1]);
                        queue(c, qpu_a_ADD(qpu_rb(QPU_W_TMU0_S), src[0], src[1]));
                        break;
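                /* The texture result, like the TLB color read, arrives via a
                 * signal that loads r4, so it only needs a MOV if the
                 * destination wasn't register-allocated to r4.
                 */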
                case QOP_TEX_RESULT:
                        queue(c, qpu_NOP());
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_LOAD_TMU0);

                        if (dst.mux != QPU_MUX_R4)
                                queue(c, qpu_a_MOV(dst, qpu_r4()));
                        break;
                case QOP_UNPACK_8A_F:
                case QOP_UNPACK_8B_F:
                case QOP_UNPACK_8C_F:
                case QOP_UNPACK_8D_F:
                case QOP_UNPACK_16A_F:
                case QOP_UNPACK_16B_F: {
                        if (src[0].mux == QPU_MUX_R4) {
                                queue(c, qpu_a_MOV(dst, src[0]));
                                *last_inst(c) |= QPU_PM;
                                *last_inst(c) |= QPU_SET_FIELD(QPU_UNPACK_8A +
                                                               (qinst->op -
                                                                QOP_UNPACK_8A_F),
                                                               QPU_UNPACK);
                        } else {
                                assert(src[0].mux == QPU_MUX_A);

                                /* Since we're setting the pack bits, if the
                                 * destination is in A it would get re-packed.
                                 */
                                queue(c, qpu_a_FMAX((dst.mux == QPU_MUX_A ?
                                                     qpu_rb(31) : dst),
                                                    src[0], src[0]));
                                *last_inst(c) |=
                                        QPU_SET_FIELD(unpack_map[qinst->op -
                                                                 QOP_UNPACK_8A_F],
                                                      QPU_UNPACK);

                                if (dst.mux == QPU_MUX_A) {
                                        queue(c, qpu_a_MOV(dst, qpu_rb(31)));
                                }
                        }
                        break;
                }
                case QOP_UNPACK_8A_I:
                case QOP_UNPACK_8B_I:
                case QOP_UNPACK_8C_I:
                case QOP_UNPACK_8D_I:
                case QOP_UNPACK_16A_I:
                case QOP_UNPACK_16B_I: {
                        assert(src[0].mux == QPU_MUX_A);

                        /* Since we're setting the pack bits, if the
                         * destination is in A it would get re-packed.
                         */
                        queue(c, qpu_a_MOV((dst.mux == QPU_MUX_A ?
                                            qpu_rb(31) : dst), src[0]));
                        *last_inst(c) |= QPU_SET_FIELD(unpack_map[qinst->op -
                                                                  QOP_UNPACK_8A_I],
                                                       QPU_UNPACK);

                        if (dst.mux == QPU_MUX_A) {
                                queue(c, qpu_a_MOV(dst, qpu_rb(31)));
                        }
                        break;
                }
                default:
                        assert(qinst->op < ARRAY_SIZE(translate));
                        assert(translate[qinst->op].op != 0); /* NOPs */

                        /* If we have only one source, put it in the second
                         * argument slot as well so that we don't take up
                         * another raddr just to get unused data.
                         */
                        if (qir_get_op_nsrc(qinst->op) == 1)
                                src[1] = src[0];

                        fixup_raddr_conflict(c, dst, &src[0], &src[1]);

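                        /* Dispatch to the add or mul ALU encoder according to
                         * which pipe the translated opcode belongs to.
                         */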
                        if (translate[qinst->op].is_mul) {
                                queue(c, qpu_m_alu2(translate[qinst->op].op,
                                                    dst,
                                                    src[0], src[1]));
                        } else {
                                queue(c, qpu_a_alu2(translate[qinst->op].op,
                                                    dst,
                                                    src[0], src[1]));
                        }
                        break;
                }

                if (qinst->sf) {
                        assert(!qir_is_multi_instruction(qinst));
                        *last_inst(c) |= QPU_SF;
                }
        }

        qpu_schedule_instructions(c);
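
        /* Scheduling serializes the queued instructions into c->qpu_insts.
         * The last instruction will carry the thread-end signal, so make sure
         * it stays free of operations that are illegal at thread end.
         */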
        /* thread end can't have VPM write or read */
        if (QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_ADD) == QPU_W_VPM ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_MUL) == QPU_W_VPM ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_RADDR_A) == QPU_R_VPM ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_RADDR_B) == QPU_R_VPM) {
                qpu_serialize_one_inst(c, qpu_NOP());
        }

        /* thread end can't have uniform read */
        if (QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_RADDR_A) == QPU_R_UNIF ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_RADDR_B) == QPU_R_UNIF) {
                qpu_serialize_one_inst(c, qpu_NOP());
        }

        /* thread end can't have TLB operations */
        if (qpu_inst_is_tlb(c->qpu_insts[c->qpu_inst_count - 1]))
                qpu_serialize_one_inst(c, qpu_NOP());

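        /* Tag the final instruction as the end of the thread, then pad with
         * the two delay-slot NOPs the hardware executes after thread end.
         */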
        c->qpu_insts[c->qpu_inst_count - 1] =
                qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                            QPU_SIG_PROG_END);
        qpu_serialize_one_inst(c, qpu_NOP());
        qpu_serialize_one_inst(c, qpu_NOP());
        switch (c->stage) {
        case QSTAGE_VERT:
        case QSTAGE_COORD:
                break;
        case QSTAGE_FRAG:
                c->qpu_insts[c->qpu_inst_count - 1] =
                        qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                                    QPU_SIG_SCOREBOARD_UNLOCK);
                break;
        }
        if (vc4_debug & VC4_DEBUG_QPU)
                vc4_dump_program(c);

        vc4_qpu_validate(c->qpu_insts, c->qpu_inst_count);
        free(temp_registers);
}