/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <inttypes.h>

#include "vc4_context.h"
#include "vc4_qir.h"
#include "vc4_qpu.h"
#include "util/ralloc.h"

static void
vc4_dump_program(struct vc4_compile *c)
{
        fprintf(stderr, "%s prog %d/%d QPU:\n",
                qir_get_stage_name(c->stage),
                c->program_id, c->variant_id);

        for (int i = 0; i < c->qpu_inst_count; i++) {
                fprintf(stderr, "0x%016"PRIx64" ", c->qpu_insts[i]);
                vc4_qpu_disasm(&c->qpu_insts[i], 1);
                fprintf(stderr, "\n");
        }
}

static void
queue(struct vc4_compile *c, uint64_t inst)
{
        struct queued_qpu_inst *q = rzalloc(c, struct queued_qpu_inst);
        q->inst = inst;
        list_addtail(&q->link, &c->qpu_inst_list);
}

static uint64_t *
last_inst(struct vc4_compile *c)
{
        struct queued_qpu_inst *q =
                (struct queued_qpu_inst *)c->qpu_inst_list.prev;
        return &q->inst;
}

static void
set_last_cond_add(struct vc4_compile *c, uint32_t cond)
{
        *last_inst(c) = qpu_set_cond_add(*last_inst(c), cond);
}

/**
 * Some special registers can be read from either file, which lets us resolve
 * raddr conflicts without extra MOVs.
 */
static bool
swap_file(struct qpu_reg *src)
{
        switch (src->addr) {
        case QPU_R_UNIF:
        case QPU_R_VARY:
                if (src->mux == QPU_MUX_SMALL_IMM) {
                        return false;
                } else {
                        if (src->mux == QPU_MUX_A)
                                src->mux = QPU_MUX_B;
                        else
                                src->mux = QPU_MUX_A;
                        return true;
                }

        default:
                return false;
        }
}
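
/* e.g. QPU_R_UNIF and QPU_R_VARY appear at the same raddr in both register
 * file A and B, so a conflicting read of one of them can simply be
 * retargeted to the other file.
 */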

/**
 * This is used to resolve the fact that we might register-allocate two
 * different operands of an instruction to the same physical register file
 * even though instructions have only one field for the register file source
 * address.
 *
 * In that case, we need to move one to a temporary that can be used in the
 * instruction, instead.  We reserve ra31/rb31 for this purpose.
 */
static void
fixup_raddr_conflict(struct vc4_compile *c,
                     struct qpu_reg dst,
                     struct qpu_reg *src0, struct qpu_reg *src1)
{
        uint32_t mux0 = src0->mux == QPU_MUX_SMALL_IMM ? QPU_MUX_B : src0->mux;
        uint32_t mux1 = src1->mux == QPU_MUX_SMALL_IMM ? QPU_MUX_B : src1->mux;

        if (mux0 <= QPU_MUX_R5 ||
            mux0 != mux1 ||
            (src0->addr == src1->addr &&
             src0->mux == src1->mux)) {
                return;
        }

        if (swap_file(src0) || swap_file(src1))
                return;

        if (mux0 == QPU_MUX_A) {
                queue(c, qpu_a_MOV(qpu_rb(31), *src0));
                *src0 = qpu_rb(31);
        } else {
                queue(c, qpu_a_MOV(qpu_ra(31), *src0));
                *src0 = qpu_ra(31);
        }
}
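
/* Worked example (illustrative register names): if allocation produced
 * "fadd rx, ra1, ra2", the two operands would fight over the single A-file
 * raddr field.  The code above rewrites that as
 *
 *     mov rb31, ra1
 *     fadd rx, rb31, ra2
 *
 * unless swap_file() can re-read one operand from the other file for free.
 */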

static void
set_last_dst_pack(struct vc4_compile *c, struct qinst *inst)
{
        bool had_pm = *last_inst(c) & QPU_PM;
        bool had_ws = *last_inst(c) & QPU_WS;
        uint32_t unpack = QPU_GET_FIELD(*last_inst(c), QPU_UNPACK);

        if (!inst->dst.pack)
                return;

        *last_inst(c) |= QPU_SET_FIELD(inst->dst.pack, QPU_PACK);

        if (qir_is_mul(inst)) {
                assert(!unpack || had_pm);
                *last_inst(c) |= QPU_PM;
        } else {
                assert(!unpack || !had_pm);
                assert(!had_ws); /* dst must be a-file to pack. */
        }
}
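
/* Background for the asserts above: the PM bit routes the pack/unpack
 * fields either to the regfile-A path (PM clear) or to the MUL-unit output
 * pack and r4 unpack (PM set), so a MUL-pipeline pack needs PM while an
 * add-pipeline pack needs an A-file destination with PM clear.
 */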

void
vc4_generate_code(struct vc4_context *vc4, struct vc4_compile *c)
{
        struct qpu_reg *temp_registers = vc4_register_allocate(vc4, c);
        bool discard = false;
        uint32_t inputs_remaining = c->num_inputs;
        uint32_t vpm_read_fifo_count = 0;
        uint32_t vpm_read_offset = 0;
        int last_vpm_read_index = -1;

        list_inithead(&c->qpu_inst_list);

        switch (c->stage) {
        case QSTAGE_VERT:
        case QSTAGE_COORD:
                /* There's a 4-entry FIFO for VPMVCD reads, each of which can
                 * load up to 16 dwords (4 vec4s) per vertex.
                 */
                while (inputs_remaining) {
                        uint32_t num_entries = MIN2(inputs_remaining, 16);
                        queue(c, qpu_load_imm_ui(qpu_vrsetup(),
                                                 vpm_read_offset |
                                                 0x00001a00 |
                                                 ((num_entries & 0xf) << 20)));
                        inputs_remaining -= num_entries;
                        vpm_read_offset += num_entries;
                        vpm_read_fifo_count++;
                }
                assert(vpm_read_fifo_count <= 4);

                queue(c, qpu_load_imm_ui(qpu_vwsetup(), 0x00001a00));
                break;
        case QSTAGE_FRAG:
                break;
        }
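
        /* Rough decode of the setup words above (VPM read/write setup
         * encodings): 0x00001a00 requests 32-bit horizontal vectors with a
         * stride of 1, the low byte is the starting VPM address, and bits
         * 20-23 of the read setup hold the number of vectors to fetch.
         */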

        list_for_each_entry(struct qinst, qinst, &c->instructions, link) {
#if 0
                fprintf(stderr, "translating qinst to qpu: ");
                qir_dump_inst(qinst);
                fprintf(stderr, "\n");
#endif

                static const struct {
                        uint32_t op;
                } translate[] = {
#define A(name) [QOP_##name] = {QPU_A_##name}
#define M(name) [QOP_##name] = {QPU_M_##name}
                        /* ... A() and M() entries for the one-to-one
                         * QIR-to-QPU ALU ops ...
                         */

                        /* If we replicate src[0] out to src[1], this works
                         * out the same as a MOV.
                         */
                        [QOP_MOV] = { QPU_A_OR },
                        [QOP_FMOV] = { QPU_A_FMAX },
                };
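
                /* OR of a value with itself and FMAX of a value with itself
                 * both hand back the input, so once src[0] is replicated
                 * into src[1] (done below for one-source ops) these encode
                 * as integer and float moves.
                 */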

                uint64_t unpack = 0;
                struct qpu_reg src[4];
                for (int i = 0; i < qir_get_op_nsrc(qinst->op); i++) {
                        int index = qinst->src[i].index;
                        switch (qinst->src[i].file) {
                        case QFILE_NULL:
                                src[i] = qpu_rn(0);
                                break;
                        case QFILE_TEMP:
                                src[i] = temp_registers[index];
                                if (qinst->src[i].pack) {
                                        assert(!unpack ||
                                               unpack == qinst->src[i].pack);
                                        unpack = QPU_SET_FIELD(qinst->src[i].pack,
                                                               QPU_UNPACK);
                                        if (src[i].mux == QPU_MUX_R4)
                                                unpack |= QPU_PM;
                                }
                                break;
                        case QFILE_UNIF:
                                src[i] = qpu_unif();
                                break;
                        case QFILE_VARY:
                                src[i] = qpu_vary();
                                break;
                        case QFILE_SMALL_IMM:
                                src[i].mux = QPU_MUX_SMALL_IMM;
                                src[i].addr = qpu_encode_small_immediate(qinst->src[i].index);
                                /* This should only have returned a valid
                                 * small immediate field, not ~0 for failure.
                                 */
                                assert(src[i].addr <= 47);
                                break;
                        case QFILE_VPM:
                                assert((int)qinst->src[i].index >=
                                       last_vpm_read_index);
                                (void)last_vpm_read_index;
                                last_vpm_read_index = qinst->src[i].index;
                                src[i] = qpu_ra(QPU_R_VPM);
                                break;
                        }
                }

                struct qpu_reg dst;
                switch (qinst->dst.file) {
                case QFILE_NULL:
                        dst = qpu_ra(QPU_W_NOP);
                        break;
                case QFILE_TEMP:
                        dst = temp_registers[qinst->dst.index];
                        break;
                case QFILE_VPM:
                        dst = qpu_ra(QPU_W_VPM);
                        break;
                case QFILE_VARY:
                case QFILE_UNIF:
                case QFILE_SMALL_IMM:
                        assert(!"not reached");
                        break;
                }

                switch (qinst->op) {
                case QOP_SEL_X_0_ZS:
                case QOP_SEL_X_0_ZC:
                case QOP_SEL_X_0_NS:
                case QOP_SEL_X_0_NC:
                        queue(c, qpu_a_MOV(dst, src[0]));
                        set_last_cond_add(c, qinst->op - QOP_SEL_X_0_ZS +
                                          QPU_COND_ZS);

                        queue(c, qpu_a_XOR(dst, qpu_r0(), qpu_r0()));
                        set_last_cond_add(c, ((qinst->op - QOP_SEL_X_0_ZS) ^
                                              1) + QPU_COND_ZS);
                        break;

                case QOP_SEL_X_Y_ZS:
                case QOP_SEL_X_Y_ZC:
                case QOP_SEL_X_Y_NS:
                case QOP_SEL_X_Y_NC:
                        queue(c, qpu_a_MOV(dst, src[0]));
                        set_last_cond_add(c, qinst->op - QOP_SEL_X_Y_ZS +
                                          QPU_COND_ZS);

                        queue(c, qpu_a_MOV(dst, src[1]));
                        set_last_cond_add(c, ((qinst->op - QOP_SEL_X_Y_ZS) ^
                                              1) + QPU_COND_ZS);
                        break;
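
                /* The SEL lowering leans on the condition encoding: the
                 * QOP_SEL_* variants are declared in the same order as
                 * QPU_COND_ZS..NC, and XORing the offset with 1 flips
                 * set<->clear, so the second conditional instruction writes
                 * exactly the channels the first one left untouched.
                 */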

                case QOP_RCP:
                case QOP_RSQ:
                case QOP_EXP2:
                case QOP_LOG2:
                        switch (qinst->op) {
                        case QOP_RCP:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIP),
                                                   src[0]));
                                break;
                        case QOP_RSQ:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIPSQRT),
                                                   src[0]));
                                break;
                        case QOP_EXP2:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_EXP),
                                                   src[0]));
                                break;
                        case QOP_LOG2:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_LOG),
                                                   src[0]));
                                break;
                        }

                        if (dst.mux != QPU_MUX_R4)
                                queue(c, qpu_a_MOV(dst, qpu_r4()));
                        break;
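
                /* SFU writes return their result in accumulator r4 a couple
                 * of instructions later (the required spacing is left to the
                 * scheduler), so a MOV out of r4 is only needed when the
                 * destination isn't r4 itself.
                 */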

                case QOP_PACK_8888_F:
                        queue(c, qpu_m_MOV(dst, src[0]));
                        *last_inst(c) |= QPU_PM;
                        *last_inst(c) |= QPU_SET_FIELD(QPU_PACK_MUL_8888,
                                                       QPU_PACK);
                        break;

                case QOP_PACK_8A_F:
                case QOP_PACK_8B_F:
                case QOP_PACK_8C_F:
                case QOP_PACK_8D_F:
                        queue(c,
                              qpu_m_MOV(dst, src[0]) |
                              QPU_PM |
                              QPU_SET_FIELD(QPU_PACK_MUL_8A +
                                            qinst->op - QOP_PACK_8A_F,
                                            QPU_PACK));
                        break;

                case QOP_FRAG_X:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_ra(QPU_R_XY_PIXEL_COORD)));
                        break;

                case QOP_FRAG_Y:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_rb(QPU_R_XY_PIXEL_COORD)));
                        break;

                case QOP_FRAG_REV_FLAG:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_rb(QPU_R_MS_REV_FLAGS)));
                        break;

                case QOP_FRAG_Z:
                case QOP_FRAG_W:
                        /* QOP_FRAG_Z/W don't emit instructions, just allocate
                         * the register to the Z/W payload.
                         */
                        break;

                case QOP_TLB_DISCARD_SETUP:
                        discard = true;
                        queue(c, qpu_a_MOV(src[0], src[0]));
                        *last_inst(c) |= QPU_SF;
                        break;

                case QOP_TLB_STENCIL_SETUP:
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_TLB_STENCIL_SETUP), src[0]));
                        break;

                case QOP_TLB_Z_WRITE:
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_TLB_Z), src[0]));
                        if (discard) {
                                set_last_cond_add(c, QPU_COND_ZS);
                        }
                        break;

                case QOP_TLB_COLOR_READ:
                        queue(c, qpu_NOP());
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_COLOR_LOAD);

                        if (dst.mux != QPU_MUX_R4)
                                queue(c, qpu_a_MOV(dst, qpu_r4()));
                        break;

                case QOP_TLB_COLOR_WRITE:
                        queue(c, qpu_a_MOV(qpu_tlbc(), src[0]));
                        if (discard) {
                                set_last_cond_add(c, QPU_COND_ZS);
                        }
                        break;
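
                /* Discards work by having QOP_TLB_DISCARD_SETUP set the
                 * condition flags from the discard value (the SF MOV above)
                 * and then predicating the TLB Z and color writes on ZS, so
                 * discarded pixels never touch the tile buffer.
                 */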

                case QOP_VARY_ADD_C:
                        queue(c, qpu_a_FADD(dst, src[0], qpu_r5()));
                        break;

                case QOP_TEX_S:
                case QOP_TEX_T:
                case QOP_TEX_R:
                case QOP_TEX_B:
                        queue(c, qpu_a_MOV(qpu_rb(QPU_W_TMU0_S +
                                                  (qinst->op - QOP_TEX_S)),
                                           src[0]));
                        break;

                case QOP_TEX_DIRECT:
                        fixup_raddr_conflict(c, dst, &src[0], &src[1]);
                        queue(c, qpu_a_ADD(qpu_rb(QPU_W_TMU0_S), src[0], src[1]));
                        break;

                case QOP_TEX_RESULT:
                        queue(c, qpu_NOP());
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_LOAD_TMU0);

                        if (dst.mux != QPU_MUX_R4)
                                queue(c, qpu_a_MOV(dst, qpu_r4()));
                        break;
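
                /* The write to QPU_W_TMU0_S is what actually kicks off the
                 * texture lookup; QPU_SIG_LOAD_TMU0 then lands the result in
                 * r4, mirroring the SFU and TLB color-load paths above.
                 */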

                default:
                        assert(qinst->op < ARRAY_SIZE(translate));
                        assert(translate[qinst->op].op != 0); /* NOPs */

                        /* Skip emitting the MOV if it's a no-op. */
                        if (qir_is_raw_mov(qinst) &&
                            dst.mux == src[0].mux && dst.addr == src[0].addr) {
                                break;
                        }

                        /* If we have only one source, put it in the second
                         * argument slot as well so that we don't take up
                         * another raddr just to get unused data.
                         */
                        if (qir_get_op_nsrc(qinst->op) == 1)
                                src[1] = src[0];

                        fixup_raddr_conflict(c, dst, &src[0], &src[1]);

                        if (qir_is_mul(qinst)) {
                                queue(c, qpu_m_alu2(translate[qinst->op].op,
                                                    dst,
                                                    src[0], src[1]) | unpack);
                        } else {
                                queue(c, qpu_a_alu2(translate[qinst->op].op,
                                                    dst,
                                                    src[0], src[1]) | unpack);
                        }
                        set_last_dst_pack(c, qinst);
                        break;
                }

                if (qinst->sf) {
                        assert(!qir_is_multi_instruction(qinst));
                        *last_inst(c) |= QPU_SF;
                }
        }

        qpu_schedule_instructions(c);

        /* thread end can't have VPM write or read */
        if (QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_ADD) == QPU_W_VPM ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_MUL) == QPU_W_VPM ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_RADDR_A) == QPU_R_VPM ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_RADDR_B) == QPU_R_VPM) {
                qpu_serialize_one_inst(c, qpu_NOP());
        }

        /* thread end can't have uniform read */
        if (QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_RADDR_A) == QPU_R_UNIF ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_RADDR_B) == QPU_R_UNIF) {
                qpu_serialize_one_inst(c, qpu_NOP());
        }

        /* thread end can't have TLB operations */
        if (qpu_inst_is_tlb(c->qpu_insts[c->qpu_inst_count - 1]))
                qpu_serialize_one_inst(c, qpu_NOP());

        c->qpu_insts[c->qpu_inst_count - 1] =
                qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                            QPU_SIG_PROG_END);
        qpu_serialize_one_inst(c, qpu_NOP());
        qpu_serialize_one_inst(c, qpu_NOP());
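
        /* The two NOPs above sit in the delay slots of the PROG_END signal:
         * the QPU keeps executing for two more instructions after the signal
         * is read, so they must be harmless.
         */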

        switch (c->stage) {
        case QSTAGE_VERT:
        case QSTAGE_COORD:
                break;
        case QSTAGE_FRAG:
                c->qpu_insts[c->qpu_inst_count - 1] =
                        qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                                    QPU_SIG_SCOREBOARD_UNLOCK);
                break;
        }

        if (vc4_debug & VC4_DEBUG_QPU)
                vc4_dump_program(c);

        vc4_qpu_validate(c->qpu_insts, c->qpu_inst_count);

        free(temp_registers);
}