/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <inttypes.h>

#include "vc4_context.h"
#include "vc4_qir.h"
#include "vc4_qpu.h"
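
/* Dumps the assembled program for debugging: each QPU instruction is printed
 * as its raw 64-bit encoding followed by its disassembly.
 */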
static void
vc4_dump_program(struct vc4_compile *c)
{
        fprintf(stderr, "%s:\n", qir_get_stage_name(c->stage));

        for (int i = 0; i < c->qpu_inst_count; i++) {
                fprintf(stderr, "0x%016"PRIx64" ", c->qpu_insts[i]);
                vc4_qpu_disasm(&c->qpu_insts[i], 1);
                fprintf(stderr, "\n");
        }
}

struct queued_qpu_inst {
        struct simple_node link;
        uint64_t inst;
};
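
/* Appends an instruction to the compile's pending-instruction list.  Nothing
 * goes into c->qpu_insts yet; serialize_insts() does that later so it can
 * insert NOPs and signals between instructions where the hardware requires
 * them.
 */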
static void
queue(struct vc4_compile *c, uint64_t inst)
{
        struct queued_qpu_inst *q = calloc(1, sizeof(*q));
        q->inst = inst;
        insert_at_tail(&c->qpu_inst_list, &q->link);
}
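
/* Returns a pointer to the most recently queued instruction, so callers can
 * patch in condition codes, signals, or pack/unpack fields after the fact.
 */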
static uint64_t *
last_inst(struct vc4_compile *c)
{
        struct queued_qpu_inst *q =
                (struct queued_qpu_inst *)last_elem(&c->qpu_inst_list);
        return &q->inst;
}
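
/* Sets the condition code on the add-pipe operation of the last queued
 * instruction.
 */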
static void
set_last_cond_add(struct vc4_compile *c, uint32_t cond)
{
        *last_inst(c) = qpu_set_cond_add(*last_inst(c), cond);
}

/**
 * This is used to resolve the fact that we might register-allocate two
 * different operands of an instruction to the same physical register file
 * even though instructions have only one field for the register file source
 * address.
 *
 * In that case, we need to move one to a temporary that can be used in the
 * instruction, instead.
 */
static void
fixup_raddr_conflict(struct vc4_compile *c,
                     struct qpu_reg src0, struct qpu_reg *src1)
{
        if ((src0.mux != QPU_MUX_A && src0.mux != QPU_MUX_B) ||
            src0.mux != src1->mux ||
            src0.addr == src1->addr) {
                return;
        }

        queue(c, qpu_a_MOV(qpu_r3(), *src1));
        *src1 = qpu_r3();
}
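
/* Appends a single instruction to the final c->qpu_insts array, growing the
 * array as needed.
 */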
static void
serialize_one_inst(struct vc4_compile *c, uint64_t inst)
{
        if (c->qpu_inst_count >= c->qpu_inst_size) {
                c->qpu_inst_size = MAX2(16, c->qpu_inst_size * 2);
                c->qpu_insts = realloc(c->qpu_insts,
                                       c->qpu_inst_size * sizeof(uint64_t));
        }
        c->qpu_insts[c->qpu_inst_count++] = inst;
}
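
/* Drains the queued-instruction list into c->qpu_insts, inserting NOPs and
 * signal fixups as needed to satisfy the hardware's scheduling rules
 * (write-then-read hazards on the physical register files, the SFU-to-r4
 * latency, and scoreboard wait placement in fragment shaders).
 */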
static void
serialize_insts(struct vc4_compile *c)
{
        int last_sfu_write = -10;
        bool scoreboard_wait_emitted = false;

        while (!is_empty_list(&c->qpu_inst_list)) {
                struct queued_qpu_inst *q =
                        (struct queued_qpu_inst *)first_elem(&c->qpu_inst_list);
                uint32_t last_waddr_a = QPU_W_NOP, last_waddr_b = QPU_W_NOP;
                uint32_t raddr_a = QPU_GET_FIELD(q->inst, QPU_RADDR_A);
                uint32_t raddr_b = QPU_GET_FIELD(q->inst, QPU_RADDR_B);

                if (c->qpu_inst_count > 0) {
                        uint64_t last_inst = c->qpu_insts[c->qpu_inst_count -
                                                          1];
                        uint32_t last_waddr_add = QPU_GET_FIELD(last_inst,
                                                                QPU_WADDR_ADD);
                        uint32_t last_waddr_mul = QPU_GET_FIELD(last_inst,
                                                                QPU_WADDR_MUL);

                        if (last_inst & QPU_WS) {
                                last_waddr_a = last_waddr_mul;
                                last_waddr_b = last_waddr_add;
                        } else {
                                last_waddr_a = last_waddr_add;
                                last_waddr_b = last_waddr_mul;
                        }
                }

                uint32_t src_muxes[] = {
                        QPU_GET_FIELD(q->inst, QPU_ADD_A),
                        QPU_GET_FIELD(q->inst, QPU_ADD_B),
                        QPU_GET_FIELD(q->inst, QPU_MUL_A),
                        QPU_GET_FIELD(q->inst, QPU_MUL_B),
                };

                /* "An instruction must not read from a location in physical
                 * regfile A or B that was written to by the previous
                 * instruction."
                 */
                bool needs_raddr_vs_waddr_nop = false;
                bool reads_r4 = false;
                for (int i = 0; i < ARRAY_SIZE(src_muxes); i++) {
                        if ((src_muxes[i] == QPU_MUX_A &&
                             last_waddr_a == raddr_a) ||
                            (src_muxes[i] == QPU_MUX_B &&
                             last_waddr_b == raddr_b)) {
                                needs_raddr_vs_waddr_nop = true;
                        }
                        if (src_muxes[i] == QPU_MUX_R4)
                                reads_r4 = true;
                }

                if (needs_raddr_vs_waddr_nop) {
                        serialize_one_inst(c, qpu_NOP());
                }

                /* "After an SFU lookup instruction, accumulator r4 must not
                 * be read in the following two instructions. Any other
                 * instruction that results in r4 being written (that is, TMU
                 * read, TLB read, SFU lookup) cannot occur in the two
                 * instructions following an SFU lookup."
                 */
                if (reads_r4) {
                        while (c->qpu_inst_count - last_sfu_write < 3) {
                                serialize_one_inst(c, qpu_NOP());
                        }
                }

                uint32_t waddr_a = QPU_GET_FIELD(q->inst, QPU_WADDR_ADD);
                uint32_t waddr_m = QPU_GET_FIELD(q->inst, QPU_WADDR_MUL);
                if ((waddr_a >= QPU_W_SFU_RECIP && waddr_a <= QPU_W_SFU_LOG) ||
                    (waddr_m >= QPU_W_SFU_RECIP && waddr_m <= QPU_W_SFU_LOG)) {
                        last_sfu_write = c->qpu_inst_count;
                }

                /* "A scoreboard wait must not occur in the first two
                 * instructions of a fragment shader. This is either the
                 * explicit Wait for Scoreboard signal or an implicit wait
                 * with the first tile-buffer read or write instruction."
                 */
                if (!scoreboard_wait_emitted &&
                    (waddr_a == QPU_W_TLB_Z || waddr_m == QPU_W_TLB_Z ||
                     waddr_a == QPU_W_TLB_COLOR_MS ||
                     waddr_m == QPU_W_TLB_COLOR_MS ||
                     waddr_a == QPU_W_TLB_COLOR_ALL ||
                     waddr_m == QPU_W_TLB_COLOR_ALL ||
                     QPU_GET_FIELD(q->inst, QPU_SIG) == QPU_SIG_COLOR_LOAD)) {
                        while (c->qpu_inst_count < 3 ||
                               QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                                             QPU_SIG) != QPU_SIG_NONE) {
                                serialize_one_inst(c, qpu_NOP());
                        }
                        c->qpu_insts[c->qpu_inst_count - 1] =
                                qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                                            QPU_SIG_WAIT_FOR_SCOREBOARD);
                        scoreboard_wait_emitted = true;
                }

                serialize_one_inst(c, q->inst);

                remove_from_list(&q->link);
                free(q);
        }
}
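
/* Top-level QIR-to-QPU code generation: register-allocates the QIR
 * temporaries, translates each QIR instruction into queued QPU instructions,
 * then serializes the queue and appends the thread-end sequence.
 */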
void
vc4_generate_code(struct vc4_context *vc4, struct vc4_compile *c)
{
        struct qpu_reg *temp_registers = vc4_register_allocate(vc4, c);
        bool discard = false;

        make_empty_list(&c->qpu_inst_list);

        switch (c->stage) {
        case QSTAGE_VERT:
        case QSTAGE_COORD:
                queue(c, qpu_load_imm_ui(qpu_vrsetup(),
                                         (0x00001a00 +
                                          0x00100000 * c->num_inputs)));
                queue(c, qpu_load_imm_ui(qpu_vwsetup(), 0x00001a00));
                break;
        case QSTAGE_FRAG:
                break;
        }

        struct simple_node *node;
        foreach(node, &c->instructions) {
                struct qinst *qinst = (struct qinst *)node;

                if (vc4_debug & VC4_DEBUG_QPU) {
                        fprintf(stderr, "translating qinst to qpu: ");
                        qir_dump_inst(qinst);
                        fprintf(stderr, "\n");
                }
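
                /* Table mapping QIR ALU opcodes to the corresponding QPU
                 * opcode and to the pipe (add or mul) that executes it; the
                 * default case below emits through this table.
                 */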
                static const struct {
                        uint32_t op;
                        bool is_mul;
                } translate[] = {
#define A(name) [QOP_##name] = {QPU_A_##name, false}
#define M(name) [QOP_##name] = {QPU_M_##name, true}
                        /* ... A()/M() entries for each directly-translated
                         * QOP ...
                         */
                };

                struct qpu_reg src[4];
                for (int i = 0; i < qir_get_op_nsrc(qinst->op); i++) {
                        int index = qinst->src[i].index;
                        switch (qinst->src[i].file) {
                        case QFILE_TEMP:
                                src[i] = temp_registers[index];
                                break;
                        /* ... cases for the other QFILE_* source files ... */
                        }
                }

                struct qpu_reg dst;
                switch (qinst->dst.file) {
                case QFILE_NULL:
                        dst = qpu_ra(QPU_W_NOP);
                        break;
                case QFILE_TEMP:
                        dst = temp_registers[qinst->dst.index];
                        break;
                default:
                        assert(!"not reached");
                }

                switch (qinst->op) {
                case QOP_MOV:
                        /* Skip emitting the MOV if it's a no-op. */
                        if (dst.mux == QPU_MUX_A || dst.mux == QPU_MUX_B ||
                            dst.mux != src[0].mux || dst.addr != src[0].addr) {
                                queue(c, qpu_a_MOV(dst, src[0]));
                        }
                        break;

                case QOP_SF:
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_NOP), src[0]));
                        *last_inst(c) |= QPU_SF;
                        break;

                case QOP_SEL_X_0_ZS:
                case QOP_SEL_X_0_ZC:
                case QOP_SEL_X_0_NS:
                case QOP_SEL_X_0_NC:
                        queue(c, qpu_a_MOV(dst, src[0]));
                        set_last_cond_add(c, qinst->op - QOP_SEL_X_0_ZS +
                                          QPU_COND_ZS);

                        queue(c, qpu_a_XOR(dst, qpu_r0(), qpu_r0()));
                        set_last_cond_add(c, ((qinst->op - QOP_SEL_X_0_ZS) ^
                                              1) + QPU_COND_ZS);
                        break;

                case QOP_SEL_X_Y_ZS:
                case QOP_SEL_X_Y_ZC:
                case QOP_SEL_X_Y_NS:
                case QOP_SEL_X_Y_NC:
                        queue(c, qpu_a_MOV(dst, src[0]));
                        set_last_cond_add(c, qinst->op - QOP_SEL_X_Y_ZS +
                                          QPU_COND_ZS);

                        queue(c, qpu_a_MOV(dst, src[1]));
                        set_last_cond_add(c, ((qinst->op - QOP_SEL_X_Y_ZS) ^
                                              1) + QPU_COND_ZS);
                        break;

                case QOP_VPM_WRITE:
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_VPM), src[0]));
                        break;

                case QOP_VPM_READ:
                        queue(c, qpu_a_MOV(dst, qpu_ra(QPU_R_VPM)));
                        break;

                case QOP_RCP:
                case QOP_RSQ:
                case QOP_EXP2:
                case QOP_LOG2:
                        switch (qinst->op) {
                        case QOP_RCP:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIP),
                                                   src[0]));
                                break;
                        case QOP_RSQ:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIPSQRT),
                                                   src[0]));
                                break;
                        case QOP_EXP2:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_EXP),
                                                   src[0]));
                                break;
                        case QOP_LOG2:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_LOG),
                                                   src[0]));
                                break;
                        }

                        queue(c, qpu_a_MOV(dst, qpu_r4()));
                        break;

                case QOP_PACK_COLORS:
                        for (int i = 0; i < 4; i++) {
                                queue(c, qpu_m_MOV(qpu_r3(), src[i]));
                                *last_inst(c) |= QPU_PM;
                                *last_inst(c) |= QPU_SET_FIELD(QPU_PACK_MUL_8A + i,
                                                               QPU_PACK);
                        }

                        queue(c, qpu_a_MOV(dst, qpu_r3()));
                        break;

                case QOP_FRAG_X:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_ra(QPU_R_XY_PIXEL_COORD)));
                        break;

                case QOP_FRAG_Y:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_rb(QPU_R_XY_PIXEL_COORD)));
                        break;

                case QOP_FRAG_Z:
                case QOP_FRAG_W:
                        /* QOP_FRAG_Z/W don't emit instructions, just allocate
                         * the register to the Z/W payload.
                         */
                        break;

                case QOP_TLB_DISCARD_SETUP:
                        discard = true;
                        queue(c, qpu_a_MOV(src[0], src[0]));
                        *last_inst(c) |= QPU_SF;
                        break;

                case QOP_TLB_STENCIL_SETUP:
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_TLB_STENCIL_SETUP), src[0]));
                        break;

                case QOP_TLB_Z_WRITE:
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_TLB_Z), src[0]));
                        if (discard) {
                                set_last_cond_add(c, QPU_COND_ZS);
                        }
                        break;

                case QOP_TLB_COLOR_READ:
                        queue(c, qpu_NOP());
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_COLOR_LOAD);
                        break;

                case QOP_TLB_COLOR_WRITE:
                        queue(c, qpu_a_MOV(qpu_tlbc(), src[0]));
                        if (discard) {
                                set_last_cond_add(c, QPU_COND_ZS);
                        }
                        break;

                case QOP_VARY_ADD_C:
                        queue(c, qpu_a_FADD(dst, src[0], qpu_r5()));
                        break;

                case QOP_PACK_SCALED: {
                        uint64_t a = (qpu_a_MOV(dst, src[0]) |
                                      QPU_SET_FIELD(QPU_PACK_A_16A,
                                                    QPU_PACK));
                        uint64_t b = (qpu_a_MOV(dst, src[1]) |
                                      QPU_SET_FIELD(QPU_PACK_A_16B,
                                                    QPU_PACK));

                        /* If dst is also src[1], pack the second half first
                         * so we don't clobber it before reading it.
                         */
                        if (dst.mux == src[1].mux && dst.addr == src[1].addr) {
                                queue(c, b);
                                queue(c, a);
                        } else {
                                queue(c, a);
                                queue(c, b);
                        }
                        break;
                }

                case QOP_TEX_S:
                case QOP_TEX_T:
                case QOP_TEX_R:
                case QOP_TEX_B:
                        queue(c, qpu_a_MOV(qpu_rb(QPU_W_TMU0_S +
                                                  (qinst->op - QOP_TEX_S)),
                                           src[0]));
                        break;

                case QOP_TEX_RESULT:
                        queue(c, qpu_NOP());
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_LOAD_TMU0);
                        break;

                case QOP_R4_UNPACK_A:
                case QOP_R4_UNPACK_B:
                case QOP_R4_UNPACK_C:
                case QOP_R4_UNPACK_D:
                        assert(src[0].mux == QPU_MUX_R4);
                        queue(c, qpu_a_MOV(dst, src[0]));
                        *last_inst(c) |= QPU_PM;
                        *last_inst(c) |= QPU_SET_FIELD(QPU_UNPACK_8A +
                                                       (qinst->op -
                                                        QOP_R4_UNPACK_A),
                                                       QPU_UNPACK);
                        break;

                case QOP_UNPACK_8A:
                case QOP_UNPACK_8B:
                case QOP_UNPACK_8C:
                case QOP_UNPACK_8D: {
                        assert(src[0].mux == QPU_MUX_A);

                        /* And, since we're setting the pack bits, if the
                         * destination is in A it would get re-packed.
                         */
                        struct qpu_reg orig_dst = dst;
                        if (orig_dst.mux == QPU_MUX_A)
                                dst = qpu_r3();

                        queue(c, qpu_a_FMAX(dst, src[0], src[0]));
                        *last_inst(c) |= QPU_SET_FIELD(QPU_UNPACK_8A +
                                                       (qinst->op -
                                                        QOP_UNPACK_8A),
                                                       QPU_UNPACK);

                        if (orig_dst.mux == QPU_MUX_A) {
                                queue(c, qpu_a_MOV(orig_dst, dst));
                        }
                        break;
                }

                default:
                        assert(qinst->op < ARRAY_SIZE(translate));
                        assert(translate[qinst->op].op != 0); /* NOPs */

                        /* If we have only one source, put it in the second
                         * argument slot as well so that we don't take up
                         * another raddr just to get unused data.
                         */
                        if (qir_get_op_nsrc(qinst->op) == 1)
                                src[1] = src[0];

                        fixup_raddr_conflict(c, src[0], &src[1]);

                        if (translate[qinst->op].is_mul) {
                                queue(c, qpu_m_alu2(translate[qinst->op].op,
                                                    dst,
                                                    src[0], src[1]));
                        } else {
                                queue(c, qpu_a_alu2(translate[qinst->op].op,
                                                    dst,
                                                    src[0], src[1]));
                        }
                        break;
                }
        }

        serialize_insts(c);

        /* thread end can't have VPM write */
        if (QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_ADD) == QPU_W_VPM ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_MUL) == QPU_W_VPM) {
                serialize_one_inst(c, qpu_NOP());
        }

        /* thread end can't have uniform read */
        if (QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_RADDR_A) == QPU_R_UNIF ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_RADDR_B) == QPU_R_UNIF) {
                serialize_one_inst(c, qpu_NOP());
        }
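
        /* Emit the thread-end sequence: the program end signal is followed by
         * two delay-slot instructions, and fragment shaders release the tile
         * with a scoreboard unlock on the way out.
         */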
        c->qpu_insts[c->qpu_inst_count - 1] =
                qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                            QPU_SIG_PROG_END);
        serialize_one_inst(c, qpu_NOP());
        serialize_one_inst(c, qpu_NOP());

        switch (c->stage) {
        case QSTAGE_VERT:
        case QSTAGE_COORD:
                break;
        case QSTAGE_FRAG:
                c->qpu_insts[c->qpu_inst_count - 1] =
                        qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                                    QPU_SIG_SCOREBOARD_UNLOCK);
                break;
        }

        if (vc4_debug & VC4_DEBUG_QPU)
                vc4_dump_program(c);

        vc4_qpu_validate(c->qpu_insts, c->qpu_inst_count);

        free(temp_registers);
}