/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <inttypes.h>

#include "vc4_context.h"
#include "vc4_qir.h"
#include "vc4_qpu.h"
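
/* Dumps the final program for debugging: one line per instruction, the raw
 * 64-bit encoding followed by its disassembly.
 */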
static void
vc4_dump_program(struct vc4_compile *c)
{
        fprintf(stderr, "%s:\n", qir_get_stage_name(c->stage));

        for (int i = 0; i < c->qpu_inst_count; i++) {
                fprintf(stderr, "0x%016"PRIx64" ", c->qpu_insts[i]);
                vc4_qpu_disasm(&c->qpu_insts[i], 1);
                fprintf(stderr, "\n");
        }
}
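
/* Generated instructions are queued on a linked list rather than written
 * straight to the output array, so that serialize_insts() can insert NOPs
 * between them afterward to satisfy the hardware's scheduling restrictions.
 */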
struct queued_qpu_inst {
        struct simple_node link;
        uint64_t inst;
};
static void
queue(struct vc4_compile *c, uint64_t inst)
{
        struct queued_qpu_inst *q = calloc(1, sizeof(*q));
        q->inst = inst;
        insert_at_tail(&c->qpu_inst_list, &q->link);
}
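
/* Returns a pointer to the most recently queued instruction, so its
 * condition, signal, or pack fields can be patched after emission.
 */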
static uint64_t *
last_inst(struct vc4_compile *c)
{
        struct queued_qpu_inst *q =
                (struct queued_qpu_inst *)last_elem(&c->qpu_inst_list);
        return &q->inst;
}
static void
set_last_cond_add(struct vc4_compile *c, uint32_t cond)
{
        *last_inst(c) = qpu_set_cond_add(*last_inst(c), cond);
}
/**
 * This is used to resolve the fact that we might register-allocate two
 * different operands of an instruction to the same physical register file
 * even though instructions have only one field for the register file source
 * address.
 *
 * In that case, we need to move one to a temporary that can be used in the
 * instruction, instead.
 */
static void
fixup_raddr_conflict(struct vc4_compile *c,
                     struct qpu_reg src0, struct qpu_reg *src1)
{
        if ((src0.mux == QPU_MUX_A || src0.mux == QPU_MUX_B) &&
            (src1->mux == QPU_MUX_A || src1->mux == QPU_MUX_B) &&
            src0.addr != src1->addr) {
                queue(c, qpu_a_MOV(qpu_r3(), *src1));
                *src1 = qpu_r3();
        }
}
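
/* Appends one instruction to the final program, growing the output array
 * geometrically as needed.
 */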
static void
serialize_one_inst(struct vc4_compile *c, uint64_t inst)
{
        if (c->qpu_inst_count >= c->qpu_inst_size) {
                c->qpu_inst_size = MAX2(16, c->qpu_inst_size * 2);
                c->qpu_insts = realloc(c->qpu_insts,
                                       c->qpu_inst_size * sizeof(uint64_t));
        }
        c->qpu_insts[c->qpu_inst_count++] = inst;
}
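
/* Drains the queued instruction list into the output array, inserting NOPs
 * where needed to satisfy the scheduling restrictions quoted below from the
 * hardware documentation.
 */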
static void
serialize_insts(struct vc4_compile *c)
{
        int last_sfu_write = -10;
        bool scoreboard_wait_emitted = false;
        while (!is_empty_list(&c->qpu_inst_list)) {
                struct queued_qpu_inst *q =
                        (struct queued_qpu_inst *)first_elem(&c->qpu_inst_list);
                uint32_t last_waddr_a = QPU_W_NOP, last_waddr_b = QPU_W_NOP;
                uint32_t raddr_a = QPU_GET_FIELD(q->inst, QPU_RADDR_A);
                uint32_t raddr_b = QPU_GET_FIELD(q->inst, QPU_RADDR_B);

                if (c->qpu_inst_count > 0) {
                        uint64_t last_inst = c->qpu_insts[c->qpu_inst_count -
                                                          1];
                        uint32_t last_waddr_add = QPU_GET_FIELD(last_inst,
                                                                QPU_WADDR_ADD);
                        uint32_t last_waddr_mul = QPU_GET_FIELD(last_inst,
                                                                QPU_WADDR_MUL);
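
                        /* The WS bit swaps the write destinations: when set,
                         * the add pipeline result goes to regfile B and the
                         * mul result to regfile A.
                         */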
                        if (last_inst & QPU_WS) {
                                last_waddr_a = last_waddr_mul;
                                last_waddr_b = last_waddr_add;
                        } else {
                                last_waddr_a = last_waddr_add;
                                last_waddr_b = last_waddr_mul;
                        }
                }

                uint32_t src_muxes[] = {
                        QPU_GET_FIELD(q->inst, QPU_ADD_A),
                        QPU_GET_FIELD(q->inst, QPU_ADD_B),
                        QPU_GET_FIELD(q->inst, QPU_MUL_A),
                        QPU_GET_FIELD(q->inst, QPU_MUL_B),
                };
138 /* "An instruction must not read from a location in physical
139 * regfile A or B that was written to by the previous
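                /* Only raddr values below 32 name a physical regfile row;
                 * higher values are special sources such as uniforms and
                 * varyings, which this hazard does not apply to.
                 */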
                bool needs_raddr_vs_waddr_nop = false;
                bool reads_r4 = false;
                for (int i = 0; i < ARRAY_SIZE(src_muxes); i++) {
                        if ((raddr_a < 32 &&
                             src_muxes[i] == QPU_MUX_A &&
                             last_waddr_a == raddr_a) ||
                            (raddr_b < 32 &&
                             src_muxes[i] == QPU_MUX_B &&
                             last_waddr_b == raddr_b)) {
                                needs_raddr_vs_waddr_nop = true;
                        }
                        if (src_muxes[i] == QPU_MUX_R4)
                                reads_r4 = true;
                }

                if (needs_raddr_vs_waddr_nop) {
                        serialize_one_inst(c, qpu_NOP());
                }
161 /* "After an SFU lookup instruction, accumulator r4 must not
162 * be read in the following two instructions. Any other
163 * instruction that results in r4 being written (that is, TMU
164 * read, TLB read, SFU lookup) cannot occur in the two
165 * instructions following an SFU lookup."
168 while (c
->qpu_inst_count
- last_sfu_write
< 3) {
169 serialize_one_inst(c
, qpu_NOP());
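
                /* Note where SFU lookups are issued: writing any address in
                 * the RECIP..LOG range starts a lookup whose result lands in
                 * accumulator r4.
                 */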
                uint32_t waddr_a = QPU_GET_FIELD(q->inst, QPU_WADDR_ADD);
                uint32_t waddr_m = QPU_GET_FIELD(q->inst, QPU_WADDR_MUL);
                if ((waddr_a >= QPU_W_SFU_RECIP && waddr_a <= QPU_W_SFU_LOG) ||
                    (waddr_m >= QPU_W_SFU_RECIP && waddr_m <= QPU_W_SFU_LOG)) {
                        last_sfu_write = c->qpu_inst_count;
                }
180 /* "A scoreboard wait must not occur in the first two
181 * instructions of a fragment shader. This is either the
182 * explicit Wait for Scoreboard signal or an implicit wait
183 * with the first tile-buffer read or write instruction."
                if (!scoreboard_wait_emitted &&
                    (waddr_a == QPU_W_TLB_Z || waddr_m == QPU_W_TLB_Z ||
                     waddr_a == QPU_W_TLB_COLOR_MS ||
                     waddr_m == QPU_W_TLB_COLOR_MS ||
                     waddr_a == QPU_W_TLB_COLOR_ALL ||
                     waddr_m == QPU_W_TLB_COLOR_ALL ||
                     QPU_GET_FIELD(q->inst, QPU_SIG) == QPU_SIG_COLOR_LOAD)) {
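                        /* Pad until we are past the first two instructions
                         * and the previous instruction's signal slot is free,
                         * then convert that signal into the scoreboard wait.
                         */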
                        while (c->qpu_inst_count < 3 ||
                               QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                                             QPU_SIG) != QPU_SIG_NONE) {
                                serialize_one_inst(c, qpu_NOP());
                        }
                        c->qpu_insts[c->qpu_inst_count - 1] =
                                qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                                            QPU_SIG_WAIT_FOR_SCOREBOARD);
                        scoreboard_wait_emitted = true;
                }

                serialize_one_inst(c, q->inst);

                remove_from_list(&q->link);
                free(q);
        }
}
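
/* Translates the QIR instruction list into QPU instructions, doing trivial
 * register allocation along the way: each temporary gets the first free
 * accumulator or physical register, released again after its last use.
 */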
void
vc4_generate_code(struct vc4_compile *c)
{
        struct qpu_reg allocate_to_qpu_reg[3 + 32 + 32];
        bool reg_in_use[ARRAY_SIZE(allocate_to_qpu_reg)];
        int *reg_allocated = calloc(c->num_temps, sizeof(*reg_allocated));
        int *reg_uses_remaining =
                calloc(c->num_temps, sizeof(*reg_uses_remaining));
        bool discard = false;

        for (int i = 0; i < ARRAY_SIZE(reg_in_use); i++)
                reg_in_use[i] = false;
        for (int i = 0; i < c->num_temps; i++)
                reg_allocated[i] = -1;
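        /* Allocatable registers, in allocation-priority order: accumulators
         * r0-r2 first (indices 0-2), then the 32 regfile A rows, then the 32
         * regfile B rows.
         */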
        for (int i = 0; i < 3; i++)
                allocate_to_qpu_reg[i] = qpu_rn(i);
        for (int i = 0; i < 32; i++)
                allocate_to_qpu_reg[i + 3] = qpu_ra(i);
        for (int i = 0; i < 32; i++)
                allocate_to_qpu_reg[i + 3 + 32] = qpu_rb(i);

        make_empty_list(&c->qpu_inst_list);

        struct simple_node *node;
        foreach(node, &c->instructions) {
                struct qinst *qinst = (struct qinst *)node;

                if (qinst->dst.file == QFILE_TEMP)
                        reg_uses_remaining[qinst->dst.index]++;
                for (int i = 0; i < qir_get_op_nsrc(qinst->op); i++) {
                        if (qinst->src[i].file == QFILE_TEMP)
                                reg_uses_remaining[qinst->src[i].index]++;
                }
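                /* The fragment Z/W payload arrives in a fixed regfile B row,
                 * so keep the allocator from handing that row out when it
                 * will be read later.
                 */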
                if (qinst->op == QOP_TLB_PASSTHROUGH_Z_WRITE ||
                    qinst->op == QOP_FRAG_Z)
                        reg_in_use[3 + 32 + QPU_R_FRAG_PAYLOAD_ZW] = true;
        }
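
        /* Vertex and coordinate shaders read their attributes from and write
         * their results to the VPM, whose read and write FIFOs have to be
         * configured up front; the setup words encode the VPM access mode,
         * stride, and base address.
         */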
251 queue(c
, qpu_load_imm_ui(qpu_vrsetup(),
253 0x00100000 * c
->num_inputs
)));
254 queue(c
, qpu_load_imm_ui(qpu_vwsetup(), 0x00001a00));

        foreach(node, &c->instructions) {
                struct qinst *qinst = (struct qinst *)node;

                if (vc4_debug & VC4_DEBUG_QIR) {
                        fprintf(stderr, "translating qinst to qpu: ");
                        qir_dump_inst(qinst);
                        fprintf(stderr, "\n");
                }

                static const struct {
                        uint32_t op;
                        bool is_mul;
                } translate[] = {
#define A(name) [QOP_##name] = {QPU_A_##name, false}
#define M(name) [QOP_##name] = {QPU_M_##name, true}
                        A(FADD),
                        A(FSUB),
                        A(FMIN),
                        A(FMAX),
                        A(FMINABS),
                        A(FMAXABS),
                        A(FTOI),
                        A(ITOF),

                        M(FMUL),
                };

                struct qpu_reg src[4];
                for (int i = 0; i < qir_get_op_nsrc(qinst->op); i++) {
                        int index = qinst->src[i].index;
                        switch (qinst->src[i].file) {
                        case QFILE_NULL:
                                src[i] = qpu_rn(0);
                                break;
                        case QFILE_TEMP:
                                if (reg_allocated[index] == -1) {
                                        fprintf(stderr, "undefined reg use: ");
                                        qir_dump_inst(qinst);
                                        fprintf(stderr, "\n");

                                        src[i] = qpu_rn(0);
                                } else {
                                        src[i] = allocate_to_qpu_reg[reg_allocated[index]];
                                        reg_uses_remaining[index]--;
                                        if (reg_uses_remaining[index] == 0)
                                                reg_in_use[reg_allocated[index]] = false;
                                }
                                break;
                        case QFILE_UNIF:
                                src[i] = qpu_unif();
                                break;
                        case QFILE_VARY:
                                src[i] = qpu_vary();
                                break;
                        }
                }
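
                /* Pick a QPU register for the destination, allocating the
                 * first free entry when this temporary has not been seen
                 * before and releasing it again after its last use.
                 */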
                struct qpu_reg dst;
                switch (qinst->dst.file) {
                case QFILE_NULL:
                        dst = qpu_ra(QPU_W_NOP);
                        break;

                case QFILE_TEMP:
                        if (reg_allocated[qinst->dst.index] == -1) {
                                int alloc;
                                for (alloc = 0;
                                     alloc < ARRAY_SIZE(reg_in_use);
                                     alloc++) {
                                        /* The pack flags require an A-file register. */
                                        if (qinst->op == QOP_PACK_SCALED &&
                                            allocate_to_qpu_reg[alloc].mux != QPU_MUX_A) {
                                                continue;
                                        }

                                        if (!reg_in_use[alloc])
                                                break;
                                }
                                assert(alloc != ARRAY_SIZE(reg_in_use) && "need better reg alloc");
                                reg_in_use[alloc] = true;
                                reg_allocated[qinst->dst.index] = alloc;
                        }

                        dst = allocate_to_qpu_reg[reg_allocated[qinst->dst.index]];

                        reg_uses_remaining[qinst->dst.index]--;
                        if (reg_uses_remaining[qinst->dst.index] == 0) {
                                reg_in_use[reg_allocated[qinst->dst.index]] =
                                        false;
                        }
                        break;

                case QFILE_VARY:
                case QFILE_UNIF:
                        assert(!"not reached");
                        break;
                }

                switch (qinst->op) {
                case QOP_MOV:
                        /* Skip emitting the MOV if it's a no-op. */
                        if (dst.mux == QPU_MUX_A || dst.mux == QPU_MUX_B ||
                            dst.mux != src[0].mux || dst.addr != src[0].addr) {
                                queue(c, qpu_a_MOV(dst, src[0]));
                        }
                        break;

                case QOP_SF:
                        fixup_raddr_conflict(c, src[0], &src[1]);
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_NOP), src[0]));
                        *last_inst(c) |= QPU_SF;
                        break;
389 queue(c
, qpu_a_MOV(dst
, src
[0]));
390 set_last_cond_add(c
, qinst
->op
- QOP_SEL_X_0_ZS
+
393 queue(c
, qpu_a_XOR(dst
, qpu_r0(), qpu_r0()));
394 set_last_cond_add(c
, ((qinst
->op
- QOP_SEL_X_0_ZS
) ^

                case QOP_SEL_X_Y_ZS:
                case QOP_SEL_X_Y_ZC:
                case QOP_SEL_X_Y_NS:
                case QOP_SEL_X_Y_NC:
                        queue(c, qpu_a_MOV(dst, src[0]));
                        set_last_cond_add(c, qinst->op - QOP_SEL_X_Y_ZS +
                                          QPU_COND_ZS);

                        queue(c, qpu_a_MOV(dst, src[1]));
                        set_last_cond_add(c, ((qinst->op - QOP_SEL_X_Y_ZS) ^
                                              1) + QPU_COND_ZS);
                        break;

                case QOP_VPM_WRITE:
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_VPM), src[0]));
                        break;

                case QOP_VPM_READ:
                        queue(c, qpu_a_MOV(dst, qpu_ra(QPU_R_VPM)));
                        break;
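
                /* SFU operations are issued by writing the operand to the
                 * matching special-function register; the result appears in
                 * accumulator r4, with serialize_insts() adding the required
                 * two-instruction delay before any r4 read.
                 */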
                case QOP_RCP:
                case QOP_RSQ:
                case QOP_EXP2:
                case QOP_LOG2:
                        switch (qinst->op) {
                        case QOP_RCP:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIP),
                                                   src[0]));
                                break;
                        case QOP_RSQ:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIPSQRT),
                                                   src[0]));
                                break;
                        case QOP_EXP2:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_EXP),
                                                   src[0]));
                                break;
                        case QOP_LOG2:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_LOG),
                                                   src[0]));
                                break;
                        default:
                                abort();
                        }

                        queue(c, qpu_a_MOV(dst, qpu_r4()));

                        break;

                case QOP_PACK_COLORS:
                        for (int i = 0; i < 4; i++) {
                                queue(c, qpu_m_MOV(qpu_r3(), src[i]));
                                *last_inst(c) |= QPU_PM;
                                *last_inst(c) |= QPU_SET_FIELD(QPU_PACK_MUL_8A + i,
                                                               QPU_PACK);
                        }

                        queue(c, qpu_a_MOV(dst, qpu_r3()));

                        break;
462 queue(c
, qpu_a_ITOF(dst
,
463 qpu_ra(QPU_R_XY_PIXEL_COORD
)));
467 queue(c
, qpu_a_ITOF(dst
,
468 qpu_rb(QPU_R_XY_PIXEL_COORD
)));
472 queue(c
, qpu_a_ITOF(dst
,
473 qpu_rb(QPU_R_FRAG_PAYLOAD_ZW
)));
477 queue(c
, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIP
),
478 qpu_ra(QPU_R_FRAG_PAYLOAD_ZW
)));
480 queue(c
, qpu_a_MOV(dst
, qpu_r4()));
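
                /* Discards are implemented by loading the discard value into
                 * the condition flags with an SF MOV, then predicating the
                 * TLB Z and color writes below on QPU_COND_ZS so discarded
                 * pixels never reach the tile buffer.
                 */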
                case QOP_TLB_DISCARD_SETUP:
                        discard = true;
                        queue(c, qpu_a_MOV(src[0], src[0]));
                        *last_inst(c) |= QPU_SF;
                        break;

                case QOP_TLB_PASSTHROUGH_Z_WRITE:
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_TLB_Z),
                                           qpu_rb(QPU_R_FRAG_PAYLOAD_ZW)));
                        if (discard)
                                set_last_cond_add(c, QPU_COND_ZS);
                        break;

                case QOP_TLB_COLOR_READ:
                        queue(c, qpu_NOP());
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_COLOR_LOAD);
                        break;

                case QOP_TLB_COLOR_WRITE:
                        queue(c, qpu_a_MOV(qpu_tlbc(), src[0]));
                        if (discard)
                                set_last_cond_add(c, QPU_COND_ZS);
                        break;
512 queue(c
, qpu_a_FADD(dst
, src
[0], qpu_r5()));

                case QOP_PACK_SCALED: {
                        uint64_t a = (qpu_a_MOV(dst, src[0]) |
                                      QPU_SET_FIELD(QPU_PACK_A_16A,
                                                    QPU_PACK));
                        uint64_t b = (qpu_a_MOV(dst, src[1]) |
                                      QPU_SET_FIELD(QPU_PACK_A_16B,
                                                    QPU_PACK));

                        /* If dst aliases src[1], the 16A write would clobber
                         * src[1] before it was read, so emit the 16B half
                         * first.
                         */
                        if (dst.mux == src[1].mux && dst.addr == src[1].addr) {
                                queue(c, b);
                                queue(c, a);
                        } else {
                                queue(c, a);
                                queue(c, b);
                        }
                        break;
                }

                case QOP_TEX_S:
                case QOP_TEX_T:
                case QOP_TEX_R:
                case QOP_TEX_B:
                        queue(c, qpu_a_MOV(qpu_rb(QPU_W_TMU0_S +
                                                  (qinst->op - QOP_TEX_S)),
                                           src[0]));
                        break;

                case QOP_TEX_RESULT:
                        queue(c, qpu_NOP());
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_LOAD_TMU0);
                        break;

                case QOP_R4_UNPACK_A:
                case QOP_R4_UNPACK_B:
                case QOP_R4_UNPACK_C:
                case QOP_R4_UNPACK_D:
                        queue(c, qpu_a_MOV(dst, qpu_r4()));
                        *last_inst(c) |= QPU_PM;
                        *last_inst(c) |= QPU_SET_FIELD(QPU_UNPACK_R4_8A +
                                                       (qinst->op -
                                                        QOP_R4_UNPACK_A),
                                                       QPU_UNPACK);
                        break;

                default:
                        assert(qinst->op < ARRAY_SIZE(translate));
                        assert(translate[qinst->op].op != 0); /* NOPs */

                        /* If we have only one source, put it in the second
                         * argument slot as well so that we don't take up
                         * another raddr just to get unused data.
                         */
                        if (qir_get_op_nsrc(qinst->op) == 1)
                                src[1] = src[0];

                        fixup_raddr_conflict(c, src[0], &src[1]);

                        if (translate[qinst->op].is_mul) {
                                queue(c, qpu_m_alu2(translate[qinst->op].op,
                                                    dst, src[0], src[1]));
                        } else {
                                queue(c, qpu_a_alu2(translate[qinst->op].op,
                                                    dst, src[0], src[1]));
                        }
                        break;
                }
        }

        serialize_insts(c);

        /* thread end can't have VPM write */
        if (QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_ADD) == QPU_W_VPM ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_MUL) == QPU_W_VPM) {
                serialize_one_inst(c, qpu_NOP());
        }
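
        /* Signal program end on the final instruction; the two NOPs after it
         * are delay slots that the hardware still executes.
         */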
        c->qpu_insts[c->qpu_inst_count - 1] =
                qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                            QPU_SIG_PROG_END);
        serialize_one_inst(c, qpu_NOP());
        serialize_one_inst(c, qpu_NOP());

        switch (c->stage) {
        case QSTAGE_VERT:
        case QSTAGE_COORD:
                break;
        case QSTAGE_FRAG:
                c->qpu_insts[c->qpu_inst_count - 1] =
                        qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                                    QPU_SIG_SCOREBOARD_UNLOCK);
                break;
        }

        if (vc4_debug & VC4_DEBUG_QPU)
                vc4_dump_program(c);

        vc4_qpu_validate(c->qpu_insts, c->qpu_inst_count);

        free(reg_allocated);
        free(reg_uses_remaining);
}