/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <inttypes.h>

#include "vc4_context.h"
#include "vc4_qir.h"
#include "vc4_qpu.h"
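
/*
 * QPU instruction emission for the VC4 driver: lowers a shader's QIR
 * instructions into 64-bit QPU instruction words.  Code generation runs in
 * two phases: vc4_generate_code() queues instructions onto a list, then
 * serialize_insts() flushes that list into the flat qpu_insts array while
 * inserting the NOPs the hardware's hazard rules require.
 */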
static void
vc4_dump_program(struct qcompile *c)
{
        fprintf(stderr, "%s:\n", qir_get_stage_name(c->stage));

        for (int i = 0; i < c->qpu_inst_count; i++) {
                fprintf(stderr, "0x%016"PRIx64" ", c->qpu_insts[i]);
                vc4_qpu_disasm(&c->qpu_insts[i], 1);
                fprintf(stderr, "\n");
        }
}
struct queued_qpu_inst {
        struct simple_node link;
        uint64_t inst;
};

static void
queue(struct qcompile *c, uint64_t inst)
{
        struct queued_qpu_inst *q = calloc(1, sizeof(*q));
        q->inst = inst;
        insert_at_tail(&c->qpu_inst_list, &q->link);
}
static uint64_t *
last_inst(struct qcompile *c)
{
        struct queued_qpu_inst *q =
                (struct queued_qpu_inst *)last_elem(&c->qpu_inst_list);
        return &q->inst;
}

/**
 * This is used to resolve the fact that we might register-allocate two
 * different operands of an instruction to the same physical register file
 * even though instructions have only one field for the register file source
 * address.
 *
 * In that case, we need to move one to a temporary that can be used in the
 * instruction, instead.
 */
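/* For example, if both operands end up in regfile A at different
 * addresses, a single instruction can encode only one raddr_a, so the
 * second operand is first copied through accumulator r3.
 */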
static void
fixup_raddr_conflict(struct qcompile *c,
                     struct qpu_reg src0, struct qpu_reg *src1)
{
        if ((src0.mux == QPU_MUX_A || src0.mux == QPU_MUX_B) &&
            (src1->mux == QPU_MUX_A || src1->mux == QPU_MUX_B) &&
            src0.addr != src1->addr) {
                queue(c, qpu_a_MOV(qpu_r3(), *src1));
                *src1 = qpu_r3();
        }
}
static void
serialize_one_inst(struct qcompile *c, uint64_t inst)
{
        if (c->qpu_inst_count >= c->qpu_inst_size) {
                c->qpu_inst_size = MAX2(16, c->qpu_inst_size * 2);
                c->qpu_insts = realloc(c->qpu_insts,
                                       c->qpu_inst_size * sizeof(uint64_t));
        }

        c->qpu_insts[c->qpu_inst_count++] = inst;
}
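
/* Flushes the queued instruction list into c->qpu_insts, inserting NOPs
 * where needed to satisfy the hardware rules quoted from the VideoCore IV
 * documentation below.
 */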
static void
serialize_insts(struct qcompile *c)
{
        int last_sfu_write = -10;
        bool scoreboard_wait_emitted = false;

        while (!is_empty_list(&c->qpu_inst_list)) {
                struct queued_qpu_inst *q =
                        (struct queued_qpu_inst *)first_elem(&c->qpu_inst_list);
                uint32_t last_waddr_a = QPU_W_NOP, last_waddr_b = QPU_W_NOP;
                uint32_t raddr_a = QPU_GET_FIELD(q->inst, QPU_RADDR_A);
                uint32_t raddr_b = QPU_GET_FIELD(q->inst, QPU_RADDR_B);

                if (c->qpu_inst_count > 0) {
                        uint64_t last_inst = c->qpu_insts[c->qpu_inst_count -
                                                          1];
                        uint32_t last_waddr_add = QPU_GET_FIELD(last_inst,
                                                                QPU_WADDR_ADD);
                        uint32_t last_waddr_mul = QPU_GET_FIELD(last_inst,
                                                                QPU_WADDR_MUL);

                        if (last_inst & QPU_WS) {
                                last_waddr_a = last_waddr_mul;
                                last_waddr_b = last_waddr_add;
                        } else {
                                last_waddr_a = last_waddr_add;
                                last_waddr_b = last_waddr_mul;
                        }
                }

                uint32_t src_muxes[] = {
                        QPU_GET_FIELD(q->inst, QPU_ADD_A),
                        QPU_GET_FIELD(q->inst, QPU_ADD_B),
                        QPU_GET_FIELD(q->inst, QPU_MUL_A),
                        QPU_GET_FIELD(q->inst, QPU_MUL_B),
                };
132 /* "An instruction must not read from a location in physical
133 * regfile A or B that was written to by the previous
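                /* For example, an instruction writing ra1 can't be
                 * immediately followed by one reading ra1 back through
                 * raddr_a; a NOP has to separate the two.
                 */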
                bool needs_raddr_vs_waddr_nop = false;
                bool reads_r4 = false;
                for (int i = 0; i < ARRAY_SIZE(src_muxes); i++) {
                        if ((src_muxes[i] == QPU_MUX_A &&
                             last_waddr_a == raddr_a) ||
                            (src_muxes[i] == QPU_MUX_B &&
                             last_waddr_b == raddr_b)) {
                                needs_raddr_vs_waddr_nop = true;
                        }
                        if (src_muxes[i] == QPU_MUX_R4)
                                reads_r4 = true;
                }

                if (needs_raddr_vs_waddr_nop) {
                        serialize_one_inst(c, qpu_NOP());
                }
155 /* "After an SFU lookup instruction, accumulator r4 must not
156 * be read in the following two instructions. Any other
157 * instruction that results in r4 being written (that is, TMU
158 * read, TLB read, SFU lookup) cannot occur in the two
159 * instructions following an SFU lookup."
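                /* last_sfu_write starts out at -10 so this distance check
                 * can never fire before the first SFU write actually
                 * happens.
                 */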
                if (reads_r4) {
                        while (c->qpu_inst_count - last_sfu_write < 3) {
                                serialize_one_inst(c, qpu_NOP());
                        }
                }

                uint32_t waddr_a = QPU_GET_FIELD(q->inst, QPU_WADDR_ADD);
                uint32_t waddr_m = QPU_GET_FIELD(q->inst, QPU_WADDR_MUL);
                if ((waddr_a >= QPU_W_SFU_RECIP && waddr_a <= QPU_W_SFU_LOG) ||
                    (waddr_m >= QPU_W_SFU_RECIP && waddr_m <= QPU_W_SFU_LOG)) {
                        last_sfu_write = c->qpu_inst_count;
                }
174 /* "A scoreboard wait must not occur in the first two
175 * instructions of a fragment shader. This is either the
176 * explicit Wait for Scoreboard signal or an implicit wait
177 * with the first tile-buffer read or write instruction."
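                /* The wait is folded into the signal field of the
                 * instruction just before the first TLB access, which is
                 * why we pad with NOPs until the preceding instruction's
                 * signal slot is free (QPU_SIG_NONE) and the signal lands
                 * no earlier than the third instruction.
                 */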
                if (!scoreboard_wait_emitted &&
                    (waddr_a == QPU_W_TLB_Z || waddr_m == QPU_W_TLB_Z ||
                     waddr_a == QPU_W_TLB_COLOR_MS ||
                     waddr_m == QPU_W_TLB_COLOR_MS ||
                     waddr_a == QPU_W_TLB_COLOR_ALL ||
                     waddr_m == QPU_W_TLB_COLOR_ALL ||
                     QPU_GET_FIELD(q->inst, QPU_SIG) == QPU_SIG_COLOR_LOAD)) {
                        while (c->qpu_inst_count < 3 ||
                               QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                                             QPU_SIG) != QPU_SIG_NONE) {
                                serialize_one_inst(c, qpu_NOP());
                        }
                        c->qpu_insts[c->qpu_inst_count - 1] =
                                qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                                            QPU_SIG_WAIT_FOR_SCOREBOARD);
                        scoreboard_wait_emitted = true;
                }

                serialize_one_inst(c, q->inst);

                remove_from_list(&q->link);
                free(q);
        }
}
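
/* Register allocation here is a trivial greedy scan: a temporary gets the
 * first free entry in allocate_to_qpu_reg[] when it's first written, and
 * the entry is released once the temporary's statically-counted uses are
 * exhausted.
 */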
void
vc4_generate_code(struct qcompile *c)
{
        struct qpu_reg allocate_to_qpu_reg[3 + 32 + 32];
        bool reg_in_use[ARRAY_SIZE(allocate_to_qpu_reg)];
        int *reg_allocated = calloc(c->num_temps, sizeof(*reg_allocated));
        int *reg_uses_remaining =
                calloc(c->num_temps, sizeof(*reg_uses_remaining));
        bool discard = false;
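
        /* Allocation map layout: entries 0..2 are accumulators r0-r2,
         * 3..34 are regfile A, and 35..66 are regfile B.  r3 is kept free
         * as a scratch register (fixup_raddr_conflict(), QOP_PACK_COLORS),
         * and r4/r5 are written by hardware (SFU/TMU/TLB results and the
         * varying term, respectively).
         */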
        for (int i = 0; i < ARRAY_SIZE(reg_in_use); i++)
                reg_in_use[i] = false;
        for (int i = 0; i < c->num_temps; i++)
                reg_allocated[i] = -1;
        for (int i = 0; i < 3; i++)
                allocate_to_qpu_reg[i] = qpu_rn(i);
        for (int i = 0; i < 32; i++)
                allocate_to_qpu_reg[i + 3] = qpu_ra(i);
        for (int i = 0; i < 32; i++)
                allocate_to_qpu_reg[i + 3 + 32] = qpu_rb(i);

        make_empty_list(&c->qpu_inst_list);
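
        /* Pre-pass over the QIR: count how many times each temporary is
         * used, so its physical register can be freed at its statically
         * last use below, and pre-reserve the B-file Z/W payload register
         * for instructions that read it directly.
         */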
        struct simple_node *node;
        foreach(node, &c->instructions) {
                struct qinst *qinst = (struct qinst *)node;

                if (qinst->dst.file == QFILE_TEMP)
                        reg_uses_remaining[qinst->dst.index]++;
                for (int i = 0; i < qir_get_op_nsrc(qinst->op); i++) {
                        if (qinst->src[i].file == QFILE_TEMP)
                                reg_uses_remaining[qinst->src[i].index]++;
                }

                if (qinst->op == QOP_TLB_PASSTHROUGH_Z_WRITE ||
                    qinst->op == QOP_FRAG_Z)
                        reg_in_use[3 + 32 + QPU_R_FRAG_PAYLOAD_ZW] = true;
        }

        switch (c->stage) {
        case QSTAGE_VERT:
        case QSTAGE_COORD:
                /* VPM read setup.  Assumption: the base setup bits were lost
                 * from this copy; using the same 0x00001a00 base as the
                 * write setup below, with the input vector count scaled
                 * into the setup word at bit 20.
                 */
                queue(c, qpu_load_imm_ui(qpu_vrsetup(),
                                         (0x00001a00 +
                                          0x00100000 * c->num_inputs)));
                queue(c, qpu_load_imm_ui(qpu_vwsetup(), 0x00001a00));
                break;
        case QSTAGE_FRAG:
                break;
        }
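
        /* Main translation loop: each QIR instruction is mapped to one or
         * more queued QPU instructions, allocating physical registers on
         * the fly.
         */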
        foreach(node, &c->instructions) {
                struct qinst *qinst = (struct qinst *)node;

#if 0
                fprintf(stderr, "translating qinst to qpu: ");
                qir_dump_inst(qinst);
                fprintf(stderr, "\n");
#endif

                static const struct {
                        uint32_t op;
                        bool is_mul;
                } translate[] = {
#define A(name) [QOP_##name] = {QPU_A_##name, false}
#define M(name) [QOP_##name] = {QPU_M_##name, true}
                        /* Assumption: the exact entry list was lost from
                         * this copy of the file; these are the basic ALU
                         * ops the default case below relies on.
                         */
                        A(FADD),
                        A(FSUB),
                        A(FMIN),
                        A(FMAX),
                        A(FMINABS),
                        A(FMAXABS),

                        M(FMUL),
                };

                static const uint32_t compareflags[] = {
                        [QOP_SEQ - QOP_SEQ] = QPU_COND_ZS,
                        [QOP_SNE - QOP_SEQ] = QPU_COND_ZC,
                        [QOP_SLT - QOP_SEQ] = QPU_COND_NS,
                        [QOP_SGE - QOP_SEQ] = QPU_COND_NC,
                };

                struct qpu_reg src[4];
                for (int i = 0; i < qir_get_op_nsrc(qinst->op); i++) {
                        int index = qinst->src[i].index;
                        switch (qinst->src[i].file) {
                        case QFILE_NULL:
                                src[i] = qpu_rn(0);
                                break;
                        case QFILE_TEMP:
                                if (reg_allocated[index] == -1) {
                                        fprintf(stderr, "undefined reg use: ");
                                        qir_dump_inst(qinst);
                                        fprintf(stderr, "\n");

                                        src[i] = qpu_rn(0);
                                } else {
                                        src[i] = allocate_to_qpu_reg[reg_allocated[index]];
                                        reg_uses_remaining[index]--;
                                        if (reg_uses_remaining[index] == 0)
                                                reg_in_use[reg_allocated[index]] = false;
                                }
                                break;
                        case QFILE_UNIF:
                                src[i] = qpu_unif();
                                break;
                        case QFILE_VARY:
                                src[i] = qpu_vary();
                                break;
                        }
                }
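
                /* Because use counts are decremented as sources are
                 * consumed, a temporary's register is released at its last
                 * read above, which allows the dst allocation below to
                 * reuse one of this instruction's own source registers.
                 */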
                struct qpu_reg dst;
                switch (qinst->dst.file) {
                case QFILE_NULL:
                        dst = qpu_ra(QPU_W_NOP);
                        break;

                case QFILE_TEMP:
                        if (reg_allocated[qinst->dst.index] == -1) {
                                int alloc;
                                for (alloc = 0;
                                     alloc < ARRAY_SIZE(reg_in_use);
                                     alloc++) {
                                        /* The pack flags require an A-file register. */
                                        if (qinst->op == QOP_PACK_SCALED &&
                                            allocate_to_qpu_reg[alloc].mux != QPU_MUX_A) {
                                                continue;
                                        }

                                        if (!reg_in_use[alloc])
                                                break;
                                }
                                assert(alloc != ARRAY_SIZE(reg_in_use) && "need better reg alloc");
                                reg_in_use[alloc] = true;
                                reg_allocated[qinst->dst.index] = alloc;
                        }

                        dst = allocate_to_qpu_reg[reg_allocated[qinst->dst.index]];

                        reg_uses_remaining[qinst->dst.index]--;
                        if (reg_uses_remaining[qinst->dst.index] == 0) {
                                reg_in_use[reg_allocated[qinst->dst.index]] =
                                        false;
                        }
                        break;

                case QFILE_VARY:
                case QFILE_UNIF:
                        assert(!"not reached");
                        break;
                }
                switch (qinst->op) {
                case QOP_MOV:
                        /* Skip emitting the MOV if it's a no-op. */
                        if (dst.mux == QPU_MUX_A || dst.mux == QPU_MUX_B ||
                            dst.mux != src[0].mux || dst.addr != src[0].addr) {
                                queue(c, qpu_a_MOV(dst, src[0]));
                        }
                        break;

                case QOP_CMP:
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_NOP), src[0]));
                        *last_inst(c) |= QPU_SF;

                        if (dst.mux <= QPU_MUX_R3) {
                                fixup_raddr_conflict(c, src[1], &src[2]);
                                queue(c, qpu_inst(qpu_a_MOV(dst, src[1]),
                                                  qpu_m_MOV(dst, src[2])));
                                *last_inst(c) = qpu_set_cond_add(*last_inst(c),
                                                                 QPU_COND_NS);
                                *last_inst(c) = qpu_set_cond_mul(*last_inst(c),
                                                                 QPU_COND_NC);
                        } else {
                                if (dst.mux == src[1].mux &&
                                    dst.addr == src[1].addr) {
                                        queue(c, qpu_a_MOV(dst, src[1]));

                                        queue(c, qpu_a_MOV(dst, src[2]));
                                        *last_inst(c) = qpu_set_cond_add(*last_inst(c),
                                                                         QPU_COND_NC);
                                } else {
                                        queue(c, qpu_a_MOV(dst, src[2]));

                                        queue(c, qpu_a_MOV(dst, src[1]));
                                        *last_inst(c) = qpu_set_cond_add(*last_inst(c),
                                                                         QPU_COND_NS);
                                }
                        }
                        break;

                case QOP_SEQ:
                case QOP_SNE:
                case QOP_SLT:
                case QOP_SGE:
                        fixup_raddr_conflict(c, src[0], &src[1]);
                        queue(c, qpu_a_FSUB(qpu_ra(QPU_W_NOP), src[0], src[1]));
                        *last_inst(c) |= QPU_SF;

                        queue(c, qpu_load_imm_f(dst, 0.0));
                        queue(c, qpu_load_imm_f(dst, 1.0));
                        *last_inst(c) = qpu_set_cond_add(*last_inst(c),
                                                         compareflags[qinst->op - QOP_SEQ]);
                        break;

                case QOP_VPM_WRITE:
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_VPM), src[0]));
                        break;

                case QOP_VPM_READ:
                        queue(c, qpu_a_MOV(dst, qpu_ra(QPU_R_VPM)));
                        break;
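
                /* SFU ops: writing the operand to an SFU register starts
                 * the lookup, and the result lands in accumulator r4 two
                 * instructions later (the required NOP padding is inserted
                 * by serialize_insts()).
                 */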
                case QOP_RCP:
                case QOP_RSQ:
                case QOP_EXP2:
                case QOP_LOG2:
                        switch (qinst->op) {
                        case QOP_RCP:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIP),
                                                   src[0]));
                                break;
                        case QOP_RSQ:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIPSQRT),
                                                   src[0]));
                                break;
                        case QOP_EXP2:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_EXP),
                                                   src[0]));
                                break;
                        case QOP_LOG2:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_LOG),
                                                   src[0]));
                                break;
                        default:
                                abort();
                        }

                        queue(c, qpu_a_MOV(dst, qpu_r4()));

                        break;

                case QOP_PACK_COLORS:
                        for (int i = 0; i < 4; i++) {
                                queue(c, qpu_m_MOV(qpu_r3(), src[i]));
                                *last_inst(c) |= QPU_PM;
                                *last_inst(c) |= QPU_SET_FIELD(QPU_PACK_MUL_8A + i,
                                                               QPU_PACK);
                        }

                        queue(c, qpu_a_MOV(dst, qpu_r3()));

                        break;

                case QOP_FRAG_X:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_ra(QPU_R_XY_PIXEL_COORD)));
                        break;

                case QOP_FRAG_Y:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_rb(QPU_R_XY_PIXEL_COORD)));
                        break;

                case QOP_FRAG_Z:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_rb(QPU_R_FRAG_PAYLOAD_ZW)));
                        break;

                case QOP_FRAG_RCP_W:
                        queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIP),
                                           qpu_ra(QPU_R_FRAG_PAYLOAD_ZW)));

                        queue(c, qpu_a_MOV(dst, qpu_r4()));
                        break;

                case QOP_TLB_DISCARD_SETUP:
                        discard = true;
                        queue(c, qpu_a_MOV(src[0], src[0]));
                        *last_inst(c) |= QPU_SF;
                        break;
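
                /* Raising SF on the discard condition lets the TLB Z and
                 * color writes below be made conditional, so discarded
                 * pixels never update the tile buffer.
                 */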
                case QOP_TLB_PASSTHROUGH_Z_WRITE:
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_TLB_Z),
                                           qpu_rb(QPU_R_FRAG_PAYLOAD_ZW)));
                        if (discard) {
                                *last_inst(c) = qpu_set_cond_add(*last_inst(c),
                                                                 QPU_COND_ZS);
                        }
                        break;

                case QOP_TLB_COLOR_READ:
                        queue(c, qpu_NOP());
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_COLOR_LOAD);
                        break;

                case QOP_TLB_COLOR_WRITE:
                        queue(c, qpu_a_MOV(qpu_tlbc(), src[0]));
                        if (discard) {
                                *last_inst(c) = qpu_set_cond_add(*last_inst(c),
                                                                 QPU_COND_ZS);
                        }
                        break;

                case QOP_VARY_ADD_C:
                        queue(c, qpu_a_FADD(dst, src[0], qpu_r5()));
                        break;

                case QOP_PACK_SCALED: {
                        uint64_t a = (qpu_a_MOV(dst, src[0]) |
                                      QPU_SET_FIELD(QPU_PACK_A_16A,
                                                    QPU_PACK));
                        uint64_t b = (qpu_a_MOV(dst, src[1]) |
                                      QPU_SET_FIELD(QPU_PACK_A_16B,
                                                    QPU_PACK));

                        /* Emit the 16B half first if dst aliases src[1], so
                         * src[1] isn't clobbered before it's read.
                         */
                        if (dst.mux == src[1].mux && dst.addr == src[1].addr) {
                                queue(c, b);
                                queue(c, a);
                        } else {
                                queue(c, a);
                                queue(c, b);
                        }
                        break;
                }
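
                /* Texture lookups: the S/T/R/B coordinates go to the TMU0
                 * write addresses (which are consecutive, hence the offset
                 * from QPU_W_TMU0_S), and the result is fetched into r4 by
                 * the load-TMU signal in QOP_TEX_RESULT below.
                 */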
                case QOP_TEX_S:
                case QOP_TEX_T:
                case QOP_TEX_R:
                case QOP_TEX_B:
                        queue(c, qpu_a_MOV(qpu_rb(QPU_W_TMU0_S +
                                                  (qinst->op - QOP_TEX_S)),
                                           src[0]));
                        break;

                case QOP_TEX_RESULT:
                        queue(c, qpu_NOP());
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_LOAD_TMU0);
                        break;

                case QOP_R4_UNPACK_A:
                case QOP_R4_UNPACK_B:
                case QOP_R4_UNPACK_C:
                case QOP_R4_UNPACK_D:
                        queue(c, qpu_a_MOV(dst, qpu_r4()));
                        *last_inst(c) |= QPU_PM;
                        *last_inst(c) |= QPU_SET_FIELD(QPU_UNPACK_R4_8A +
                                                       (qinst->op -
                                                        QOP_R4_UNPACK_A),
                                                       QPU_UNPACK);
                        break;

                default:
                        assert(qinst->op < ARRAY_SIZE(translate));
                        assert(translate[qinst->op].op != 0); /* NOPs */

                        /* If we have only one source, put it in the second
                         * argument slot as well so that we don't take up
                         * another raddr just to get unused data.
                         */
                        if (qir_get_op_nsrc(qinst->op) == 1)
                                src[1] = src[0];

                        fixup_raddr_conflict(c, src[0], &src[1]);

                        if (translate[qinst->op].is_mul) {
                                queue(c, qpu_m_alu2(translate[qinst->op].op,
                                                    dst,
                                                    src[0], src[1]));
                        } else {
                                queue(c, qpu_a_alu2(translate[qinst->op].op,
                                                    dst,
                                                    src[0], src[1]));
                        }
                        break;
                }
        }

        serialize_insts(c);

        /* thread end can't have VPM write */
        if (QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_ADD) == QPU_W_VPM ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_MUL) == QPU_W_VPM) {
                serialize_one_inst(c, qpu_NOP());
        }
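
        /* Emit the program-end signal on the final instruction.  The two
         * NOPs queued after it are its delay slots: the instructions
         * following thread end still execute.
         */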
        c->qpu_insts[c->qpu_inst_count - 1] =
                qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                            QPU_SIG_PROG_END);
        serialize_one_inst(c, qpu_NOP());
        serialize_one_inst(c, qpu_NOP());

        switch (c->stage) {
        case QSTAGE_VERT:
        case QSTAGE_COORD:
                break;
        case QSTAGE_FRAG:
                c->qpu_insts[c->qpu_inst_count - 1] =
                        qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                                    QPU_SIG_SCOREBOARD_UNLOCK);
                break;
        }

        if (vc4_debug & VC4_DEBUG_QPU)
                vc4_dump_program(c);

        vc4_qpu_validate(c->qpu_insts, c->qpu_inst_count);
}