/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
25 #include "util/ralloc.h"
/* Encodes the mux selector for one ALU operand.  Small immediates are read
 * through the regfile-B mux, so QPU_MUX_SMALL_IMM is mapped to QPU_MUX_B.
 */
#define QPU_MUX(mux, muxfield) \
        QPU_SET_FIELD(mux != QPU_MUX_SMALL_IMM ? mux : QPU_MUX_B, muxfield)
33 set_src_raddr(uint64_t inst
, struct qpu_reg src
)
35 if (src
.mux
== QPU_MUX_A
) {
36 assert(QPU_GET_FIELD(inst
, QPU_RADDR_A
) == QPU_R_NOP
||
37 QPU_GET_FIELD(inst
, QPU_RADDR_A
) == src
.addr
);
38 return QPU_UPDATE_FIELD(inst
, src
.addr
, QPU_RADDR_A
);
41 if (src
.mux
== QPU_MUX_B
) {
42 assert((QPU_GET_FIELD(inst
, QPU_RADDR_B
) == QPU_R_NOP
||
43 QPU_GET_FIELD(inst
, QPU_RADDR_B
) == src
.addr
) &&
44 QPU_GET_FIELD(inst
, QPU_SIG
) != QPU_SIG_SMALL_IMM
);
45 return QPU_UPDATE_FIELD(inst
, src
.addr
, QPU_RADDR_B
);
48 if (src
.mux
== QPU_MUX_SMALL_IMM
) {
49 if (QPU_GET_FIELD(inst
, QPU_SIG
) == QPU_SIG_SMALL_IMM
) {
50 assert(QPU_GET_FIELD(inst
, QPU_RADDR_B
) == src
.addr
);
52 inst
= qpu_set_sig(inst
, QPU_SIG_SMALL_IMM
);
53 assert(QPU_GET_FIELD(inst
, QPU_RADDR_B
) == QPU_R_NOP
);
55 return ((inst
& ~QPU_RADDR_B_MASK
) |
56 QPU_SET_FIELD(src
.addr
, QPU_RADDR_B
));
67 inst
|= QPU_SET_FIELD(QPU_A_NOP
, QPU_OP_ADD
);
68 inst
|= QPU_SET_FIELD(QPU_M_NOP
, QPU_OP_MUL
);
70 /* Note: These field values are actually non-zero */
71 inst
|= QPU_SET_FIELD(QPU_W_NOP
, QPU_WADDR_ADD
);
72 inst
|= QPU_SET_FIELD(QPU_W_NOP
, QPU_WADDR_MUL
);
73 inst
|= QPU_SET_FIELD(QPU_R_NOP
, QPU_RADDR_A
);
74 inst
|= QPU_SET_FIELD(QPU_R_NOP
, QPU_RADDR_B
);
75 inst
|= QPU_SET_FIELD(QPU_SIG_NONE
, QPU_SIG
);
81 qpu_a_dst(struct qpu_reg dst
)
85 if (dst
.mux
<= QPU_MUX_R5
) {
86 /* Translate the mux to the ACCn values. */
87 inst
|= QPU_SET_FIELD(32 + dst
.mux
, QPU_WADDR_ADD
);
89 inst
|= QPU_SET_FIELD(dst
.addr
, QPU_WADDR_ADD
);
90 if (dst
.mux
== QPU_MUX_B
)
98 qpu_m_dst(struct qpu_reg dst
)
102 if (dst
.mux
<= QPU_MUX_R5
) {
103 /* Translate the mux to the ACCn values. */
104 inst
|= QPU_SET_FIELD(32 + dst
.mux
, QPU_WADDR_MUL
);
106 inst
|= QPU_SET_FIELD(dst
.addr
, QPU_WADDR_MUL
);
107 if (dst
.mux
== QPU_MUX_A
)
115 qpu_a_MOV(struct qpu_reg dst
, struct qpu_reg src
)
119 inst
|= QPU_SET_FIELD(QPU_SIG_NONE
, QPU_SIG
);
120 inst
|= QPU_SET_FIELD(QPU_A_OR
, QPU_OP_ADD
);
121 inst
|= QPU_SET_FIELD(QPU_R_NOP
, QPU_RADDR_A
);
122 inst
|= QPU_SET_FIELD(QPU_R_NOP
, QPU_RADDR_B
);
123 inst
|= qpu_a_dst(dst
);
124 inst
|= QPU_SET_FIELD(QPU_COND_ALWAYS
, QPU_COND_ADD
);
125 inst
|= QPU_MUX(src
.mux
, QPU_ADD_A
);
126 inst
|= QPU_MUX(src
.mux
, QPU_ADD_B
);
127 inst
= set_src_raddr(inst
, src
);
128 inst
|= QPU_SET_FIELD(QPU_W_NOP
, QPU_WADDR_MUL
);
134 qpu_m_MOV(struct qpu_reg dst
, struct qpu_reg src
)
138 inst
|= QPU_SET_FIELD(QPU_SIG_NONE
, QPU_SIG
);
139 inst
|= QPU_SET_FIELD(QPU_M_V8MIN
, QPU_OP_MUL
);
140 inst
|= QPU_SET_FIELD(QPU_R_NOP
, QPU_RADDR_A
);
141 inst
|= QPU_SET_FIELD(QPU_R_NOP
, QPU_RADDR_B
);
142 inst
|= qpu_m_dst(dst
);
143 inst
|= QPU_SET_FIELD(QPU_COND_ALWAYS
, QPU_COND_MUL
);
144 inst
|= QPU_MUX(src
.mux
, QPU_MUL_A
);
145 inst
|= QPU_MUX(src
.mux
, QPU_MUL_B
);
146 inst
= set_src_raddr(inst
, src
);
147 inst
|= QPU_SET_FIELD(QPU_W_NOP
, QPU_WADDR_ADD
);
153 qpu_load_imm_ui(struct qpu_reg dst
, uint32_t val
)
157 inst
|= qpu_a_dst(dst
);
158 inst
|= QPU_SET_FIELD(QPU_W_NOP
, QPU_WADDR_MUL
);
159 inst
|= QPU_SET_FIELD(QPU_COND_ALWAYS
, QPU_COND_ADD
);
160 inst
|= QPU_SET_FIELD(QPU_COND_ALWAYS
, QPU_COND_MUL
);
161 inst
|= QPU_SET_FIELD(QPU_SIG_LOAD_IMM
, QPU_SIG
);
168 qpu_a_alu2(enum qpu_op_add op
,
169 struct qpu_reg dst
, struct qpu_reg src0
, struct qpu_reg src1
)
173 inst
|= QPU_SET_FIELD(QPU_SIG_NONE
, QPU_SIG
);
174 inst
|= QPU_SET_FIELD(op
, QPU_OP_ADD
);
175 inst
|= QPU_SET_FIELD(QPU_R_NOP
, QPU_RADDR_A
);
176 inst
|= QPU_SET_FIELD(QPU_R_NOP
, QPU_RADDR_B
);
177 inst
|= qpu_a_dst(dst
);
178 inst
|= QPU_SET_FIELD(QPU_COND_ALWAYS
, QPU_COND_ADD
);
179 inst
|= QPU_MUX(src0
.mux
, QPU_ADD_A
);
180 inst
= set_src_raddr(inst
, src0
);
181 inst
|= QPU_MUX(src1
.mux
, QPU_ADD_B
);
182 inst
= set_src_raddr(inst
, src1
);
183 inst
|= QPU_SET_FIELD(QPU_W_NOP
, QPU_WADDR_MUL
);
189 qpu_m_alu2(enum qpu_op_mul op
,
190 struct qpu_reg dst
, struct qpu_reg src0
, struct qpu_reg src1
)
194 inst
|= QPU_SET_FIELD(QPU_SIG_NONE
, QPU_SIG
);
195 inst
|= QPU_SET_FIELD(op
, QPU_OP_MUL
);
196 inst
|= QPU_SET_FIELD(QPU_R_NOP
, QPU_RADDR_A
);
197 inst
|= QPU_SET_FIELD(QPU_R_NOP
, QPU_RADDR_B
);
198 inst
|= qpu_m_dst(dst
);
199 inst
|= QPU_SET_FIELD(QPU_COND_ALWAYS
, QPU_COND_MUL
);
200 inst
|= QPU_MUX(src0
.mux
, QPU_MUL_A
);
201 inst
= set_src_raddr(inst
, src0
);
202 inst
|= QPU_MUX(src1
.mux
, QPU_MUL_B
);
203 inst
= set_src_raddr(inst
, src1
);
204 inst
|= QPU_SET_FIELD(QPU_W_NOP
, QPU_WADDR_ADD
);
/**
 * Merges the bits selected by "mask" from "a" and "b" into *merge.
 *
 * If either instruction has the field at the "ignore" (don't-care) value,
 * the other instruction's field wins; otherwise the two fields must already
 * agree.
 *
 * Returns false when the fields conflict, leaving *merge unchanged.
 *
 * NOTE(review): the `return false;`/`return true;` lines were missing from
 * the truncated source and are reconstructed — verify against upstream.
 */
static bool
merge_fields(uint64_t *merge,
             uint64_t a, uint64_t b,
             uint64_t mask, uint64_t ignore)
{
        if ((a & mask) == ignore) {
                *merge = (*merge & ~mask) | (b & mask);
        } else if ((b & mask) == ignore) {
                *merge = (*merge & ~mask) | (a & mask);
        } else {
                if ((a & mask) != (b & mask))
                        return false;
        }

        return true;
}
227 qpu_num_sf_accesses(uint64_t inst
)
230 static const uint32_t specials
[] = {
247 uint32_t waddr_add
= QPU_GET_FIELD(inst
, QPU_WADDR_ADD
);
248 uint32_t waddr_mul
= QPU_GET_FIELD(inst
, QPU_WADDR_MUL
);
249 uint32_t raddr_a
= QPU_GET_FIELD(inst
, QPU_RADDR_A
);
250 uint32_t raddr_b
= QPU_GET_FIELD(inst
, QPU_RADDR_B
);
252 for (int j
= 0; j
< ARRAY_SIZE(specials
); j
++) {
253 if (waddr_add
== specials
[j
])
255 if (waddr_mul
== specials
[j
])
259 if (raddr_a
== QPU_R_MUTEX_ACQUIRE
)
261 if (raddr_b
== QPU_R_MUTEX_ACQUIRE
&&
262 QPU_GET_FIELD(inst
, QPU_SIG
) != QPU_SIG_SMALL_IMM
)
265 /* XXX: semaphore, combined color read/write? */
266 switch (QPU_GET_FIELD(inst
, QPU_SIG
)) {
267 case QPU_SIG_COLOR_LOAD
:
268 case QPU_SIG_COLOR_LOAD_END
:
269 case QPU_SIG_LOAD_TMU0
:
270 case QPU_SIG_LOAD_TMU1
:
278 qpu_waddr_ignores_ws(uint32_t waddr
)
286 case QPU_W_TLB_COLOR_MS
:
287 case QPU_W_TLB_COLOR_ALL
:
288 case QPU_W_TLB_ALPHA_MASK
:
290 case QPU_W_SFU_RECIP
:
291 case QPU_W_SFU_RECIPSQRT
:
309 swap_ra_file_mux_helper(uint64_t *merge
, uint64_t *a
, uint32_t mux_shift
)
311 uint64_t mux_mask
= (uint64_t)0x7 << mux_shift
;
312 uint64_t mux_a_val
= (uint64_t)QPU_MUX_A
<< mux_shift
;
313 uint64_t mux_b_val
= (uint64_t)QPU_MUX_B
<< mux_shift
;
315 if ((*a
& mux_mask
) == mux_a_val
) {
316 *a
= (*a
& ~mux_mask
) | mux_b_val
;
317 *merge
= (*merge
& ~mux_mask
) | mux_b_val
;
322 try_swap_ra_file(uint64_t *merge
, uint64_t *a
, uint64_t *b
)
324 uint32_t raddr_a_a
= QPU_GET_FIELD(*a
, QPU_RADDR_A
);
325 uint32_t raddr_a_b
= QPU_GET_FIELD(*a
, QPU_RADDR_B
);
326 uint32_t raddr_b_a
= QPU_GET_FIELD(*b
, QPU_RADDR_A
);
327 uint32_t raddr_b_b
= QPU_GET_FIELD(*b
, QPU_RADDR_B
);
329 if (raddr_a_b
!= QPU_R_NOP
)
340 if (raddr_b_b
!= QPU_R_NOP
&&
341 raddr_b_b
!= raddr_a_a
)
344 /* Move raddr A to B in instruction a. */
345 *a
= (*a
& ~QPU_RADDR_A_MASK
) | QPU_SET_FIELD(QPU_R_NOP
, QPU_RADDR_A
);
346 *a
= (*a
& ~QPU_RADDR_B_MASK
) | QPU_SET_FIELD(raddr_a_a
, QPU_RADDR_B
);
347 *merge
= QPU_UPDATE_FIELD(*merge
, raddr_b_a
, QPU_RADDR_A
);
348 *merge
= QPU_UPDATE_FIELD(*merge
, raddr_a_a
, QPU_RADDR_B
);
349 swap_ra_file_mux_helper(merge
, a
, QPU_ADD_A_SHIFT
);
350 swap_ra_file_mux_helper(merge
, a
, QPU_ADD_B_SHIFT
);
351 swap_ra_file_mux_helper(merge
, a
, QPU_MUL_A_SHIFT
);
352 swap_ra_file_mux_helper(merge
, a
, QPU_MUL_B_SHIFT
);
358 convert_mov(uint64_t *inst
)
360 uint32_t add_a
= QPU_GET_FIELD(*inst
, QPU_ADD_A
);
361 uint32_t waddr_add
= QPU_GET_FIELD(*inst
, QPU_WADDR_ADD
);
362 uint32_t cond_add
= QPU_GET_FIELD(*inst
, QPU_COND_ADD
);
365 if (QPU_GET_FIELD(*inst
, QPU_OP_ADD
) != QPU_A_OR
||
366 (add_a
!= QPU_GET_FIELD(*inst
, QPU_ADD_B
))) {
370 if (QPU_GET_FIELD(*inst
, QPU_SIG
) != QPU_SIG_NONE
)
373 /* We could maybe support this in the .8888 and .8a-.8d cases. */
377 *inst
= QPU_UPDATE_FIELD(*inst
, QPU_A_NOP
, QPU_OP_ADD
);
378 *inst
= QPU_UPDATE_FIELD(*inst
, QPU_M_V8MIN
, QPU_OP_MUL
);
380 *inst
= QPU_UPDATE_FIELD(*inst
, add_a
, QPU_MUL_A
);
381 *inst
= QPU_UPDATE_FIELD(*inst
, add_a
, QPU_MUL_B
);
382 *inst
= QPU_UPDATE_FIELD(*inst
, QPU_MUX_R0
, QPU_ADD_A
);
383 *inst
= QPU_UPDATE_FIELD(*inst
, QPU_MUX_R0
, QPU_ADD_B
);
385 *inst
= QPU_UPDATE_FIELD(*inst
, waddr_add
, QPU_WADDR_MUL
);
386 *inst
= QPU_UPDATE_FIELD(*inst
, QPU_W_NOP
, QPU_WADDR_ADD
);
388 *inst
= QPU_UPDATE_FIELD(*inst
, cond_add
, QPU_COND_MUL
);
389 *inst
= QPU_UPDATE_FIELD(*inst
, QPU_COND_NEVER
, QPU_COND_ADD
);
391 if (!qpu_waddr_ignores_ws(waddr_add
))
398 qpu_merge_inst(uint64_t a
, uint64_t b
)
400 uint64_t merge
= a
| b
;
402 uint32_t a_sig
= QPU_GET_FIELD(a
, QPU_SIG
);
403 uint32_t b_sig
= QPU_GET_FIELD(b
, QPU_SIG
);
405 if (QPU_GET_FIELD(a
, QPU_OP_ADD
) != QPU_A_NOP
&&
406 QPU_GET_FIELD(b
, QPU_OP_ADD
) != QPU_A_NOP
) {
407 if (QPU_GET_FIELD(a
, QPU_OP_MUL
) != QPU_M_NOP
||
408 QPU_GET_FIELD(b
, QPU_OP_MUL
) != QPU_M_NOP
||
409 !(convert_mov(&a
) || convert_mov(&b
))) {
416 if (QPU_GET_FIELD(a
, QPU_OP_MUL
) != QPU_M_NOP
&&
417 QPU_GET_FIELD(b
, QPU_OP_MUL
) != QPU_M_NOP
)
420 if (qpu_num_sf_accesses(a
) && qpu_num_sf_accesses(b
))
423 if (a_sig
== QPU_SIG_LOAD_IMM
||
424 b_sig
== QPU_SIG_LOAD_IMM
||
425 a_sig
== QPU_SIG_SMALL_IMM
||
426 b_sig
== QPU_SIG_SMALL_IMM
) {
430 ok
= ok
&& merge_fields(&merge
, a
, b
, QPU_SIG_MASK
,
431 QPU_SET_FIELD(QPU_SIG_NONE
, QPU_SIG
));
433 /* Misc fields that have to match exactly. */
434 ok
= ok
&& merge_fields(&merge
, a
, b
, QPU_SF
| QPU_PM
,
437 if (!merge_fields(&merge
, a
, b
, QPU_RADDR_A_MASK
,
438 QPU_SET_FIELD(QPU_R_NOP
, QPU_RADDR_A
))) {
439 /* Since we tend to use regfile A by default both for register
440 * allocation and for our special values (uniforms and
441 * varyings), try swapping uniforms and varyings to regfile B
442 * to resolve raddr A conflicts.
444 if (!try_swap_ra_file(&merge
, &a
, &b
) &&
445 !try_swap_ra_file(&merge
, &b
, &a
)) {
450 ok
= ok
&& merge_fields(&merge
, a
, b
, QPU_RADDR_B_MASK
,
451 QPU_SET_FIELD(QPU_R_NOP
, QPU_RADDR_B
));
453 ok
= ok
&& merge_fields(&merge
, a
, b
, QPU_WADDR_ADD_MASK
,
454 QPU_SET_FIELD(QPU_W_NOP
, QPU_WADDR_ADD
));
455 ok
= ok
&& merge_fields(&merge
, a
, b
, QPU_WADDR_MUL_MASK
,
456 QPU_SET_FIELD(QPU_W_NOP
, QPU_WADDR_MUL
));
458 /* Allow disagreement on WS (swapping A vs B physical reg file as the
459 * destination for ADD/MUL) if one of the original instructions
460 * ignores it (probably because it's just writing to accumulators).
462 if (qpu_waddr_ignores_ws(QPU_GET_FIELD(a
, QPU_WADDR_ADD
)) &&
463 qpu_waddr_ignores_ws(QPU_GET_FIELD(a
, QPU_WADDR_MUL
))) {
464 merge
= (merge
& ~QPU_WS
) | (b
& QPU_WS
);
465 } else if (qpu_waddr_ignores_ws(QPU_GET_FIELD(b
, QPU_WADDR_ADD
)) &&
466 qpu_waddr_ignores_ws(QPU_GET_FIELD(b
, QPU_WADDR_MUL
))) {
467 merge
= (merge
& ~QPU_WS
) | (a
& QPU_WS
);
469 if ((a
& QPU_WS
) != (b
& QPU_WS
))
480 qpu_set_sig(uint64_t inst
, uint32_t sig
)
482 assert(QPU_GET_FIELD(inst
, QPU_SIG
) == QPU_SIG_NONE
);
483 return QPU_UPDATE_FIELD(inst
, sig
, QPU_SIG
);
487 qpu_set_cond_add(uint64_t inst
, uint32_t cond
)
489 assert(QPU_GET_FIELD(inst
, QPU_COND_ADD
) == QPU_COND_ALWAYS
);
490 return QPU_UPDATE_FIELD(inst
, cond
, QPU_COND_ADD
);
494 qpu_set_cond_mul(uint64_t inst
, uint32_t cond
)
496 assert(QPU_GET_FIELD(inst
, QPU_COND_MUL
) == QPU_COND_ALWAYS
);
497 return QPU_UPDATE_FIELD(inst
, cond
, QPU_COND_MUL
);
501 qpu_waddr_is_tlb(uint32_t waddr
)
504 case QPU_W_TLB_COLOR_ALL
:
505 case QPU_W_TLB_COLOR_MS
:
514 qpu_inst_is_tlb(uint64_t inst
)
516 uint32_t sig
= QPU_GET_FIELD(inst
, QPU_SIG
);
518 return (qpu_waddr_is_tlb(QPU_GET_FIELD(inst
, QPU_WADDR_ADD
)) ||
519 qpu_waddr_is_tlb(QPU_GET_FIELD(inst
, QPU_WADDR_MUL
)) ||
520 sig
== QPU_SIG_COLOR_LOAD
||
521 sig
== QPU_SIG_WAIT_FOR_SCOREBOARD
);
/**
 * Returns the small immediate value to be encoded in to the raddr b field if
 * the argument can be represented as one, or ~0 otherwise.
 *
 * NOTE(review): the 0..15 branch and the return values were missing from
 * the truncated source and are reconstructed per the contract above (small
 * immediates 0-15 encode as themselves, -16..-1 as 16-31) — verify.
 */
uint32_t
qpu_encode_small_immediate(uint32_t i)
{
        if (i <= 15)
                return i;
        /* -16..-1 encode as 16..31 (i + 32 wraps the two's complement). */
        if ((int)i < 0 && (int)i >= -16)
                return i + 32;

        return ~0;
}
575 qpu_serialize_one_inst(struct vc4_compile
*c
, uint64_t inst
)
577 if (c
->qpu_inst_count
>= c
->qpu_inst_size
) {
578 c
->qpu_inst_size
= MAX2(16, c
->qpu_inst_size
* 2);
579 c
->qpu_insts
= reralloc(c
, c
->qpu_insts
,
580 uint64_t, c
->qpu_inst_size
);
582 c
->qpu_insts
[c
->qpu_inst_count
++] = inst
;