2 * Copyright © 2016 Broadcom
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 #include "util/macros.h"
26 #include "broadcom/common/v3d_device_info.h"
27 #include "qpu_instr.h"
/* Helpers for packing/unpacking fields of the 64-bit QPU instruction word. */

/* Bitmask covering bits [high:low] (inclusive) of a 64-bit word. */
#define QPU_MASK(high, low) ((((uint64_t)1<<((high)-(low)+1))-1)<<(low))
/* Using the GNU statement expression extension */
#define QPU_SET_FIELD(value, field)                                       \
        ({                                                                \
                uint64_t fieldval = (uint64_t)(value) << field ## _SHIFT; \
                assert((fieldval & ~ field ## _MASK) == 0);               \
                fieldval & field ## _MASK;                                \
        })

/* Extracts a field from an instruction word, shifted back down to bit 0. */
#define QPU_GET_FIELD(word, field) ((uint32_t)(((word) & field ## _MASK) >> field ## _SHIFT))

/* Replaces the current contents of a field with the given value. */
#define QPU_UPDATE_FIELD(inst, value, field) \
        (((inst) & ~(field ## _MASK)) | QPU_SET_FIELD(value, field))
/* Bit positions of the fields inside the 64-bit QPU instruction encoding.
 * ALU and branch instructions overlap: fields below bit 46 are reused by
 * the branch encoding (BRANCH_* names).
 */
#define VC5_QPU_OP_MUL_SHIFT            58
#define VC5_QPU_OP_MUL_MASK             QPU_MASK(63, 58)

#define VC5_QPU_SIG_SHIFT               53
#define VC5_QPU_SIG_MASK                QPU_MASK(57, 53)

#define VC5_QPU_COND_SHIFT              46
#define VC5_QPU_COND_MASK               QPU_MASK(52, 46)

/* Encodings of the instruction-level condition field. */
#define VC5_QPU_COND_IFA                0
#define VC5_QPU_COND_IFB                1
#define VC5_QPU_COND_IFNA               2
#define VC5_QPU_COND_IFNB               3

/* Single-bit "magic write" flags for the MUL and ADD waddr fields. */
#define VC5_QPU_MM                      QPU_MASK(45, 45)
#define VC5_QPU_MA                      QPU_MASK(44, 44)

#define V3D_QPU_WADDR_M_SHIFT           38
#define V3D_QPU_WADDR_M_MASK            QPU_MASK(43, 38)

#define VC5_QPU_BRANCH_ADDR_LOW_SHIFT   35
#define VC5_QPU_BRANCH_ADDR_LOW_MASK    QPU_MASK(55, 35)

#define V3D_QPU_WADDR_A_SHIFT           32
#define V3D_QPU_WADDR_A_MASK            QPU_MASK(37, 32)

#define VC5_QPU_BRANCH_COND_SHIFT       32
#define VC5_QPU_BRANCH_COND_MASK        QPU_MASK(34, 32)

#define VC5_QPU_BRANCH_ADDR_HIGH_SHIFT  24
#define VC5_QPU_BRANCH_ADDR_HIGH_MASK   QPU_MASK(31, 24)

#define VC5_QPU_OP_ADD_SHIFT            24
#define VC5_QPU_OP_ADD_MASK             QPU_MASK(31, 24)

#define VC5_QPU_MUL_B_SHIFT             21
#define VC5_QPU_MUL_B_MASK              QPU_MASK(23, 21)

#define VC5_QPU_BRANCH_MSFIGN_SHIFT     21
#define VC5_QPU_BRANCH_MSFIGN_MASK      QPU_MASK(22, 21)

#define VC5_QPU_MUL_A_SHIFT             18
#define VC5_QPU_MUL_A_MASK              QPU_MASK(20, 18)

#define VC5_QPU_ADD_B_SHIFT             15
#define VC5_QPU_ADD_B_MASK              QPU_MASK(17, 15)

#define VC5_QPU_BRANCH_BDU_SHIFT        15
#define VC5_QPU_BRANCH_BDU_MASK         QPU_MASK(17, 15)

#define VC5_QPU_BRANCH_UB               QPU_MASK(14, 14)

#define VC5_QPU_ADD_A_SHIFT             12
#define VC5_QPU_ADD_A_MASK              QPU_MASK(14, 12)

#define VC5_QPU_BRANCH_BDI_SHIFT        12
#define VC5_QPU_BRANCH_BDI_MASK         QPU_MASK(13, 12)

#define VC5_QPU_RADDR_A_SHIFT           6
#define VC5_QPU_RADDR_A_MASK            QPU_MASK(11, 6)

#define VC5_QPU_RADDR_B_SHIFT           0
#define VC5_QPU_RADDR_B_MASK            QPU_MASK(5, 0)
110 v3d_qpu_magic_waddr_name(enum v3d_qpu_waddr waddr
)
112 static const char *waddr_magic
[] = {
113 [V3D_QPU_WADDR_R0
] = "r0",
114 [V3D_QPU_WADDR_R1
] = "r1",
115 [V3D_QPU_WADDR_R2
] = "r2",
116 [V3D_QPU_WADDR_R3
] = "r3",
117 [V3D_QPU_WADDR_R4
] = "r4",
118 [V3D_QPU_WADDR_R5
] = "r5",
119 [V3D_QPU_WADDR_NOP
] = "-",
120 [V3D_QPU_WADDR_TLB
] = "tlb",
121 [V3D_QPU_WADDR_TLBU
] = "tlbu",
122 [V3D_QPU_WADDR_TMU
] = "tmu",
123 [V3D_QPU_WADDR_TMUL
] = "tmul",
124 [V3D_QPU_WADDR_TMUD
] = "tmud",
125 [V3D_QPU_WADDR_TMUA
] = "tmua",
126 [V3D_QPU_WADDR_TMUAU
] = "tmuau",
127 [V3D_QPU_WADDR_VPM
] = "vpm",
128 [V3D_QPU_WADDR_VPMU
] = "vpmu",
129 [V3D_QPU_WADDR_SYNC
] = "sync",
130 [V3D_QPU_WADDR_SYNCU
] = "syncu",
131 [V3D_QPU_WADDR_RECIP
] = "recip",
132 [V3D_QPU_WADDR_RSQRT
] = "rsqrt",
133 [V3D_QPU_WADDR_EXP
] = "exp",
134 [V3D_QPU_WADDR_LOG
] = "log",
135 [V3D_QPU_WADDR_SIN
] = "sin",
136 [V3D_QPU_WADDR_RSQRT2
] = "rsqrt2",
139 return waddr_magic
[waddr
];
143 v3d_qpu_add_op_name(enum v3d_qpu_add_op op
)
145 static const char *op_names
[] = {
146 [V3D_QPU_A_FADD
] = "fadd",
147 [V3D_QPU_A_FADDNF
] = "faddnf",
148 [V3D_QPU_A_VFPACK
] = "vfpack",
149 [V3D_QPU_A_ADD
] = "add",
150 [V3D_QPU_A_SUB
] = "sub",
151 [V3D_QPU_A_FSUB
] = "fsub",
152 [V3D_QPU_A_MIN
] = "min",
153 [V3D_QPU_A_MAX
] = "max",
154 [V3D_QPU_A_UMIN
] = "umin",
155 [V3D_QPU_A_UMAX
] = "umax",
156 [V3D_QPU_A_SHL
] = "shl",
157 [V3D_QPU_A_SHR
] = "shr",
158 [V3D_QPU_A_ASR
] = "asr",
159 [V3D_QPU_A_ROR
] = "ror",
160 [V3D_QPU_A_FMIN
] = "fmin",
161 [V3D_QPU_A_FMAX
] = "fmax",
162 [V3D_QPU_A_VFMIN
] = "vfmin",
163 [V3D_QPU_A_AND
] = "and",
164 [V3D_QPU_A_OR
] = "or",
165 [V3D_QPU_A_XOR
] = "xor",
166 [V3D_QPU_A_VADD
] = "vadd",
167 [V3D_QPU_A_VSUB
] = "vsub",
168 [V3D_QPU_A_NOT
] = "not",
169 [V3D_QPU_A_NEG
] = "neg",
170 [V3D_QPU_A_FLAPUSH
] = "flapush",
171 [V3D_QPU_A_FLBPUSH
] = "flbpush",
172 [V3D_QPU_A_FLBPOP
] = "flbpop",
173 [V3D_QPU_A_SETMSF
] = "setmsf",
174 [V3D_QPU_A_SETREVF
] = "setrevf",
175 [V3D_QPU_A_NOP
] = "nop",
176 [V3D_QPU_A_TIDX
] = "tidx",
177 [V3D_QPU_A_EIDX
] = "eidx",
178 [V3D_QPU_A_LR
] = "lr",
179 [V3D_QPU_A_VFLA
] = "vfla",
180 [V3D_QPU_A_VFLNA
] = "vflna",
181 [V3D_QPU_A_VFLB
] = "vflb",
182 [V3D_QPU_A_VFLNB
] = "vflnb",
183 [V3D_QPU_A_FXCD
] = "fxcd",
184 [V3D_QPU_A_XCD
] = "xcd",
185 [V3D_QPU_A_FYCD
] = "fycd",
186 [V3D_QPU_A_YCD
] = "ycd",
187 [V3D_QPU_A_MSF
] = "msf",
188 [V3D_QPU_A_REVF
] = "revf",
189 [V3D_QPU_A_VDWWT
] = "vdwwt",
190 [V3D_QPU_A_IID
] = "iid",
191 [V3D_QPU_A_SAMPID
] = "sampid",
192 [V3D_QPU_A_PATCHID
] = "patchid",
193 [V3D_QPU_A_TMUWT
] = "tmuwt",
194 [V3D_QPU_A_VPMSETUP
] = "vpmsetup",
195 [V3D_QPU_A_VPMWT
] = "vpmwt",
196 [V3D_QPU_A_LDVPMV
] = "ldvpmv",
197 [V3D_QPU_A_LDVPMD
] = "ldvpmd",
198 [V3D_QPU_A_LDVPMP
] = "ldvpmp",
199 [V3D_QPU_A_LDVPMG
] = "ldvpmg",
200 [V3D_QPU_A_FCMP
] = "fcmp",
201 [V3D_QPU_A_VFMAX
] = "vfmax",
202 [V3D_QPU_A_FROUND
] = "fround",
203 [V3D_QPU_A_FTOIN
] = "ftoin",
204 [V3D_QPU_A_FTRUNC
] = "ftrunc",
205 [V3D_QPU_A_FTOIZ
] = "ftoiz",
206 [V3D_QPU_A_FFLOOR
] = "ffloor",
207 [V3D_QPU_A_FTOUZ
] = "ftouz",
208 [V3D_QPU_A_FCEIL
] = "fceil",
209 [V3D_QPU_A_FTOC
] = "ftoc",
210 [V3D_QPU_A_FDX
] = "fdx",
211 [V3D_QPU_A_FDY
] = "fdy",
212 [V3D_QPU_A_STVPMV
] = "stvpmv",
213 [V3D_QPU_A_STVPMD
] = "stvpmd",
214 [V3D_QPU_A_STVPMP
] = "stvpmp",
215 [V3D_QPU_A_ITOF
] = "itof",
216 [V3D_QPU_A_CLZ
] = "clz",
217 [V3D_QPU_A_UTOF
] = "utof",
220 if (op
>= ARRAY_SIZE(op_names
))
227 v3d_qpu_mul_op_name(enum v3d_qpu_mul_op op
)
229 static const char *op_names
[] = {
230 [V3D_QPU_M_ADD
] = "add",
231 [V3D_QPU_M_SUB
] = "sub",
232 [V3D_QPU_M_UMUL24
] = "umul24",
233 [V3D_QPU_M_VFMUL
] = "vfmul",
234 [V3D_QPU_M_SMUL24
] = "smul24",
235 [V3D_QPU_M_MULTOP
] = "multop",
236 [V3D_QPU_M_FMOV
] = "fmov",
237 [V3D_QPU_M_MOV
] = "mov",
238 [V3D_QPU_M_NOP
] = "nop",
239 [V3D_QPU_M_FMUL
] = "fmul",
242 if (op
>= ARRAY_SIZE(op_names
))
249 v3d_qpu_cond_name(enum v3d_qpu_cond cond
)
252 case V3D_QPU_COND_NONE
:
254 case V3D_QPU_COND_IFA
:
256 case V3D_QPU_COND_IFB
:
258 case V3D_QPU_COND_IFNA
:
260 case V3D_QPU_COND_IFNB
:
263 unreachable("bad cond value");
268 v3d_qpu_branch_cond_name(enum v3d_qpu_branch_cond cond
)
271 case V3D_QPU_BRANCH_COND_ALWAYS
:
273 case V3D_QPU_BRANCH_COND_A0
:
275 case V3D_QPU_BRANCH_COND_NA0
:
277 case V3D_QPU_BRANCH_COND_ALLA
:
279 case V3D_QPU_BRANCH_COND_ANYNA
:
281 case V3D_QPU_BRANCH_COND_ANYA
:
283 case V3D_QPU_BRANCH_COND_ALLNA
:
286 unreachable("bad branch cond value");
291 v3d_qpu_msfign_name(enum v3d_qpu_msfign msfign
)
294 case V3D_QPU_MSFIGN_NONE
:
296 case V3D_QPU_MSFIGN_P
:
298 case V3D_QPU_MSFIGN_Q
:
301 unreachable("bad branch cond value");
306 v3d_qpu_pf_name(enum v3d_qpu_pf pf
)
309 case V3D_QPU_PF_NONE
:
311 case V3D_QPU_PF_PUSHZ
:
313 case V3D_QPU_PF_PUSHN
:
315 case V3D_QPU_PF_PUSHC
:
318 unreachable("bad pf value");
323 v3d_qpu_uf_name(enum v3d_qpu_uf uf
)
326 case V3D_QPU_UF_NONE
:
328 case V3D_QPU_UF_ANDZ
:
330 case V3D_QPU_UF_ANDNZ
:
332 case V3D_QPU_UF_NORZ
:
334 case V3D_QPU_UF_NORNZ
:
336 case V3D_QPU_UF_ANDN
:
338 case V3D_QPU_UF_ANDNN
:
340 case V3D_QPU_UF_NORN
:
342 case V3D_QPU_UF_NORNN
:
344 case V3D_QPU_UF_ANDC
:
346 case V3D_QPU_UF_ANDNC
:
348 case V3D_QPU_UF_NORC
:
350 case V3D_QPU_UF_NORNC
:
353 unreachable("bad pf value");
358 v3d_qpu_pack_name(enum v3d_qpu_output_pack pack
)
361 case V3D_QPU_PACK_NONE
:
368 unreachable("bad pack value");
373 v3d_qpu_unpack_name(enum v3d_qpu_input_unpack unpack
)
376 case V3D_QPU_UNPACK_NONE
:
378 case V3D_QPU_UNPACK_L
:
380 case V3D_QPU_UNPACK_H
:
382 case V3D_QPU_UNPACK_ABS
:
384 case V3D_QPU_UNPACK_REPLICATE_32F_16
:
386 case V3D_QPU_UNPACK_REPLICATE_L_16
:
388 case V3D_QPU_UNPACK_REPLICATE_H_16
:
390 case V3D_QPU_UNPACK_SWAP_16
:
393 unreachable("bad unpack value");
400 static const uint8_t add_op_args
[] = {
401 [V3D_QPU_A_FADD
] = D
| A
| B
,
402 [V3D_QPU_A_FADDNF
] = D
| A
| B
,
403 [V3D_QPU_A_VFPACK
] = D
| A
| B
,
404 [V3D_QPU_A_ADD
] = D
| A
| B
,
405 [V3D_QPU_A_VFPACK
] = D
| A
| B
,
406 [V3D_QPU_A_SUB
] = D
| A
| B
,
407 [V3D_QPU_A_VFPACK
] = D
| A
| B
,
408 [V3D_QPU_A_FSUB
] = D
| A
| B
,
409 [V3D_QPU_A_MIN
] = D
| A
| B
,
410 [V3D_QPU_A_MAX
] = D
| A
| B
,
411 [V3D_QPU_A_UMIN
] = D
| A
| B
,
412 [V3D_QPU_A_UMAX
] = D
| A
| B
,
413 [V3D_QPU_A_SHL
] = D
| A
| B
,
414 [V3D_QPU_A_SHR
] = D
| A
| B
,
415 [V3D_QPU_A_ASR
] = D
| A
| B
,
416 [V3D_QPU_A_ROR
] = D
| A
| B
,
417 [V3D_QPU_A_FMIN
] = D
| A
| B
,
418 [V3D_QPU_A_FMAX
] = D
| A
| B
,
419 [V3D_QPU_A_VFMIN
] = D
| A
| B
,
421 [V3D_QPU_A_AND
] = D
| A
| B
,
422 [V3D_QPU_A_OR
] = D
| A
| B
,
423 [V3D_QPU_A_XOR
] = D
| A
| B
,
425 [V3D_QPU_A_VADD
] = D
| A
| B
,
426 [V3D_QPU_A_VSUB
] = D
| A
| B
,
427 [V3D_QPU_A_NOT
] = D
| A
,
428 [V3D_QPU_A_NEG
] = D
| A
,
429 [V3D_QPU_A_FLAPUSH
] = D
| A
,
430 [V3D_QPU_A_FLBPUSH
] = D
| A
,
431 [V3D_QPU_A_FLBPOP
] = D
| A
,
432 [V3D_QPU_A_SETMSF
] = D
| A
,
433 [V3D_QPU_A_SETREVF
] = D
| A
,
435 [V3D_QPU_A_TIDX
] = D
,
436 [V3D_QPU_A_EIDX
] = D
,
438 [V3D_QPU_A_VFLA
] = D
,
439 [V3D_QPU_A_VFLNA
] = D
,
440 [V3D_QPU_A_VFLB
] = D
,
441 [V3D_QPU_A_VFLNB
] = D
,
443 [V3D_QPU_A_FXCD
] = D
,
445 [V3D_QPU_A_FYCD
] = D
,
449 [V3D_QPU_A_REVF
] = D
,
450 [V3D_QPU_A_VDWWT
] = D
,
452 [V3D_QPU_A_SAMPID
] = D
,
453 [V3D_QPU_A_PATCHID
] = D
,
454 [V3D_QPU_A_TMUWT
] = D
,
455 [V3D_QPU_A_VPMWT
] = D
,
457 [V3D_QPU_A_VPMSETUP
] = D
| A
,
459 [V3D_QPU_A_LDVPMV
] = D
| A
,
460 [V3D_QPU_A_LDVPMD
] = D
| A
,
461 [V3D_QPU_A_LDVPMP
] = D
| A
,
462 [V3D_QPU_A_LDVPMG
] = D
| A
| B
,
464 /* FIXME: MOVABSNEG */
466 [V3D_QPU_A_FCMP
] = D
| A
| B
,
467 [V3D_QPU_A_VFMAX
] = D
| A
| B
,
469 [V3D_QPU_A_FROUND
] = D
| A
,
470 [V3D_QPU_A_FTOIN
] = D
| A
,
471 [V3D_QPU_A_FTRUNC
] = D
| A
,
472 [V3D_QPU_A_FTOIZ
] = D
| A
,
473 [V3D_QPU_A_FFLOOR
] = D
| A
,
474 [V3D_QPU_A_FTOUZ
] = D
| A
,
475 [V3D_QPU_A_FCEIL
] = D
| A
,
476 [V3D_QPU_A_FTOC
] = D
| A
,
478 [V3D_QPU_A_FDX
] = D
| A
,
479 [V3D_QPU_A_FDY
] = D
| A
,
481 [V3D_QPU_A_STVPMV
] = A
| B
,
482 [V3D_QPU_A_STVPMD
] = A
| B
,
483 [V3D_QPU_A_STVPMP
] = A
| B
,
485 [V3D_QPU_A_ITOF
] = D
| A
,
486 [V3D_QPU_A_CLZ
] = D
| A
,
487 [V3D_QPU_A_UTOF
] = D
| A
,
490 static const uint8_t mul_op_args
[] = {
491 [V3D_QPU_M_ADD
] = D
| A
| B
,
492 [V3D_QPU_M_SUB
] = D
| A
| B
,
493 [V3D_QPU_M_UMUL24
] = D
| A
| B
,
494 [V3D_QPU_M_VFMUL
] = D
| A
| B
,
495 [V3D_QPU_M_SMUL24
] = D
| A
| B
,
496 [V3D_QPU_M_MULTOP
] = D
| A
| B
,
497 [V3D_QPU_M_FMOV
] = D
| A
,
499 [V3D_QPU_M_MOV
] = D
| A
,
500 [V3D_QPU_M_FMUL
] = D
| A
| B
,
504 v3d_qpu_add_op_has_dst(enum v3d_qpu_add_op op
)
506 assert(op
< ARRAY_SIZE(add_op_args
));
508 return add_op_args
[op
] & D
;
512 v3d_qpu_mul_op_has_dst(enum v3d_qpu_mul_op op
)
514 assert(op
< ARRAY_SIZE(mul_op_args
));
516 return mul_op_args
[op
] & D
;
520 v3d_qpu_add_op_num_src(enum v3d_qpu_add_op op
)
522 assert(op
< ARRAY_SIZE(add_op_args
));
524 uint8_t args
= add_op_args
[op
];
534 v3d_qpu_mul_op_num_src(enum v3d_qpu_mul_op op
)
536 assert(op
< ARRAY_SIZE(mul_op_args
));
538 uint8_t args
= mul_op_args
[op
];
548 v3d_qpu_magic_waddr_is_sfu(enum v3d_qpu_waddr waddr
)
551 case V3D_QPU_WADDR_RECIP
:
552 case V3D_QPU_WADDR_RSQRT
:
553 case V3D_QPU_WADDR_EXP
:
554 case V3D_QPU_WADDR_LOG
:
555 case V3D_QPU_WADDR_SIN
:
556 case V3D_QPU_WADDR_RSQRT2
:
564 v3d_qpu_magic_waddr_is_tmu(enum v3d_qpu_waddr waddr
)
567 case V3D_QPU_WADDR_TMU
:
568 case V3D_QPU_WADDR_TMUL
:
569 case V3D_QPU_WADDR_TMUD
:
570 case V3D_QPU_WADDR_TMUA
:
571 case V3D_QPU_WADDR_TMUAU
:
579 v3d_qpu_magic_waddr_is_tlb(enum v3d_qpu_waddr waddr
)
581 return (waddr
== V3D_QPU_WADDR_TLB
||
582 waddr
== V3D_QPU_WADDR_TLBU
);
586 v3d_qpu_magic_waddr_is_vpm(enum v3d_qpu_waddr waddr
)
588 return (waddr
== V3D_QPU_WADDR_VPM
||
589 waddr
== V3D_QPU_WADDR_VPMU
);
593 v3d_qpu_magic_waddr_is_tsy(enum v3d_qpu_waddr waddr
)
595 return (waddr
== V3D_QPU_WADDR_SYNC
||
596 waddr
== V3D_QPU_WADDR_SYNCU
);
600 v3d_qpu_writes_r3(const struct v3d_device_info
*devinfo
,
601 const struct v3d_qpu_instr
*inst
)
603 if (inst
->type
== V3D_QPU_INSTR_TYPE_ALU
) {
604 if (inst
->alu
.add
.magic_write
&&
605 inst
->alu
.add
.waddr
== V3D_QPU_WADDR_R3
) {
609 if (inst
->alu
.mul
.magic_write
&&
610 inst
->alu
.mul
.waddr
== V3D_QPU_WADDR_R3
) {
615 if (v3d_qpu_sig_writes_address(devinfo
, &inst
->sig
) &&
616 inst
->sig_magic
&& inst
->sig_addr
== V3D_QPU_WADDR_R3
) {
620 return inst
->sig
.ldvary
|| inst
->sig
.ldvpm
;
624 v3d_qpu_writes_r4(const struct v3d_device_info
*devinfo
,
625 const struct v3d_qpu_instr
*inst
)
630 if (inst
->type
== V3D_QPU_INSTR_TYPE_ALU
) {
631 if (inst
->alu
.add
.magic_write
&&
632 (inst
->alu
.add
.waddr
== V3D_QPU_WADDR_R4
||
633 v3d_qpu_magic_waddr_is_sfu(inst
->alu
.add
.waddr
))) {
637 if (inst
->alu
.mul
.magic_write
&&
638 (inst
->alu
.mul
.waddr
== V3D_QPU_WADDR_R4
||
639 v3d_qpu_magic_waddr_is_sfu(inst
->alu
.mul
.waddr
))) {
644 if (v3d_qpu_sig_writes_address(devinfo
, &inst
->sig
) &&
645 inst
->sig_magic
&& inst
->sig_addr
== V3D_QPU_WADDR_R4
) {
653 v3d_qpu_writes_r5(const struct v3d_device_info
*devinfo
,
654 const struct v3d_qpu_instr
*inst
)
656 if (inst
->type
== V3D_QPU_INSTR_TYPE_ALU
) {
657 if (inst
->alu
.add
.magic_write
&&
658 inst
->alu
.add
.waddr
== V3D_QPU_WADDR_R5
) {
662 if (inst
->alu
.mul
.magic_write
&&
663 inst
->alu
.mul
.waddr
== V3D_QPU_WADDR_R5
) {
668 if (v3d_qpu_sig_writes_address(devinfo
, &inst
->sig
) &&
669 inst
->sig_magic
&& inst
->sig_addr
== V3D_QPU_WADDR_R5
) {
673 return inst
->sig
.ldvary
|| inst
->sig
.ldunif
|| inst
->sig
.ldunifa
;
677 v3d_qpu_uses_mux(const struct v3d_qpu_instr
*inst
, enum v3d_qpu_mux mux
)
679 int add_nsrc
= v3d_qpu_add_op_num_src(inst
->alu
.add
.op
);
680 int mul_nsrc
= v3d_qpu_mul_op_num_src(inst
->alu
.mul
.op
);
682 return ((add_nsrc
> 0 && inst
->alu
.add
.a
== mux
) ||
683 (add_nsrc
> 1 && inst
->alu
.add
.b
== mux
) ||
684 (mul_nsrc
> 0 && inst
->alu
.mul
.a
== mux
) ||
685 (mul_nsrc
> 1 && inst
->alu
.mul
.b
== mux
));
689 v3d_qpu_sig_writes_address(const struct v3d_device_info
*devinfo
,
690 const struct v3d_qpu_sig
*sig
)
692 if (devinfo
->ver
< 41)
695 return (sig
->ldunifrf
||