/*
 * Copyright © 2016 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <string.h>
#include <strings.h>
#include "util/macros.h"

#include "broadcom/common/v3d_device_info.h"
#include "qpu_instr.h"
#define QPU_MASK(high, low) ((((uint64_t)1<<((high)-(low)+1))-1)<<(low))
/* Using the GNU statement expression extension */
#define QPU_SET_FIELD(value, field)                                       \
        ({                                                                \
                uint64_t fieldval = (uint64_t)(value) << field ## _SHIFT; \
                assert((fieldval & ~ field ## _MASK) == 0);               \
                fieldval & field ## _MASK;                                \
         })

#define QPU_GET_FIELD(word, field) ((uint32_t)(((word) & field ## _MASK) >> field ## _SHIFT))

#define QPU_UPDATE_FIELD(inst, value, field)                              \
        (((inst) & ~(field ## _MASK)) | QPU_SET_FIELD(value, field))
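
/* Illustrative example (added for clarity, not from the original source):
 * for a field defined as FOO_SHIFT 53 with FOO_MASK covering bits 57:53,
 * QPU_SET_FIELD(0x1a, FOO) produces (uint64_t)0x1a << 53, and
 * QPU_GET_FIELD() of that word recovers 0x1a.  The assert in QPU_SET_FIELD
 * fires if the value does not fit within the field's mask.
 */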
#define VC5_QPU_OP_MUL_SHIFT                58
#define VC5_QPU_OP_MUL_MASK                 QPU_MASK(63, 58)

#define VC5_QPU_SIG_SHIFT                   53
#define VC5_QPU_SIG_MASK                    QPU_MASK(57, 53)
# define VC5_QPU_SIG_THRSW_BIT              0x1
# define VC5_QPU_SIG_LDUNIF_BIT             0x2
# define VC5_QPU_SIG_LDTMU_BIT              0x4
# define VC5_QPU_SIG_LDVARY_BIT             0x8

#define VC5_QPU_COND_SHIFT                  46
#define VC5_QPU_COND_MASK                   QPU_MASK(52, 46)
#define VC5_QPU_COND_SIG_MAGIC_ADDR         (1 << 6)

#define VC5_QPU_MM                          QPU_MASK(45, 45)
#define VC5_QPU_MA                          QPU_MASK(44, 44)

#define V3D_QPU_WADDR_M_SHIFT               38
#define V3D_QPU_WADDR_M_MASK                QPU_MASK(43, 38)

#define VC5_QPU_BRANCH_ADDR_LOW_SHIFT       35
#define VC5_QPU_BRANCH_ADDR_LOW_MASK        QPU_MASK(55, 35)

#define V3D_QPU_WADDR_A_SHIFT               32
#define V3D_QPU_WADDR_A_MASK                QPU_MASK(37, 32)

#define VC5_QPU_BRANCH_COND_SHIFT           32
#define VC5_QPU_BRANCH_COND_MASK            QPU_MASK(34, 32)

#define VC5_QPU_BRANCH_ADDR_HIGH_SHIFT      24
#define VC5_QPU_BRANCH_ADDR_HIGH_MASK       QPU_MASK(31, 24)

#define VC5_QPU_OP_ADD_SHIFT                24
#define VC5_QPU_OP_ADD_MASK                 QPU_MASK(31, 24)

#define VC5_QPU_MUL_B_SHIFT                 21
#define VC5_QPU_MUL_B_MASK                  QPU_MASK(23, 21)

#define VC5_QPU_BRANCH_MSFIGN_SHIFT         21
#define VC5_QPU_BRANCH_MSFIGN_MASK          QPU_MASK(22, 21)

#define VC5_QPU_MUL_A_SHIFT                 18
#define VC5_QPU_MUL_A_MASK                  QPU_MASK(20, 18)

#define VC5_QPU_ADD_B_SHIFT                 15
#define VC5_QPU_ADD_B_MASK                  QPU_MASK(17, 15)

#define VC5_QPU_BRANCH_BDU_SHIFT            15
#define VC5_QPU_BRANCH_BDU_MASK             QPU_MASK(17, 15)

#define VC5_QPU_BRANCH_UB                   QPU_MASK(14, 14)

#define VC5_QPU_ADD_A_SHIFT                 12
#define VC5_QPU_ADD_A_MASK                  QPU_MASK(14, 12)

#define VC5_QPU_BRANCH_BDI_SHIFT            12
#define VC5_QPU_BRANCH_BDI_MASK             QPU_MASK(13, 12)

#define VC5_QPU_RADDR_A_SHIFT               6
#define VC5_QPU_RADDR_A_MASK                QPU_MASK(11, 6)

#define VC5_QPU_RADDR_B_SHIFT               0
#define VC5_QPU_RADDR_B_MASK                QPU_MASK(5, 0)
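
/* For orientation (a summary derived from the definitions above, not text
 * from the original file): a 64-bit ALU instruction word decomposes as
 *
 *     63:58 op_mul   57:53 sig      52:46 cond    45 mm     44 ma
 *     43:38 waddr_m  37:32 waddr_a  31:24 op_add  23:21 mul_b
 *     20:18 mul_a    17:15 add_b    14:12 add_a   11:6 raddr_a  5:0 raddr_b
 *
 * while branch instructions reuse the low bits for the cond/addr/bdi/bdu
 * fields defined above.
 */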
#define THRSW .thrsw = true
#define LDUNIF .ldunif = true
#define LDUNIFRF .ldunifrf = true
#define LDUNIFA .ldunifa = true
#define LDUNIFARF .ldunifarf = true
#define LDTMU .ldtmu = true
#define LDVARY .ldvary = true
#define LDVPM .ldvpm = true
#define SMIMM .small_imm = true
#define LDTLB .ldtlb = true
#define LDTLBU .ldtlbu = true
#define UCB .ucb = true
#define ROT .rotate = true
#define WRTMUC .wrtmuc = true
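
/* Note (added for clarity, not in the original): each shorthand above is a
 * designated-initializer fragment, so a table entry like { THRSW, LDUNIF }
 * expands to { .thrsw = true, .ldunif = true }.  For the low table indices
 * the packed value is simply the OR of the VC5_QPU_SIG_*_BIT values, e.g.
 * packed signal 7 = THRSW | LDUNIF | LDTMU.
 */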
static const struct v3d_qpu_sig v33_sig_map[] = {
        /*      MISC    R3      R4      R5 */
        [0]  = {                               },
        [1]  = { THRSW,                        },
        [2]  = {                        LDUNIF },
        [3]  = { THRSW,                 LDUNIF },
        [4]  = {                LDTMU,         },
        [5]  = { THRSW,         LDTMU,         },
        [6]  = {                LDTMU,  LDUNIF },
        [7]  = { THRSW,         LDTMU,  LDUNIF },
        [8]  = {        LDVARY,                },
        [9]  = { THRSW, LDVARY,                },
        [10] = {        LDVARY,         LDUNIF },
        [11] = { THRSW, LDVARY,         LDUNIF },
        [12] = {        LDVARY, LDTMU,         },
        [13] = { THRSW, LDVARY, LDTMU,         },
        [14] = { SMIMM, LDVARY,                },
        [15] = { SMIMM,                        },
        [16] = {        LDTLB,                 },
        [17] = {        LDTLBU,                },
        /* 18-21 reserved */
        [22] = { UCB,                          },
        [23] = { ROT,                          },
        [24] = {        LDVPM,                 },
        [25] = { THRSW, LDVPM,                 },
        [26] = {        LDVPM,          LDUNIF },
        [27] = { THRSW, LDVPM,          LDUNIF },
        [28] = {        LDVPM,  LDTMU,         },
        [29] = { THRSW, LDVPM,  LDTMU,         },
        [30] = { SMIMM, LDVPM,                 },
        [31] = { SMIMM,                        },
};
static const struct v3d_qpu_sig v40_sig_map[] = {
        /*      MISC    R3      R4      R5 */
        [0]  = {                               },
        [1]  = { THRSW,                        },
        [2]  = {                        LDUNIF },
        [3]  = { THRSW,                 LDUNIF },
        [4]  = {                LDTMU,         },
        [5]  = { THRSW,         LDTMU,         },
        [6]  = {                LDTMU,  LDUNIF },
        [7]  = { THRSW,         LDTMU,  LDUNIF },
        [8]  = {        LDVARY,                },
        [9]  = { THRSW, LDVARY,                },
        [10] = {        LDVARY,         LDUNIF },
        [11] = { THRSW, LDVARY,         LDUNIF },
        /* 12-13 reserved */
        [14] = { SMIMM, LDVARY,                },
        [15] = { SMIMM,                        },
        [16] = {        LDTLB,                 },
        [17] = {        LDTLBU,                },
        [18] = {                        WRTMUC },
        [19] = { THRSW,                 WRTMUC },
        [20] = {        LDVARY,         WRTMUC },
        [21] = { THRSW, LDVARY,         WRTMUC },
        [22] = { UCB,                          },
        [23] = { ROT,                          },
        /* 24-30 reserved */
        [31] = { SMIMM,         LDTMU,         },
};
static const struct v3d_qpu_sig v41_sig_map[] = {
        /*      MISC       phys    R5 */
        [0]  = {                          },
        [1]  = { THRSW,                   },
        [2]  = {                   LDUNIF },
        [3]  = { THRSW,            LDUNIF },
        [4]  = {           LDTMU,         },
        [5]  = { THRSW,    LDTMU,         },
        [6]  = {           LDTMU,  LDUNIF },
        [7]  = { THRSW,    LDTMU,  LDUNIF },
        [8]  = {           LDVARY,        },
        [9]  = { THRSW,    LDVARY,        },
        [10] = {           LDVARY, LDUNIF },
        [11] = { THRSW,    LDVARY, LDUNIF },
        [12] = { LDUNIFRF                 },
        [13] = { THRSW,    LDUNIFRF       },
        [14] = { SMIMM,    LDVARY,        },
        [15] = { SMIMM,                   },
        [16] = {           LDTLB,         },
        [17] = {           LDTLBU,        },
        [18] = {                   WRTMUC },
        [19] = { THRSW,            WRTMUC },
        [20] = {           LDVARY, WRTMUC },
        [21] = { THRSW,    LDVARY, WRTMUC },
        [22] = { UCB,                     },
        [23] = { ROT,                     },
        [24] = { LDUNIFA                  },
        [25] = { LDUNIFARF                },
        /* 26-30 reserved */
        [31] = { SMIMM,    LDTMU,         },
};
bool
v3d_qpu_sig_unpack(const struct v3d_device_info *devinfo,
                   uint32_t packed_sig,
                   struct v3d_qpu_sig *sig)
{
        if (packed_sig >= ARRAY_SIZE(v33_sig_map))
                return false;

        if (devinfo->ver >= 41)
                *sig = v41_sig_map[packed_sig];
        else if (devinfo->ver == 40)
                *sig = v40_sig_map[packed_sig];
        else
                *sig = v33_sig_map[packed_sig];

        /* Signals with zeroed unpacked contents after element 0 are reserved. */
        return (packed_sig == 0 ||
                memcmp(sig, &v33_sig_map[0], sizeof(*sig)) != 0);
}
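
/* Usage sketch (illustrative, not part of the original file): unpacking a
 * 5-bit signal field and re-packing it should round-trip for any
 * non-reserved encoding:
 *
 *     struct v3d_qpu_sig sig;
 *     uint32_t repacked;
 *     if (v3d_qpu_sig_unpack(devinfo, 7, &sig))
 *             assert(v3d_qpu_sig_pack(devinfo, &sig, &repacked) &&
 *                    repacked == 7);
 */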
bool
v3d_qpu_sig_pack(const struct v3d_device_info *devinfo,
                 const struct v3d_qpu_sig *sig,
                 uint32_t *packed_sig)
{
        static const struct v3d_qpu_sig *map;

        if (devinfo->ver >= 41)
                map = v41_sig_map;
        else if (devinfo->ver == 40)
                map = v40_sig_map;
        else
                map = v33_sig_map;

        for (int i = 0; i < ARRAY_SIZE(v33_sig_map); i++) {
                if (memcmp(&map[i], sig, sizeof(*sig)) == 0) {
                        *packed_sig = i;
                        return true;
                }
        }

        return false;
}
bool
v3d_qpu_flags_unpack(const struct v3d_device_info *devinfo,
                     uint32_t packed_cond,
                     struct v3d_qpu_flags *cond)
{
        static const enum v3d_qpu_cond cond_map[4] = {
                [0] = V3D_QPU_COND_IFA,
                [1] = V3D_QPU_COND_IFB,
                [2] = V3D_QPU_COND_IFNA,
                [3] = V3D_QPU_COND_IFNB,
        };

        cond->ac = V3D_QPU_COND_NONE;
        cond->mc = V3D_QPU_COND_NONE;
        cond->apf = V3D_QPU_PF_NONE;
        cond->mpf = V3D_QPU_PF_NONE;
        cond->auf = V3D_QPU_UF_NONE;
        cond->muf = V3D_QPU_UF_NONE;

        if (packed_cond == 0) {
                return true;
        } else if (packed_cond >> 2 == 0) {
                cond->apf = packed_cond & 0x3;
        } else if (packed_cond >> 4 == 0) {
                cond->auf = (packed_cond & 0xf) - 4 + V3D_QPU_UF_ANDZ;
        } else if (packed_cond == 0x10) {
                return false;
        } else if (packed_cond >> 2 == 0x4) {
                cond->mpf = packed_cond & 0x3;
        } else if (packed_cond >> 4 == 0x1) {
                cond->muf = (packed_cond & 0xf) - 4 + V3D_QPU_UF_ANDZ;
        } else if (packed_cond >> 4 == 0x2) {
                cond->ac = ((packed_cond >> 2) & 0x3) + V3D_QPU_COND_IFA;
                cond->mpf = packed_cond & 0x3;
        } else if (packed_cond >> 4 == 0x3) {
                cond->mc = ((packed_cond >> 2) & 0x3) + V3D_QPU_COND_IFA;
                cond->apf = packed_cond & 0x3;
        } else if (packed_cond >> 6) {
                cond->mc = cond_map[(packed_cond >> 4) & 0x3];
                if (((packed_cond >> 2) & 0x3) == 0) {
                        cond->ac = cond_map[packed_cond & 0x3];
                } else {
                        cond->auf = (packed_cond & 0xf) - 4 + V3D_QPU_UF_ANDZ;
                }
        }

        return true;
}
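
/* Decode example (added for illustration): following the branches above, a
 * packed cond of 0x3 (0x3 >> 2 == 0) sets only the add-ALU flag field
 * (apf = 3), while the value 0x10 is explicitly rejected as reserved.
 */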
bool
v3d_qpu_flags_pack(const struct v3d_device_info *devinfo,
                   const struct v3d_qpu_flags *cond,
                   uint32_t *packed_cond)
{
#define AC (1 << 0)
#define MC (1 << 1)
#define APF (1 << 2)
#define MPF (1 << 3)
#define AUF (1 << 4)
#define MUF (1 << 5)
        static const struct {
                uint8_t flags_present;
                uint8_t bits;
        } flags_table[] = {
                { 0,        0 },
                { APF,      0 },
                { AUF,      0 },
                { MPF,      (1 << 4) },
                { MUF,      (1 << 4) },
                { AC,       (1 << 5) },
                { AC | MPF, (1 << 5) },
                { MC,       (1 << 5) | (1 << 4) },
                { MC | APF, (1 << 5) | (1 << 4) },
                { MC | AC,  (1 << 6) },
                { MC | AUF, (1 << 6) },
        };

        uint8_t flags_present = 0;
        if (cond->ac != V3D_QPU_COND_NONE)
                flags_present |= AC;
        if (cond->mc != V3D_QPU_COND_NONE)
                flags_present |= MC;
        if (cond->apf != V3D_QPU_PF_NONE)
                flags_present |= APF;
        if (cond->mpf != V3D_QPU_PF_NONE)
                flags_present |= MPF;
        if (cond->auf != V3D_QPU_UF_NONE)
                flags_present |= AUF;
        if (cond->muf != V3D_QPU_UF_NONE)
                flags_present |= MUF;

        for (int i = 0; i < ARRAY_SIZE(flags_table); i++) {
                if (flags_table[i].flags_present != flags_present)
                        continue;

                *packed_cond = flags_table[i].bits;

                *packed_cond |= cond->apf;
                *packed_cond |= cond->mpf;

                if (flags_present & AUF)
                        *packed_cond |= cond->auf - V3D_QPU_UF_ANDZ + 4;
                if (flags_present & MUF)
                        *packed_cond |= cond->muf - V3D_QPU_UF_ANDZ + 4;

                if (flags_present & AC)
                        *packed_cond |= (cond->ac - V3D_QPU_COND_IFA) << 2;

                if (flags_present & MC) {
                        if (*packed_cond & (1 << 6))
                                *packed_cond |= (cond->mc -
                                                 V3D_QPU_COND_IFA) << 4;
                        else
                                *packed_cond |= (cond->mc -
                                                 V3D_QPU_COND_IFA) << 2;
                }

                return true;
        }

        return false;
}
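
/* Note (added for clarity): packing is table-driven and intentionally
 * partial.  Flag combinations with no row in flags_table above (for
 * example apf and mpf both set) cannot be encoded in the 7-bit cond
 * field, so v3d_qpu_flags_pack() returns false for them.
 */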
/* Make a mapping of the table of opcodes in the spec.  The opcode is
 * determined by a combination of the opcode field, and in the case of 0 or
 * 1-arg opcodes, the mux_b field as well.
 */
#define MUX_MASK(bot, top) (((1 << (top + 1)) - 1) - ((1 << (bot)) - 1))
#define ANYMUX MUX_MASK(0, 7)
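
/* Example (added for illustration): MUX_MASK(4, 6) evaluates to
 * 0b01110000, i.e. the set of mux values {4, 5, 6}, and ANYMUX to 0xff,
 * accepting any of the eight mux encodings.
 */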
struct opcode_desc {
        uint8_t opcode_first;
        uint8_t opcode_last;
        uint8_t mux_b_mask;
        uint8_t mux_a_mask;
        uint8_t op;
        /* 0 if it's the same across V3D versions, or a specific V3D version. */
        uint8_t ver;
};
static const struct opcode_desc add_ops[] = {
        /* FADD is FADDNF depending on the order of the mux_a/mux_b. */
        { 0,   47,  ANYMUX, ANYMUX, V3D_QPU_A_FADD },
        { 0,   47,  ANYMUX, ANYMUX, V3D_QPU_A_FADDNF },
        { 53,  55,  ANYMUX, ANYMUX, V3D_QPU_A_VFPACK },
        { 56,  56,  ANYMUX, ANYMUX, V3D_QPU_A_ADD },
        { 57,  59,  ANYMUX, ANYMUX, V3D_QPU_A_VFPACK },
        { 60,  60,  ANYMUX, ANYMUX, V3D_QPU_A_SUB },
        { 61,  63,  ANYMUX, ANYMUX, V3D_QPU_A_VFPACK },
        { 64,  111, ANYMUX, ANYMUX, V3D_QPU_A_FSUB },
        { 120, 120, ANYMUX, ANYMUX, V3D_QPU_A_MIN },
        { 121, 121, ANYMUX, ANYMUX, V3D_QPU_A_MAX },
        { 122, 122, ANYMUX, ANYMUX, V3D_QPU_A_UMIN },
        { 123, 123, ANYMUX, ANYMUX, V3D_QPU_A_UMAX },
        { 124, 124, ANYMUX, ANYMUX, V3D_QPU_A_SHL },
        { 125, 125, ANYMUX, ANYMUX, V3D_QPU_A_SHR },
        { 126, 126, ANYMUX, ANYMUX, V3D_QPU_A_ASR },
        { 127, 127, ANYMUX, ANYMUX, V3D_QPU_A_ROR },
        /* FMIN is instead FMAX depending on the order of the mux_a/mux_b. */
        { 128, 175, ANYMUX, ANYMUX, V3D_QPU_A_FMIN },
        { 128, 175, ANYMUX, ANYMUX, V3D_QPU_A_FMAX },
        { 176, 180, ANYMUX, ANYMUX, V3D_QPU_A_VFMIN },

        { 181, 181, ANYMUX, ANYMUX, V3D_QPU_A_AND },
        { 182, 182, ANYMUX, ANYMUX, V3D_QPU_A_OR },
        { 183, 183, ANYMUX, ANYMUX, V3D_QPU_A_XOR },

        { 184, 184, ANYMUX, ANYMUX, V3D_QPU_A_VADD },
        { 185, 185, ANYMUX, ANYMUX, V3D_QPU_A_VSUB },
        { 186, 186, 1 << 0, ANYMUX, V3D_QPU_A_NOT },
        { 186, 186, 1 << 1, ANYMUX, V3D_QPU_A_NEG },
        { 186, 186, 1 << 2, ANYMUX, V3D_QPU_A_FLAPUSH },
        { 186, 186, 1 << 3, ANYMUX, V3D_QPU_A_FLBPUSH },
        { 186, 186, 1 << 4, ANYMUX, V3D_QPU_A_FLBPOP },
        { 186, 186, 1 << 6, ANYMUX, V3D_QPU_A_SETMSF },
        { 186, 186, 1 << 7, ANYMUX, V3D_QPU_A_SETREVF },
        { 187, 187, 1 << 0, 1 << 0, V3D_QPU_A_NOP, 0 },
        { 187, 187, 1 << 0, 1 << 1, V3D_QPU_A_TIDX },
        { 187, 187, 1 << 0, 1 << 2, V3D_QPU_A_EIDX },
        { 187, 187, 1 << 0, 1 << 3, V3D_QPU_A_LR },
        { 187, 187, 1 << 0, 1 << 4, V3D_QPU_A_VFLA },
        { 187, 187, 1 << 0, 1 << 5, V3D_QPU_A_VFLNA },
        { 187, 187, 1 << 0, 1 << 6, V3D_QPU_A_VFLB },
        { 187, 187, 1 << 0, 1 << 7, V3D_QPU_A_VFLNB },

        { 187, 187, 1 << 1, MUX_MASK(0, 2), V3D_QPU_A_FXCD },
        { 187, 187, 1 << 1, 1 << 3, V3D_QPU_A_XCD },
        { 187, 187, 1 << 1, MUX_MASK(4, 6), V3D_QPU_A_FYCD },
        { 187, 187, 1 << 1, 1 << 7, V3D_QPU_A_YCD },

        { 187, 187, 1 << 2, 1 << 0, V3D_QPU_A_MSF },
        { 187, 187, 1 << 2, 1 << 1, V3D_QPU_A_REVF },
        { 187, 187, 1 << 2, 1 << 2, V3D_QPU_A_VDWWT },
        { 187, 187, 1 << 2, 1 << 5, V3D_QPU_A_TMUWT },
        { 187, 187, 1 << 2, 1 << 6, V3D_QPU_A_VPMWT },

        { 187, 187, 1 << 3, ANYMUX, V3D_QPU_A_VPMSETUP },

        /* FIXME: MORE COMPLICATED */
        /* { 190, 191, ANYMUX, ANYMUX, V3D_QPU_A_VFMOVABSNEGNAB }, */

        { 192, 239, ANYMUX, ANYMUX, V3D_QPU_A_FCMP },
        { 240, 244, ANYMUX, ANYMUX, V3D_QPU_A_VFMAX },

        { 245, 245, MUX_MASK(0, 2), ANYMUX, V3D_QPU_A_FROUND },
        { 245, 245, 1 << 3, ANYMUX, V3D_QPU_A_FTOIN },
        { 245, 245, MUX_MASK(4, 6), ANYMUX, V3D_QPU_A_FTRUNC },
        { 245, 245, 1 << 7, ANYMUX, V3D_QPU_A_FTOIZ },
        { 246, 246, MUX_MASK(0, 2), ANYMUX, V3D_QPU_A_FFLOOR },
        { 246, 246, 1 << 3, ANYMUX, V3D_QPU_A_FTOUZ },
        { 246, 246, MUX_MASK(4, 6), ANYMUX, V3D_QPU_A_FCEIL },
        { 246, 246, 1 << 7, ANYMUX, V3D_QPU_A_FTOC },

        { 247, 247, MUX_MASK(0, 2), ANYMUX, V3D_QPU_A_FDX },
        { 247, 247, MUX_MASK(4, 6), ANYMUX, V3D_QPU_A_FDY },

        /* The stvpms are distinguished by the waddr field. */
        { 248, 248, ANYMUX, ANYMUX, V3D_QPU_A_STVPMV },
        { 248, 248, ANYMUX, ANYMUX, V3D_QPU_A_STVPMD },
        { 248, 248, ANYMUX, ANYMUX, V3D_QPU_A_STVPMP },

        { 252, 252, MUX_MASK(0, 2), ANYMUX, V3D_QPU_A_ITOF },
        { 252, 252, 1 << 3, ANYMUX, V3D_QPU_A_CLZ },
        { 252, 252, MUX_MASK(4, 6), ANYMUX, V3D_QPU_A_UTOF },
};
static const struct opcode_desc mul_ops[] = {
        { 1, 1, ANYMUX, ANYMUX, V3D_QPU_M_ADD },
        { 2, 2, ANYMUX, ANYMUX, V3D_QPU_M_SUB },
        { 3, 3, ANYMUX, ANYMUX, V3D_QPU_M_UMUL24 },
        { 4, 8, ANYMUX, ANYMUX, V3D_QPU_M_VFMUL },
        { 9, 9, ANYMUX, ANYMUX, V3D_QPU_M_SMUL24 },
        { 10, 10, ANYMUX, ANYMUX, V3D_QPU_M_MULTOP },
        { 14, 14, ANYMUX, ANYMUX, V3D_QPU_M_FMOV },
        { 15, 15, MUX_MASK(0, 3), ANYMUX, V3D_QPU_M_FMOV },
        { 15, 15, 1 << 4, 1 << 0, V3D_QPU_M_NOP, 0 },
        { 15, 15, 1 << 7, ANYMUX, V3D_QPU_M_MOV },
        { 16, 63, ANYMUX, ANYMUX, V3D_QPU_M_FMUL },
};
static const struct opcode_desc *
lookup_opcode(const struct opcode_desc *opcodes, size_t num_opcodes,
              uint32_t opcode, uint32_t mux_a, uint32_t mux_b)
{
        for (int i = 0; i < num_opcodes; i++) {
                const struct opcode_desc *op_desc = &opcodes[i];

                if (opcode < op_desc->opcode_first ||
                    opcode > op_desc->opcode_last)
                        continue;

                if (!(op_desc->mux_b_mask & (1 << mux_b)))
                        continue;

                if (!(op_desc->mux_a_mask & (1 << mux_a)))
                        continue;

                return op_desc;
        }

        return NULL;
}
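
/* Note (added for clarity): entries earlier in a table take priority, so
 * overlapping rows like FMIN/FMAX (identical opcode ranges) resolve to
 * the first match here and are then disambiguated by the callers below
 * based on operand ordering.
 */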
static bool
v3d_qpu_float32_unpack_unpack(uint32_t packed,
                              enum v3d_qpu_input_unpack *unpacked)
{
        switch (packed) {
        case 0:
                *unpacked = V3D_QPU_UNPACK_ABS;
                return true;
        case 1:
                *unpacked = V3D_QPU_UNPACK_NONE;
                return true;
        case 2:
                *unpacked = V3D_QPU_UNPACK_L;
                return true;
        case 3:
                *unpacked = V3D_QPU_UNPACK_H;
                return true;
        default:
                return false;
        }
}
static bool
v3d_qpu_float32_unpack_pack(enum v3d_qpu_input_unpack unpacked,
                            uint32_t *packed)
{
        switch (unpacked) {
        case V3D_QPU_UNPACK_ABS:
                *packed = 0;
                return true;
        case V3D_QPU_UNPACK_NONE:
                *packed = 1;
                return true;
        case V3D_QPU_UNPACK_L:
                *packed = 2;
                return true;
        case V3D_QPU_UNPACK_H:
                *packed = 3;
                return true;
        default:
                return false;
        }
}
static bool
v3d_qpu_float16_unpack_unpack(uint32_t packed,
                              enum v3d_qpu_input_unpack *unpacked)
{
        switch (packed) {
        case 0:
                *unpacked = V3D_QPU_UNPACK_NONE;
                return true;
        case 1:
                *unpacked = V3D_QPU_UNPACK_REPLICATE_32F_16;
                return true;
        case 2:
                *unpacked = V3D_QPU_UNPACK_REPLICATE_L_16;
                return true;
        case 3:
                *unpacked = V3D_QPU_UNPACK_REPLICATE_H_16;
                return true;
        case 4:
                *unpacked = V3D_QPU_UNPACK_SWAP_16;
                return true;
        default:
                return false;
        }
}
static bool
v3d_qpu_float16_unpack_pack(enum v3d_qpu_input_unpack unpacked,
                            uint32_t *packed)
{
        switch (unpacked) {
        case V3D_QPU_UNPACK_NONE:
                *packed = 0;
                return true;
        case V3D_QPU_UNPACK_REPLICATE_32F_16:
                *packed = 1;
                return true;
        case V3D_QPU_UNPACK_REPLICATE_L_16:
                *packed = 2;
                return true;
        case V3D_QPU_UNPACK_REPLICATE_H_16:
                *packed = 3;
                return true;
        case V3D_QPU_UNPACK_SWAP_16:
                *packed = 4;
                return true;
        default:
                return false;
        }
}
static bool
v3d_qpu_float32_pack_pack(enum v3d_qpu_output_pack pack,
                          uint32_t *packed)
{
        switch (pack) {
        case V3D_QPU_PACK_NONE:
                *packed = 0;
                return true;
        case V3D_QPU_PACK_L:
                *packed = 1;
                return true;
        case V3D_QPU_PACK_H:
                *packed = 2;
                return true;
        default:
                return false;
        }
}
static bool
v3d_qpu_add_unpack(const struct v3d_device_info *devinfo, uint64_t packed_inst,
                   struct v3d_qpu_instr *instr)
{
        uint32_t op = QPU_GET_FIELD(packed_inst, VC5_QPU_OP_ADD);
        uint32_t mux_a = QPU_GET_FIELD(packed_inst, VC5_QPU_ADD_A);
        uint32_t mux_b = QPU_GET_FIELD(packed_inst, VC5_QPU_ADD_B);
        uint32_t waddr = QPU_GET_FIELD(packed_inst, V3D_QPU_WADDR_A);

        uint32_t map_op = op;
        /* Some big clusters of opcodes are replicated with unpack
         * flags.
         */
        if (map_op >= 249 && map_op <= 251)
                map_op = (map_op - 249 + 245);
        if (map_op >= 253 && map_op <= 255)
                map_op = (map_op - 253 + 245);

        const struct opcode_desc *desc =
                lookup_opcode(add_ops, ARRAY_SIZE(add_ops),
                              map_op, mux_a, mux_b);
        if (!desc)
                return false;

        instr->alu.add.op = desc->op;

        /* FADD/FADDNF and FMIN/FMAX are determined by the orders of the
         * operands.
         */
        if (((op >> 2) & 3) * 8 + mux_a > (op & 3) * 8 + mux_b) {
                if (instr->alu.add.op == V3D_QPU_A_FMIN)
                        instr->alu.add.op = V3D_QPU_A_FMAX;
                if (instr->alu.add.op == V3D_QPU_A_FADD)
                        instr->alu.add.op = V3D_QPU_A_FADDNF;
        }

        /* Some QPU ops require a bit more than just basic opcode and mux a/b
         * comparisons to distinguish them.
         */
        switch (instr->alu.add.op) {
        case V3D_QPU_A_STVPMV:
        case V3D_QPU_A_STVPMD:
        case V3D_QPU_A_STVPMP:
                switch (waddr) {
                case 0:
                        instr->alu.add.op = V3D_QPU_A_STVPMV;
                        break;
                case 1:
                        instr->alu.add.op = V3D_QPU_A_STVPMD;
                        break;
                case 2:
                        instr->alu.add.op = V3D_QPU_A_STVPMP;
                        break;
                default:
                        return false;
                }
                break;
        default:
                break;
        }

        switch (instr->alu.add.op) {
        case V3D_QPU_A_FADD:
        case V3D_QPU_A_FADDNF:
        case V3D_QPU_A_FSUB:
        case V3D_QPU_A_FMIN:
        case V3D_QPU_A_FMAX:
        case V3D_QPU_A_FCMP:
                instr->alu.add.output_pack = (op >> 4) & 0x3;

                if (!v3d_qpu_float32_unpack_unpack((op >> 2) & 0x3,
                                                   &instr->alu.add.a_unpack)) {
                        return false;
                }

                if (!v3d_qpu_float32_unpack_unpack((op >> 0) & 0x3,
                                                   &instr->alu.add.b_unpack)) {
                        return false;
                }
                break;

        case V3D_QPU_A_FFLOOR:
        case V3D_QPU_A_FROUND:
        case V3D_QPU_A_FTRUNC:
        case V3D_QPU_A_FCEIL:
        case V3D_QPU_A_FDX:
        case V3D_QPU_A_FDY:
                instr->alu.add.output_pack = mux_b & 0x3;

                if (!v3d_qpu_float32_unpack_unpack((op >> 2) & 0x3,
                                                   &instr->alu.add.a_unpack)) {
                        return false;
                }
                break;

        case V3D_QPU_A_FTOIN:
        case V3D_QPU_A_FTOIZ:
        case V3D_QPU_A_FTOUZ:
        case V3D_QPU_A_FTOC:
                instr->alu.add.output_pack = V3D_QPU_PACK_NONE;

                if (!v3d_qpu_float32_unpack_unpack((op >> 2) & 0x3,
                                                   &instr->alu.add.a_unpack)) {
                        return false;
                }
                break;

        case V3D_QPU_A_VFMIN:
        case V3D_QPU_A_VFMAX:
                if (!v3d_qpu_float16_unpack_unpack(op & 0x7,
                                                   &instr->alu.add.a_unpack)) {
                        return false;
                }

                instr->alu.add.output_pack = V3D_QPU_PACK_NONE;
                instr->alu.add.b_unpack = V3D_QPU_UNPACK_NONE;
                break;

        default:
                instr->alu.add.output_pack = V3D_QPU_PACK_NONE;
                instr->alu.add.a_unpack = V3D_QPU_UNPACK_NONE;
                instr->alu.add.b_unpack = V3D_QPU_UNPACK_NONE;
                break;
        }

        instr->alu.add.a = mux_a;
        instr->alu.add.b = mux_b;
        instr->alu.add.waddr = QPU_GET_FIELD(packed_inst, V3D_QPU_WADDR_A);
        instr->alu.add.magic_write = packed_inst & VC5_QPU_MA;

        return true;
}
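
/* Note (added for clarity): FADD vs. FADDNF and FMIN vs. FMAX share opcode
 * ranges, so the decode above recovers which one was meant purely from the
 * relative ordering of the unpack bits and mux_a/mux_b, mirroring the
 * operand swap performed in v3d_qpu_add_pack() below.
 */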
static bool
v3d_qpu_mul_unpack(const struct v3d_device_info *devinfo, uint64_t packed_inst,
                   struct v3d_qpu_instr *instr)
{
        uint32_t op = QPU_GET_FIELD(packed_inst, VC5_QPU_OP_MUL);
        uint32_t mux_a = QPU_GET_FIELD(packed_inst, VC5_QPU_MUL_A);
        uint32_t mux_b = QPU_GET_FIELD(packed_inst, VC5_QPU_MUL_B);

        const struct opcode_desc *desc =
                lookup_opcode(mul_ops, ARRAY_SIZE(mul_ops),
                              op, mux_a, mux_b);
        if (!desc)
                return false;

        instr->alu.mul.op = desc->op;

        switch (instr->alu.mul.op) {
        case V3D_QPU_M_FMUL:
                instr->alu.mul.output_pack = ((op >> 4) & 0x3) - 1;

                if (!v3d_qpu_float32_unpack_unpack((op >> 2) & 0x3,
                                                   &instr->alu.mul.a_unpack)) {
                        return false;
                }

                if (!v3d_qpu_float32_unpack_unpack((op >> 0) & 0x3,
                                                   &instr->alu.mul.b_unpack)) {
                        return false;
                }
                break;

        case V3D_QPU_M_FMOV:
                instr->alu.mul.output_pack = (((op & 1) << 1) +
                                              ((mux_b >> 2) & 1));

                if (!v3d_qpu_float32_unpack_unpack(mux_b & 0x3,
                                                   &instr->alu.mul.a_unpack)) {
                        return false;
                }
                break;

        case V3D_QPU_M_VFMUL:
                instr->alu.mul.output_pack = V3D_QPU_PACK_NONE;

                if (!v3d_qpu_float16_unpack_unpack(((op & 0x7) - 4) & 7,
                                                   &instr->alu.mul.a_unpack)) {
                        return false;
                }

                instr->alu.mul.b_unpack = V3D_QPU_UNPACK_NONE;

                break;

        default:
                instr->alu.mul.output_pack = V3D_QPU_PACK_NONE;
                instr->alu.mul.a_unpack = V3D_QPU_UNPACK_NONE;
                instr->alu.mul.b_unpack = V3D_QPU_UNPACK_NONE;
                break;
        }

        instr->alu.mul.a = mux_a;
        instr->alu.mul.b = mux_b;
        instr->alu.mul.waddr = QPU_GET_FIELD(packed_inst, V3D_QPU_WADDR_M);
        instr->alu.mul.magic_write = packed_inst & VC5_QPU_MM;

        return true;
}
static bool
v3d_qpu_add_pack(const struct v3d_device_info *devinfo,
                 const struct v3d_qpu_instr *instr, uint64_t *packed_instr)
{
        uint32_t waddr = instr->alu.add.waddr;
        uint32_t mux_a = instr->alu.add.a;
        uint32_t mux_b = instr->alu.add.b;
        int nsrc = v3d_qpu_add_op_num_src(instr->alu.add.op);
        const struct opcode_desc *desc;

        uint32_t opcode;
        for (desc = add_ops; desc != &add_ops[ARRAY_SIZE(add_ops)];
             desc++) {
                if (desc->op == instr->alu.add.op)
                        break;
        }
        if (desc == &add_ops[ARRAY_SIZE(add_ops)])
                return false;

        opcode = desc->opcode_first;

        /* If an operation doesn't use an arg, its mux values may be used to
         * identify the operation type.
         */
        if (nsrc < 2)
                mux_b = ffs(desc->mux_b_mask) - 1;

        if (nsrc < 1)
                mux_a = ffs(desc->mux_a_mask) - 1;

        switch (instr->alu.add.op) {
        case V3D_QPU_A_STVPMV:
                waddr = 0;
                break;
        case V3D_QPU_A_STVPMD:
                waddr = 1;
                break;
        case V3D_QPU_A_STVPMP:
                waddr = 2;
                break;
        default:
                break;
        }

        switch (instr->alu.add.op) {
        case V3D_QPU_A_FADD:
        case V3D_QPU_A_FADDNF:
        case V3D_QPU_A_FSUB:
        case V3D_QPU_A_FMIN:
        case V3D_QPU_A_FMAX:
        case V3D_QPU_A_FCMP: {
                uint32_t output_pack;
                uint32_t a_unpack;
                uint32_t b_unpack;

                if (!v3d_qpu_float32_pack_pack(instr->alu.add.output_pack,
                                               &output_pack)) {
                        return false;
                }
                opcode |= output_pack << 4;

                if (!v3d_qpu_float32_unpack_pack(instr->alu.add.a_unpack,
                                                 &a_unpack)) {
                        return false;
                }

                if (!v3d_qpu_float32_unpack_pack(instr->alu.add.b_unpack,
                                                 &b_unpack)) {
                        return false;
                }

                /* These operations with commutative operands are
                 * distinguished by which order their operands come in.
                 */
                bool ordering = a_unpack * 8 + mux_a > b_unpack * 8 + mux_b;
                if (((instr->alu.add.op == V3D_QPU_A_FMIN ||
                      instr->alu.add.op == V3D_QPU_A_FADD) && ordering) ||
                    ((instr->alu.add.op == V3D_QPU_A_FMAX ||
                      instr->alu.add.op == V3D_QPU_A_FADDNF) && !ordering)) {
                        uint32_t temp;

                        temp = a_unpack;
                        a_unpack = b_unpack;
                        b_unpack = temp;

                        temp = mux_a;
                        mux_a = mux_b;
                        mux_b = temp;
                }

                opcode |= a_unpack << 2;
                opcode |= b_unpack << 0;
                break;
        }

        case V3D_QPU_A_FFLOOR:
        case V3D_QPU_A_FROUND:
        case V3D_QPU_A_FTRUNC:
        case V3D_QPU_A_FCEIL:
        case V3D_QPU_A_FDX:
        case V3D_QPU_A_FDY: {
                uint32_t packed;

                if (!v3d_qpu_float32_pack_pack(instr->alu.add.output_pack,
                                               &packed)) {
                        return false;
                }
                mux_b |= packed;

                if (!v3d_qpu_float32_unpack_pack(instr->alu.add.a_unpack,
                                                 &packed)) {
                        return false;
                }
                if (packed == 0)
                        return false;
                /* The unpack field overlaps the base opcode's low bits, so
                 * clear those before OR-ing in the requested unpack.
                 */
                opcode = (opcode & ~(0x3 << 2)) | packed << 2;
                break;
        }

        case V3D_QPU_A_FTOIN:
        case V3D_QPU_A_FTOIZ:
        case V3D_QPU_A_FTOUZ:
        case V3D_QPU_A_FTOC:
                if (instr->alu.add.output_pack != V3D_QPU_PACK_NONE)
                        return false;

                uint32_t packed;
                if (!v3d_qpu_float32_unpack_pack(instr->alu.add.a_unpack,
                                                 &packed)) {
                        return false;
                }
                if (packed == 0)
                        return false;
                opcode = (opcode & ~(0x3 << 2)) | packed << 2;

                break;

        case V3D_QPU_A_VFMIN:
        case V3D_QPU_A_VFMAX:
                if (instr->alu.add.output_pack != V3D_QPU_PACK_NONE ||
                    instr->alu.add.b_unpack != V3D_QPU_UNPACK_NONE) {
                        return false;
                }

                if (!v3d_qpu_float16_unpack_pack(instr->alu.add.a_unpack,
                                                 &packed)) {
                        return false;
                }
                opcode |= packed;
                break;

        default:
                if (instr->alu.add.op != V3D_QPU_A_NOP &&
                    (instr->alu.add.output_pack != V3D_QPU_PACK_NONE ||
                     instr->alu.add.a_unpack != V3D_QPU_UNPACK_NONE ||
                     instr->alu.add.b_unpack != V3D_QPU_UNPACK_NONE)) {
                        return false;
                }
                break;
        }

        *packed_instr |= QPU_SET_FIELD(mux_a, VC5_QPU_ADD_A);
        *packed_instr |= QPU_SET_FIELD(mux_b, VC5_QPU_ADD_B);
        *packed_instr |= QPU_SET_FIELD(opcode, VC5_QPU_OP_ADD);
        *packed_instr |= QPU_SET_FIELD(waddr, V3D_QPU_WADDR_A);
        if (instr->alu.add.magic_write)
                *packed_instr |= VC5_QPU_MA;

        return true;
}
static bool
v3d_qpu_mul_pack(const struct v3d_device_info *devinfo,
                 const struct v3d_qpu_instr *instr, uint64_t *packed_instr)
{
        uint32_t mux_a = instr->alu.mul.a;
        uint32_t mux_b = instr->alu.mul.b;
        int nsrc = v3d_qpu_mul_op_num_src(instr->alu.mul.op);
        const struct opcode_desc *desc;

        for (desc = mul_ops; desc != &mul_ops[ARRAY_SIZE(mul_ops)];
             desc++) {
                if (desc->op == instr->alu.mul.op)
                        break;
        }
        if (desc == &mul_ops[ARRAY_SIZE(mul_ops)])
                return false;

        uint32_t opcode = desc->opcode_first;

        /* Some opcodes have a single valid value for their mux a/b, so set
         * that here.  If mux a/b determine packing, it will be set below.
         */
        if (nsrc < 2)
                mux_b = ffs(desc->mux_b_mask) - 1;

        if (nsrc < 1)
                mux_a = ffs(desc->mux_a_mask) - 1;

        switch (instr->alu.mul.op) {
        case V3D_QPU_M_FMUL: {
                uint32_t packed;

                if (!v3d_qpu_float32_pack_pack(instr->alu.mul.output_pack,
                                               &packed)) {
                        return false;
                }
                /* No need for a +1 because desc->opcode_first has a 1 in this
                 * field.
                 */
                opcode += packed << 4;

                if (!v3d_qpu_float32_unpack_pack(instr->alu.mul.a_unpack,
                                                 &packed)) {
                        return false;
                }
                opcode |= packed << 2;

                if (!v3d_qpu_float32_unpack_pack(instr->alu.mul.b_unpack,
                                                 &packed)) {
                        return false;
                }
                opcode |= packed << 0;
                break;
        }

        case V3D_QPU_M_FMOV: {
                uint32_t packed;

                if (!v3d_qpu_float32_pack_pack(instr->alu.mul.output_pack,
                                               &packed)) {
                        return false;
                }
                opcode |= (packed >> 1) & 1;
                mux_b = (packed & 1) << 2;

                if (!v3d_qpu_float32_unpack_pack(instr->alu.mul.a_unpack,
                                                 &packed)) {
                        return false;
                }
                mux_b |= packed;
                break;
        }

        case V3D_QPU_M_VFMUL: {
                uint32_t packed;

                if (instr->alu.mul.output_pack != V3D_QPU_PACK_NONE)
                        return false;

                if (!v3d_qpu_float16_unpack_pack(instr->alu.mul.a_unpack,
                                                 &packed)) {
                        return false;
                }
                if (instr->alu.mul.a_unpack == V3D_QPU_UNPACK_SWAP_16)
                        opcode = 8;
                else
                        opcode |= (packed + 4) & 7;

                if (instr->alu.mul.b_unpack != V3D_QPU_UNPACK_NONE)
                        return false;

                break;
        }

        default:
                break;
        }

        *packed_instr |= QPU_SET_FIELD(mux_a, VC5_QPU_MUL_A);
        *packed_instr |= QPU_SET_FIELD(mux_b, VC5_QPU_MUL_B);

        *packed_instr |= QPU_SET_FIELD(opcode, VC5_QPU_OP_MUL);
        *packed_instr |= QPU_SET_FIELD(instr->alu.mul.waddr, V3D_QPU_WADDR_M);
        if (instr->alu.mul.magic_write)
                *packed_instr |= VC5_QPU_MM;

        return true;
}
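
/* Worked example (added for illustration): for VFMUL the 16-bit unpack of
 * mux_a is folded into the opcode itself.  With opcode_first == 4, packed
 * unpack values 0..3 (NONE, 32F, L, H) produce opcodes 4..7 via
 * opcode |= (packed + 4) & 7, while SWAP_16 (packed == 4) would wrap to 0,
 * which is why it is special-cased to opcode 8 above; the unpack side
 * inverts this with ((op & 0x7) - 4) & 7.
 */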
static bool
v3d_qpu_instr_unpack_alu(const struct v3d_device_info *devinfo,
                         uint64_t packed_instr,
                         struct v3d_qpu_instr *instr)
{
        instr->type = V3D_QPU_INSTR_TYPE_ALU;

        if (!v3d_qpu_sig_unpack(devinfo,
                                QPU_GET_FIELD(packed_instr, VC5_QPU_SIG),
                                &instr->sig))
                return false;

        uint32_t packed_cond = QPU_GET_FIELD(packed_instr, VC5_QPU_COND);
        if (v3d_qpu_sig_writes_address(devinfo, &instr->sig)) {
                instr->sig_addr = packed_cond & ~VC5_QPU_COND_SIG_MAGIC_ADDR;
                instr->sig_magic = packed_cond & VC5_QPU_COND_SIG_MAGIC_ADDR;

                instr->flags.ac = V3D_QPU_COND_NONE;
                instr->flags.mc = V3D_QPU_COND_NONE;
                instr->flags.apf = V3D_QPU_PF_NONE;
                instr->flags.mpf = V3D_QPU_PF_NONE;
                instr->flags.auf = V3D_QPU_UF_NONE;
                instr->flags.muf = V3D_QPU_UF_NONE;
        } else {
                if (!v3d_qpu_flags_unpack(devinfo, packed_cond, &instr->flags))
                        return false;
        }

        instr->raddr_a = QPU_GET_FIELD(packed_instr, VC5_QPU_RADDR_A);
        instr->raddr_b = QPU_GET_FIELD(packed_instr, VC5_QPU_RADDR_B);

        if (!v3d_qpu_add_unpack(devinfo, packed_instr, instr))
                return false;

        if (!v3d_qpu_mul_unpack(devinfo, packed_instr, instr))
                return false;

        return true;
}
static bool
v3d_qpu_instr_unpack_branch(const struct v3d_device_info *devinfo,
                            uint64_t packed_instr,
                            struct v3d_qpu_instr *instr)
{
        instr->type = V3D_QPU_INSTR_TYPE_BRANCH;

        uint32_t cond = QPU_GET_FIELD(packed_instr, VC5_QPU_BRANCH_COND);
        if (cond == 0)
                instr->branch.cond = V3D_QPU_BRANCH_COND_ALWAYS;
        else if (V3D_QPU_BRANCH_COND_A0 + (cond - 2) <=
                 V3D_QPU_BRANCH_COND_ALLNA)
                instr->branch.cond = V3D_QPU_BRANCH_COND_A0 + (cond - 2);
        else
                return false;

        uint32_t msfign = QPU_GET_FIELD(packed_instr, VC5_QPU_BRANCH_MSFIGN);
        if (msfign == 3)
                return false;
        instr->branch.msfign = msfign;

        instr->branch.bdi = QPU_GET_FIELD(packed_instr, VC5_QPU_BRANCH_BDI);

        instr->branch.ub = packed_instr & VC5_QPU_BRANCH_UB;
        if (instr->branch.ub) {
                instr->branch.bdu = QPU_GET_FIELD(packed_instr,
                                                  VC5_QPU_BRANCH_BDU);
        }

        instr->branch.raddr_a = QPU_GET_FIELD(packed_instr,
                                              VC5_QPU_RADDR_A);

        instr->branch.offset = 0;

        instr->branch.offset +=
                QPU_GET_FIELD(packed_instr,
                              VC5_QPU_BRANCH_ADDR_LOW) << 3;

        instr->branch.offset +=
                QPU_GET_FIELD(packed_instr,
                              VC5_QPU_BRANCH_ADDR_HIGH) << 24;

        return true;
}
bool
v3d_qpu_instr_unpack(const struct v3d_device_info *devinfo,
                     uint64_t packed_instr,
                     struct v3d_qpu_instr *instr)
{
        if (QPU_GET_FIELD(packed_instr, VC5_QPU_OP_MUL) != 0) {
                return v3d_qpu_instr_unpack_alu(devinfo, packed_instr, instr);
        } else {
                uint32_t sig = QPU_GET_FIELD(packed_instr, VC5_QPU_SIG);

                if ((sig & 24) == 16) {
                        return v3d_qpu_instr_unpack_branch(devinfo, packed_instr,
                                                           instr);
                } else {
                        return false;
                }
        }
}
static bool
v3d_qpu_instr_pack_alu(const struct v3d_device_info *devinfo,
                       const struct v3d_qpu_instr *instr,
                       uint64_t *packed_instr)
{
        uint32_t sig;
        if (!v3d_qpu_sig_pack(devinfo, &instr->sig, &sig))
                return false;
        *packed_instr |= QPU_SET_FIELD(sig, VC5_QPU_SIG);

        if (instr->type == V3D_QPU_INSTR_TYPE_ALU) {
                *packed_instr |= QPU_SET_FIELD(instr->raddr_a, VC5_QPU_RADDR_A);
                *packed_instr |= QPU_SET_FIELD(instr->raddr_b, VC5_QPU_RADDR_B);

                if (!v3d_qpu_add_pack(devinfo, instr, packed_instr))
                        return false;
                if (!v3d_qpu_mul_pack(devinfo, instr, packed_instr))
                        return false;
        }

        uint32_t flags;
        if (v3d_qpu_sig_writes_address(devinfo, &instr->sig)) {
                if (instr->flags.ac != V3D_QPU_COND_NONE ||
                    instr->flags.mc != V3D_QPU_COND_NONE ||
                    instr->flags.apf != V3D_QPU_PF_NONE ||
                    instr->flags.mpf != V3D_QPU_PF_NONE ||
                    instr->flags.auf != V3D_QPU_UF_NONE ||
                    instr->flags.muf != V3D_QPU_UF_NONE) {
                        return false;
                }

                flags = instr->sig_addr;
                if (instr->sig_magic)
                        flags |= VC5_QPU_COND_SIG_MAGIC_ADDR;
        } else {
                if (!v3d_qpu_flags_pack(devinfo, &instr->flags, &flags))
                        return false;
        }

        *packed_instr |= QPU_SET_FIELD(flags, VC5_QPU_COND);

        return true;
}
static bool
v3d_qpu_instr_pack_branch(const struct v3d_device_info *devinfo,
                          const struct v3d_qpu_instr *instr,
                          uint64_t *packed_instr)
{
        *packed_instr |= QPU_SET_FIELD(16, VC5_QPU_SIG);

        if (instr->branch.cond != V3D_QPU_BRANCH_COND_ALWAYS) {
                *packed_instr |= QPU_SET_FIELD(2 + (instr->branch.cond -
                                                    V3D_QPU_BRANCH_COND_A0),
                                               VC5_QPU_BRANCH_COND);
        }

        *packed_instr |= QPU_SET_FIELD(instr->branch.msfign,
                                       VC5_QPU_BRANCH_MSFIGN);

        *packed_instr |= QPU_SET_FIELD(instr->branch.bdi,
                                       VC5_QPU_BRANCH_BDI);

        if (instr->branch.ub) {
                *packed_instr |= VC5_QPU_BRANCH_UB;
                *packed_instr |= QPU_SET_FIELD(instr->branch.bdu,
                                               VC5_QPU_BRANCH_BDU);
        }

        switch (instr->branch.bdi) {
        case V3D_QPU_BRANCH_DEST_ABS:
        case V3D_QPU_BRANCH_DEST_REL:
                *packed_instr |= QPU_SET_FIELD(instr->branch.msfign,
                                               VC5_QPU_BRANCH_MSFIGN);

                *packed_instr |= QPU_SET_FIELD((instr->branch.offset &
                                                ~0xff000000) >> 3,
                                               VC5_QPU_BRANCH_ADDR_LOW);

                *packed_instr |= QPU_SET_FIELD(instr->branch.offset >> 24,
                                               VC5_QPU_BRANCH_ADDR_HIGH);
                break;

        case V3D_QPU_BRANCH_DEST_REGFILE:
                *packed_instr |= QPU_SET_FIELD(instr->branch.raddr_a,
                                               VC5_QPU_RADDR_A);
                break;

        default:
                break;
        }

        return true;
}
bool
v3d_qpu_instr_pack(const struct v3d_device_info *devinfo,
                   const struct v3d_qpu_instr *instr,
                   uint64_t *packed_instr)
{
        *packed_instr = 0;

        switch (instr->type) {
        case V3D_QPU_INSTR_TYPE_ALU:
                return v3d_qpu_instr_pack_alu(devinfo, instr, packed_instr);
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return v3d_qpu_instr_pack_branch(devinfo, instr, packed_instr);
        default:
                return false;
        }
}
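
/* Usage sketch (illustrative, not part of the driver): round-tripping an
 * instruction word through unpack and pack should reproduce the original
 * bits for any instruction this file understands:
 *
 *     struct v3d_qpu_instr instr;
 *     uint64_t repacked;
 *     if (v3d_qpu_instr_unpack(devinfo, inst, &instr) &&
 *         v3d_qpu_instr_pack(devinfo, &instr, &repacked))
 *             assert(repacked == inst);
 */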