/*
 * Copyright © 2016 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
/**
 * @file
 * Definitions of the unpacked form of QPU instructions.  Assembly and
 * disassembly will use this for talking about instructions, with qpu_encode.c
 * and qpu_decode.c handling the pack and unpack of the actual 64-bit QPU
 * instruction.
 */
38 #include "util/macros.h"
/* Forward declaration: only pointers to the device info are used in this
 * header, so the full definition is not needed here. */
struct v3d_device_info;
97 /* 6 is reserved, but note 3.2.2.8: "Result Writes" */
98 V3D_QPU_WADDR_NOP
= 6,
99 V3D_QPU_WADDR_TLB
= 7,
100 V3D_QPU_WADDR_TLBU
= 8,
101 V3D_QPU_WADDR_TMU
= 9,
102 V3D_QPU_WADDR_TMUL
= 10,
103 V3D_QPU_WADDR_TMUD
= 11,
104 V3D_QPU_WADDR_TMUA
= 12,
105 V3D_QPU_WADDR_TMUAU
= 13,
106 V3D_QPU_WADDR_VPM
= 14,
107 V3D_QPU_WADDR_VPMU
= 15,
108 V3D_QPU_WADDR_SYNC
= 16,
109 V3D_QPU_WADDR_SYNCU
= 17,
111 V3D_QPU_WADDR_RECIP
= 19,
112 V3D_QPU_WADDR_RSQRT
= 20,
113 V3D_QPU_WADDR_EXP
= 21,
114 V3D_QPU_WADDR_LOG
= 22,
115 V3D_QPU_WADDR_SIN
= 23,
116 V3D_QPU_WADDR_RSQRT2
= 24,
119 struct v3d_qpu_flags
{
120 enum v3d_qpu_cond ac
, mc
;
121 enum v3d_qpu_pf apf
, mpf
;
122 enum v3d_qpu_uf auf
, muf
;
125 enum v3d_qpu_add_op
{
200 enum v3d_qpu_mul_op
{
213 enum v3d_qpu_output_pack
{
216 * Convert to 16-bit float, put in low 16 bits of destination leaving
221 * Convert to 16-bit float, put in high 16 bits of destination leaving
227 enum v3d_qpu_input_unpack
{
229 * No-op input unpacking. Note that this enum's value doesn't match
230 * the packed QPU instruction value of the field (we use 0 so that the
231 * default on new instruction creation is no-op).
234 /** Absolute value. Only available for some operations. */
236 /** Convert low 16 bits from 16-bit float to 32-bit float. */
238 /** Convert high 16 bits from 16-bit float to 32-bit float. */
241 /** Convert to 16f and replicate it to the high bits. */
242 V3D_QPU_UNPACK_REPLICATE_32F_16
,
244 /** Replicate low 16 bits to high */
245 V3D_QPU_UNPACK_REPLICATE_L_16
,
247 /** Replicate high 16 bits to low */
248 V3D_QPU_UNPACK_REPLICATE_H_16
,
250 /** Swap high and low 16 bits */
251 V3D_QPU_UNPACK_SWAP_16
,
265 struct v3d_qpu_alu_instr
{
267 enum v3d_qpu_add_op op
;
268 enum v3d_qpu_mux a
, b
;
271 enum v3d_qpu_output_pack output_pack
;
272 enum v3d_qpu_input_unpack a_unpack
;
273 enum v3d_qpu_input_unpack b_unpack
;
277 enum v3d_qpu_mul_op op
;
278 enum v3d_qpu_mux a
, b
;
281 enum v3d_qpu_output_pack output_pack
;
282 enum v3d_qpu_input_unpack a_unpack
;
283 enum v3d_qpu_input_unpack b_unpack
;
287 enum v3d_qpu_branch_cond
{
288 V3D_QPU_BRANCH_COND_ALWAYS
,
289 V3D_QPU_BRANCH_COND_A0
,
290 V3D_QPU_BRANCH_COND_NA0
,
291 V3D_QPU_BRANCH_COND_ALLA
,
292 V3D_QPU_BRANCH_COND_ANYNA
,
293 V3D_QPU_BRANCH_COND_ANYA
,
294 V3D_QPU_BRANCH_COND_ALLNA
,
297 enum v3d_qpu_msfign
{
298 /** Ignore multisample flags when determining branch condition. */
301 * If no multisample flags are set in the lane (a pixel in the FS, a
302 * vertex in the VS), ignore the lane's condition when computing the
307 * If no multisample flags are set in a 2x2 quad in the FS, ignore the
308 * quad's a/b conditions.
313 enum v3d_qpu_branch_dest
{
314 V3D_QPU_BRANCH_DEST_ABS
,
315 V3D_QPU_BRANCH_DEST_REL
,
316 V3D_QPU_BRANCH_DEST_LINK_REG
,
317 V3D_QPU_BRANCH_DEST_REGFILE
,
320 struct v3d_qpu_branch_instr
{
321 enum v3d_qpu_branch_cond cond
;
322 enum v3d_qpu_msfign msfign
;
324 /** Selects how to compute the new IP if the branch is taken. */
325 enum v3d_qpu_branch_dest bdi
;
328 * Selects how to compute the new uniforms pointer if the branch is
329 * taken. (ABS/REL implicitly load a uniform and use that)
331 enum v3d_qpu_branch_dest bdu
;
334 * If set, then udest determines how the uniform stream will branch,
335 * otherwise the uniform stream is left as is.
344 enum v3d_qpu_instr_type
{
345 V3D_QPU_INSTR_TYPE_ALU
,
346 V3D_QPU_INSTR_TYPE_BRANCH
,
349 struct v3d_qpu_instr
{
350 enum v3d_qpu_instr_type type
;
352 struct v3d_qpu_sig sig
;
354 bool sig_magic
; /* If the signal writes to a magic address */
357 struct v3d_qpu_flags flags
;
360 struct v3d_qpu_alu_instr alu
;
361 struct v3d_qpu_branch_instr branch
;
/* Human-readable names for the unpacked-instruction enum values, used when
 * printing/disassembling instructions (see the file comment: disassembly
 * talks about instructions in this unpacked form). */
const char *v3d_qpu_magic_waddr_name(enum v3d_qpu_waddr waddr);
const char *v3d_qpu_add_op_name(enum v3d_qpu_add_op op);
const char *v3d_qpu_mul_op_name(enum v3d_qpu_mul_op op);
const char *v3d_qpu_cond_name(enum v3d_qpu_cond cond);
const char *v3d_qpu_pf_name(enum v3d_qpu_pf pf);
const char *v3d_qpu_uf_name(enum v3d_qpu_uf uf);
const char *v3d_qpu_pack_name(enum v3d_qpu_output_pack pack);
const char *v3d_qpu_unpack_name(enum v3d_qpu_input_unpack unpack);
const char *v3d_qpu_branch_cond_name(enum v3d_qpu_branch_cond cond);
const char *v3d_qpu_msfign_name(enum v3d_qpu_msfign msfign);
/* Per-opcode introspection: whether an add/mul ALU op writes a destination,
 * and how many sources it takes (per the function names; exact semantics are
 * defined by the implementations in the pack/unpack code). */
bool v3d_qpu_add_op_has_dst(enum v3d_qpu_add_op op);
bool v3d_qpu_mul_op_has_dst(enum v3d_qpu_mul_op op);
int v3d_qpu_add_op_num_src(enum v3d_qpu_add_op op);
int v3d_qpu_mul_op_num_src(enum v3d_qpu_mul_op op);
/* Packs *sig into its encoded instruction field for the target device.
 * NOTE(review): the bool return presumably reports whether the signal
 * combination is encodable on this device — confirm against the pack
 * implementation. */
bool v3d_qpu_sig_pack(const struct v3d_device_info *devinfo,
                      const struct v3d_qpu_sig *sig,
                      uint32_t *packed_sig);
384 bool v3d_qpu_sig_unpack(const struct v3d_device_info
*devinfo
,
386 struct v3d_qpu_sig
*sig
);
389 v3d_qpu_flags_pack(const struct v3d_device_info
*devinfo
,
390 const struct v3d_qpu_flags
*cond
,
391 uint32_t *packed_cond
);
393 v3d_qpu_flags_unpack(const struct v3d_device_info
*devinfo
,
394 uint32_t packed_cond
,
395 struct v3d_qpu_flags
*cond
);
398 v3d_qpu_instr_pack(const struct v3d_device_info
*devinfo
,
399 const struct v3d_qpu_instr
*instr
,
400 uint64_t *packed_instr
);
402 v3d_qpu_instr_unpack(const struct v3d_device_info
*devinfo
,
403 uint64_t packed_instr
,
404 struct v3d_qpu_instr
*instr
);
406 bool v3d_qpu_magic_waddr_is_sfu(enum v3d_qpu_waddr waddr
) ATTRIBUTE_CONST
;
407 bool v3d_qpu_magic_waddr_is_tmu(enum v3d_qpu_waddr waddr
) ATTRIBUTE_CONST
;
408 bool v3d_qpu_magic_waddr_is_tlb(enum v3d_qpu_waddr waddr
) ATTRIBUTE_CONST
;
409 bool v3d_qpu_magic_waddr_is_vpm(enum v3d_qpu_waddr waddr
) ATTRIBUTE_CONST
;
410 bool v3d_qpu_magic_waddr_is_tsy(enum v3d_qpu_waddr waddr
) ATTRIBUTE_CONST
;
411 bool v3d_qpu_writes_r3(const struct v3d_device_info
*devinfo
,
412 const struct v3d_qpu_instr
*instr
) ATTRIBUTE_CONST
;
413 bool v3d_qpu_writes_r4(const struct v3d_device_info
*devinfo
,
414 const struct v3d_qpu_instr
*instr
) ATTRIBUTE_CONST
;
415 bool v3d_qpu_writes_r5(const struct v3d_device_info
*devinfo
,
416 const struct v3d_qpu_instr
*instr
) ATTRIBUTE_CONST
;
417 bool v3d_qpu_uses_mux(const struct v3d_qpu_instr
*inst
, enum v3d_qpu_mux mux
);
418 bool v3d_qpu_sig_writes_address(const struct v3d_device_info
*devinfo
,
419 const struct v3d_qpu_sig
*sig
) ATTRIBUTE_CONST
;