2 * Copyright © 2016 Broadcom
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
27 * Definitions of the unpacked form of QPU instructions. Assembly and
28 * disassembly will use this for talking about instructions, with qpu_encode.c
29 * and qpu_decode.c handling the pack and unpack of the actual 64-bit QPU
38 #include "util/macros.h"
/* Forward declaration only; the full definition lives in the v3d driver.
 * The pack/unpack entry points below take it to select per-version
 * encodings.
 */
struct v3d_device_info;
94 /* 6 is reserved, but note 3.2.2.8: "Result Writes" */
95 V3D_QPU_WADDR_NOP
= 6,
96 V3D_QPU_WADDR_TLB
= 7,
97 V3D_QPU_WADDR_TLBU
= 8,
98 V3D_QPU_WADDR_TMU
= 9,
99 V3D_QPU_WADDR_TMUL
= 10,
100 V3D_QPU_WADDR_TMUD
= 11,
101 V3D_QPU_WADDR_TMUA
= 12,
102 V3D_QPU_WADDR_TMUAU
= 13,
103 V3D_QPU_WADDR_VPM
= 14,
104 V3D_QPU_WADDR_VPMU
= 15,
105 V3D_QPU_WADDR_SYNC
= 16,
106 V3D_QPU_WADDR_SYNCU
= 17,
108 V3D_QPU_WADDR_RECIP
= 19,
109 V3D_QPU_WADDR_RSQRT
= 20,
110 V3D_QPU_WADDR_EXP
= 21,
111 V3D_QPU_WADDR_LOG
= 22,
112 V3D_QPU_WADDR_SIN
= 23,
113 V3D_QPU_WADDR_RSQRT2
= 24,
116 struct v3d_qpu_flags
{
117 enum v3d_qpu_cond ac
, mc
;
118 enum v3d_qpu_pf apf
, mpf
;
119 enum v3d_qpu_uf auf
, muf
;
122 enum v3d_qpu_add_op
{
197 enum v3d_qpu_mul_op
{
210 enum v3d_qpu_output_pack
{
213 * Convert to 16-bit float, put in low 16 bits of destination leaving
218 * Convert to 16-bit float, put in high 16 bits of destination leaving
224 enum v3d_qpu_input_unpack
{
226 * No-op input unpacking. Note that this enum's value doesn't match
227 * the packed QPU instruction value of the field (we use 0 so that the
228 * default on new instruction creation is no-op).
231 /** Absolute value. Only available for some operations. */
233 /** Convert low 16 bits from 16-bit float to 32-bit float. */
235 /** Convert high 16 bits from 16-bit float to 32-bit float. */
238 /** Convert to 16f and replicate it to the high bits. */
239 V3D_QPU_UNPACK_REPLICATE_32F_16
,
241 /** Replicate low 16 bits to high */
242 V3D_QPU_UNPACK_REPLICATE_L_16
,
244 /** Replicate high 16 bits to low */
245 V3D_QPU_UNPACK_REPLICATE_H_16
,
247 /** Swap high and low 16 bits */
248 V3D_QPU_UNPACK_SWAP_16
,
262 struct v3d_qpu_alu_instr
{
264 enum v3d_qpu_add_op op
;
265 enum v3d_qpu_mux a
, b
;
268 enum v3d_qpu_output_pack output_pack
;
269 enum v3d_qpu_input_unpack a_unpack
;
270 enum v3d_qpu_input_unpack b_unpack
;
274 enum v3d_qpu_mul_op op
;
275 enum v3d_qpu_mux a
, b
;
278 enum v3d_qpu_output_pack output_pack
;
279 enum v3d_qpu_input_unpack a_unpack
;
280 enum v3d_qpu_input_unpack b_unpack
;
/** Condition selecting whether a branch instruction is taken. */
enum v3d_qpu_branch_cond {
        V3D_QPU_BRANCH_COND_ALWAYS,
        V3D_QPU_BRANCH_COND_A0,
        V3D_QPU_BRANCH_COND_NA0,
        V3D_QPU_BRANCH_COND_ALLA,
        V3D_QPU_BRANCH_COND_ANYNA,
        V3D_QPU_BRANCH_COND_ANYA,
        V3D_QPU_BRANCH_COND_ALLNA,
};
294 enum v3d_qpu_msfign
{
295 /** Ignore multisample flags when determining branch condition. */
298 * If no multisample flags are set in the lane (a pixel in the FS, a
299 * vertex in the VS), ignore the lane's condition when computing the
304 * If no multisample flags are set in a 2x2 quad in the FS, ignore the
305 * quad's a/b conditions.
/** Source of the new instruction pointer (or uniforms pointer) for a
 *  taken branch: absolute address, IP-relative offset, the link
 *  register, or a regfile value.
 */
enum v3d_qpu_branch_dest {
        V3D_QPU_BRANCH_DEST_ABS,
        V3D_QPU_BRANCH_DEST_REL,
        V3D_QPU_BRANCH_DEST_LINK_REG,
        V3D_QPU_BRANCH_DEST_REGFILE,
};
317 struct v3d_qpu_branch_instr
{
318 enum v3d_qpu_branch_cond cond
;
319 enum v3d_qpu_msfign msfign
;
321 /** Selects how to compute the new IP if the branch is taken. */
322 enum v3d_qpu_branch_dest bdi
;
325 * Selects how to compute the new uniforms pointer if the branch is
326 * taken. (ABS/REL implicitly load a uniform and use that)
328 enum v3d_qpu_branch_dest bdu
;
331 * If set, then udest determines how the uniform stream will branch,
332 * otherwise the uniform stream is left as is.
/** Top-level instruction category: ALU op or branch. */
enum v3d_qpu_instr_type {
        V3D_QPU_INSTR_TYPE_ALU,
        V3D_QPU_INSTR_TYPE_BRANCH,
};
346 struct v3d_qpu_instr
{
347 enum v3d_qpu_instr_type type
;
349 struct v3d_qpu_sig sig
;
352 struct v3d_qpu_flags flags
;
355 struct v3d_qpu_alu_instr alu
;
356 struct v3d_qpu_branch_instr branch
;
/* Human-readable names for the enum fields above, for use by the
 * disassembler.  Each returns a static string for the given value.
 */
const char *v3d_qpu_magic_waddr_name(enum v3d_qpu_waddr waddr);
const char *v3d_qpu_add_op_name(enum v3d_qpu_add_op op);
const char *v3d_qpu_mul_op_name(enum v3d_qpu_mul_op op);
const char *v3d_qpu_cond_name(enum v3d_qpu_cond cond);
const char *v3d_qpu_pf_name(enum v3d_qpu_pf pf);
const char *v3d_qpu_uf_name(enum v3d_qpu_uf uf);
const char *v3d_qpu_pack_name(enum v3d_qpu_output_pack pack);
const char *v3d_qpu_unpack_name(enum v3d_qpu_input_unpack unpack);
const char *v3d_qpu_branch_cond_name(enum v3d_qpu_branch_cond cond);
const char *v3d_qpu_msfign_name(enum v3d_qpu_msfign msfign);

/* Per-opcode queries: whether the op writes a destination, and how many
 * source muxes it reads.
 */
bool v3d_qpu_add_op_has_dst(enum v3d_qpu_add_op op);
bool v3d_qpu_mul_op_has_dst(enum v3d_qpu_mul_op op);
int v3d_qpu_add_op_num_src(enum v3d_qpu_add_op op);
int v3d_qpu_mul_op_num_src(enum v3d_qpu_mul_op op);
/* Converters between the unpacked instruction representation and the
 * hardware bit encodings.  Each returns true on success, false when the
 * value has no valid encoding for the given device.
 */
bool v3d_qpu_sig_pack(const struct v3d_device_info *devinfo,
                      const struct v3d_qpu_sig *sig,
                      uint32_t *packed_sig);
/* NOTE(review): the packed_sig parameter was reconstructed by symmetry
 * with v3d_qpu_sig_pack() — confirm against the implementation.
 */
bool v3d_qpu_sig_unpack(const struct v3d_device_info *devinfo,
                        uint32_t packed_sig,
                        struct v3d_qpu_sig *sig);
bool
v3d_qpu_flags_pack(const struct v3d_device_info *devinfo,
                   const struct v3d_qpu_flags *cond,
                   uint32_t *packed_cond);
bool
v3d_qpu_flags_unpack(const struct v3d_device_info *devinfo,
                     uint32_t packed_cond,
                     struct v3d_qpu_flags *cond);
bool
v3d_qpu_instr_pack(const struct v3d_device_info *devinfo,
                   const struct v3d_qpu_instr *instr,
                   uint64_t *packed_instr);
bool
v3d_qpu_instr_unpack(const struct v3d_device_info *devinfo,
                     uint64_t packed_instr,
                     struct v3d_qpu_instr *instr);
401 bool v3d_qpu_magic_waddr_is_sfu(enum v3d_qpu_waddr waddr
) ATTRIBUTE_CONST
;
402 bool v3d_qpu_magic_waddr_is_tmu(enum v3d_qpu_waddr waddr
) ATTRIBUTE_CONST
;
403 bool v3d_qpu_magic_waddr_is_tlb(enum v3d_qpu_waddr waddr
) ATTRIBUTE_CONST
;
404 bool v3d_qpu_magic_waddr_is_vpm(enum v3d_qpu_waddr waddr
) ATTRIBUTE_CONST
;
405 bool v3d_qpu_magic_waddr_is_tsy(enum v3d_qpu_waddr waddr
) ATTRIBUTE_CONST
;
406 bool v3d_qpu_writes_r3(const struct v3d_qpu_instr
*instr
) ATTRIBUTE_CONST
;
407 bool v3d_qpu_writes_r4(const struct v3d_qpu_instr
*instr
) ATTRIBUTE_CONST
;
408 bool v3d_qpu_writes_r5(const struct v3d_qpu_instr
*instr
) ATTRIBUTE_CONST
;
409 bool v3d_qpu_uses_mux(const struct v3d_qpu_instr
*inst
, enum v3d_qpu_mux mux
);