1 /* Copyright (c) 2018-2019 Alyssa Rosenzweig (alyssa@rosenzweig.io)
3 * Permission is hereby granted, free of charge, to any person obtaining a copy
4 * of this software and associated documentation files (the "Software"), to deal
5 * in the Software without restriction, including without limitation the rights
6 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 * copies of the Software, and to permit persons to whom the Software is
8 * furnished to do so, subject to the following conditions:
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
22 #ifndef __MDG_HELPERS_H
23 #define __MDG_HELPERS_H
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#include "util/macros.h"
/* True iff the load/store op is a float varying load (16- or 32-bit) */
#define OP_IS_LOAD_VARY_F(op) (\
                op == midgard_op_ld_vary_16 || \
                op == midgard_op_ld_vary_32 \
        )
/* True iff the load/store op stores a varying, of any width/type */
#define OP_IS_STORE_VARY(op) (\
                op == midgard_op_st_vary_16 || \
                op == midgard_op_st_vary_32 || \
                op == midgard_op_st_vary_32u || \
                op == midgard_op_st_vary_32i \
        )
/* Store ops grouped under the r26 source convention (per the macro name and
 * the REGISTER_LDST_BASE alias below) */
#define OP_IS_STORE_R26(op) (\
                OP_IS_STORE_VARY(op) || \
                op == midgard_op_st_char || \
                op == midgard_op_st_char2 || \
                op == midgard_op_st_char4 || \
                op == midgard_op_st_short4 || \
                op == midgard_op_st_int4 \
        )
49 #define OP_IS_STORE(op) (\
/* True iff the op is a perspective-division (projection) load/store op */
#define OP_IS_PROJECTION(op) ( \
                op == midgard_op_ldst_perspective_division_z || \
                op == midgard_op_ldst_perspective_division_w \
        )
/* Ops that are only valid in full vec4 form */
#define OP_IS_VEC4_ONLY(op) ( \
                OP_IS_PROJECTION(op) || \
                op == midgard_op_ld_cubemap_coords \
        )
/* True iff the ALU op is a plain (float or integer) move */
#define OP_IS_MOVE(op) ( \
                op == midgard_alu_op_fmov || \
                op == midgard_alu_op_imov \
        )
/* True iff the load/store op reads from a uniform buffer object */
#define OP_IS_UBO_READ(op) ( \
                op == midgard_op_ld_ubo_char || \
                op == midgard_op_ld_ubo_char2 || \
                op == midgard_op_ld_ubo_char4 || \
                op == midgard_op_ld_ubo_short4 || \
                op == midgard_op_ld_ubo_int4 \
        )
/* True iff the ALU op is a conditional select (scalar or vector condition) */
#define OP_IS_CSEL(op) ( \
                op == midgard_alu_op_icsel || \
                op == midgard_alu_op_icsel_v || \
                op == midgard_alu_op_fcsel_v || \
                op == midgard_alu_op_fcsel \
        )
/* True iff the texture op computes a screen-space derivative */
#define OP_IS_DERIVATIVE(op) ( \
                op == TEXTURE_OP_DFDX || \
                op == TEXTURE_OP_DFDY \
        )
/* ALU control words are single bit fields with a lot of space */

/* Each bit enables one execution unit (or branch form) within an ALU
 * control word; see the UNIT_* aliases later in this file */

#define ALU_ENAB_VEC_MUL (1 << 17)
#define ALU_ENAB_SCAL_ADD (1 << 19)
#define ALU_ENAB_VEC_ADD (1 << 21)
#define ALU_ENAB_SCAL_MUL (1 << 23)
#define ALU_ENAB_VEC_LUT (1 << 25)
#define ALU_ENAB_BR_COMPACT (1 << 26)
#define ALU_ENAB_BRANCH (1 << 27)
/* Other opcode properties that don't conflict with the ALU_ENABs, non-ISA */

/* Denotes an opcode that takes a vector input with a fixed-number of
 * channels, but outputs to only a single output channel, like dot products.
 * For these, to determine the effective mask, this quirk can be set. We have
 * an intentional off-by-one (a la MALI_POSITIVE), since 0-channel makes no
 * sense but we need to fit 4 channels in 2-bits. Similarly, 1-channel doesn't
 * make sense (since then why are we quirked?), so that corresponds to "no
 * quirk set" */

/* Encode a channel count c (2..4) into bits [1:0], off-by-one as above */
#define OP_CHANNEL_COUNT(c) ((c - 1) << 0)
/* Decode the channel count; returns 0 when the quirk is unset */
#define GET_CHANNEL_COUNT(c) ((c & (0x3 << 0)) ? ((c & (0x3 << 0)) + 1) : 0)
/* For instructions that take a single argument, normally the first argument
 * slot is used for the argument and the second slot is a dummy #0 constant.
 * However, there are exceptions: instructions like fmov store their argument
 * in the _second_ slot and store a dummy r24 in the first slot, designated by
 * QUIRK_FLIPPED_R24 */

/* These flags share the props word with the 2-bit channel count above,
 * which occupies bits [1:0] */

#define QUIRK_FLIPPED_R24 (1 << 2)

/* Is the op commutative? */
#define OP_COMMUTES (1 << 3)

/* Does the op convert types between int- and float- space (i2f/f2u/etc) */
#define OP_TYPE_CONVERT (1 << 4)
/* Vector-independent shorthands for the above; these numbers are arbitrary and
 * not from the ISA. Convert to the above with unit_enum_to_midgard */
/* 4-bit type tags */

/* Tags identify the kind (and, for ALU, the size) of an instruction bundle.
 * The numeric suffix appears to be the bundle size in 32-bit words (see
 * quadword_size below) — TODO confirm against the ISA docs. */

#define TAG_TEXTURE_4_VTX 0x2
#define TAG_TEXTURE_4 0x3
#define TAG_LOAD_STORE_4 0x5
#define TAG_ALU_4 0x8
#define TAG_ALU_8 0x9
#define TAG_ALU_12 0xA
#define TAG_ALU_16 0xB
143 quadword_size(int tag
)
147 case TAG_LOAD_STORE_4
:
149 case TAG_TEXTURE_4_VTX
:
158 unreachable("Unknown tag");
/* True iff the tag denotes one of the four ALU bundle sizes */
#define IS_ALU(tag) (tag == TAG_ALU_4 || tag == TAG_ALU_8 || \
                tag == TAG_ALU_12 || tag == TAG_ALU_16)
/* Special register aliases */

#define MAX_WORK_REGISTERS 16

/* Uniforms begin at (REGISTER_UNIFORMS - uniform_count) */
#define REGISTER_UNIFORMS 24

#define REGISTER_UNUSED 24
#define REGISTER_CONSTANT 26
#define REGISTER_LDST_BASE 26
#define REGISTER_TEXTURE_BASE 28
#define REGISTER_SELECT 31
/* SSA helper aliases to mimic the registers. */

#define SSA_UNUSED ~0
#define SSA_FIXED_SHIFT 24
/* Encodes a physical register as an SSA index: register number (plus one, so
 * register 0 is distinguishable) in the high bits, low bit set as the
 * "fixed" marker */
#define SSA_FIXED_REGISTER(reg) (((1 + (reg)) << SSA_FIXED_SHIFT) | 1)
/* Inverse of SSA_FIXED_REGISTER: recovers the physical register number */
#define SSA_REG_FROM_FIXED(reg) ((((reg) & ~1) >> SSA_FIXED_SHIFT) - 1)
#define SSA_FIXED_MINIMUM SSA_FIXED_REGISTER(0)
/* Swizzle support */

/* A swizzle packs one 2-bit component selector per channel, channel 0 in
 * the low bits, so e.g. the identity swizzle xyzw is 0xE4 */

#define SWIZZLE(A, B, C, D) ((D << 6) | (C << 4) | (B << 2) | (A << 0))
#define SWIZZLE_FROM_ARRAY(r) SWIZZLE(r[0], r[1], r[2], r[3])
#define COMPONENT_X 0x0
#define COMPONENT_Y 0x1
#define COMPONENT_Z 0x2
#define COMPONENT_W 0x3

#define SWIZZLE_XXXX SWIZZLE(COMPONENT_X, COMPONENT_X, COMPONENT_X, COMPONENT_X)
#define SWIZZLE_XYXX SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_X, COMPONENT_X)
#define SWIZZLE_XYZX SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_X)
#define SWIZZLE_XYZW SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_W)
#define SWIZZLE_XYXZ SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_X, COMPONENT_Z)
#define SWIZZLE_XYZZ SWIZZLE(COMPONENT_X, COMPONENT_Y, COMPONENT_Z, COMPONENT_Z)
#define SWIZZLE_XXXY SWIZZLE(COMPONENT_X, COMPONENT_X, COMPONENT_X, COMPONENT_Y)
#define SWIZZLE_ZZZW SWIZZLE(COMPONENT_Z, COMPONENT_Z, COMPONENT_Z, COMPONENT_W)
#define SWIZZLE_ZWWW SWIZZLE(COMPONENT_Z, COMPONENT_W, COMPONENT_W, COMPONENT_W)
#define SWIZZLE_WWWW SWIZZLE(COMPONENT_W, COMPONENT_W, COMPONENT_W, COMPONENT_W)
206 static inline unsigned
207 swizzle_of(unsigned comp
)
219 unreachable("Invalid component count");
/* Returns a writemask with the low nr_comp channels set, e.g. 3 -> 0b0111.
 * nr_comp is expected to be small (at most 8 for the expanded masks here);
 * shifting by >= bit-width would be undefined. */

static inline unsigned
mask_of(unsigned nr_comp)
{
        return (1 << nr_comp) - 1;
}
/* There are five ALU units: VMUL, VADD, SMUL, SADD, LUT. A given opcode is
 * implemented on some subset of these units (or occasionally all of them).
 * This table encodes a bit mask of valid units for each opcode, so the
 * scheduler can figure where to plonk the instruction. */

/* Shorthands for each unit */

#define UNIT_VMUL ALU_ENAB_VEC_MUL
#define UNIT_SADD ALU_ENAB_SCAL_ADD
#define UNIT_VADD ALU_ENAB_VEC_ADD
#define UNIT_SMUL ALU_ENAB_SCAL_MUL
#define UNIT_VLUT ALU_ENAB_VEC_LUT

/* Shorthands for usual combinations of units */

#define UNITS_MUL (UNIT_VMUL | UNIT_SMUL)
#define UNITS_ADD (UNIT_VADD | UNIT_SADD)
#define UNITS_MOST (UNITS_MUL | UNITS_ADD)
#define UNITS_ALL (UNITS_MOST | UNIT_VLUT)
#define UNITS_SCALAR (UNIT_SADD | UNIT_SMUL)
#define UNITS_VECTOR (UNIT_VMUL | UNIT_VADD)
#define UNITS_ANY_VECTOR (UNITS_VECTOR | UNIT_VLUT)
/* Per-opcode properties: printable name plus a bitmask combining the
 * ALU_ENAB_* unit bits and the OP_* / QUIRK_* flags above.
 * NOTE(review): members reconstructed from a truncated source — confirm
 * against midgard_ops.c before relying on the exact layout. */
struct mir_op_props {
        const char *name;
        unsigned props;
};
261 /* This file is common, so don't define the tables themselves. #include
262 * midgard_op.h if you need that, or edit midgard_ops.c directly */
/* Duplicate bits to convert a 4-bit writemask to duplicated 8-bit format,
 * which is used for 32-bit vector units: bit i of the input controls bits
 * (2i, 2i+1) of the output */

static inline unsigned
expand_writemask_32(unsigned mask)
{
        unsigned o = 0;

        for (int i = 0; i < 4; ++i)
                if (mask & (1 << i))
                        o |= (3 << (2 * i));

        return o;
}
279 /* Coerce structs to integer */
281 static inline unsigned
282 vector_alu_srco_unsigned(midgard_vector_alu_src src
)
285 memcpy(&u
, &src
, sizeof(src
));
289 static inline midgard_vector_alu_src
290 vector_alu_from_unsigned(unsigned u
)
292 midgard_vector_alu_src s
;
293 memcpy(&s
, &u
, sizeof(s
));
/* Composes two swizzles: for each output channel c, selects channel
 * left[c] of `right`, i.e. out[c] = right[left[c]] */

static inline unsigned
pan_compose_swizzle(unsigned left, unsigned right)
{
        unsigned out = 0;

        for (unsigned c = 0; c < 4; ++c) {
                unsigned s = (left >> (2 * c)) & 0x3;
                unsigned q = (right >> (2 * s)) & 0x3;
                out |= (q << (2 * c));
        }

        return out;
}
313 /* Applies a swizzle to an ALU source */
315 static inline unsigned
316 vector_alu_apply_swizzle(unsigned src
, unsigned swizzle
)
318 midgard_vector_alu_src s
=
319 vector_alu_from_unsigned(src
);
321 s
.swizzle
= pan_compose_swizzle(s
.swizzle
, swizzle
);
323 return vector_alu_srco_unsigned(s
);
/* Checks for an xyzw.. (identity) swizzle, given a mask: every enabled
 * channel must select itself. Channels not in the mask are ignored. */

static inline bool
mir_is_simple_swizzle(unsigned swizzle, unsigned mask)
{
        for (unsigned i = 0; i < 16; ++i) {
                if (!(mask & (1 << i))) continue;

                if (((swizzle >> (2 * i)) & 0x3) != i)
                        return false;
        }

        return true;
}
341 /* Packs a load/store argument */
343 static inline uint8_t
344 midgard_ldst_reg(unsigned reg
, unsigned component
)
346 assert((reg
== REGISTER_LDST_BASE
) || (reg
== REGISTER_LDST_BASE
+ 1));
348 midgard_ldst_register_select sel
= {
349 .component
= component
,
354 memcpy(&packed
, &sel
, sizeof(packed
));
359 /* Unpacks a load/store argument */
361 static inline midgard_ldst_register_select
362 midgard_ldst_select(uint8_t u
)
364 midgard_ldst_register_select sel
;
365 memcpy(&sel
, &u
, sizeof(u
));
369 static inline uint8_t
370 midgard_ldst_pack(midgard_ldst_register_select sel
)
373 memcpy(&packed
, &sel
, sizeof(packed
));
/* Gets a swizzle like yyyy and returns y; asserts (in debug builds) that the
 * swizzle really is a splat of a single component */

static inline unsigned
swizzle_to_component(unsigned swizzle)
{
        unsigned c = swizzle & 3;
        assert(((swizzle >> 2) & 3) == c);
        assert(((swizzle >> 4) & 3) == c);
        assert(((swizzle >> 6) & 3) == c);
        return c;
}
/* Inverse of swizzle_to_component: splats a single component index into a
 * full cccc swizzle */
static inline unsigned
component_to_swizzle(unsigned c)
{
        return SWIZZLE(c, c, c, c);
}