1 /* Copyright (c) 2018-2019 Alyssa Rosenzweig (alyssa@rosenzweig.io)
3 * Permission is hereby granted, free of charge, to any person obtaining a copy
4 * of this software and associated documentation files (the "Software"), to deal
5 * in the Software without restriction, including without limitation the rights
6 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 * copies of the Software, and to permit persons to whom the Software is
8 * furnished to do so, subject to the following conditions:
10 * The above copyright notice and this permission notice shall be included in
11 * all copies or substantial portions of the Software.
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
22 #ifndef __MDG_HELPERS_H
23 #define __MDG_HELPERS_H
25 #include "util/macros.h"
/* Opcode classification helpers. Each expands to a boolean expression; the
 * argument is evaluated more than once, so pass a plain variable rather than
 * an expression with side effects. */

#define OP_IS_LOAD_VARY_F(op) ( \
                op == midgard_op_ld_vary_16 || \
                op == midgard_op_ld_vary_32 \
        )

#define OP_IS_PROJECTION(op) ( \
                op == midgard_op_ldst_perspective_division_z || \
                op == midgard_op_ldst_perspective_division_w \
        )

#define OP_IS_VEC4_ONLY(op) ( \
                OP_IS_PROJECTION(op) || \
                op == midgard_op_ld_cubemap_coords \
        )

#define OP_IS_MOVE(op) ( \
                op == midgard_alu_op_fmov || \
                op == midgard_alu_op_imov \
        )

#define OP_IS_UBO_READ(op) ( \
                op == midgard_op_ld_ubo_char || \
                op == midgard_op_ld_ubo_char2 || \
                op == midgard_op_ld_ubo_char4 || \
                op == midgard_op_ld_ubo_short4 || \
                op == midgard_op_ld_ubo_int4 \
        )

#define OP_IS_CSEL_V(op) ( \
                op == midgard_alu_op_icsel_v || \
                op == midgard_alu_op_fcsel_v \
        )

/* NOTE(review): the vector-csel term below was reconstructed from the
 * surrounding structure -- confirm against upstream midgard_ops */
#define OP_IS_CSEL(op) ( \
                OP_IS_CSEL_V(op) || \
                op == midgard_alu_op_icsel || \
                op == midgard_alu_op_fcsel \
        )

#define OP_IS_DERIVATIVE(op) ( \
                op == TEXTURE_OP_DFDX || \
                op == TEXTURE_OP_DFDY \
        )

#define OP_IS_UNSIGNED_CMP(op) ( \
                op == midgard_alu_op_ult || \
                op == midgard_alu_op_ule \
        )

#define OP_IS_INTEGER_CMP(op) ( \
                op == midgard_alu_op_ieq || \
                op == midgard_alu_op_ine || \
                op == midgard_alu_op_ilt || \
                op == midgard_alu_op_ile || \
                OP_IS_UNSIGNED_CMP(op) \
        )
/* ALU control words are single bit fields with a lot of space */

#define ALU_ENAB_VEC_MUL    (1 << 17)
#define ALU_ENAB_SCAL_ADD   (1 << 19)
#define ALU_ENAB_VEC_ADD    (1 << 21)
#define ALU_ENAB_SCAL_MUL   (1 << 23)
#define ALU_ENAB_VEC_LUT    (1 << 25)
#define ALU_ENAB_BR_COMPACT (1 << 26)
#define ALU_ENAB_BRANCH     (1 << 27)
/* Other opcode properties that don't conflict with the ALU_ENABs, non-ISA */

/* Denotes an opcode that takes a vector input with a fixed-number of
 * channels, but outputs to only a single output channel, like dot products.
 * For these, to determine the effective mask, this quirk can be set. We have
 * an intentional off-by-one (a la MALI_POSITIVE), since 0-channel makes no
 * sense but we need to fit 4 channels in 2-bits. Similarly, 1-channel doesn't
 * make sense (since then why are we quirked?), so that corresponds to "no
 * count set" */

#define OP_CHANNEL_COUNT(c) ((c - 1) << 0)
#define GET_CHANNEL_COUNT(c) ((c & (0x3 << 0)) ? ((c & (0x3 << 0)) + 1) : 0)
/* For instructions that take a single argument, normally the first argument
 * slot is used for the argument and the second slot is a dummy #0 constant.
 * However, there are exceptions: instructions like fmov store their argument
 * in the _second_ slot and store a dummy r24 in the first slot, designated by
 * QUIRK_FLIPPED_R24 */

#define QUIRK_FLIPPED_R24 (1 << 2)

/* Is the op commutative? */
#define OP_COMMUTES (1 << 3)

/* Does the op convert types between int- and float- space (i2f/f2u/etc) */
#define OP_TYPE_CONVERT (1 << 4)

/* Is this opcode the first in a f2x (rte, rtz, rtn, rtp) sequence? If so,
 * takes a roundmode argument in the IR. This has the semantic of rounding the
 * source (it's all fused in), which is why it doesn't necessarily make sense
 * for i2f (though folding there might be necessary for OpenCL reasons). Comes
 * up in format conversion, i.e. f2u_rte */
#define MIDGARD_ROUNDS (1 << 5)
/* Vector-independent shorthands for the above; these numbers are arbitrary and
 * not from the ISA. Convert to the above with unit_enum_to_midgard.
 * NOTE(review): the unit-enum #defines themselves appear to be missing from
 * this copy of the file -- restore from upstream. */

#define IS_ALU(tag) (tag >= TAG_ALU_4)
/* Special register aliases */

#define MAX_WORK_REGISTERS 16

/* Uniforms begin at (REGISTER_UNIFORMS - uniform_count) */
#define REGISTER_UNIFORMS 24

/* r24 and r25 are special registers that only exist during the pipeline,
 * by using them when we don't care about the register we skip a roundtrip
 * to the register file. */
#define REGISTER_UNUSED 24
#define REGISTER_CONSTANT 26
#define REGISTER_LDST_BASE 26
#define REGISTER_TEXTURE_BASE 28
#define REGISTER_SELECT 31
/* SSA helper aliases to mimic the registers. Fixed-register indices are
 * encoded above SSA_FIXED_SHIFT with bit 0 set as a tag, so plain SSA
 * indices and fixed registers never collide. */

#define SSA_FIXED_SHIFT 24
#define SSA_FIXED_REGISTER(reg) (((1 + (reg)) << SSA_FIXED_SHIFT) | 1)
#define SSA_REG_FROM_FIXED(reg) ((((reg) & ~1) >> SSA_FIXED_SHIFT) - 1)
#define SSA_FIXED_MINIMUM SSA_FIXED_REGISTER(0)
/* Swizzle component indices (xyzw order) */
#define COMPONENT_X 0x0
#define COMPONENT_Y 0x1
#define COMPONENT_Z 0x2
#define COMPONENT_W 0x3
/* Identity swizzles for the four sources: full 16-channel identity, and a
 * 4-channel identity with the remaining channels zeroed */

#define SWIZZLE_IDENTITY { \
        { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, \
        { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, \
        { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, \
        { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }  \
}

#define SWIZZLE_IDENTITY_4 { \
        { 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, \
        { 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, \
        { 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, \
        { 0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }  \
}
/* Mask with the low nr_comp bits set, e.g. mask_of(3) == 0b0111.
 * nr_comp must be < 32 (shift-width UB otherwise). */

static inline unsigned
mask_of(unsigned nr_comp)
{
        return (1 << nr_comp) - 1;
}
/* There are five ALU units: VMUL, VADD, SMUL, SADD, LUT. A given opcode is
 * implemented on some subset of these units (or occasionally all of them).
 * This table encodes a bit mask of valid units for each opcode, so the
 * scheduler can figure where to plonk the instruction. */

/* Shorthands for each unit */
#define UNIT_VMUL ALU_ENAB_VEC_MUL
#define UNIT_SADD ALU_ENAB_SCAL_ADD
#define UNIT_VADD ALU_ENAB_VEC_ADD
#define UNIT_SMUL ALU_ENAB_SCAL_MUL
#define UNIT_VLUT ALU_ENAB_VEC_LUT

/* Shorthands for usual combinations of units */

#define UNITS_MUL (UNIT_VMUL | UNIT_SMUL)
#define UNITS_ADD (UNIT_VADD | UNIT_SADD)
#define UNITS_MOST (UNITS_MUL | UNITS_ADD)
#define UNITS_ALL (UNITS_MOST | UNIT_VLUT)
#define UNITS_SCALAR (UNIT_SADD | UNIT_SMUL)
#define UNITS_VECTOR (UNIT_VMUL | UNIT_VADD)
#define UNITS_ANY_VECTOR (UNITS_VECTOR | UNIT_VLUT)
213 struct mir_op_props
{
220 struct mir_ldst_op_props
{
225 struct mir_tag_props
{
/* Lower 2-bits are a midgard_reg_mode */
#define GET_LDST_SIZE(c) (c & 3)

/* Store (so the primary register is a source, not a destination) */
#define LDST_STORE (1 << 2)

/* Mask has special meaning and should not be manipulated directly */
#define LDST_SPECIAL_MASK (1 << 3)

/* Non-store operation has side effects and should not be eliminated even if
 * its mask is 0 -- NOTE(review): comment tail reconstructed, confirm upstream */
#define LDST_SIDE_FX (1 << 4)

/* Computes an address according to indirects/zext/shift/etc */
#define LDST_ADDRESS (1 << 5)

/* This file is common, so don't define the tables themselves. #include
 * midgard_op.h if you need that, or edit midgard_ops.c directly */

/* Duplicate bits to convert a per-component to duplicated 8-bit format,
 * which is used for vector units */
/* Expands a per-component writemask to the duplicated 8-bit form: each set
 * channel bit becomes a run of (8 >> log2_channels) set bits at the
 * corresponding position. E.g. mask 0b0010 with log2_channels = 1 becomes
 * 0xF0. */

static inline unsigned
expand_writemask(unsigned mask, unsigned log2_channels)
{
        unsigned o = 0;

        /* Each channel occupies `factor` bits of the expanded mask */
        unsigned factor = 8 >> log2_channels;
        unsigned expanded = (1 << factor) - 1;

        for (unsigned i = 0; i < (1 << log2_channels); ++i)
                if (mask & (1 << i))
                        o |= (expanded << (factor * i));

        return o;
}
266 /* Coerce structs to integer */
268 static inline unsigned
269 vector_alu_srco_unsigned(midgard_vector_alu_src src
)
272 memcpy(&u
, &src
, sizeof(src
));
276 static inline midgard_vector_alu_src
277 vector_alu_from_unsigned(unsigned u
)
279 midgard_vector_alu_src s
;
280 memcpy(&s
, &u
, sizeof(s
));
/* Composes two 16-channel swizzles: channel c of the result is
 * right[left[c]]. A scratch buffer is used so final_out may alias either
 * input. All three arrays must have at least 16 elements and left's entries
 * must be valid indices into right. */

static inline void
mir_compose_swizzle(unsigned *left, unsigned *right, unsigned *final_out)
{
        unsigned out[16];

        for (unsigned c = 0; c < 16; ++c)
                out[c] = right[left[c]];

        memcpy(final_out, out, sizeof(out));
}
/* Checks for an xyzw.. swizzle, given a mask: returns true iff every
 * channel enabled in mask maps to itself (identity). Channels masked out
 * are ignored. NOTE(review): loop tail reconstructed from the visible
 * fragment -- confirm against upstream. */

static inline bool
mir_is_simple_swizzle(unsigned *swizzle, unsigned mask)
{
        for (unsigned i = 0; i < 16; ++i) {
                if (!(mask & (1 << i))) continue;

                if (swizzle[i] != i)
                        return false;
        }

        return true;
}
310 /* Packs a load/store argument */
312 static inline uint8_t
313 midgard_ldst_reg(unsigned reg
, unsigned component
, unsigned size
)
315 assert((reg
== REGISTER_LDST_BASE
) || (reg
== REGISTER_LDST_BASE
+ 1));
316 assert(size
== 16 || size
== 32 || size
== 64);
318 /* Shift so everything is in terms of 32-bit units */
320 assert(component
< 2);
322 } else if (size
== 16) {
323 assert((component
& 1) == 0);
327 midgard_ldst_register_select sel
= {
328 .component
= component
,
333 memcpy(&packed
, &sel
, sizeof(packed
));
339 midgard_is_branch_unit(unsigned unit
)
341 return (unit
== ALU_ENAB_BRANCH
) || (unit
== ALU_ENAB_BR_COMPACT
);
344 /* Packs ALU mod argument */
345 struct midgard_instruction
;
346 unsigned mir_pack_mod(struct midgard_instruction
*ins
, unsigned i
, bool scalar
);
349 mir_print_constant_component(FILE *fp
, const midgard_constants
*consts
,
350 unsigned c
, midgard_reg_mode reg_mode
, bool half
,
351 unsigned mod
, midgard_alu_op op
);