/*
 * Copyright (c) 2018 Alyssa Rosenzweig (alyssa@rosenzweig.io)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
/* Some constants and macros not found in the disassembler */

/* True iff the opcode stores to a varying (16- or 32-bit variant).
 * Argument is parenthesized on expansion so expression arguments are safe. */
#define OP_IS_STORE_VARY(op) (\
                (op) == midgard_op_store_vary_16 || \
                (op) == midgard_op_store_vary_32 \
        )

/* True iff the opcode is any store (varyings or cubemap coordinates) */
#define OP_IS_STORE(op) (\
                OP_IS_STORE_VARY(op) || \
                (op) == midgard_op_store_cubemap_coords \
        )
/* ALU control words are single bit fields with a lot of space */

/* One enable bit per ALU unit; a bundle's control word ORs together the bits
 * for each unit it uses. Bit positions are fixed by the ISA encoding. */
#define ALU_ENAB_VEC_MUL  (1 << 17)
#define ALU_ENAB_SCAL_ADD  (1 << 19)
#define ALU_ENAB_VEC_ADD  (1 << 21)
#define ALU_ENAB_SCAL_MUL  (1 << 23)
#define ALU_ENAB_VEC_LUT  (1 << 25)
#define ALU_ENAB_BR_COMPACT (1 << 26)
#define ALU_ENAB_BRANCH   (1 << 27)
/* Other opcode properties that don't conflict with the ALU_ENABs, non-ISA */

/* Denotes an opcode that takes a vector input with a fixed-number of
 * channels, but outputs to only a single output channel, like dot products.
 * For these, to determine the effective mask, this quirk can be set. We have
 * an intentional off-by-one (a la MALI_POSITIVE), since 0-channel makes no
 * sense but we need to fit 4 channels in 2-bits. Similarly, 1-channel doesn't
 * make sense (since then why are we quirked?), so that corresponds to "no
 * channel count set" */

/* Arguments are parenthesized so expression arguments expand safely */
#define OP_CHANNEL_COUNT(c) (((c) - 1) << 0)
#define GET_CHANNEL_COUNT(c) (((c) & (0x3 << 0)) ? (((c) & (0x3 << 0)) + 1) : 0)

/* For instructions that take a single argument, normally the first argument
 * slot is used for the argument and the second slot is a dummy #0 constant.
 * However, there are exceptions: instructions like fmov store their argument
 * in the _second_ slot and store a dummy r24 in the first slot, designated by
 * QUIRK_FLIPPED_R24 */

#define QUIRK_FLIPPED_R24 (1 << 2)
/* Vector-independent shorthands for the above; these numbers are arbitrary and
 * not from the ISA. Convert to the above with unit_enum_to_midgard */
/* Instruction bundle tag values. NOTE(review): the numeric suffix appears to
 * denote the bundle size in words -- confirm against the ISA documentation */

#define TAG_TEXTURE_4 0x3
#define TAG_LOAD_STORE_4 0x5
#define TAG_ALU_12 0xA
#define TAG_ALU_16 0xB
/* Special register aliases */

#define MAX_WORK_REGISTERS 16

/* Uniforms begin at (REGISTER_UNIFORMS - uniform_count) */
#define REGISTER_UNIFORMS 24

#define REGISTER_UNUSED 24
#define REGISTER_CONSTANT 26
#define REGISTER_VARYING_BASE 26
#define REGISTER_OFFSET 27
#define REGISTER_TEXTURE_BASE 28
#define REGISTER_SELECT 31
/* SSA helper aliases to mimic the registers. UNUSED_0 encoded as an inline
 * constant. UNUSED_1 encoded as REGISTER_UNUSED */

#define SSA_UNUSED_0 0
#define SSA_UNUSED_1 -2

#define SSA_FIXED_SHIFT 24

/* Map a hardware register to/from the SSA index space; fixed registers live
 * above SSA_FIXED_MINIMUM with an off-by-one so register 0 is distinguishable
 * from "no register". Argument is parenthesized for expression safety. */
#define SSA_FIXED_REGISTER(reg) (((reg) + 1) << SSA_FIXED_SHIFT)
#define SSA_REG_FROM_FIXED(reg) (((reg) >> SSA_FIXED_SHIFT) - 1)
#define SSA_FIXED_MINIMUM SSA_FIXED_REGISTER(0)
/* Swizzle support */

/* Pack four 2-bit component selectors into one byte, A in the low bits.
 * Arguments are parenthesized: unparenthesized, an expression argument such
 * as (1 | 2) would bind incorrectly against the shift operators. */
#define SWIZZLE(A, B, C, D) (((D) << 6) | ((C) << 4) | ((B) << 2) | ((A) << 0))
#define SWIZZLE_FROM_ARRAY(r) SWIZZLE((r)[0], (r)[1], (r)[2], (r)[3])
#define COMPONENT_X 0x0
#define COMPONENT_Y 0x1
#define COMPONENT_Z 0x2
#define COMPONENT_W 0x3
122 /* Is this opcode that of an integer? */
124 midgard_is_integer_op(int op
)
127 case midgard_alu_op_iadd
:
128 case midgard_alu_op_ishladd
:
129 case midgard_alu_op_isub
:
130 case midgard_alu_op_imul
:
131 case midgard_alu_op_imin
:
132 case midgard_alu_op_imax
:
133 case midgard_alu_op_iasr
:
134 case midgard_alu_op_ilsr
:
135 case midgard_alu_op_ishl
:
136 case midgard_alu_op_iand
:
137 case midgard_alu_op_ior
:
138 case midgard_alu_op_inot
:
139 case midgard_alu_op_iandnot
:
140 case midgard_alu_op_ixor
:
141 case midgard_alu_op_imov
:
143 //case midgard_alu_op_f2i:
144 //case midgard_alu_op_f2u:
145 case midgard_alu_op_ieq
:
146 case midgard_alu_op_iabs
:
147 case midgard_alu_op_ine
:
148 case midgard_alu_op_ilt
:
149 case midgard_alu_op_ile
:
150 case midgard_alu_op_iball_eq
:
151 case midgard_alu_op_ibany_neq
:
153 //case midgard_alu_op_i2f:
154 //case midgard_alu_op_u2f:
155 case midgard_alu_op_icsel
:
163 /* Is this unit a branch? */
165 midgard_is_branch_unit(unsigned unit
)
167 return (unit
== ALU_ENAB_BRANCH
) || (unit
== ALU_ENAB_BR_COMPACT
);
/* There are five ALU units: VMUL, VADD, SMUL, SADD, LUT. A given opcode is
 * implemented on some subset of these units (or occasionally all of them).
 * This table encodes a bit mask of valid units for each opcode, so the
 * scheduler can figure where to plonk the instruction. */

/* Shorthands for each unit */

#define UNIT_VMUL ALU_ENAB_VEC_MUL
#define UNIT_SADD ALU_ENAB_SCAL_ADD
#define UNIT_VADD ALU_ENAB_VEC_ADD
#define UNIT_SMUL ALU_ENAB_SCAL_MUL
#define UNIT_VLUT ALU_ENAB_VEC_LUT

/* Shorthands for usual combinations of units */

#define UNITS_MUL (UNIT_VMUL | UNIT_SMUL)
#define UNITS_ADD (UNIT_VADD | UNIT_SADD)
#define UNITS_MOST (UNITS_MUL | UNITS_ADD)
#define UNITS_ALL (UNITS_MOST | UNIT_VLUT)
#define UNITS_SCALAR (UNIT_SADD | UNIT_SMUL)
#define UNITS_VECTOR (UNIT_VMUL | UNIT_VADD)
#define UNITS_ANY_VECTOR (UNITS_VECTOR | UNIT_VLUT)
192 static unsigned alu_opcode_props
[256] = {
193 [midgard_alu_op_fadd
] = UNITS_ADD
,
194 [midgard_alu_op_fmul
] = UNITS_MUL
| UNIT_VLUT
,
195 [midgard_alu_op_fmin
] = UNITS_MUL
| UNITS_ADD
,
196 [midgard_alu_op_fmax
] = UNITS_MUL
| UNITS_ADD
,
197 [midgard_alu_op_imin
] = UNITS_MOST
,
198 [midgard_alu_op_imax
] = UNITS_MOST
,
199 [midgard_alu_op_umin
] = UNITS_MOST
,
200 [midgard_alu_op_umax
] = UNITS_MOST
,
201 [midgard_alu_op_fmov
] = UNITS_ALL
| QUIRK_FLIPPED_R24
,
202 [midgard_alu_op_fround
] = UNITS_ADD
,
203 [midgard_alu_op_froundeven
] = UNITS_ADD
,
204 [midgard_alu_op_ftrunc
] = UNITS_ADD
,
205 [midgard_alu_op_ffloor
] = UNITS_ADD
,
206 [midgard_alu_op_fceil
] = UNITS_ADD
,
207 [midgard_alu_op_ffma
] = UNIT_VLUT
,
209 /* Though they output a scalar, they need to run on a vector unit
210 * since they process vectors */
211 [midgard_alu_op_fdot3
] = UNIT_VMUL
| OP_CHANNEL_COUNT(3),
212 [midgard_alu_op_fdot4
] = UNIT_VMUL
| OP_CHANNEL_COUNT(4),
214 /* Incredibly, iadd can run on vmul, etc */
215 [midgard_alu_op_iadd
] = UNITS_MOST
,
216 [midgard_alu_op_iabs
] = UNITS_MOST
,
217 [midgard_alu_op_isub
] = UNITS_MOST
,
218 [midgard_alu_op_imul
] = UNITS_MUL
,
219 [midgard_alu_op_imov
] = UNITS_MOST
| QUIRK_FLIPPED_R24
,
221 /* For vector comparisons, use ball etc */
222 [midgard_alu_op_feq
] = UNITS_MOST
,
223 [midgard_alu_op_fne
] = UNITS_MOST
,
224 [midgard_alu_op_fle
] = UNITS_MOST
,
225 [midgard_alu_op_flt
] = UNITS_MOST
,
226 [midgard_alu_op_ieq
] = UNITS_MOST
,
227 [midgard_alu_op_ine
] = UNITS_MOST
,
228 [midgard_alu_op_ilt
] = UNITS_MOST
,
229 [midgard_alu_op_ile
] = UNITS_MOST
,
230 [midgard_alu_op_ule
] = UNITS_MOST
,
231 [midgard_alu_op_ult
] = UNITS_MOST
,
233 [midgard_alu_op_icsel
] = UNITS_ADD
,
234 [midgard_alu_op_fcsel_i
] = UNITS_ADD
,
235 [midgard_alu_op_fcsel
] = UNITS_ADD
| UNIT_SMUL
,
237 [midgard_alu_op_frcp
] = UNIT_VLUT
,
238 [midgard_alu_op_frsqrt
] = UNIT_VLUT
,
239 [midgard_alu_op_fsqrt
] = UNIT_VLUT
,
240 [midgard_alu_op_fpow_pt1
] = UNIT_VLUT
,
241 [midgard_alu_op_fexp2
] = UNIT_VLUT
,
242 [midgard_alu_op_flog2
] = UNIT_VLUT
,
244 [midgard_alu_op_f2i
] = UNITS_ADD
,
245 [midgard_alu_op_f2u
] = UNITS_ADD
,
246 [midgard_alu_op_f2u8
] = UNITS_ADD
,
247 [midgard_alu_op_i2f
] = UNITS_ADD
,
248 [midgard_alu_op_u2f
] = UNITS_ADD
,
250 [midgard_alu_op_fsin
] = UNIT_VLUT
,
251 [midgard_alu_op_fcos
] = UNIT_VLUT
,
253 [midgard_alu_op_iand
] = UNITS_ADD
, /* XXX: Test case where it's right on smul but not sadd */
254 [midgard_alu_op_ior
] = UNITS_ADD
,
255 [midgard_alu_op_ixor
] = UNITS_ADD
,
256 [midgard_alu_op_ilzcnt
] = UNITS_ADD
,
257 [midgard_alu_op_ibitcount8
] = UNITS_ADD
,
258 [midgard_alu_op_inot
] = UNITS_MOST
,
259 [midgard_alu_op_ishl
] = UNITS_ADD
,
260 [midgard_alu_op_iasr
] = UNITS_ADD
,
261 [midgard_alu_op_ilsr
] = UNITS_ADD
,
262 [midgard_alu_op_ilsr
] = UNITS_ADD
,
264 [midgard_alu_op_fball_eq
] = UNITS_VECTOR
,
265 [midgard_alu_op_fbany_neq
] = UNITS_VECTOR
,
266 [midgard_alu_op_iball_eq
] = UNITS_VECTOR
,
267 [midgard_alu_op_ibany_neq
] = UNITS_VECTOR