 * Copyright (c) 2018 Alyssa Rosenzweig (alyssa@rosenzweig.io)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
/* Some constants and macros not found in the disassembler */

/* True iff the opcode is a load/store-unit store of a varying.
 * Argument parenthesized to stay safe when called with an expression. */
#define OP_IS_STORE(op) (\
                (op) == midgard_op_store_vary_16 || \
                (op) == midgard_op_store_vary_32 \
        )
/* ALU control words are single bit fields with a lot of space */

/* Enable bits for each of the five ALU units, plus the branch units.
 * Each is a distinct single bit in the ALU control word. */
#define ALU_ENAB_VEC_MUL  (1 << 17)
#define ALU_ENAB_SCAL_ADD  (1 << 19)
#define ALU_ENAB_VEC_ADD  (1 << 21)
#define ALU_ENAB_SCAL_MUL  (1 << 23)
#define ALU_ENAB_VEC_LUT  (1 << 25)
#define ALU_ENAB_BR_COMPACT (1 << 26)
#define ALU_ENAB_BRANCH   (1 << 27)
/* Other opcode properties that don't conflict with the ALU_ENABs, non-ISA */

/* Denotes an opcode that takes a vector input with a fixed-number of
 * channels, but outputs to only a single output channel, like dot products.
 * For these, to determine the effective mask, this quirk can be set. We have
 * an intentional off-by-one (a la MALI_POSITIVE), since 0-channel makes no
 * sense but we need to fit 4 channels in 2-bits. Similarly, 1-channel doesn't
 * make sense (since then why are we quirked?), so that corresponds to "no
 * channel count quirk set" */

/* Encode a channel count c (2..4) into the low 2 bits of the props word */
#define OP_CHANNEL_COUNT(c) (((c) - 1) << 0)

/* Decode: zero when the quirk is unset, else the original count (2..4) */
#define GET_CHANNEL_COUNT(c) (((c) & (0x3 << 0)) ? (((c) & (0x3 << 0)) + 1) : 0)
/* Vector-independent shorthands for the above; these numbers are arbitrary and
 * not from the ISA. Convert to the above with unit_enum_to_midgard */

/* 4-bit instruction bundle tags */
#define TAG_TEXTURE_4 0x3
#define TAG_LOAD_STORE_4 0x5
#define TAG_ALU_12 0xA
#define TAG_ALU_16 0xB
/* Special register aliases */

#define MAX_WORK_REGISTERS 16

/* Uniforms begin at (REGISTER_UNIFORMS - uniform_count) */
#define REGISTER_UNIFORMS 24

#define REGISTER_UNUSED 24
#define REGISTER_CONSTANT 26
#define REGISTER_VARYING_BASE 26
#define REGISTER_OFFSET 27
#define REGISTER_TEXTURE_BASE 28
#define REGISTER_SELECT 31

/* Special uniforms used for e.g. vertex epilogues */
#define SPECIAL_UNIFORM_BASE (1 << 24)
#define UNIFORM_VIEWPORT (SPECIAL_UNIFORM_BASE + 0)
/* SSA helper aliases to mimic the registers. UNUSED_0 encoded as an inline
 * constant. UNUSED_1 encoded as REGISTER_UNUSED */

#define SSA_UNUSED_0 0
#define SSA_UNUSED_1 -2

/* Fixed (physical) registers are distinguished from SSA indices by living in
 * the high bits; the +1 bias keeps register 0 above SSA_FIXED_MINIMUM.
 * Arguments parenthesized so expression operands expand safely. */
#define SSA_FIXED_SHIFT 24
#define SSA_FIXED_REGISTER(reg) ((1 + (reg)) << SSA_FIXED_SHIFT)
#define SSA_REG_FROM_FIXED(reg) (((reg) >> SSA_FIXED_SHIFT) - 1)
#define SSA_FIXED_MINIMUM SSA_FIXED_REGISTER(0)
/* Swizzle support */

/* Pack four 2-bit component selects into one byte, component A in the low
 * bits. Arguments parenthesized for macro hygiene. */
#define SWIZZLE(A, B, C, D) (((D) << 6) | ((C) << 4) | ((B) << 2) | ((A) << 0))
#define SWIZZLE_FROM_ARRAY(r) SWIZZLE((r)[0], (r)[1], (r)[2], (r)[3])

#define COMPONENT_X 0x0
#define COMPONENT_Y 0x1
#define COMPONENT_Z 0x2
#define COMPONENT_W 0x3
113 /* Is this opcode that of an integer? */
115 midgard_is_integer_op(int op
)
118 case midgard_alu_op_iadd
:
119 case midgard_alu_op_ishladd
:
120 case midgard_alu_op_isub
:
121 case midgard_alu_op_imul
:
122 case midgard_alu_op_imin
:
123 case midgard_alu_op_imax
:
124 case midgard_alu_op_iasr
:
125 case midgard_alu_op_ilsr
:
126 case midgard_alu_op_ishl
:
127 case midgard_alu_op_iand
:
128 case midgard_alu_op_ior
:
129 case midgard_alu_op_inot
:
130 case midgard_alu_op_iandnot
:
131 case midgard_alu_op_ixor
:
132 case midgard_alu_op_imov
:
134 //case midgard_alu_op_f2i:
135 //case midgard_alu_op_f2u:
136 case midgard_alu_op_ieq
:
137 case midgard_alu_op_ine
:
138 case midgard_alu_op_ilt
:
139 case midgard_alu_op_ile
:
140 case midgard_alu_op_iball_eq
:
141 case midgard_alu_op_ibany_neq
:
143 //case midgard_alu_op_i2f:
144 //case midgard_alu_op_u2f:
145 case midgard_alu_op_icsel
:
/* There are five ALU units: VMUL, VADD, SMUL, SADD, LUT. A given opcode is
 * implemented on some subset of these units (or occasionally all of them).
 * This table encodes a bit mask of valid units for each opcode, so the
 * scheduler can figure where to plonk the instruction. */

/* Shorthands for each unit */
#define UNIT_VMUL ALU_ENAB_VEC_MUL
#define UNIT_SADD ALU_ENAB_SCAL_ADD
#define UNIT_VADD ALU_ENAB_VEC_ADD
#define UNIT_SMUL ALU_ENAB_SCAL_MUL
#define UNIT_VLUT ALU_ENAB_VEC_LUT

/* Shorthands for usual combinations of units. LUT is intentionally excluded
 * since it's nutty. */

#define UNITS_MUL (UNIT_VMUL | UNIT_SMUL)
#define UNITS_ADD (UNIT_VADD | UNIT_SADD)
#define UNITS_ALL (UNITS_MUL | UNITS_ADD)
#define UNITS_SCALAR (UNIT_SADD | UNIT_SMUL)
#define UNITS_VECTOR (UNIT_VMUL | UNIT_VADD)
#define UNITS_ANY_VECTOR (UNITS_VECTOR | UNIT_VLUT)
175 static int alu_opcode_props
[256] = {
176 [midgard_alu_op_fadd
] = UNITS_ADD
,
177 [midgard_alu_op_fmul
] = UNITS_MUL
| UNIT_VLUT
,
178 [midgard_alu_op_fmin
] = UNITS_MUL
| UNITS_ADD
,
179 [midgard_alu_op_fmax
] = UNITS_MUL
| UNITS_ADD
,
180 [midgard_alu_op_imin
] = UNITS_ALL
,
181 [midgard_alu_op_imax
] = UNITS_ALL
,
182 [midgard_alu_op_fmov
] = UNITS_ALL
| UNIT_VLUT
,
183 [midgard_alu_op_ffloor
] = UNITS_ADD
,
184 [midgard_alu_op_fceil
] = UNITS_ADD
,
186 /* Though they output a scalar, they need to run on a vector unit
187 * since they process vectors */
188 [midgard_alu_op_fdot3
] = UNIT_VMUL
| OP_CHANNEL_COUNT(3),
189 [midgard_alu_op_fdot4
] = UNIT_VMUL
| OP_CHANNEL_COUNT(4),
191 [midgard_alu_op_iadd
] = UNITS_ADD
,
192 [midgard_alu_op_isub
] = UNITS_ADD
,
193 [midgard_alu_op_imul
] = UNITS_ALL
,
194 [midgard_alu_op_imov
] = UNITS_ALL
,
196 /* For vector comparisons, use ball etc */
197 [midgard_alu_op_feq
] = UNITS_ALL
,
198 [midgard_alu_op_fne
] = UNITS_ALL
,
199 [midgard_alu_op_flt
] = UNIT_SADD
,
200 [midgard_alu_op_ieq
] = UNITS_ALL
,
201 [midgard_alu_op_ine
] = UNITS_ALL
,
202 [midgard_alu_op_ilt
] = UNITS_ALL
,
203 [midgard_alu_op_ile
] = UNITS_ALL
,
205 [midgard_alu_op_icsel
] = UNITS_ADD
,
206 [midgard_alu_op_fcsel
] = UNITS_ADD
| UNIT_SMUL
,
208 [midgard_alu_op_frcp
] = UNIT_VLUT
,
209 [midgard_alu_op_frsqrt
] = UNIT_VLUT
,
210 [midgard_alu_op_fsqrt
] = UNIT_VLUT
,
211 [midgard_alu_op_fexp2
] = UNIT_VLUT
,
212 [midgard_alu_op_flog2
] = UNIT_VLUT
,
214 [midgard_alu_op_f2i
] = UNITS_ADD
,
215 [midgard_alu_op_f2u
] = UNITS_ADD
,
216 [midgard_alu_op_f2u8
] = UNITS_ADD
,
217 [midgard_alu_op_i2f
] = UNITS_ADD
,
218 [midgard_alu_op_u2f
] = UNITS_ADD
,
220 [midgard_alu_op_fsin
] = UNIT_VLUT
,
221 [midgard_alu_op_fcos
] = UNIT_VLUT
,
223 [midgard_alu_op_iand
] = UNITS_ADD
, /* XXX: Test case where it's right on smul but not sadd */
224 [midgard_alu_op_ior
] = UNITS_ADD
,
225 [midgard_alu_op_ixor
] = UNITS_ADD
,
226 [midgard_alu_op_inot
] = UNITS_ALL
,
227 [midgard_alu_op_ishl
] = UNITS_ADD
,
228 [midgard_alu_op_iasr
] = UNITS_ADD
,
229 [midgard_alu_op_ilsr
] = UNITS_ADD
,
230 [midgard_alu_op_ilsr
] = UNITS_ADD
,
232 [midgard_alu_op_fball_eq
] = UNITS_ALL
,
233 [midgard_alu_op_fbany_neq
] = UNITS_ALL
,
234 [midgard_alu_op_iball_eq
] = UNITS_ALL
,
235 [midgard_alu_op_ibany_neq
] = UNITS_ALL