# Copyright (C) 2014 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
#    Jason Ekstrand (jason@jlekstrand.net)
from __future__ import print_function

from collections import OrderedDict
from nir_opcodes import type_sizes
import struct
from math import pi

# Convenience variables
a = 'a'
b = 'b'
c = 'c'
d = 'd'
e = 'e'
# Written in the form (<search>, <replace>) where <search> is an expression
# and <replace> is either an expression or a value.  An expression is
# defined as a tuple of the form ([~]<op>, <src0>, <src1>, <src2>, <src3>)
# where each source is either an expression or a value.  A value can be
# either a numeric constant or a string representing a variable name.
#
# If the opcode in a search expression is prefixed by a '~' character, this
# indicates that the operation is inexact.  Such operations will only get
# applied to SSA values that do not have the exact bit set.  This should be
# used by any optimizations that are not bit-for-bit exact.  It should not,
# however, be used for backend-requested lowering operations as those need to
# happen regardless of precision.
#
# Variable names are specified as "[#]name[@type][(cond)][.swiz]" where:
# "#" indicates that the given variable will only match constants,
# type indicates that the given variable will only match values from ALU
#    instructions with the given output type,
# (cond) specifies an additional condition function (see nir_search_helpers.h),
# swiz is a swizzle applied to the variable (only in the <replace> expression)
#
# For constants, you have to be careful to make sure that it is the right
# type because python is unaware of the source and destination types of the
# opcodes.
#
# All expression types can have a bit-size specified.  For opcodes, this
# looks like "op@32", for variables it is "a@32" or "a@uint32" to specify a
# type and size.  In the search half of the expression this indicates that it
# should only match that particular bit-size.  In the replace half of the
# expression this indicates that the constructed value should have that
# bit-size.
#
# If the opcode in a replacement expression is prefixed by a '!' character,
# this indicates that the new expression will be marked exact.
#
# A special condition "many-comm-expr" can be used with expressions to note
# that the expression and its subexpressions have more commutative expressions
# than nir_replace_instr can handle.  If this special condition is needed with
# another condition, the two can be separated by a comma (e.g.,
# "(many-comm-expr,is_used_once)").
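#
# As a purely illustrative example of this format (this particular rewrite is
# our own and is not one of the real rules below): a rule that folds
# "b - (b - a)" into "a" for inexact 32-bit float math could be written as
#
#    (('~fsub@32', b, ('fsub', b, a)), a)
#
# using the convenience variables a and b defined above.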
# based on https://web.archive.org/web/20180105155939/http://forum.devmaster.net/t/fast-and-accurate-sine-cosine/9648
def lowered_sincos(c):
   x = ('fsub', ('fmul', 2.0, ('ffract', ('fadd', ('fmul', 0.5 / pi, a), c))), 1.0)
   x = ('fmul', ('fsub', x, ('fmul', x, ('fabs', x))), 4.0)
   return ('ffma', ('ffma', x, ('fabs', x), ('fneg', x)), 0.225, x)
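
# Note: lowered_sincos builds a search-and-replace expression tree, not a
# float; 'c' is a phase offset measured in turns.  The lower_sincos rules
# further below instantiate it with 0.5 for fsin and 0.75 for fcos.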
def intBitsToFloat(i):
   return struct.unpack('!f', struct.pack('!I', i))[0]
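
# For example (illustrative only): intBitsToFloat(0x3f800000) == 1.0, since
# struct's '!I' / '!f' formats reinterpret the 32-bit pattern as an IEEE-754
# single-precision float.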
optimizations = [

   (('imul', a, '#b@32(is_pos_power_of_two)'), ('ishl', a, ('find_lsb', b)), '!options->lower_bitops'),
   (('imul', a, '#b@32(is_neg_power_of_two)'), ('ineg', ('ishl', a, ('find_lsb', ('iabs', b)))), '!options->lower_bitops'),
   (('ishl', a, '#b@32'), ('imul', a, ('ishl', 1, b)), 'options->lower_bitops'),

   (('unpack_64_2x32_split_x', ('imul_2x32_64(is_used_once)', a, b)), ('imul', a, b)),
   (('unpack_64_2x32_split_x', ('umul_2x32_64(is_used_once)', a, b)), ('imul', a, b)),
   (('imul_2x32_64', a, b), ('pack_64_2x32_split', ('imul', a, b), ('imul_high', a, b)), 'options->lower_mul_2x32_64'),
   (('umul_2x32_64', a, b), ('pack_64_2x32_split', ('imul', a, b), ('umul_high', a, b)), 'options->lower_mul_2x32_64'),

   (('udiv', a, '#b@32(is_pos_power_of_two)'), ('ushr', a, ('find_lsb', b)), '!options->lower_bitops'),
   (('idiv', a, '#b@32(is_pos_power_of_two)'), ('imul', ('isign', a), ('ushr', ('iabs', a), ('find_lsb', b))), 'options->lower_idiv'),
   (('idiv', a, '#b@32(is_neg_power_of_two)'), ('ineg', ('imul', ('isign', a), ('ushr', ('iabs', a), ('find_lsb', ('iabs', b))))), 'options->lower_idiv'),
   (('umod', a, '#b(is_pos_power_of_two)'), ('iand', a, ('isub', b, 1))),

   (('~fneg', ('fneg', a)), a),
   (('ineg', ('ineg', a)), a),
   (('fabs', ('fneg', a)), ('fabs', a)),
   (('fabs', ('u2f', a)), ('u2f', a)),
   (('iabs', ('iabs', a)), ('iabs', a)),
   (('iabs', ('ineg', a)), ('iabs', a)),
   (('f2b', ('fneg', a)), ('f2b', a)),
   (('i2b', ('ineg', a)), ('i2b', a)),
   (('~fadd', a, 0.0), a),
   (('usadd_4x8', a, 0), a),
   (('usadd_4x8', a, ~0), ~0),
   (('~fadd', ('fmul', a, b), ('fmul', a, c)), ('fmul', a, ('fadd', b, c))),
   (('iadd', ('imul', a, b), ('imul', a, c)), ('imul', a, ('iadd', b, c))),
   (('~fadd', ('fneg', a), a), 0.0),
   (('iadd', ('ineg', a), a), 0),
   (('iadd', ('ineg', a), ('iadd', a, b)), b),
   (('iadd', a, ('iadd', ('ineg', a), b)), b),
   (('~fadd', ('fneg', a), ('fadd', a, b)), b),
   (('~fadd', a, ('fadd', ('fneg', a), b)), b),
   (('fadd', ('fsat', a), ('fsat', ('fneg', a))), ('fsat', ('fabs', a))),
   (('~fmul', a, 0.0), 0.0),
   (('umul_unorm_4x8', a, 0), 0),
   (('umul_unorm_4x8', a, ~0), a),
   (('~fmul', a, 1.0), a),
   (('fmul', a, -1.0), ('fneg', a)),
   (('imul', a, -1), ('ineg', a)),
   # If a < 0: fsign(a)*a*a => -1*a*a => -a*a => abs(a)*a
   # If a > 0: fsign(a)*a*a => 1*a*a => a*a => abs(a)*a
   # If a == 0: fsign(a)*a*a => 0*0*0 => abs(0)*0
   (('fmul', ('fsign', a), ('fmul', a, a)), ('fmul', ('fabs', a), a)),
   (('fmul', ('fmul', ('fsign', a), a), a), ('fmul', ('fabs', a), a)),
   (('~ffma', 0.0, a, b), b),
   (('~ffma', a, b, 0.0), ('fmul', a, b)),
   (('ffma', 1.0, a, b), ('fadd', a, b)),
   (('ffma', -1.0, a, b), ('fadd', ('fneg', a), b)),
   (('~flrp', a, b, 0.0), a),
   (('~flrp', a, b, 1.0), b),
   (('~flrp', a, a, b), a),
   (('~flrp', 0.0, a, b), ('fmul', a, b)),

   # flrp(a, a + b, c) => a + flrp(0, b, c) => a + (b * c)
   (('~flrp', a, ('fadd(is_used_once)', a, b), c), ('fadd', ('fmul', b, c), a)),
   (('~flrp@32', a, ('fadd', a, b), c), ('fadd', ('fmul', b, c), a), 'options->lower_flrp32'),
   (('~flrp@64', a, ('fadd', a, b), c), ('fadd', ('fmul', b, c), a), 'options->lower_flrp64'),

   (('~flrp@32', ('fadd', a, b), ('fadd', a, c), d), ('fadd', ('flrp', b, c, d), a), 'options->lower_flrp32'),
   (('~flrp@64', ('fadd', a, b), ('fadd', a, c), d), ('fadd', ('flrp', b, c, d), a), 'options->lower_flrp64'),

   (('~flrp@32', a, ('fmul(is_used_once)', a, b), c), ('fmul', ('flrp', 1.0, b, c), a), 'options->lower_flrp32'),
   (('~flrp@64', a, ('fmul(is_used_once)', a, b), c), ('fmul', ('flrp', 1.0, b, c), a), 'options->lower_flrp64'),

   (('~flrp', ('fmul(is_used_once)', a, b), ('fmul(is_used_once)', a, c), d), ('fmul', ('flrp', b, c, d), a)),

   (('~flrp', a, b, ('b2f', 'c@1')), ('bcsel', c, b, a), 'options->lower_flrp32'),
   (('~flrp', a, 0.0, c), ('fadd', ('fmul', ('fneg', a), c), a)),
   (('ftrunc', a), ('bcsel', ('flt', a, 0.0), ('fneg', ('ffloor', ('fabs', a))), ('ffloor', ('fabs', a))), 'options->lower_ftrunc'),
   (('ffloor', a), ('fsub', a, ('ffract', a)), 'options->lower_ffloor'),
   (('fadd', a, ('fneg', ('ffract', a))), ('ffloor', a), '!options->lower_ffloor'),
   (('ffract', a), ('fsub', a, ('ffloor', a)), 'options->lower_ffract'),
   (('fceil', a), ('fneg', ('ffloor', ('fneg', a))), 'options->lower_fceil'),
   (('~fadd', ('fmul', a, ('fadd', 1.0, ('fneg', ('b2f', 'c@1')))), ('fmul', b, ('b2f', c))), ('bcsel', c, b, a), 'options->lower_flrp32'),
   (('~fadd@32', ('fmul', a, ('fadd', 1.0, ('fneg', c))), ('fmul', b, c)), ('flrp', a, b, c), '!options->lower_flrp32'),
   (('~fadd@64', ('fmul', a, ('fadd', 1.0, ('fneg', c))), ('fmul', b, c)), ('flrp', a, b, c), '!options->lower_flrp64'),
   # These are the same as the previous three rules, but it depends on
   # 1-fsat(x) <=> fsat(1-x).  See below.
   (('~fadd@32', ('fmul', a, ('fsat', ('fadd', 1.0, ('fneg', c)))), ('fmul', b, ('fsat', c))), ('flrp', a, b, ('fsat', c)), '!options->lower_flrp32'),
   (('~fadd@64', ('fmul', a, ('fsat', ('fadd', 1.0, ('fneg', c)))), ('fmul', b, ('fsat', c))), ('flrp', a, b, ('fsat', c)), '!options->lower_flrp64'),

   (('~fadd', a, ('fmul', ('b2f', 'c@1'), ('fadd', b, ('fneg', a)))), ('bcsel', c, b, a), 'options->lower_flrp32'),
   (('~fadd@32', a, ('fmul', c, ('fadd', b, ('fneg', a)))), ('flrp', a, b, c), '!options->lower_flrp32'),
   (('~fadd@64', a, ('fmul', c, ('fadd', b, ('fneg', a)))), ('flrp', a, b, c), '!options->lower_flrp64'),
   (('ffma', a, b, c), ('fadd', ('fmul', a, b), c), 'options->lower_ffma'),
   (('~fadd', ('fmul', a, b), c), ('ffma', a, b, c), 'options->fuse_ffma'),

   (('~fmul', ('fadd', ('iand', ('ineg', ('b2i32', 'a@bool')), ('fmul', b, c)), '#d'), '#e'),
    ('bcsel', a, ('fmul', ('fadd', ('fmul', b, c), d), e), ('fmul', d, e))),

   (('fdph', a, b), ('fdot4', ('vec4', 'a.x', 'a.y', 'a.z', 1.0), b), 'options->lower_fdph'),

   (('fdot4', ('vec4', a, b, c, 1.0), d), ('fdph', ('vec3', a, b, c), d), '!options->lower_fdph'),
   (('fdot4', ('vec4', a, 0.0, 0.0, 0.0), b), ('fmul', a, b)),
   (('fdot4', ('vec4', a, b, 0.0, 0.0), c), ('fdot2', ('vec2', a, b), c)),
   (('fdot4', ('vec4', a, b, c, 0.0), d), ('fdot3', ('vec3', a, b, c), d)),

   (('fdot3', ('vec3', a, 0.0, 0.0), b), ('fmul', a, b)),
   (('fdot3', ('vec3', a, b, 0.0), c), ('fdot2', ('vec2', a, b), c)),

   (('fdot2', ('vec2', a, 0.0), b), ('fmul', a, b)),
   (('fdot2', a, 1.0), ('fadd', 'a.x', 'a.y')),

   # Lower fdot to fsum when it is available
   (('fdot2', a, b), ('fsum2', ('fmul', a, b)), 'options->lower_fdot'),
   (('fdot3', a, b), ('fsum3', ('fmul', a, b)), 'options->lower_fdot'),
   (('fdot4', a, b), ('fsum4', ('fmul', a, b)), 'options->lower_fdot'),
   (('fsum2', a), ('fadd', 'a.x', 'a.y'), 'options->lower_fdot'),

   # If x >= 0 and x <= 1: fsat(1 - x) == 1 - fsat(x) trivially
   # If x < 0: 1 - fsat(x) => 1 - 0 => 1 and fsat(1 - x) => fsat(> 1) => 1
   # If x > 1: 1 - fsat(x) => 1 - 1 => 0 and fsat(1 - x) => fsat(< 0) => 0
   (('~fadd', ('fneg(is_used_once)', ('fsat(is_used_once)', 'a(is_not_fmul)')), 1.0), ('fsat', ('fadd', 1.0, ('fneg', a)))),
   # 1 - ((1 - a) * (1 - b))
   # 1 - (1 - a - b + a*b)
   # 1 - 1 + a + b - a*b
   # a + b - a*b
   # a + b*(1 - a)
   # flrp(b, 1.0, a)
   (('~fadd@32', 1.0, ('fneg', ('fmul', ('fadd', 1.0, ('fneg', a)), ('fadd', 1.0, ('fneg', b))))),
    ('flrp', b, 1.0, a), '!options->lower_flrp32'),

   # (a * #b + #c) << #d
   # ((a * #b) << #d) + (#c << #d)
   # (a * (#b << #d)) + (#c << #d)
   (('ishl', ('iadd', ('imul', a, '#b'), '#c'), '#d'),
    ('iadd', ('imul', a, ('ishl', b, d)), ('ishl', c, d))),

   (('ishl', ('imul', a, '#b'), '#c'), ('imul', a, ('ishl', b, c))),
]
# Care must be taken here.  Shifts in NIR use only the lower log2(bitsize)
# bits of the second source.  These replacements must correctly handle the
# case where (b % bitsize) + (c % bitsize) >= bitsize.
for s in [8, 16, 32, 64]:
   mask = s - 1   # shift counts are taken modulo the bit size

   ishl = "ishl@{}".format(s)
   ishr = "ishr@{}".format(s)
   ushr = "ushr@{}".format(s)

   in_bounds = ('ult', ('iadd', ('iand', b, mask), ('iand', c, mask)), s)

   optimizations.extend([
       ((ishl, (ishl, a, '#b'), '#c'), ('bcsel', in_bounds, (ishl, a, ('iadd', b, c)), 0)),
       ((ushr, (ushr, a, '#b'), '#c'), ('bcsel', in_bounds, (ushr, a, ('iadd', b, c)), 0)),

       # To get -1 for large shifts of negative values, ishr must instead
       # clamp the shift count to the maximum value.
       ((ishr, (ishr, a, '#b'), '#c'),
        (ishr, a, ('imin', ('iadd', ('iand', b, mask), ('iand', c, mask)), s - 1))),
   ])
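
# For illustration only (this is not itself a rule): with s == 32 the loop
# above generates, for example,
#
#    (('ishl@32', ('ishl@32', a, '#b'), '#c'),
#     ('bcsel', in_bounds, ('ishl@32', a, ('iadd', b, c)), 0))
#
# where in_bounds checks that the two masked shift counts sum to less than 32,
# so the shifts are only combined when doing so cannot change the result.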
# Optimize a pattern of address calculation created by DXVK where the offset is
# divided by 4 and then multiplied by 4. This can be turned into an iand and the
# additions before can be reassociated to CSE the iand instruction.
for log2 in range(1, 7): # powers of two from 2 to 64
   v = 1 << log2
   mask = 0xffffffff & ~(v - 1)
   b_is_multiple = '#b(is_unsigned_multiple_of_{})'.format(v)

   optimizations.extend([
       # 'a >> #b << #b' -> 'a & ~((1 << #b) - 1)'
       (('ishl@32', ('ushr@32', a, log2), log2), ('iand', a, mask)),

       # Reassociate for improved CSE
       (('iand@32', ('iadd@32', a, b_is_multiple), mask), ('iadd', ('iand', a, mask), b)),
   ])
# To save space in the state tables, reduce to the set that is known to help.
# Previously, this was range(1, 32).  In addition, a couple rules inside the
# loop are commented out.  Revisit someday, probably after mesa/#2635 has some
# resolution.
for i in [1, 2, 16, 24]:
   lo_mask = 0xffffffff >> i
   hi_mask = (0xffffffff << i) & 0xffffffff

   optimizations.extend([
       # This pattern seems to only help in the soft-fp64 code.
       (('ishl@32', ('iand', 'a@32', lo_mask), i), ('ishl', a, i)),
#      (('ushr@32', ('iand', 'a@32', hi_mask), i), ('ushr', a, i)),
#      (('ishr@32', ('iand', 'a@32', hi_mask), i), ('ishr', a, i)),

       (('iand', ('ishl', 'a@32', i), hi_mask), ('ishl', a, i)),
       (('iand', ('ushr', 'a@32', i), lo_mask), ('ushr', a, i)),
#      (('iand', ('ishr', 'a@32', i), lo_mask), ('ushr', a, i)), # Yes, ushr is correct
   ])
optimizations.extend([
   # This is common for address calculations.  Reassociating may enable the
   # 'a<<c' to be CSE'd.  It also helps architectures that have an ISHLADD
   # instruction or a constant offset field in load / store instructions.
   (('ishl', ('iadd', a, '#b'), '#c'), ('iadd', ('ishl', a, c), ('ishl', b, c))),

   # Comparison simplifications
   (('~inot', ('flt', a, b)), ('fge', a, b)),
   (('~inot', ('fge', a, b)), ('flt', a, b)),
   (('inot', ('feq', a, b)), ('fne', a, b)),
   (('inot', ('fne', a, b)), ('feq', a, b)),
   (('inot', ('ilt', a, b)), ('ige', a, b)),
   (('inot', ('ult', a, b)), ('uge', a, b)),
   (('inot', ('ige', a, b)), ('ilt', a, b)),
   (('inot', ('uge', a, b)), ('ult', a, b)),
   (('inot', ('ieq', a, b)), ('ine', a, b)),
   (('inot', ('ine', a, b)), ('ieq', a, b)),

   (('iand', ('feq', a, b), ('fne', a, b)), False),
   (('iand', ('flt', a, b), ('flt', b, a)), False),
   (('iand', ('ieq', a, b), ('ine', a, b)), False),
   (('iand', ('ilt', a, b), ('ilt', b, a)), False),
   (('iand', ('ult', a, b), ('ult', b, a)), False),

   # This helps some shaders because, after some optimizations, they end up
   # with patterns like (-a < -b) || (b < a).  In an ideal world, this sort of
   # matching would be handled by CSE.
   (('flt', ('fneg', a), ('fneg', b)), ('flt', b, a)),
   (('fge', ('fneg', a), ('fneg', b)), ('fge', b, a)),
   (('feq', ('fneg', a), ('fneg', b)), ('feq', b, a)),
   (('fne', ('fneg', a), ('fneg', b)), ('fne', b, a)),
   (('flt', ('fneg', a), -1.0), ('flt', 1.0, a)),
   (('flt', -1.0, ('fneg', a)), ('flt', a, 1.0)),
   (('fge', ('fneg', a), -1.0), ('fge', 1.0, a)),
   (('fge', -1.0, ('fneg', a)), ('fge', a, 1.0)),
   (('fne', ('fneg', a), -1.0), ('fne', 1.0, a)),
   (('feq', -1.0, ('fneg', a)), ('feq', a, 1.0)),

   (('flt', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('flt', a, b)),
   (('flt', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('flt', b, a)),
   (('fge', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fge', a, b)),
   (('fge', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('fge', b, a)),
   (('feq', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('feq', a, b)),
   (('fne', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fne', a, b)),

   (('fge', ('fsat(is_used_once)', a), 1.0), ('fge', a, 1.0)),
   (('flt', ('fsat(is_used_once)', a), 1.0), ('flt', a, 1.0)),
   (('fge', 0.0, ('fsat(is_used_once)', a)), ('fge', 0.0, a)),
   (('flt', 0.0, ('fsat(is_used_once)', a)), ('flt', 0.0, a)),

   # b2f(a) == 0.0 because b2f(a) can only be 0 or 1
   (('fge', 0.0, ('b2f', 'a@1')), ('inot', a)),

   (('fge', ('fneg', ('b2f', 'a@1')), 0.0), ('inot', a)),

   (('fne', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('ior', a, b)),
   (('fne', ('fmax', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('ior', a, b)),
   (('fne', ('bcsel', a, 1.0, ('b2f', 'b@1')), 0.0), ('ior', a, b)),
   (('fne', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), ('ior', a, b)),
   (('fne', ('fmul', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('iand', a, b)),
   (('fne', ('fmin', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('iand', a, b)),
   (('fne', ('bcsel', a, ('b2f', 'b@1'), 0.0), 0.0), ('iand', a, b)),
   (('fne', ('fadd', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), 0.0), ('ixor', a, b)),
   (('fne', ('b2f', 'a@1'), ('b2f', 'b@1')), ('ixor', a, b)),
   (('fne', ('fneg', ('b2f', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('ixor', a, b)),
   (('feq', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('ior', a, b))),
   (('feq', ('fmax', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('ior', a, b))),
   (('feq', ('bcsel', a, 1.0, ('b2f', 'b@1')), 0.0), ('inot', ('ior', a, b))),
   (('feq', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), ('inot', ('ior', a, b))),
   (('feq', ('fmul', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('iand', a, b))),
   (('feq', ('fmin', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('iand', a, b))),
   (('feq', ('bcsel', a, ('b2f', 'b@1'), 0.0), 0.0), ('inot', ('iand', a, b))),
   (('feq', ('fadd', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), 0.0), ('ieq', a, b)),
   (('feq', ('b2f', 'a@1'), ('b2f', 'b@1')), ('ieq', a, b)),
   (('feq', ('fneg', ('b2f', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('ieq', a, b)),

   # -(b2f(a) + b2f(b)) < 0
   # 0 < b2f(a) + b2f(b)
   # 0 != b2f(a) + b2f(b)      b2f must be 0 or 1, so the sum is non-negative
   (('flt', ('fneg', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), 0.0), ('ior', a, b)),
   (('flt', 0.0, ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), ('ior', a, b)),

   # -(b2f(a) + b2f(b)) >= 0
   # 0 >= b2f(a) + b2f(b)
   # 0 == b2f(a) + b2f(b)      b2f must be 0 or 1, so the sum is non-negative
   (('fge', ('fneg', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), 0.0), ('inot', ('ior', a, b))),
   (('fge', 0.0, ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), ('inot', ('ior', a, b))),

   (('flt', a, ('fneg', a)), ('flt', a, 0.0)),
   (('fge', a, ('fneg', a)), ('fge', a, 0.0)),

   # Some optimizations (below) convert things like (a < b || a < c) into
   # (min(a, c) < b).  However, this interferes with the previous optimizations
   # that try to remove comparisons with negated sums of b2f.  This just
   # breaks that up.
   (('flt', ('fmin', c, ('fneg', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1')))), 0.0),
    ('ior', ('flt', c, 0.0), ('ior', a, b))),

   (('~flt', ('fadd', a, b), a), ('flt', b, 0.0)),
   (('~fge', ('fadd', a, b), a), ('fge', b, 0.0)),
   (('~feq', ('fadd', a, b), a), ('feq', b, 0.0)),
   (('~fne', ('fadd', a, b), a), ('fne', b, 0.0)),
   (('~flt', ('fadd(is_used_once)', a, '#b'), '#c'), ('flt', a, ('fadd', c, ('fneg', b)))),
   (('~flt', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('flt', ('fneg', ('fadd', c, b)), a)),
   (('~fge', ('fadd(is_used_once)', a, '#b'), '#c'), ('fge', a, ('fadd', c, ('fneg', b)))),
   (('~fge', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('fge', ('fneg', ('fadd', c, b)), a)),
   (('~feq', ('fadd(is_used_once)', a, '#b'), '#c'), ('feq', a, ('fadd', c, ('fneg', b)))),
   (('~feq', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('feq', ('fneg', ('fadd', c, b)), a)),
   (('~fne', ('fadd(is_used_once)', a, '#b'), '#c'), ('fne', a, ('fadd', c, ('fneg', b)))),
   (('~fne', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('fne', ('fneg', ('fadd', c, b)), a)),

   # Cannot remove the addition from ilt or ige due to overflow.
   (('ieq', ('iadd', a, b), a), ('ieq', b, 0)),
   (('ine', ('iadd', a, b), a), ('ine', b, 0)),

   # fmin(-b2f(a), b) >= 0.0
   # -b2f(a) >= 0.0 && b >= 0.0
   # -b2f(a) == 0.0 && b >= 0.0    -b2f can only be 0 or -1, never > 0
   # b2f(a) == 0.0 && b >= 0.0
   # a == False && b >= 0.0
   #
   # The fge in the second replacement is not a typo.  I leave the proof that
   # "fmin(-b2f(a), b) >= 0 <=> fmin(-b2f(a), b) == 0" as an exercise for the
   # reader.
   (('fge', ('fmin', ('fneg', ('b2f', 'a@1')), 'b@1'), 0.0), ('iand', ('inot', a), ('fge', b, 0.0))),
   (('feq', ('fmin', ('fneg', ('b2f', 'a@1')), 'b@1'), 0.0), ('iand', ('inot', a), ('fge', b, 0.0))),

   (('feq', ('b2f', 'a@1'), 0.0), ('inot', a)),
   (('~fne', ('b2f', 'a@1'), 0.0), a),
   (('ieq', ('b2i', 'a@1'), 0), ('inot', a)),
   (('ine', ('b2i', 'a@1'), 0), a),

   (('fne', ('u2f', a), 0.0), ('ine', a, 0)),
   (('feq', ('u2f', a), 0.0), ('ieq', a, 0)),
   (('fge', ('u2f', a), 0.0), True),
   (('fge', 0.0, ('u2f', a)), ('uge', 0, a)),    # ieq instead?
   (('flt', ('u2f', a), 0.0), False),
   (('flt', 0.0, ('u2f', a)), ('ult', 0, a)),    # ine instead?
   (('fne', ('i2f', a), 0.0), ('ine', a, 0)),
   (('feq', ('i2f', a), 0.0), ('ieq', a, 0)),
   (('fge', ('i2f', a), 0.0), ('ige', a, 0)),
   (('fge', 0.0, ('i2f', a)), ('ige', 0, a)),
   (('flt', ('i2f', a), 0.0), ('ilt', a, 0)),
   (('flt', 0.0, ('i2f', a)), ('ilt', 0, a)),

   # fabs(a) != 0.0 because fabs(a) must be >= 0
   (('~flt', 0.0, ('fabs', a)), ('fne', a, 0.0)),

   (('~flt', ('fneg', ('fabs', a)), 0.0), ('fne', a, 0.0)),

   # 0.0 == fabs(a) because fabs(a) must be >= 0
   (('fge', 0.0, ('fabs', a)), ('feq', a, 0.0)),

   (('fge', ('fneg', ('fabs', a)), 0.0), ('feq', a, 0.0)),

   # (a >= 0.0) && (a <= 1.0) -> fsat(a) == a
   (('iand', ('fge', a, 0.0), ('fge', 1.0, a)), ('feq', a, ('fsat', a)), '!options->lower_fsat'),

   # (a < 0.0) || (a > 1.0)
   # !(!(a < 0.0) && !(a > 1.0))
   # !((a >= 0.0) && (a <= 1.0))
   # !(a == fsat(a))
   # a != fsat(a)
   (('ior', ('flt', a, 0.0), ('flt', 1.0, a)), ('fne', a, ('fsat', a)), '!options->lower_fsat'),

   (('fmax', ('b2f(is_used_once)', 'a@1'), ('b2f', 'b@1')), ('b2f', ('ior', a, b))),
   (('fmax', ('fneg(is_used_once)', ('b2f(is_used_once)', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('fneg', ('b2f', ('ior', a, b)))),
   (('fmin', ('b2f(is_used_once)', 'a@1'), ('b2f', 'b@1')), ('b2f', ('iand', a, b))),
   (('fmin', ('fneg(is_used_once)', ('b2f(is_used_once)', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('fneg', ('b2f', ('iand', a, b)))),

   # bcsel(a, fmin(b2f(a), b), fmin(b2f(a), b))
   # bcsel(a, fmin(b2f(True), b), fmin(b2f(False), b))
   # bcsel(a, fmin(1.0, b), fmin(0.0, b))
   #
   # Since b is a constant, constant folding will eliminate the fmin and the
   # fmax.  If b is > 1.0, the bcsel will be replaced with a b2f.
   (('fmin', ('b2f', 'a@1'), '#b'), ('bcsel', a, ('fmin', b, 1.0), ('fmin', b, 0.0))),
   (('flt', ('fadd(is_used_once)', a, ('fneg', b)), 0.0), ('flt', a, b)),

   (('fge', ('fneg', ('fabs', a)), 0.0), ('feq', a, 0.0)),
   (('~bcsel', ('flt', b, a), b, a), ('fmin', a, b)),
   (('~bcsel', ('flt', a, b), b, a), ('fmax', a, b)),
   (('~bcsel', ('fge', a, b), b, a), ('fmin', a, b)),
   (('~bcsel', ('fge', b, a), b, a), ('fmax', a, b)),
   (('bcsel', ('i2b', a), b, c), ('bcsel', ('ine', a, 0), b, c)),
   (('bcsel', ('inot', a), b, c), ('bcsel', a, c, b)),
   (('bcsel', a, ('bcsel', a, b, c), d), ('bcsel', a, b, d)),
   (('bcsel', a, b, ('bcsel', a, c, d)), ('bcsel', a, b, d)),
   (('bcsel', a, ('bcsel', b, c, d), ('bcsel(is_used_once)', b, c, 'e')), ('bcsel', b, c, ('bcsel', a, d, 'e'))),
   (('bcsel', a, ('bcsel(is_used_once)', b, c, d), ('bcsel', b, c, 'e')), ('bcsel', b, c, ('bcsel', a, d, 'e'))),
   (('bcsel', a, ('bcsel', b, c, d), ('bcsel(is_used_once)', b, 'e', d)), ('bcsel', b, ('bcsel', a, c, 'e'), d)),
   (('bcsel', a, ('bcsel(is_used_once)', b, c, d), ('bcsel', b, 'e', d)), ('bcsel', b, ('bcsel', a, c, 'e'), d)),
   (('bcsel', a, True, b), ('ior', a, b)),
   (('bcsel', a, a, b), ('ior', a, b)),
   (('bcsel', a, b, False), ('iand', a, b)),
   (('bcsel', a, b, a), ('iand', a, b)),
   (('~fmin', a, a), a),
   (('~fmax', a, a), a),

   (('fmax', ('fmax', a, b), b), ('fmax', a, b)),
   (('umax', ('umax', a, b), b), ('umax', a, b)),
   (('imax', ('imax', a, b), b), ('imax', a, b)),
   (('fmin', ('fmin', a, b), b), ('fmin', a, b)),
   (('umin', ('umin', a, b), b), ('umin', a, b)),
   (('imin', ('imin', a, b), b), ('imin', a, b)),
   (('iand@32', a, ('inot', ('ishr', a, 31))), ('imax', a, 0)),

   # Simplify logic to detect sign of an integer.
   (('ieq', ('iand', 'a@32', 0x80000000), 0x00000000), ('ige', a, 0)),
   (('ine', ('iand', 'a@32', 0x80000000), 0x80000000), ('ige', a, 0)),
   (('ine', ('iand', 'a@32', 0x80000000), 0x00000000), ('ilt', a, 0)),
   (('ieq', ('iand', 'a@32', 0x80000000), 0x80000000), ('ilt', a, 0)),
   (('ine', ('ushr', 'a@32', 31), 0), ('ilt', a, 0)),
   (('ieq', ('ushr', 'a@32', 31), 0), ('ige', a, 0)),
   (('ieq', ('ushr', 'a@32', 31), 1), ('ilt', a, 0)),
   (('ine', ('ushr', 'a@32', 31), 1), ('ige', a, 0)),
   (('ine', ('ishr', 'a@32', 31), 0), ('ilt', a, 0)),
   (('ieq', ('ishr', 'a@32', 31), 0), ('ige', a, 0)),
   (('ieq', ('ishr', 'a@32', 31), -1), ('ilt', a, 0)),
   (('ine', ('ishr', 'a@32', 31), -1), ('ige', a, 0)),

   (('fmin', a, ('fneg', a)), ('fneg', ('fabs', a))),
   (('imin', a, ('ineg', a)), ('ineg', ('iabs', a))),
   (('fmin', a, ('fneg', ('fabs', a))), ('fneg', ('fabs', a))),
   (('imin', a, ('ineg', ('iabs', a))), ('ineg', ('iabs', a))),
   (('~fmin', a, ('fabs', a)), a),
   (('imin', a, ('iabs', a)), a),
   (('~fmax', a, ('fneg', ('fabs', a))), a),
   (('imax', a, ('ineg', ('iabs', a))), a),
   (('fmax', a, ('fabs', a)), ('fabs', a)),
   (('imax', a, ('iabs', a)), ('iabs', a)),
   (('fmax', a, ('fneg', a)), ('fabs', a)),
   (('imax', a, ('ineg', a)), ('iabs', a)),
   (('~fmax', ('fabs', a), 0.0), ('fabs', a)),
   (('fmin', ('fmax', a, 0.0), 1.0), ('fsat', a), '!options->lower_fsat'),
   # fmax(fmin(a, 1.0), 0.0) is inexact because it returns 1.0 on NaN, while
   # fsat(a) returns 0.0.
   (('~fmax', ('fmin', a, 1.0), 0.0), ('fsat', a), '!options->lower_fsat'),
   # fmin(fmax(a, -1.0), 0.0) is inexact because it returns -1.0 on NaN, while
   # fneg(fsat(fneg(a))) returns -0.0 on NaN.
   (('~fmin', ('fmax', a, -1.0), 0.0), ('fneg', ('fsat', ('fneg', a))), '!options->lower_fsat'),
   # fmax(fmin(a, 0.0), -1.0) is inexact because it returns 0.0 on NaN, while
   # fneg(fsat(fneg(a))) returns -0.0 on NaN.  This only matters if
   # SignedZeroInfNanPreserve is set, but we don't currently have any way of
   # representing this in the optimizations other than the usual ~.
   (('~fmax', ('fmin', a, 0.0), -1.0), ('fneg', ('fsat', ('fneg', a))), '!options->lower_fsat'),
   (('fsat', ('fsign', a)), ('b2f', ('flt', 0.0, a))),
   (('fsat', ('b2f', a)), ('b2f', a)),
   (('fsat', a), ('fmin', ('fmax', a, 0.0), 1.0), 'options->lower_fsat'),
   (('fsat', ('fsat', a)), ('fsat', a)),
   (('fsat', ('fneg(is_used_once)', ('fadd(is_used_once)', a, b))), ('fsat', ('fadd', ('fneg', a), ('fneg', b))), '!options->lower_fsat'),
   (('fsat', ('fneg(is_used_once)', ('fmul(is_used_once)', a, b))), ('fsat', ('fmul', ('fneg', a), b)), '!options->lower_fsat'),
   (('fsat', ('fabs(is_used_once)', ('fmul(is_used_once)', a, b))), ('fsat', ('fmul', ('fabs', a), ('fabs', b))), '!options->lower_fsat'),
   (('fmin', ('fmax', ('fmin', ('fmax', a, b), c), b), c), ('fmin', ('fmax', a, b), c)),
   (('imin', ('imax', ('imin', ('imax', a, b), c), b), c), ('imin', ('imax', a, b), c)),
   (('umin', ('umax', ('umin', ('umax', a, b), c), b), c), ('umin', ('umax', a, b), c)),
   # Both the left and right patterns are "b" when isnan(a), so this is exact.
   (('fmax', ('fsat', a), '#b@32(is_zero_to_one)'), ('fsat', ('fmax', a, b))),
   # The left pattern is 0.0 when isnan(a) (because fmin(fsat(NaN), b) ->
   # fmin(0.0, b)) while the right one is "b", so this optimization is inexact.
   (('~fmin', ('fsat', a), '#b@32(is_zero_to_one)'), ('fsat', ('fmin', a, b))),

   # If a in [0,b] then b-a is also in [0,b].  Since b in [0,1], max(b-a, 0) =
   # fsat(b-a).
   #
   # If a > b, then b-a < 0 and max(b-a, 0) = fsat(b-a) = 0
   #
   # This should be NaN safe since max(NaN, 0) = fsat(NaN) = 0.
   (('fmax', ('fadd(is_used_once)', ('fneg', 'a(is_not_negative)'), '#b@32(is_zero_to_one)'), 0.0),
    ('fsat', ('fadd', ('fneg', a), b)), '!options->lower_fsat'),

   (('extract_u8', ('imin', ('imax', a, 0), 0xff), 0), ('imin', ('imax', a, 0), 0xff)),
   (('~ior', ('flt(is_used_once)', a, b), ('flt', a, c)), ('flt', a, ('fmax', b, c))),
   (('~ior', ('flt(is_used_once)', a, c), ('flt', b, c)), ('flt', ('fmin', a, b), c)),
   (('~ior', ('fge(is_used_once)', a, b), ('fge', a, c)), ('fge', a, ('fmin', b, c))),
   (('~ior', ('fge(is_used_once)', a, c), ('fge', b, c)), ('fge', ('fmax', a, b), c)),
   (('~ior', ('flt', a, '#b'), ('flt', a, '#c')), ('flt', a, ('fmax', b, c))),
   (('~ior', ('flt', '#a', c), ('flt', '#b', c)), ('flt', ('fmin', a, b), c)),
   (('~ior', ('fge', a, '#b'), ('fge', a, '#c')), ('fge', a, ('fmin', b, c))),
   (('~ior', ('fge', '#a', c), ('fge', '#b', c)), ('fge', ('fmax', a, b), c)),
   (('~iand', ('flt(is_used_once)', a, b), ('flt', a, c)), ('flt', a, ('fmin', b, c))),
   (('~iand', ('flt(is_used_once)', a, c), ('flt', b, c)), ('flt', ('fmax', a, b), c)),
   (('~iand', ('fge(is_used_once)', a, b), ('fge', a, c)), ('fge', a, ('fmax', b, c))),
   (('~iand', ('fge(is_used_once)', a, c), ('fge', b, c)), ('fge', ('fmin', a, b), c)),
   (('~iand', ('flt', a, '#b'), ('flt', a, '#c')), ('flt', a, ('fmin', b, c))),
   (('~iand', ('flt', '#a', c), ('flt', '#b', c)), ('flt', ('fmax', a, b), c)),
   (('~iand', ('fge', a, '#b'), ('fge', a, '#c')), ('fge', a, ('fmax', b, c))),
   (('~iand', ('fge', '#a', c), ('fge', '#b', c)), ('fge', ('fmin', a, b), c)),

   (('ior', ('ilt(is_used_once)', a, b), ('ilt', a, c)), ('ilt', a, ('imax', b, c))),
   (('ior', ('ilt(is_used_once)', a, c), ('ilt', b, c)), ('ilt', ('imin', a, b), c)),
   (('ior', ('ige(is_used_once)', a, b), ('ige', a, c)), ('ige', a, ('imin', b, c))),
   (('ior', ('ige(is_used_once)', a, c), ('ige', b, c)), ('ige', ('imax', a, b), c)),
   (('ior', ('ult(is_used_once)', a, b), ('ult', a, c)), ('ult', a, ('umax', b, c))),
   (('ior', ('ult(is_used_once)', a, c), ('ult', b, c)), ('ult', ('umin', a, b), c)),
   (('ior', ('uge(is_used_once)', a, b), ('uge', a, c)), ('uge', a, ('umin', b, c))),
   (('ior', ('uge(is_used_once)', a, c), ('uge', b, c)), ('uge', ('umax', a, b), c)),
   (('iand', ('ilt(is_used_once)', a, b), ('ilt', a, c)), ('ilt', a, ('imin', b, c))),
   (('iand', ('ilt(is_used_once)', a, c), ('ilt', b, c)), ('ilt', ('imax', a, b), c)),
   (('iand', ('ige(is_used_once)', a, b), ('ige', a, c)), ('ige', a, ('imax', b, c))),
   (('iand', ('ige(is_used_once)', a, c), ('ige', b, c)), ('ige', ('imin', a, b), c)),
   (('iand', ('ult(is_used_once)', a, b), ('ult', a, c)), ('ult', a, ('umin', b, c))),
   (('iand', ('ult(is_used_once)', a, c), ('ult', b, c)), ('ult', ('umax', a, b), c)),
   (('iand', ('uge(is_used_once)', a, b), ('uge', a, c)), ('uge', a, ('umax', b, c))),
   (('iand', ('uge(is_used_once)', a, c), ('uge', b, c)), ('uge', ('umin', a, b), c)),

   # These derive from the previous patterns with the application of b < 0 <=>
   # 0 < -b.  The transformation should be applied if either comparison is
   # used once as this ensures that the number of comparisons will not
   # increase.  The sources to the ior and iand are not symmetric, so the
   # rules have to be duplicated to get this behavior.
   (('~ior', ('flt(is_used_once)', 0.0, 'a@32'), ('flt', 'b@32', 0.0)), ('flt', 0.0, ('fmax', a, ('fneg', b)))),
   (('~ior', ('flt', 0.0, 'a@32'), ('flt(is_used_once)', 'b@32', 0.0)), ('flt', 0.0, ('fmax', a, ('fneg', b)))),
   (('~ior', ('fge(is_used_once)', 0.0, 'a@32'), ('fge', 'b@32', 0.0)), ('fge', 0.0, ('fmin', a, ('fneg', b)))),
   (('~ior', ('fge', 0.0, 'a@32'), ('fge(is_used_once)', 'b@32', 0.0)), ('fge', 0.0, ('fmin', a, ('fneg', b)))),
   (('~iand', ('flt(is_used_once)', 0.0, 'a@32'), ('flt', 'b@32', 0.0)), ('flt', 0.0, ('fmin', a, ('fneg', b)))),
   (('~iand', ('flt', 0.0, 'a@32'), ('flt(is_used_once)', 'b@32', 0.0)), ('flt', 0.0, ('fmin', a, ('fneg', b)))),
   (('~iand', ('fge(is_used_once)', 0.0, 'a@32'), ('fge', 'b@32', 0.0)), ('fge', 0.0, ('fmax', a, ('fneg', b)))),
   (('~iand', ('fge', 0.0, 'a@32'), ('fge(is_used_once)', 'b@32', 0.0)), ('fge', 0.0, ('fmax', a, ('fneg', b)))),

   # Common pattern like 'if (i == 0 || i == 1 || ...)'
   (('ior', ('ieq', a, 0), ('ieq', a, 1)), ('uge', 1, a)),
   (('ior', ('uge', 1, a), ('ieq', a, 2)), ('uge', 2, a)),
   (('ior', ('uge', 2, a), ('ieq', a, 3)), ('uge', 3, a)),

   # The (i2f32, ...) part is an open-coded fsign.  When that is combined with
   # the bcsel, it's basically copysign(1.0, a).  There is no copysign in NIR,
   # so emit an open-coded version of that.
   (('bcsel@32', ('feq', a, 0.0), 1.0, ('i2f32', ('iadd', ('b2i32', ('flt', 0.0, 'a@32')), ('ineg', ('b2i32', ('flt', 'a@32', 0.0)))))),
    ('ior', 0x3f800000, ('iand', a, 0x80000000))),
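   # (Explanatory note: 0x3f800000 is the bit pattern of 1.0f, so OR-ing it
   # with the sign bit of 'a' produces +/-1.0 with the sign of 'a', i.e. an
   # open-coded copysign(1.0, a) as described above.)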
   (('ior', a, ('ieq', a, False)), True),
   (('ior', a, ('inot', a)), -1),

   (('ine', ('ineg', ('b2i32', 'a@1')), ('ineg', ('b2i32', 'b@1'))), ('ine', a, b)),
   (('b2i32', ('ine', 'a@1', 'b@1')), ('b2i32', ('ixor', a, b))),

   (('iand', ('ieq', 'a@32', 0), ('ieq', 'b@32', 0)), ('ieq', ('ior', a, b), 0), '!options->lower_bitops'),
   (('ior', ('ine', 'a@32', 0), ('ine', 'b@32', 0)), ('ine', ('ior', a, b), 0), '!options->lower_bitops'),

   # This pattern occurs courtesy of __flt64_nonnan in the soft-fp64 code.
   # The first part of the iand comes from the !__feq64_nonnan.
   #
   # The second pattern is a reformulation of the first based on the relation
   # (a == 0 || y == 0) <=> umin(a, y) == 0, where b in the first equation
   # happens to be y == 0.
   (('iand', ('inot', ('iand', ('ior', ('ieq', a, 0), b), c)), ('ilt', a, 0)),
    ('iand', ('inot', ('iand', b, c)), ('ilt', a, 0))),
   (('iand', ('inot', ('iand', ('ieq', ('umin', a, b), 0), c)), ('ilt', a, 0)),
    ('iand', ('inot', ('iand', ('ieq', b, 0), c)), ('ilt', a, 0))),

   # These patterns can result when (a < b || a < c) => (a < min(b, c))
   # transformations occur before constant propagation and loop-unrolling.
   (('~flt', a, ('fmax', b, a)), ('flt', a, b)),
   (('~flt', ('fmin', a, b), a), ('flt', b, a)),
   (('~fge', a, ('fmin', b, a)), True),
   (('~fge', ('fmax', a, b), a), True),
   (('~flt', a, ('fmin', b, a)), False),
   (('~flt', ('fmax', a, b), a), False),
   (('~fge', a, ('fmax', b, a)), ('fge', a, b)),
   (('~fge', ('fmin', a, b), a), ('fge', b, a)),

   (('ilt', a, ('imax', b, a)), ('ilt', a, b)),
   (('ilt', ('imin', a, b), a), ('ilt', b, a)),
   (('ige', a, ('imin', b, a)), True),
   (('ige', ('imax', a, b), a), True),
   (('ult', a, ('umax', b, a)), ('ult', a, b)),
   (('ult', ('umin', a, b), a), ('ult', b, a)),
   (('uge', a, ('umin', b, a)), True),
   (('uge', ('umax', a, b), a), True),
   (('ilt', a, ('imin', b, a)), False),
   (('ilt', ('imax', a, b), a), False),
   (('ige', a, ('imax', b, a)), ('ige', a, b)),
   (('ige', ('imin', a, b), a), ('ige', b, a)),
   (('ult', a, ('umin', b, a)), False),
   (('ult', ('umax', a, b), a), False),
   (('uge', a, ('umax', b, a)), ('uge', a, b)),
   (('uge', ('umin', a, b), a), ('uge', b, a)),
   (('ult', a, ('iand', b, a)), False),
   (('ult', ('ior', a, b), a), False),
   (('uge', a, ('iand', b, a)), True),
   (('uge', ('ior', a, b), a), True),

   (('ilt', '#a', ('imax', '#b', c)), ('ior', ('ilt', a, b), ('ilt', a, c))),
   (('ilt', ('imin', '#a', b), '#c'), ('ior', ('ilt', a, c), ('ilt', b, c))),
   (('ige', '#a', ('imin', '#b', c)), ('ior', ('ige', a, b), ('ige', a, c))),
   (('ige', ('imax', '#a', b), '#c'), ('ior', ('ige', a, c), ('ige', b, c))),
   (('ult', '#a', ('umax', '#b', c)), ('ior', ('ult', a, b), ('ult', a, c))),
   (('ult', ('umin', '#a', b), '#c'), ('ior', ('ult', a, c), ('ult', b, c))),
   (('uge', '#a', ('umin', '#b', c)), ('ior', ('uge', a, b), ('uge', a, c))),
   (('uge', ('umax', '#a', b), '#c'), ('ior', ('uge', a, c), ('uge', b, c))),
   (('ilt', '#a', ('imin', '#b', c)), ('iand', ('ilt', a, b), ('ilt', a, c))),
   (('ilt', ('imax', '#a', b), '#c'), ('iand', ('ilt', a, c), ('ilt', b, c))),
   (('ige', '#a', ('imax', '#b', c)), ('iand', ('ige', a, b), ('ige', a, c))),
   (('ige', ('imin', '#a', b), '#c'), ('iand', ('ige', a, c), ('ige', b, c))),
   (('ult', '#a', ('umin', '#b', c)), ('iand', ('ult', a, b), ('ult', a, c))),
   (('ult', ('umax', '#a', b), '#c'), ('iand', ('ult', a, c), ('ult', b, c))),
   (('uge', '#a', ('umax', '#b', c)), ('iand', ('uge', a, b), ('uge', a, c))),
   (('uge', ('umin', '#a', b), '#c'), ('iand', ('uge', a, c), ('uge', b, c))),

   # Thanks to sign extension, the ishr(a, b) is negative if and only if a is
   # negative.
   (('bcsel', ('ilt', a, 0), ('ineg', ('ishr', a, b)), ('ishr', a, b)),
    ('iabs', ('ishr', a, b))),
   (('iabs', ('ishr', ('iabs', a), b)), ('ishr', ('iabs', a), b)),

   (('fabs', ('slt', a, b)), ('slt', a, b)),
   (('fabs', ('sge', a, b)), ('sge', a, b)),
   (('fabs', ('seq', a, b)), ('seq', a, b)),
   (('fabs', ('sne', a, b)), ('sne', a, b)),
   (('slt', a, b), ('b2f', ('flt', a, b)), 'options->lower_scmp'),
   (('sge', a, b), ('b2f', ('fge', a, b)), 'options->lower_scmp'),
   (('seq', a, b), ('b2f', ('feq', a, b)), 'options->lower_scmp'),
   (('sne', a, b), ('b2f', ('fne', a, b)), 'options->lower_scmp'),
   (('seq', ('seq', a, b), 1.0), ('seq', a, b)),
   (('seq', ('sne', a, b), 1.0), ('sne', a, b)),
   (('seq', ('slt', a, b), 1.0), ('slt', a, b)),
   (('seq', ('sge', a, b), 1.0), ('sge', a, b)),
   (('sne', ('seq', a, b), 0.0), ('seq', a, b)),
   (('sne', ('sne', a, b), 0.0), ('sne', a, b)),
   (('sne', ('slt', a, b), 0.0), ('slt', a, b)),
   (('sne', ('sge', a, b), 0.0), ('sge', a, b)),
   (('seq', ('seq', a, b), 0.0), ('sne', a, b)),
   (('seq', ('sne', a, b), 0.0), ('seq', a, b)),
   (('seq', ('slt', a, b), 0.0), ('sge', a, b)),
   (('seq', ('sge', a, b), 0.0), ('slt', a, b)),
   (('sne', ('seq', a, b), 1.0), ('sne', a, b)),
   (('sne', ('sne', a, b), 1.0), ('seq', a, b)),
   (('sne', ('slt', a, b), 1.0), ('sge', a, b)),
   (('sne', ('sge', a, b), 1.0), ('slt', a, b)),
   (('fall_equal2', a, b), ('fmin', ('seq', 'a.x', 'b.x'), ('seq', 'a.y', 'b.y')), 'options->lower_vector_cmp'),
   (('fall_equal3', a, b), ('seq', ('fany_nequal3', a, b), 0.0), 'options->lower_vector_cmp'),
   (('fall_equal4', a, b), ('seq', ('fany_nequal4', a, b), 0.0), 'options->lower_vector_cmp'),
   (('fany_nequal2', a, b), ('fmax', ('sne', 'a.x', 'b.x'), ('sne', 'a.y', 'b.y')), 'options->lower_vector_cmp'),
   (('fany_nequal3', a, b), ('fsat', ('fdot3', ('sne', a, b), ('sne', a, b))), 'options->lower_vector_cmp'),
   (('fany_nequal4', a, b), ('fsat', ('fdot4', ('sne', a, b), ('sne', a, b))), 'options->lower_vector_cmp'),
   (('fne', ('fneg', a), a), ('fne', a, 0.0)),
   (('feq', ('fneg', a), a), ('feq', a, 0.0)),

   (('imul', ('b2i', 'a@1'), ('b2i', 'b@1')), ('b2i', ('iand', a, b))),
   (('fmul', ('b2f', 'a@1'), ('b2f', 'b@1')), ('b2f', ('iand', a, b))),
   (('fsat', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), ('b2f', ('ior', a, b))),
   (('iand', 'a@bool32', 1.0), ('b2f', a)),
   # True/False are ~0 and 0 in NIR.  b2i of True is 1, and -1 is ~0 (True).
   (('ineg', ('b2i32', 'a@32')), a),
   (('flt', ('fneg', ('b2f', 'a@1')), 0), a), # Generated by TGSI KILL_IF.
   # Comparison with the same args.  Note that these are not done for
   # the float versions because NaN always returns false on float
   # inequalities.
   (('ilt', a, a), False),
   (('ige', a, a), True),
   (('ieq', a, a), True),
   (('ine', a, a), False),
   (('ult', a, a), False),
   (('uge', a, a), True),

   # Logical and bit operations
   (('iand', a, ~0), a),
   (('ior', a, True), True),
   (('inot', ('inot', a)), a),
   (('ior', ('iand', a, b), b), b),
   (('ior', ('ior', a, b), b), ('ior', a, b)),
   (('iand', ('ior', a, b), b), b),
   (('iand', ('iand', a, b), b), ('iand', a, b)),

   (('iand', ('inot', a), ('inot', b)), ('inot', ('ior', a, b))),
   (('ior', ('inot', a), ('inot', b)), ('inot', ('iand', a, b))),

   # Shift optimizations
   (('ior', ('ishl@16', a, b), ('ushr@16', a, ('iadd', 16, ('ineg', b)))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ishl@16', a, b), ('ushr@16', a, ('isub', 16, b))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ishl@32', a, b), ('ushr@32', a, ('iadd', 32, ('ineg', b)))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ishl@32', a, b), ('ushr@32', a, ('isub', 32, b))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@16', a, b), ('ishl@16', a, ('iadd', 16, ('ineg', b)))), ('uror', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@16', a, b), ('ishl@16', a, ('isub', 16, b))), ('uror', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@32', a, b), ('ishl@32', a, ('iadd', 32, ('ineg', b)))), ('uror', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@32', a, b), ('ishl@32', a, ('isub', 32, b))), ('uror', a, b), '!options->lower_rotate'),
   (('urol@16', a, b), ('ior', ('ishl', a, b), ('ushr', a, ('isub', 16, b))), 'options->lower_rotate'),
   (('urol@32', a, b), ('ior', ('ishl', a, b), ('ushr', a, ('isub', 32, b))), 'options->lower_rotate'),
   (('uror@16', a, b), ('ior', ('ushr', a, b), ('ishl', a, ('isub', 16, b))), 'options->lower_rotate'),
   (('uror@32', a, b), ('ior', ('ushr', a, b), ('ishl', a, ('isub', 32, b))), 'options->lower_rotate'),

   # Exponential/logarithmic identities
   (('~fexp2', ('flog2', a)), a), # 2^lg2(a) = a
   (('~flog2', ('fexp2', a)), a), # lg2(2^a) = a
   (('fpow', a, b), ('fexp2', ('fmul', ('flog2', a), b)), 'options->lower_fpow'), # a^b = 2^(lg2(a)*b)
   (('~fexp2', ('fmul', ('flog2', a), b)), ('fpow', a, b), '!options->lower_fpow'), # 2^(lg2(a)*b) = a^b
   (('~fexp2', ('fadd', ('fmul', ('flog2', a), b), ('fmul', ('flog2', c), d))),
    ('~fmul', ('fpow', a, b), ('fpow', c, d)), '!options->lower_fpow'), # 2^(lg2(a) * b + lg2(c) * d) = a^b * c^d
   (('~fexp2', ('fmul', ('flog2', a), 0.5)), ('fsqrt', a)),
   (('~fexp2', ('fmul', ('flog2', a), 2.0)), ('fmul', a, a)),
   (('~fexp2', ('fmul', ('flog2', a), 4.0)), ('fmul', ('fmul', a, a), ('fmul', a, a))),
   (('~fpow', a, 1.0), a),
   (('~fpow', a, 2.0), ('fmul', a, a)),
   (('~fpow', a, 4.0), ('fmul', ('fmul', a, a), ('fmul', a, a))),
   (('~fpow', 2.0, a), ('fexp2', a)),
   (('~fpow', ('fpow', a, 2.2), 0.454545), a),
   (('~fpow', ('fabs', ('fpow', a, 2.2)), 0.454545), ('fabs', a)),
   (('~fsqrt', ('fexp2', a)), ('fexp2', ('fmul', 0.5, a))),
   (('~frcp', ('fexp2', a)), ('fexp2', ('fneg', a))),
   (('~frsq', ('fexp2', a)), ('fexp2', ('fmul', -0.5, a))),
   (('~flog2', ('fsqrt', a)), ('fmul', 0.5, ('flog2', a))),
   (('~flog2', ('frcp', a)), ('fneg', ('flog2', a))),
   (('~flog2', ('frsq', a)), ('fmul', -0.5, ('flog2', a))),
   (('~flog2', ('fpow', a, b)), ('fmul', b, ('flog2', a))),
   (('~fmul', ('fexp2(is_used_once)', a), ('fexp2(is_used_once)', b)), ('fexp2', ('fadd', a, b))),
   (('bcsel', ('flt', a, 0.0), 0.0, ('fsqrt', a)), ('fsqrt', ('fmax', a, 0.0))),
   (('~fmul', ('fsqrt', a), ('fsqrt', a)), ('fabs', a)),

   # Division and reciprocal
   (('~fdiv', 1.0, a), ('frcp', a)),
   (('fdiv', a, b), ('fmul', a, ('frcp', b)), 'options->lower_fdiv'),
   (('~frcp', ('frcp', a)), a),
   (('~frcp', ('fsqrt', a)), ('frsq', a)),
   (('fsqrt', a), ('frcp', ('frsq', a)), 'options->lower_fsqrt'),
   (('~frcp', ('frsq', a)), ('fsqrt', a), '!options->lower_fsqrt'),

   (('fsin', a), lowered_sincos(0.5), 'options->lower_sincos'),
   (('fcos', a), lowered_sincos(0.75), 'options->lower_sincos'),

   # Boolean simplifications
   (('i2b32(is_used_by_if)', a), ('ine32', a, 0)),
   (('i2b1(is_used_by_if)', a), ('ine', a, 0)),
   (('ieq', a, True), a),
   (('ine(is_not_used_by_if)', a, True), ('inot', a)),
   (('ine', a, False), a),
   (('ieq(is_not_used_by_if)', a, False), ('inot', 'a')),
   (('bcsel', a, True, False), a),
   (('bcsel', a, False, True), ('inot', a)),
   (('bcsel@32', a, 1.0, 0.0), ('b2f', a)),
   (('bcsel@32', a, 0.0, 1.0), ('b2f', ('inot', a))),
   (('bcsel@32', a, -1.0, -0.0), ('fneg', ('b2f', a))),
   (('bcsel@32', a, -0.0, -1.0), ('fneg', ('b2f', ('inot', a)))),
   (('bcsel', True, b, c), b),
   (('bcsel', False, b, c), c),
   (('bcsel', a, ('b2f(is_used_once)', 'b@32'), ('b2f', 'c@32')), ('b2f', ('bcsel', a, b, c))),

   (('bcsel', a, b, b), b),
   (('~fcsel', a, b, b), b),

   # D3D Boolean emulation
   (('bcsel', a, -1, 0), ('ineg', ('b2i', 'a@1'))),
   (('bcsel', a, 0, -1), ('ineg', ('b2i', ('inot', a)))),
   (('iand', ('ineg', ('b2i', 'a@1')), ('ineg', ('b2i', 'b@1'))),
    ('ineg', ('b2i', ('iand', a, b)))),
   (('ior', ('ineg', ('b2i', 'a@1')), ('ineg', ('b2i', 'b@1'))),
    ('ineg', ('b2i', ('ior', a, b)))),
   (('ieq', ('ineg', ('b2i', 'a@1')), 0), ('inot', a)),
   (('ieq', ('ineg', ('b2i', 'a@1')), -1), a),
   (('ine', ('ineg', ('b2i', 'a@1')), 0), a),
   (('ine', ('ineg', ('b2i', 'a@1')), -1), ('inot', a)),
   (('iand', ('ineg', ('b2i', a)), 1.0), ('b2f', a)),
   (('iand', ('ineg', ('b2i', a)), 1), ('b2i', a)),
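   # (Explanatory note: D3D-style shaders represent "true" as the 32-bit
   # integer -1 (all bits set) and "false" as 0, which is why the
   # ('ineg', ('b2i', ...)) idiom keeps appearing in the rules above.)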
873 # SM5 32-bit shifts are defined to use the 5 least significant bits
874 (('ishl', 'a@32', ('iand', 31, b
)), ('ishl', a
, b
)),
875 (('ishr', 'a@32', ('iand', 31, b
)), ('ishr', a
, b
)),
876 (('ushr', 'a@32', ('iand', 31, b
)), ('ushr', a
, b
)),
879 (('i2b32', ('b2i', 'a@32')), a
),
880 (('f2i', ('ftrunc', a
)), ('f2i', a
)),
881 (('f2u', ('ftrunc', a
)), ('f2u', a
)),
882 (('i2b', ('ineg', a
)), ('i2b', a
)),
883 (('i2b', ('iabs', a
)), ('i2b', a
)),
884 (('inot', ('f2b1', a
)), ('feq', a
, 0.0)),
886 # The C spec says, "If the value of the integral part cannot be represented
887 # by the integer type, the behavior is undefined." "Undefined" can mean
888 # "the conversion doesn't happen at all."
889 (('~i2f32', ('f2i32', 'a@32')), ('ftrunc', a
)),
891 # Ironically, mark these as imprecise because removing the conversions may
892 # preserve more precision than doing the conversions (e.g.,
893 # uint(float(0x81818181u)) == 0x81818200).
894 (('~f2i32', ('i2f', 'a@32')), a
),
895 (('~f2i32', ('u2f', 'a@32')), a
),
896 (('~f2u32', ('i2f', 'a@32')), a
),
897 (('~f2u32', ('u2f', 'a@32')), a
),
899 # Conversions from 16 bits to 32 bits and back can always be removed
900 (('f2f16', ('f2f32', 'a@16')), a
),
901 (('f2fmp', ('f2f32', 'a@16')), a
),
902 (('i2i16', ('i2i32', 'a@16')), a
),
903 (('i2imp', ('i2i32', 'a@16')), a
),
904 (('u2u16', ('u2u32', 'a@16')), a
),
905 (('u2ump', ('u2u32', 'a@16')), a
),
906 (('f2f16', ('b2f32', 'a@1')), ('b2f16', a
)),
907 (('f2fmp', ('b2f32', 'a@1')), ('b2f16', a
)),
908 (('i2i16', ('b2i32', 'a@1')), ('b2i16', a
)),
909 (('i2imp', ('b2i32', 'a@1')), ('b2i16', a
)),
910 (('u2u16', ('b2i32', 'a@1')), ('b2i16', a
)),
911 (('u2ump', ('b2i32', 'a@1')), ('b2i16', a
)),
912 # Conversions to 16 bits would be lossy so they should only be removed if
913 # the instruction was generated by the precision lowering pass.
914 (('f2f32', ('f2fmp', 'a@32')), a
),
915 (('i2i32', ('i2imp', 'a@32')), a
),
916 (('u2u32', ('u2ump', 'a@32')), a
),
918 (('ffloor', 'a(is_integral)'), a
),
919 (('fceil', 'a(is_integral)'), a
),
920 (('ftrunc', 'a(is_integral)'), a
),
921 # fract(x) = x - floor(x), so fract(NaN) = NaN
922 (('~ffract', 'a(is_integral)'), 0.0),
923 (('fabs', 'a(is_not_negative)'), a
),
924 (('iabs', 'a(is_not_negative)'), a
),
925 (('fsat', 'a(is_not_positive)'), 0.0),
927 # Section 5.4.1 (Conversion and Scalar Constructors) of the GLSL 4.60 spec
930 # It is undefined to convert a negative floating-point value to an
933 # Assuming that (uint)some_float behaves like (uint)(int)some_float allows
934 # some optimizations in the i965 backend to proceed.
935 (('ige', ('f2u', a
), b
), ('ige', ('f2i', a
), b
)),
936 (('ige', b
, ('f2u', a
)), ('ige', b
, ('f2i', a
))),
937 (('ilt', ('f2u', a
), b
), ('ilt', ('f2i', a
), b
)),
938 (('ilt', b
, ('f2u', a
)), ('ilt', b
, ('f2i', a
))),
940 (('~fmin', 'a(is_not_negative)', 1.0), ('fsat', a
), '!options->lower_fsat'),
942 # The result of the multiply must be in [-1, 0], so the result of the ffma
944 (('flt', ('fadd', ('fmul', ('fsat', a
), ('fneg', ('fsat', a
))), 1.0), 0.0), False),
945 (('flt', ('fadd', ('fneg', ('fmul', ('fsat', a
), ('fsat', a
))), 1.0), 0.0), False),
946 (('fmax', ('fadd', ('fmul', ('fsat', a
), ('fneg', ('fsat', a
))), 1.0), 0.0), ('fadd', ('fmul', ('fsat', a
), ('fneg', ('fsat', a
))), 1.0)),
947 (('fmax', ('fadd', ('fneg', ('fmul', ('fsat', a
), ('fsat', a
))), 1.0), 0.0), ('fadd', ('fneg', ('fmul', ('fsat', a
), ('fsat', a
))), 1.0)),
949 (('fne', 'a(is_not_zero)', 0.0), True),
950 (('feq', 'a(is_not_zero)', 0.0), False),
952 # In this chart, + means value > 0 and - means value < 0.
954 # + >= + -> unknown 0 >= + -> false - >= + -> false
955 # + >= 0 -> true 0 >= 0 -> true - >= 0 -> false
956 # + >= - -> true 0 >= - -> true - >= - -> unknown
958 # Using grouping conceptually similar to a Karnaugh map...
960 # (+ >= 0, + >= -, 0 >= 0, 0 >= -) == (is_not_negative >= is_not_positive) -> true
961 # (0 >= +, - >= +) == (is_not_positive >= gt_zero) -> false
962 # (- >= +, - >= 0) == (lt_zero >= is_not_negative) -> false
964 # The flt / ilt cases just invert the expected result.
966 # The results expecting true, must be marked imprecise. The results
967 # expecting false are fine because NaN compared >= or < anything is false.
969 (('~fge', 'a(is_not_negative)', 'b(is_not_positive)'), True),
970 (('fge', 'a(is_not_positive)', 'b(is_gt_zero)'), False),
971 (('fge', 'a(is_lt_zero)', 'b(is_not_negative)'), False),
973 (('flt', 'a(is_not_negative)', 'b(is_not_positive)'), False),
974 (('~flt', 'a(is_not_positive)', 'b(is_gt_zero)'), True),
975 (('~flt', 'a(is_lt_zero)', 'b(is_not_negative)'), True),
977 (('ine', 'a(is_not_zero)', 0), True),
978 (('ieq', 'a(is_not_zero)', 0), False),
980 (('ige', 'a(is_not_negative)', 'b(is_not_positive)'), True),
981 (('ige', 'a(is_not_positive)', 'b(is_gt_zero)'), False),
982 (('ige', 'a(is_lt_zero)', 'b(is_not_negative)'), False),
984 (('ilt', 'a(is_not_negative)', 'b(is_not_positive)'), False),
985 (('ilt', 'a(is_not_positive)', 'b(is_gt_zero)'), True),
986 (('ilt', 'a(is_lt_zero)', 'b(is_not_negative)'), True),
988 (('ult', 0, 'a(is_gt_zero)'), True),
989 (('ult', a
, 0), False),
991 # Packing and then unpacking does nothing
992 (('unpack_64_2x32_split_x', ('pack_64_2x32_split', a
, b
)), a
),
993 (('unpack_64_2x32_split_y', ('pack_64_2x32_split', a
, b
)), b
),
994 (('unpack_64_2x32', ('pack_64_2x32_split', a
, b
)), ('vec2', a
, b
)),
   (('unpack_64_2x32', ('pack_64_2x32', a)), a),
   (('pack_64_2x32_split', ('unpack_64_2x32_split_x', a),
                           ('unpack_64_2x32_split_y', a)), a),
   (('pack_64_2x32', ('vec2', ('unpack_64_2x32_split_x', a),
                              ('unpack_64_2x32_split_y', a))), a),
   (('pack_64_2x32', ('unpack_64_2x32', a)), a),

   # Comparing two halves of an unpack separately.  While this optimization
   # should be correct for non-constant values, it's less obvious that it's
   # useful in that case.  For constant values, the pack will fold and we're
   # guaranteed to reduce the whole tree to one instruction.
   (('iand', ('ieq', ('unpack_32_2x16_split_x', a), '#b'),
             ('ieq', ('unpack_32_2x16_split_y', a), '#c')),
    ('ieq', a, ('pack_32_2x16_split', b, c))),

   (('ushr', 'a@16',  8), ('extract_u8', a, 1), '!options->lower_extract_byte'),
   (('ushr', 'a@32', 24), ('extract_u8', a, 3), '!options->lower_extract_byte'),
   (('ushr', 'a@64', 56), ('extract_u8', a, 7), '!options->lower_extract_byte'),
   (('ishr', 'a@16',  8), ('extract_i8', a, 1), '!options->lower_extract_byte'),
   (('ishr', 'a@32', 24), ('extract_i8', a, 3), '!options->lower_extract_byte'),
   (('ishr', 'a@64', 56), ('extract_i8', a, 7), '!options->lower_extract_byte'),
   (('iand', 0xff, a), ('extract_u8', a, 0), '!options->lower_extract_byte'),

   (('ubfe', a,  0, 8), ('extract_u8', a, 0), '!options->lower_extract_byte'),
   (('ubfe', a,  8, 8), ('extract_u8', a, 1), '!options->lower_extract_byte'),
   (('ubfe', a, 16, 8), ('extract_u8', a, 2), '!options->lower_extract_byte'),
   (('ubfe', a, 24, 8), ('extract_u8', a, 3), '!options->lower_extract_byte'),
   (('ibfe', a,  0, 8), ('extract_i8', a, 0), '!options->lower_extract_byte'),
   (('ibfe', a,  8, 8), ('extract_i8', a, 1), '!options->lower_extract_byte'),
   (('ibfe', a, 16, 8), ('extract_i8', a, 2), '!options->lower_extract_byte'),
   (('ibfe', a, 24, 8), ('extract_i8', a, 3), '!options->lower_extract_byte'),

   (('ushr', ('ishl', 'a@32', 16), 16), ('extract_u16', a, 0), '!options->lower_extract_word'),
   (('ushr', 'a@32', 16), ('extract_u16', a, 1), '!options->lower_extract_word'),
   (('ishr', ('ishl', 'a@32', 16), 16), ('extract_i16', a, 0), '!options->lower_extract_word'),
   (('ishr', 'a@32', 16), ('extract_i16', a, 1), '!options->lower_extract_word'),
   (('iand', 0xffff, a), ('extract_u16', a, 0), '!options->lower_extract_word'),

   (('ubfe', a,  0, 16), ('extract_u16', a, 0), '!options->lower_extract_word'),
   (('ubfe', a, 16, 16), ('extract_u16', a, 1), '!options->lower_extract_word'),
   (('ibfe', a,  0, 16), ('extract_i16', a, 0), '!options->lower_extract_word'),
   (('ibfe', a, 16, 16), ('extract_i16', a, 1), '!options->lower_extract_word'),

   # Useless masking before unpacking
   (('unpack_half_2x16_split_x', ('iand', a, 0xffff)), ('unpack_half_2x16_split_x', a)),
   (('unpack_32_2x16_split_x', ('iand', a, 0xffff)), ('unpack_32_2x16_split_x', a)),
   (('unpack_64_2x32_split_x', ('iand', a, 0xffffffff)), ('unpack_64_2x32_split_x', a)),
   (('unpack_half_2x16_split_y', ('iand', a, 0xffff0000)), ('unpack_half_2x16_split_y', a)),
   (('unpack_32_2x16_split_y', ('iand', a, 0xffff0000)), ('unpack_32_2x16_split_y', a)),
   (('unpack_64_2x32_split_y', ('iand', a, 0xffffffff00000000)), ('unpack_64_2x32_split_y', a)),

   (('unpack_half_2x16_split_x', ('extract_u16', a, 0)), ('unpack_half_2x16_split_x', a)),
   (('unpack_half_2x16_split_x', ('extract_u16', a, 1)), ('unpack_half_2x16_split_y', a)),
   (('unpack_32_2x16_split_x', ('extract_u16', a, 0)), ('unpack_32_2x16_split_x', a)),
   (('unpack_32_2x16_split_x', ('extract_u16', a, 1)), ('unpack_32_2x16_split_y', a)),

   # Optimize half packing
   (('ishl', ('pack_half_2x16', ('vec2', a, 0)), 16), ('pack_half_2x16', ('vec2', 0, a))),
   (('ushr', ('pack_half_2x16', ('vec2', 0, a)), 16), ('pack_half_2x16', ('vec2', a, 0))),

   (('iadd', ('pack_half_2x16', ('vec2', a, 0)), ('pack_half_2x16', ('vec2', 0, b))),
    ('pack_half_2x16', ('vec2', a, b))),
   (('ior',  ('pack_half_2x16', ('vec2', a, 0)), ('pack_half_2x16', ('vec2', 0, b))),
    ('pack_half_2x16', ('vec2', a, b))),

   (('ishl', ('pack_half_2x16_split', a, 0), 16), ('pack_half_2x16_split', 0, a)),
   (('ushr', ('pack_half_2x16_split', 0, a), 16), ('pack_half_2x16_split', a, 0)),
   (('extract_u16', ('pack_half_2x16_split', 0, a), 1), ('pack_half_2x16_split', a, 0)),

   (('iadd', ('pack_half_2x16_split', a, 0), ('pack_half_2x16_split', 0, b)), ('pack_half_2x16_split', a, b)),
   (('ior',  ('pack_half_2x16_split', a, 0), ('pack_half_2x16_split', 0, b)), ('pack_half_2x16_split', a, b)),
]
# After the ('extract_u8', a, 0) pattern, above, triggers, there will be
# patterns like those below.
for op in ('ushr', 'ishr'):
   optimizations.extend([(('extract_u8', (op, 'a@16', 8), 0), ('extract_u8', a, 1))])
   optimizations.extend([(('extract_u8', (op, 'a@32', 8 * i), 0), ('extract_u8', a, i)) for i in range(1, 4)])
   optimizations.extend([(('extract_u8', (op, 'a@64', 8 * i), 0), ('extract_u8', a, i)) for i in range(1, 8)])

optimizations.extend([(('extract_u8', ('extract_u16', a, 1), 0), ('extract_u8', a, 2))])

# After the ('extract_[iu]8', a, 3) patterns, above, trigger, there will be
# patterns like those below.
for op in ('extract_u8', 'extract_i8'):
   optimizations.extend([((op, ('ishl', 'a@16',          8), 1), (op, a, 0))])
   optimizations.extend([((op, ('ishl', 'a@32', 24 - 8 * i), 3), (op, a, i)) for i in range(2, -1, -1)])
   optimizations.extend([((op, ('ishl', 'a@64', 56 - 8 * i), 7), (op, a, i)) for i in range(6, -1, -1)])
optimizations.extend([
   (('ussub_4x8', a, 0), a),
   (('ussub_4x8', a, ~0), 0),
   # Lower all Subtractions first - they can get recombined later
   (('fsub', a, b), ('fadd', a, ('fneg', b))),
   (('isub', a, b), ('iadd', a, ('ineg', b))),
   (('uabs_usub', a, b), ('bcsel', ('ult', a, b), ('ineg', ('isub', a, b)), ('isub', a, b))),
   # This is correct.  We don't need isub_sat because the result type is unsigned, so it cannot overflow.
   (('uabs_isub', a, b), ('bcsel', ('ilt', a, b), ('ineg', ('isub', a, b)), ('isub', a, b))),

   # Propagate negation up multiplication chains
   (('fmul(is_used_by_non_fsat)', ('fneg', a), b), ('fneg', ('fmul', a, b))),
   (('imul', ('ineg', a), b), ('ineg', ('imul', a, b))),

   # Propagate constants up multiplication chains
   (('~fmul(is_used_once)', ('fmul(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('fmul', ('fmul', a, c), b)),
   (('imul(is_used_once)', ('imul(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('imul', ('imul', a, c), b)),
   (('~fadd(is_used_once)', ('fadd(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('fadd', ('fadd', a, c), b)),
   (('iadd(is_used_once)', ('iadd(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('iadd', ('iadd', a, c), b)),

   # Reassociate constants in add/mul chains so they can be folded together.
   # For now, we mostly only handle cases where the constants are separated by
   # a single non-constant.  We could do better eventually.
   (('~fmul', '#a', ('fmul', 'b(is_not_const)', '#c')), ('fmul', ('fmul', a, c), b)),
   (('imul', '#a', ('imul', 'b(is_not_const)', '#c')), ('imul', ('imul', a, c), b)),
   (('~fadd', '#a', ('fadd', 'b(is_not_const)', '#c')), ('fadd', ('fadd', a, c), b)),
   (('~fadd', '#a', ('fneg', ('fadd', 'b(is_not_const)', '#c'))), ('fadd', ('fadd', a, ('fneg', c)), ('fneg', b))),
   (('iadd', '#a', ('iadd', 'b(is_not_const)', '#c')), ('iadd', ('iadd', a, c), b)),
   (('iand', '#a', ('iand', 'b(is_not_const)', '#c')), ('iand', ('iand', a, c), b)),
   (('ior', '#a', ('ior', 'b(is_not_const)', '#c')), ('ior', ('ior', a, c), b)),
   (('ixor', '#a', ('ixor', 'b(is_not_const)', '#c')), ('ixor', ('ixor', a, c), b)),
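   # For example, with the rules above 2.0*(x*4.0) becomes (2.0*4.0)*x, which
   # constant folding then reduces to 8.0*x.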
   # Drop mul-div by the same value when there's no wrapping.
   (('idiv', ('imul(no_signed_wrap)', a, b), b), a),

   (('bcsel', ('ige', ('find_lsb', a), 0), ('find_lsb', a), -1), ('find_lsb', a)),
   (('bcsel', ('ige', ('ifind_msb', a), 0), ('ifind_msb', a), -1), ('ifind_msb', a)),
   (('bcsel', ('ige', ('ufind_msb', a), 0), ('ufind_msb', a), -1), ('ufind_msb', a)),

   (('bcsel', ('ine', a, 0), ('find_lsb', a), -1), ('find_lsb', a)),
   (('bcsel', ('ine', a, 0), ('ifind_msb', a), -1), ('ifind_msb', a)),
   (('bcsel', ('ine', a, 0), ('ufind_msb', a), -1), ('ufind_msb', a)),

   (('bcsel', ('ine', a, -1), ('ifind_msb', a), -1), ('ifind_msb', a)),

   (('fmin3@64', a, b, c), ('fmin@64', a, ('fmin@64', b, c))),
   (('fmax3@64', a, b, c), ('fmax@64', a, ('fmax@64', b, c))),
   (('fmed3@64', a, b, c), ('fmax@64', ('fmin@64', ('fmax@64', a, b), c), ('fmin@64', a, b))),

   (('fmod', a, b), ('fsub', a, ('fmul', b, ('ffloor', ('fdiv', a, b)))), 'options->lower_fmod'),
   (('frem', a, b), ('fsub', a, ('fmul', b, ('ftrunc', ('fdiv', a, b)))), 'options->lower_fmod'),
   (('uadd_carry@32', a, b), ('b2i', ('ult', ('iadd', a, b), a)), 'options->lower_uadd_carry'),
   (('usub_borrow@32', a, b), ('b2i', ('ult', a, b)), 'options->lower_usub_borrow'),

   (('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
    ('bcsel', ('ult', 31, 'bits'), 'insert',
              ('bfi', ('bfm', 'bits', 'offset'), 'insert', 'base')),
    'options->lower_bitfield_insert'),
   (('ihadd', a, b), ('iadd', ('iand', a, b), ('ishr', ('ixor', a, b), 1)), 'options->lower_hadd'),
   (('uhadd', a, b), ('iadd', ('iand', a, b), ('ushr', ('ixor', a, b), 1)), 'options->lower_hadd'),
   (('irhadd', a, b), ('isub', ('ior', a, b), ('ishr', ('ixor', a, b), 1)), 'options->lower_hadd'),
   (('urhadd', a, b), ('isub', ('ior', a, b), ('ushr', ('ixor', a, b), 1)), 'options->lower_hadd'),
   (('ihadd@64', a, b), ('iadd', ('iand', a, b), ('ishr', ('ixor', a, b), 1)), 'options->lower_hadd64 || (options->lower_int64_options & nir_lower_iadd64) != 0'),
   (('uhadd@64', a, b), ('iadd', ('iand', a, b), ('ushr', ('ixor', a, b), 1)), 'options->lower_hadd64 || (options->lower_int64_options & nir_lower_iadd64) != 0'),
   (('irhadd@64', a, b), ('isub', ('ior', a, b), ('ishr', ('ixor', a, b), 1)), 'options->lower_hadd64 || (options->lower_int64_options & nir_lower_iadd64) != 0'),
   (('urhadd@64', a, b), ('isub', ('ior', a, b), ('ushr', ('ixor', a, b), 1)), 'options->lower_hadd64 || (options->lower_int64_options & nir_lower_iadd64) != 0'),

   (('uadd_sat@64', a, b), ('bcsel', ('ult', ('iadd', a, b), a), -1, ('iadd', a, b)), 'options->lower_add_sat || (options->lower_int64_options & nir_lower_iadd64) != 0'),
   (('uadd_sat', a, b), ('bcsel', ('ult', ('iadd', a, b), a), -1, ('iadd', a, b)), 'options->lower_add_sat'),
   (('usub_sat', a, b), ('bcsel', ('ult', a, b), 0, ('isub', a, b)), 'options->lower_add_sat'),
   (('usub_sat@64', a, b), ('bcsel', ('ult', a, b), 0, ('isub', a, b)), 'options->lower_usub_sat64 || (options->lower_int64_options & nir_lower_iadd64) != 0'),

   # int64_t sum = a + b;
   #
   # if (a < 0 && b < 0 && a < sum)
   #    sum = INT64_MIN;
   # } else if (a >= 0 && b >= 0 && sum < a)
   #    sum = INT64_MAX;
   # }
   #
   # A couple optimizations are applied.
   #
   # 1. a < sum => sum >= 0.  This replacement works because it is known that
   #    a < 0 and b < 0, so sum should also be < 0 unless there was
   #    underflow.
   #
   # 2. sum < a => sum < 0.  This replacement works because it is known that
   #    a >= 0 and b >= 0, so sum should also be >= 0 unless there was
   #    overflow.
   #
   # 3. Invert the second if-condition and swap the order of parameters for
   #    the bcsel.  !(a >= 0 && b >= 0 && sum < 0) becomes !(a >= 0) || !(b >=
   #    0) || !(sum < 0), and that becomes (a < 0) || (b < 0) || (sum >= 0)
   #
   # On Intel Gen11, this saves ~11 instructions.
   (('iadd_sat@64', a, b), ('bcsel',
                            ('iand', ('iand', ('ilt', a, 0), ('ilt', b, 0)), ('ige', ('iadd', a, b), 0)),
                            0x8000000000000000,
                            ('bcsel',
                             ('ior', ('ior', ('ilt', a, 0), ('ilt', b, 0)), ('ige', ('iadd', a, b), 0)),
                             ('iadd', a, b),
                             0x7fffffffffffffff)),
    '(options->lower_int64_options & nir_lower_iadd64) != 0'),
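   # For example, with a = b = 0x4000000000000000 the wrapped sum is
   # 0x8000000000000000 (negative), so neither bcsel condition above holds and
   # the result saturates to INT64_MAX.  With a = b = INT64_MIN the wrapped
   # sum is 0, the first condition holds, and the result saturates to
   # INT64_MIN.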
   # int64_t sum = a - b;
   #
   # if (a < 0 && b >= 0 && a < sum)
   #    sum = INT64_MIN;
   # } else if (a >= 0 && b < 0 && a >= sum)
   #    sum = INT64_MAX;
   # }
   #
   # Optimizations similar to the iadd_sat case are applied here.
   (('isub_sat@64', a, b), ('bcsel',
                            ('iand', ('iand', ('ilt', a, 0), ('ige', b, 0)), ('ige', ('isub', a, b), 0)),
                            0x8000000000000000,
                            ('bcsel',
                             ('ior', ('ior', ('ilt', a, 0), ('ige', b, 0)), ('ige', ('isub', a, b), 0)),
                             ('isub', a, b),
                             0x7fffffffffffffff)),
    '(options->lower_int64_options & nir_lower_iadd64) != 0'),

   # These are done here instead of in the backend because the int64 lowering
   # pass will make a mess of the patterns.  The first patterns are
   # conditioned on nir_lower_minmax64 because it was not clear that it was
   # always an improvement on platforms that have real int64 support.  No
   # shaders in shader-db hit this, so it was hard to say one way or the
   # other.
   (('ilt', ('imax(is_used_once)', 'a@64', 'b@64'), 0), ('ilt', ('imax', ('unpack_64_2x32_split_y', a), ('unpack_64_2x32_split_y', b)), 0), '(options->lower_int64_options & nir_lower_minmax64) != 0'),
   (('ilt', ('imin(is_used_once)', 'a@64', 'b@64'), 0), ('ilt', ('imin', ('unpack_64_2x32_split_y', a), ('unpack_64_2x32_split_y', b)), 0), '(options->lower_int64_options & nir_lower_minmax64) != 0'),
   (('ige', ('imax(is_used_once)', 'a@64', 'b@64'), 0), ('ige', ('imax', ('unpack_64_2x32_split_y', a), ('unpack_64_2x32_split_y', b)), 0), '(options->lower_int64_options & nir_lower_minmax64) != 0'),
   (('ige', ('imin(is_used_once)', 'a@64', 'b@64'), 0), ('ige', ('imin', ('unpack_64_2x32_split_y', a), ('unpack_64_2x32_split_y', b)), 0), '(options->lower_int64_options & nir_lower_minmax64) != 0'),
   (('ilt', 'a@64', 0), ('ilt', ('unpack_64_2x32_split_y', a), 0), '(options->lower_int64_options & nir_lower_icmp64) != 0'),
   (('ige', 'a@64', 0), ('ige', ('unpack_64_2x32_split_y', a), 0), '(options->lower_int64_options & nir_lower_icmp64) != 0'),

   (('ine', 'a@64', 0), ('ine', ('ior', ('unpack_64_2x32_split_x', a), ('unpack_64_2x32_split_y', a)), 0), '(options->lower_int64_options & nir_lower_icmp64) != 0'),
   (('ieq', 'a@64', 0), ('ieq', ('ior', ('unpack_64_2x32_split_x', a), ('unpack_64_2x32_split_y', a)), 0), '(options->lower_int64_options & nir_lower_icmp64) != 0'),
   # 0u < uint(a) <=> uint(a) != 0u
   (('ult', 0, 'a@64'), ('ine', ('ior', ('unpack_64_2x32_split_x', a), ('unpack_64_2x32_split_y', a)), 0), '(options->lower_int64_options & nir_lower_icmp64) != 0'),

   # Alternative lowering that doesn't rely on bfi.
   (('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
    ('bcsel', ('ult', 31, 'bits'),
     'insert',
     ('ior',
      ('iand', 'base', ('inot', ('ishl', ('isub', ('ishl', 1, 'bits'), 1), 'offset'))),
      ('iand', ('ishl', 'insert', 'offset'), ('ishl', ('isub', ('ishl', 1, 'bits'), 1), 'offset'))))),
    'options->lower_bitfield_insert_to_shifts'),

   # Alternative lowering that uses bitfield_select.
   (('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
    ('bcsel', ('ult', 31, 'bits'), 'insert',
              ('bitfield_select', ('bfm', 'bits', 'offset'), ('ishl', 'insert', 'offset'), 'base')),
    'options->lower_bitfield_insert_to_bitfield_select'),

   (('ibitfield_extract', 'value', 'offset', 'bits'),
    ('bcsel', ('ult', 31, 'bits'), 'value',
              ('ibfe', 'value', 'offset', 'bits')),
    'options->lower_bitfield_extract'),

   (('ubitfield_extract', 'value', 'offset', 'bits'),
    ('bcsel', ('ult', 31, 'bits'), 'value',
              ('ubfe', 'value', 'offset', 'bits')),
    'options->lower_bitfield_extract'),

   # Note that these opcodes are defined to only use the five least significant bits of 'offset' and 'bits'
   (('ubfe', 'value', 'offset', ('iand', 31, 'bits')), ('ubfe', 'value', 'offset', 'bits')),
   (('ubfe', 'value', ('iand', 31, 'offset'), 'bits'), ('ubfe', 'value', 'offset', 'bits')),
   (('ibfe', 'value', 'offset', ('iand', 31, 'bits')), ('ibfe', 'value', 'offset', 'bits')),
   (('ibfe', 'value', ('iand', 31, 'offset'), 'bits'), ('ibfe', 'value', 'offset', 'bits')),
   (('bfm', 'bits', ('iand', 31, 'offset')), ('bfm', 'bits', 'offset')),
   (('bfm', ('iand', 31, 'bits'), 'offset'), ('bfm', 'bits', 'offset')),

   # Section 8.8 (Integer Functions) of the GLSL 4.60 spec says:
   #
   #    If bits is zero, the result will be zero.
   #
   # These patterns prevent other patterns from generating invalid results
   # when count is zero.
   (('ubfe', a, b, 0), 0),
   (('ibfe', a, b, 0), 0),

   (('ubfe', a, 0, '#b'), ('iand', a, ('ushr', 0xffffffff, ('ineg', b)))),

   (('b2i32', ('i2b', ('ubfe', a, b, 1))), ('ubfe', a, b, 1)),
   (('b2i32', ('i2b', ('ibfe', a, b, 1))), ('ubfe', a, b, 1)), # ubfe in the replacement is correct
   (('ine', ('ibfe(is_used_once)', a, '#b', '#c'), 0), ('ine', ('iand', a, ('ishl', ('ushr', 0xffffffff, ('ineg', c)), b)), 0)),
   (('ieq', ('ibfe(is_used_once)', a, '#b', '#c'), 0), ('ieq', ('iand', a, ('ishl', ('ushr', 0xffffffff, ('ineg', c)), b)), 0)),
   (('ine', ('ubfe(is_used_once)', a, '#b', '#c'), 0), ('ine', ('iand', a, ('ishl', ('ushr', 0xffffffff, ('ineg', c)), b)), 0)),
   (('ieq', ('ubfe(is_used_once)', a, '#b', '#c'), 0), ('ieq', ('iand', a, ('ishl', ('ushr', 0xffffffff, ('ineg', c)), b)), 0)),

   (('ibitfield_extract', 'value', 'offset', 'bits'),
    ('bcsel', ('ieq', 0, 'bits'),
     0,
     ('ishr',
      ('ishl', 'value', ('isub', ('isub', 32, 'bits'), 'offset')),
      ('isub', 32, 'bits'))),
    'options->lower_bitfield_extract_to_shifts'),

   (('ubitfield_extract', 'value', 'offset', 'bits'),
    ('iand',
     ('ushr', 'value', 'offset'),
     ('bcsel', ('ieq', 'bits', 32),
      0xffffffff,
      ('isub', ('ishl', 1, 'bits'), 1))),
    'options->lower_bitfield_extract_to_shifts'),

   (('ifind_msb', 'value'),
    ('ufind_msb', ('bcsel', ('ilt', 'value', 0), ('inot', 'value'), 'value')),
    'options->lower_ifind_msb'),

   (('find_lsb', 'value'),
    ('ufind_msb', ('iand', 'value', ('ineg', 'value'))),
    'options->lower_find_lsb'),

   (('extract_i8', a, 'b@32'),
    ('ishr', ('ishl', a, ('imul', ('isub', 3, b), 8)), 24),
    'options->lower_extract_byte'),

   (('extract_u8', a, 'b@32'),
    ('iand', ('ushr', a, ('imul', b, 8)), 0xff),
    'options->lower_extract_byte'),

   (('extract_i16', a, 'b@32'),
    ('ishr', ('ishl', a, ('imul', ('isub', 1, b), 16)), 16),
    'options->lower_extract_word'),

   (('extract_u16', a, 'b@32'),
    ('iand', ('ushr', a, ('imul', b, 16)), 0xffff),
    'options->lower_extract_word'),

   (('pack_unorm_2x16', 'v'),
    ('pack_uvec2_to_uint',
     ('f2u32', ('fround_even', ('fmul', ('fsat', 'v'), 65535.0)))),
    'options->lower_pack_unorm_2x16'),

   (('pack_unorm_4x8', 'v'),
    ('pack_uvec4_to_uint',
     ('f2u32', ('fround_even', ('fmul', ('fsat', 'v'), 255.0)))),
    'options->lower_pack_unorm_4x8'),

   (('pack_snorm_2x16', 'v'),
    ('pack_uvec2_to_uint',
     ('f2i32', ('fround_even', ('fmul', ('fmin', 1.0, ('fmax', -1.0, 'v')), 32767.0)))),
    'options->lower_pack_snorm_2x16'),

   (('pack_snorm_4x8', 'v'),
    ('pack_uvec4_to_uint',
     ('f2i32', ('fround_even', ('fmul', ('fmin', 1.0, ('fmax', -1.0, 'v')), 127.0)))),
    'options->lower_pack_snorm_4x8'),

   (('unpack_unorm_2x16', 'v'),
    ('fdiv', ('u2f32', ('vec2', ('extract_u16', 'v', 0),
                                ('extract_u16', 'v', 1))),
             65535.0),
    'options->lower_unpack_unorm_2x16'),

   (('unpack_unorm_4x8', 'v'),
    ('fdiv', ('u2f32', ('vec4', ('extract_u8', 'v', 0),
                                ('extract_u8', 'v', 1),
                                ('extract_u8', 'v', 2),
                                ('extract_u8', 'v', 3))),
             255.0),
    'options->lower_unpack_unorm_4x8'),

   (('unpack_snorm_2x16', 'v'),
    ('fmin', 1.0, ('fmax', -1.0, ('fdiv', ('i2f', ('vec2', ('extract_i16', 'v', 0),
                                                           ('extract_i16', 'v', 1))),
                                          32767.0))),
    'options->lower_unpack_snorm_2x16'),

   (('unpack_snorm_4x8', 'v'),
    ('fmin', 1.0, ('fmax', -1.0, ('fdiv', ('i2f', ('vec4', ('extract_i8', 'v', 0),
                                                           ('extract_i8', 'v', 1),
                                                           ('extract_i8', 'v', 2),
                                                           ('extract_i8', 'v', 3))),
                                          127.0))),
    'options->lower_unpack_snorm_4x8'),

   (('pack_half_2x16_split', 'a@32', 'b@32'),
    ('ior', ('ishl', ('u2u32', ('f2f16', b)), 16), ('u2u32', ('f2f16', a))),
    'options->lower_pack_split'),

   (('unpack_half_2x16_split_x', 'a@32'),
    ('f2f32', ('u2u16', a)),
    'options->lower_pack_split'),

   (('unpack_half_2x16_split_y', 'a@32'),
    ('f2f32', ('u2u16', ('ushr', a, 16))),
    'options->lower_pack_split'),

   (('pack_32_2x16_split', 'a@16', 'b@16'),
    ('ior', ('ishl', ('u2u32', b), 16), ('u2u32', a)),
    'options->lower_pack_split'),

   (('unpack_32_2x16_split_x', 'a@32'),
    ('u2u16', a),
    'options->lower_pack_split'),

   (('unpack_32_2x16_split_y', 'a@32'),
    ('u2u16', ('ushr', 'a', 16)),
    'options->lower_pack_split'),

   (('isign', a), ('imin', ('imax', a, -1), 1), 'options->lower_isign'),
   (('fsign', a), ('fsub', ('b2f', ('flt', 0.0, a)), ('b2f', ('flt', a, 0.0))), 'options->lower_fsign'),

   # Address/offset calculations:
   # Drivers supporting imul24 should use the nir_lower_amul() pass, this
   # rule converts everyone else to imul:
   (('amul', a, b), ('imul', a, b), '!options->has_imul24'),

   (('umul24', a, b),
    ('imul', ('iand', a, 0xffffff), ('iand', b, 0xffffff)),
    '!options->has_umul24'),
   (('umad24', a, b, c),
    ('iadd', ('imul', ('iand', a, 0xffffff), ('iand', b, 0xffffff)), c),
    '!options->has_umad24'),

   (('imad24_ir3', a, b, 0), ('imul24', a, b)),
   (('imad24_ir3', a, 0, c), (c)),
   (('imad24_ir3', a, 1, c), ('iadd', a, c)),

   # if first two srcs are const, crack apart the imad so constant folding
   # can clean up the imul:
   # TODO ffma should probably get a similar rule:
   (('imad24_ir3', '#a', '#b', c), ('iadd', ('imul', a, b), c)),

   # These will turn 24b address/offset calc back into 32b shifts, but
   # it should be safe to get back some of the bits of precision that we
   # already decided were not necessary:
   (('imul24', a, '#b@32(is_pos_power_of_two)'), ('ishl', a, ('find_lsb', b)), '!options->lower_bitops'),
   (('imul24', a, '#b@32(is_neg_power_of_two)'), ('ineg', ('ishl', a, ('find_lsb', ('iabs', b)))), '!options->lower_bitops'),
   (('imul24', a, 0), (0)),
])
# bit_size dependent lowerings
for bit_size in [8, 16, 32, 64]:
   # convenience constants
   intmax = (1 << (bit_size - 1)) - 1
   intmin = 1 << (bit_size - 1)

   optimizations += [
      (('iadd_sat@' + str(bit_size), a, b),
       ('bcsel', ('ige', b, 1), ('bcsel', ('ilt', ('iadd', a, b), a), intmax, ('iadd', a, b)),
                                ('bcsel', ('ilt', a, ('iadd', a, b)), intmin, ('iadd', a, b))), 'options->lower_add_sat'),
      (('isub_sat@' + str(bit_size), a, b),
       ('bcsel', ('ilt', b, 0), ('bcsel', ('ilt', ('isub', a, b), a), intmax, ('isub', a, b)),
                                ('bcsel', ('ilt', a, ('isub', a, b)), intmin, ('isub', a, b))), 'options->lower_add_sat'),
   ]

invert = OrderedDict([('feq', 'fne'), ('fne', 'feq')])

for left, right in itertools.combinations_with_replacement(invert.keys(), 2):
   optimizations.append((('inot', ('ior(is_used_once)', (left, a, b), (right, c, d))),
                         ('iand', (invert[left], a, b), (invert[right], c, d))))
   optimizations.append((('inot', ('iand(is_used_once)', (left, a, b), (right, c, d))),
                         ('ior', (invert[left], a, b), (invert[right], c, d))))

# Optimize x2bN(b2x(x)) -> x
for size in type_sizes('bool'):
   aN = 'a@' + str(size)
   f2bN = 'f2b' + str(size)
   i2bN = 'i2b' + str(size)
   optimizations.append(((f2bN, ('b2f', aN)), a))
   optimizations.append(((i2bN, ('b2i', aN)), a))

# Optimize x2yN(b2x(x)) -> b2y
for x, y in itertools.product(['f', 'u', 'i'], ['f', 'u', 'i']):
   if x != 'f' and y != 'f' and x != y:
      continue

   b2x = 'b2f' if x == 'f' else 'b2i'
   b2y = 'b2f' if y == 'f' else 'b2i'
   x2yN = '{}2{}'.format(x, y)
   optimizations.append(((x2yN, (b2x, a)), (b2y, a)))

# Optimize away x2xN(a@N)
for t in ['int', 'uint', 'float', 'bool']:
   for N in type_sizes(t):
      x2xN = '{0}2{0}{1}'.format(t[0], N)
      aN = 'a@{0}'.format(N)
      optimizations.append(((x2xN, aN), a))

# Optimize x2xN(y2yM(a@P)) -> y2yN(a) for integers
# In particular, we can optimize away everything except upcast of downcast and
# upcasts where the type differs from the other cast
for N, M in itertools.product(type_sizes('uint'), type_sizes('uint')):
   if N < M:
      # The outer cast is a down-cast.  It doesn't matter what the size of the
      # argument of the inner cast is because we'll never be in the upcast
      # of downcast case.  Regardless of types, we'll always end up with y2yN
      # in the end.
      for x, y in itertools.product(['i', 'u'], ['i', 'u']):
         x2xN = '{0}2{0}{1}'.format(x, N)
         y2yM = '{0}2{0}{1}'.format(y, M)
         y2yN = '{0}2{0}{1}'.format(y, N)
         optimizations.append(((x2xN, (y2yM, a)), (y2yN, a)))
   elif N > M:
      # If the outer cast is an up-cast, we have to be more careful about the
      # size of the argument of the inner cast and with types.  In this case,
      # the type is always the type of the up-cast which is given by the
      # outer cast.
      for P in type_sizes('uint'):
         # We can't optimize away up-cast of down-cast.
         if M < P:
            continue

         # Because we're doing down-cast of down-cast, the types always have
         # to match between the two casts
         for x in ['i', 'u']:
            x2xN = '{0}2{0}{1}'.format(x, N)
            x2xM = '{0}2{0}{1}'.format(x, M)
            aP = 'a@{0}'.format(P)
            optimizations.append(((x2xN, (x2xM, aP)), (x2xN, a)))
   else:
      # The N == M case is handled by other optimizations
      pass
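# For example, ('u2u8', ('i2i16', a)) becomes ('i2i8', a): both forms just
# keep the low 8 bits of a.  ('i2i32', ('i2i16', 'a@8')) becomes ('i2i32', a)
# because sign-extending 8 -> 16 -> 32 bits matches sign-extending 8 -> 32
# bits directly.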
# Downcast operations should be able to see through pack
for t in ['i', 'u']:
   for N in [8, 16, 32]:
      x2xN = '{0}2{0}{1}'.format(t, N)
      optimizations += [
         ((x2xN, ('pack_64_2x32_split', a, b)), (x2xN, a)),
         ((x2xN, ('pack_64_2x32_split', a, b)), (x2xN, a)),
      ]
# Optimize comparisons with up-casts
for t in ['int', 'uint', 'float']:
   for N, M in itertools.product(type_sizes(t), repeat=2):
      if N == 1 or N >= M:
         continue

      cond = 'true'
      if N == 8:
         cond = 'options->support_8bit_alu'
      elif N == 16:
         cond = 'options->support_16bit_alu'
      x2xM = '{0}2{0}{1}'.format(t[0], M)
      x2xN = '{0}2{0}{1}'.format(t[0], N)
      aN = 'a@' + str(N)
      bN = 'b@' + str(N)
      xeq = 'feq' if t == 'float' else 'ieq'
      xne = 'fne' if t == 'float' else 'ine'
      xge = '{0}ge'.format(t[0])
      xlt = '{0}lt'.format(t[0])

      # Up-casts are lossless so for correctly signed comparisons of
      # up-casted values we can do the comparison at the largest of the two
      # original sizes and drop one or both of the casts.  (We have
      # optimizations to drop the no-op casts which this may generate.)
      for P in type_sizes(t):
         if P == 1 or P > N:
            continue

         bP = 'b@' + str(P)
         optimizations += [
            ((xeq, (x2xM, aN), (x2xM, bP)), (xeq, a, (x2xN, b)), cond),
            ((xne, (x2xM, aN), (x2xM, bP)), (xne, a, (x2xN, b)), cond),
            ((xge, (x2xM, aN), (x2xM, bP)), (xge, a, (x2xN, b)), cond),
            ((xlt, (x2xM, aN), (x2xM, bP)), (xlt, a, (x2xN, b)), cond),
            ((xge, (x2xM, bP), (x2xM, aN)), (xge, (x2xN, b), a), cond),
            ((xlt, (x2xM, bP), (x2xM, aN)), (xlt, (x2xN, b), a), cond),
         ]

      # The next bit doesn't work on floats because the range checks would
      # get way too complicated.
      if t in ['int', 'uint']:
         if t == 'int':
            xN_min = -(1 << (N - 1))
            xN_max = (1 << (N - 1)) - 1
         elif t == 'uint':
            xN_min = 0
            xN_max = (1 << N) - 1
         else:
            assert False

         # If we're up-casting and comparing to a constant, we can unfold
         # the comparison into a comparison with the shrunk down constant
         # and a check that the constant fits in the smaller bit size.
         optimizations += [
            ((xeq, (x2xM, aN), '#b'),
             ('iand', (xeq, a, (x2xN, b)), (xeq, (x2xM, (x2xN, b)), b)), cond),
            ((xne, (x2xM, aN), '#b'),
             ('ior', (xne, a, (x2xN, b)), (xne, (x2xM, (x2xN, b)), b)), cond),
            ((xlt, (x2xM, aN), '#b'),
             ('iand', (xlt, xN_min, b),
              ('ior', (xlt, xN_max, b), (xlt, a, (x2xN, b)))), cond),
            ((xlt, '#a', (x2xM, bN)),
             ('iand', (xlt, a, xN_max),
              ('ior', (xlt, a, xN_min), (xlt, (x2xN, a), b))), cond),
            ((xge, (x2xM, aN), '#b'),
             ('iand', (xge, xN_max, b),
              ('ior', (xge, xN_min, b), (xge, a, (x2xN, b)))), cond),
            ((xge, '#a', (x2xM, bN)),
             ('iand', (xge, a, xN_min),
              ('ior', (xge, a, xN_max), (xge, (x2xN, a), b))), cond),
         ]
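# For example, ('ieq', ('u2u32', 'a@8'), '#b') becomes an iand of the 8-bit
# comparison with a check that b survives the round trip u2u32(u2u8(b)) == b,
# i.e. that b fits in 8 bits.  Constant folding then either drops the check or
# folds the whole comparison to false.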
def fexp2i(exp, bits):
   # Generate an expression which constructs value 2.0^exp or 0.0.
   #
   # We assume that exp is already in a valid range:
   #
   #   * [-15, 15] for 16-bit float
   #   * [-127, 127] for 32-bit float
   #   * [-1023, 1023] for 64-bit float
   #
   # If exp is the lowest value in the valid range, a value of 0.0 is
   # constructed.  Otherwise, the value 2.0^exp is constructed.
   if bits == 16:
      return ('i2i16', ('ishl', ('iadd', exp, 15), 10))
   elif bits == 32:
      return ('ishl', ('iadd', exp, 127), 23)
   elif bits == 64:
      return ('pack_64_2x32_split', 0, ('ishl', ('iadd', exp, 1023), 20))
   else:
      assert False
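# For example, fexp2i(3, 32) yields ('ishl', ('iadd', 3, 127), 23), i.e. the
# bit pattern 0x41000000, which is 8.0 = 2.0^3 when reinterpreted as a float.
# fexp2i(-127, 32) yields 0, the bit pattern of 0.0.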
def ldexp(f, exp, bits):
   # The maximum possible range for a normal exponent is [-126, 127] and,
   # throwing in denormals, you get a maximum range of [-149, 127].  This
   # means that we can potentially have a swing of +-276.  If you start with
   # FLT_MAX, you actually have to do ldexp(FLT_MAX, -278) to get it to flush
   # all the way to zero.  The GLSL spec only requires that we handle a subset
   # of this range.  From version 4.60 of the spec:
   #
   #    "If exp is greater than +128 (single-precision) or +1024
   #    (double-precision), the value returned is undefined.  If exp is less
   #    than -126 (single-precision) or -1022 (double-precision), the value
   #    returned may be flushed to zero.  Additionally, splitting the value
   #    into a significand and exponent using frexp() and then reconstructing
   #    a floating-point value using ldexp() should yield the original input
   #    for zero and all finite non-denormalized values."
   #
   # The SPIR-V spec has similar language.
   #
   # In order to handle the maximum value +128 using the fexp2i() helper
   # above, we have to split the exponent in half and do two multiply
   # operations.
   #
   # First, we clamp exp to a reasonable range.  Specifically, we clamp to
   # twice the full range that is valid for the fexp2i() function above.  If
   # exp/2 is the bottom value of that range, the fexp2i() expression will
   # yield 0.0f which, when multiplied by f, will flush it to zero which is
   # allowed by the GLSL and SPIR-V specs for low exponent values.  If the
   # value is clamped from above, then it must have been above the supported
   # range of the GLSL built-in and therefore any return value is acceptable.
   if bits == 16:
      exp = ('imin', ('imax', exp, -30), 30)
   elif bits == 32:
      exp = ('imin', ('imax', exp, -254), 254)
   elif bits == 64:
      exp = ('imin', ('imax', exp, -2046), 2046)
   else:
      assert False

   # Now we compute two powers of 2, one for exp/2 and one for exp-exp/2.
   # (We use ishr which isn't the same for -1, but the -1 case still works
   # since we use exp-exp/2 as the second exponent.)  While the spec
   # technically defines ldexp as f * 2.0^exp, simply multiplying once doesn't
   # work with denormals and doesn't allow for the full swing in exponents
   # that you can get with normalized values.  Instead, we create two powers
   # of two and multiply by them each in turn.  That way the effective range
   # of our exponent is doubled.
   pow2_1 = fexp2i(('ishr', exp, 1), bits)
   pow2_2 = fexp2i(('isub', exp, ('ishr', exp, 1)), bits)
   return ('fmul', ('fmul', f, pow2_1), pow2_2)
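# For example, at 32 bits an exponent of 200 is split into 2.0^100 * 2.0^100,
# so neither fexp2i() call needs an exponent outside [-127, 127].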
optimizations += [
   (('ldexp@16', 'x', 'exp'), ldexp('x', 'exp', 16), 'options->lower_ldexp'),
   (('ldexp@32', 'x', 'exp'), ldexp('x', 'exp', 32), 'options->lower_ldexp'),
   (('ldexp@64', 'x', 'exp'), ldexp('x', 'exp', 64), 'options->lower_ldexp'),
]
# Unreal Engine 4 demo applications open-code bitfieldReverse()
def bitfield_reverse(u):
   step1 = ('ior', ('ishl', u, 16), ('ushr', u, 16))
   step2 = ('ior', ('ishl', ('iand', step1, 0x00ff00ff), 8), ('ushr', ('iand', step1, 0xff00ff00), 8))
   step3 = ('ior', ('ishl', ('iand', step2, 0x0f0f0f0f), 4), ('ushr', ('iand', step2, 0xf0f0f0f0), 4))
   step4 = ('ior', ('ishl', ('iand', step3, 0x33333333), 2), ('ushr', ('iand', step3, 0xcccccccc), 2))
   step5 = ('ior(many-comm-expr)', ('ishl', ('iand', step4, 0x55555555), 1), ('ushr', ('iand', step4, 0xaaaaaaaa), 1))

   return step5
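# For example, applied to 0x00000001 the steps give 0x00010000, 0x01000000,
# 0x10000000, 0x40000000 and finally 0x80000000, i.e. bit 0 moved to bit 31.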
optimizations += [(bitfield_reverse('x@32'), ('bitfield_reverse', 'x'), '!options->lower_bitfield_reverse')]

# For any float comparison operation, "cmp", if you have "a == a && a cmp b"
# then the "a == a" is redundant because it's equivalent to "a is not NaN"
# and, if a is a NaN then the second comparison will fail anyway.
for op in ['flt', 'fge', 'feq']:
   optimizations += [
      (('iand', ('feq', a, a), (op, a, b)), ('!' + op, a, b)),
      (('iand', ('feq', a, a), (op, b, a)), ('!' + op, b, a)),
   ]
# Add optimizations to handle the case where the result of a ternary is
# compared to a constant.  This way we can take things like
#
#    (a ? 0 : 1) > 0
#
# and turn it into
#
#    a ? (0 > 0) : (1 > 0)
#
# which constant folding will eat for lunch.  The resulting ternary will
# further get cleaned up by the boolean reductions above and we will be
# left with just the original variable "a".
for op in ['flt', 'fge', 'feq', 'fne',
           'ilt', 'ige', 'ieq', 'ine', 'ult', 'uge']:
   optimizations += [
      ((op, ('bcsel', 'a', '#b', '#c'), '#d'),
       ('bcsel', 'a', (op, 'b', 'd'), (op, 'c', 'd'))),
      ((op, '#d', ('bcsel', a, '#b', '#c')),
       ('bcsel', 'a', (op, 'd', 'b'), (op, 'd', 'c'))),
   ]
# For example, this converts things like
#
#    1 + mix(0, a - 1, condition)
#
# into
#
#    mix(1, (a-1)+1, condition)
#
# Other optimizations will rearrange the constants.
for op in ['fadd', 'fmul', 'iadd', 'imul']:
   optimizations += [
      ((op, ('bcsel(is_used_once)', a, '#b', c), '#d'), ('bcsel', a, (op, b, d), (op, c, d)))
   ]
# For derivatives in compute shaders, GLSL_NV_compute_shader_derivatives
# states:
#
#    If neither layout qualifier is specified, derivatives in compute shaders
#    return zero, which is consistent with the handling of built-in texture
#    functions like texture() in GLSL 4.50 compute shaders.
for op in ['fddx', 'fddx_fine', 'fddx_coarse',
           'fddy', 'fddy_fine', 'fddy_coarse']:
   optimizations += [
      ((op, 'a'), 0.0, 'info->stage == MESA_SHADER_COMPUTE && info->cs.derivative_group == DERIVATIVE_GROUP_NONE')
   ]
# Some optimizations for ir3-specific instructions.
optimizations += [
   # 'al * bl':  If either 'al' or 'bl' is zero, return zero.
   (('umul_low', '#a(is_lower_half_zero)', 'b'), (0)),
   # '(ah * bl) << 16 + c':  If either 'ah' or 'bl' is zero, return 'c'.
   (('imadsh_mix16', '#a@32(is_lower_half_zero)', 'b@32', 'c@32'), ('c')),
   (('imadsh_mix16', 'a@32', '#b@32(is_upper_half_zero)', 'c@32'), ('c')),
]
# These kinds of sequences can occur after nir_opt_peephole_select.
#
# NOTE: fadd is not handled here because that gets in the way of ffma
# generation in the i965 driver.  Instead, fadd and ffma are handled in
# late_optimizations.

for op in ['flrp']:
   optimizations += [
      (('bcsel', a, (op + '(is_used_once)', b, c, d), (op, b, c, e)), (op, b, c, ('bcsel', a, d, e))),
      (('bcsel', a, (op, b, c, d), (op + '(is_used_once)', b, c, e)), (op, b, c, ('bcsel', a, d, e))),
      (('bcsel', a, (op + '(is_used_once)', b, c, d), (op, b, e, d)), (op, b, ('bcsel', a, c, e), d)),
      (('bcsel', a, (op, b, c, d), (op + '(is_used_once)', b, e, d)), (op, b, ('bcsel', a, c, e), d)),
      (('bcsel', a, (op + '(is_used_once)', b, c, d), (op, e, c, d)), (op, ('bcsel', a, b, e), c, d)),
      (('bcsel', a, (op, b, c, d), (op + '(is_used_once)', e, c, d)), (op, ('bcsel', a, b, e), c, d)),
   ]
for op in ['fmul', 'iadd', 'imul', 'iand', 'ior', 'ixor', 'fmin', 'fmax', 'imin', 'imax', 'umin', 'umax']:
   optimizations += [
      (('bcsel', a, (op + '(is_used_once)', b, c), (op, b, 'd(is_not_const)')), (op, b, ('bcsel', a, c, d))),
      (('bcsel', a, (op + '(is_used_once)', b, 'c(is_not_const)'), (op, b, d)), (op, b, ('bcsel', a, c, d))),
      (('bcsel', a, (op, b, 'c(is_not_const)'), (op + '(is_used_once)', b, d)), (op, b, ('bcsel', a, c, d))),
      (('bcsel', a, (op, b, c), (op + '(is_used_once)', b, 'd(is_not_const)')), (op, b, ('bcsel', a, c, d))),
   ]

for op in ['fpow']:
   optimizations += [
      (('bcsel', a, (op + '(is_used_once)', b, c), (op, b, d)), (op, b, ('bcsel', a, c, d))),
      (('bcsel', a, (op, b, c), (op + '(is_used_once)', b, d)), (op, b, ('bcsel', a, c, d))),
      (('bcsel', a, (op + '(is_used_once)', b, c), (op, d, c)), (op, ('bcsel', a, b, d), c)),
      (('bcsel', a, (op, b, c), (op + '(is_used_once)', d, c)), (op, ('bcsel', a, b, d), c)),
   ]
for op in ['frcp', 'frsq', 'fsqrt', 'fexp2', 'flog2', 'fsign', 'fsin', 'fcos']:
   optimizations += [
      (('bcsel', a, (op + '(is_used_once)', b), (op, c)), (op, ('bcsel', a, b, c))),
      (('bcsel', a, (op, b), (op + '(is_used_once)', c)), (op, ('bcsel', a, b, c))),
   ]
# This section contains "late" optimizations that should be run before
# creating ffmas and calling regular optimizations for the final time.
# Optimizations should go here if they help code generation and conflict
# with the regular optimizations.
before_ffma_optimizations = [
   # Propagate constants down multiplication chains
   (('~fmul(is_used_once)', ('fmul(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('fmul', ('fmul', a, c), b)),
   (('imul(is_used_once)', ('imul(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('imul', ('imul', a, c), b)),
   (('~fadd(is_used_once)', ('fadd(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('fadd', ('fadd', a, c), b)),
   (('iadd(is_used_once)', ('iadd(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('iadd', ('iadd', a, c), b)),

   (('~fadd', ('fmul', a, b), ('fmul', a, c)), ('fmul', a, ('fadd', b, c))),
   (('iadd', ('imul', a, b), ('imul', a, c)), ('imul', a, ('iadd', b, c))),
   (('~fadd', ('fneg', a), a), 0.0),
   (('iadd', ('ineg', a), a), 0),
   (('iadd', ('ineg', a), ('iadd', a, b)), b),
   (('iadd', a, ('iadd', ('ineg', a), b)), b),
   (('~fadd', ('fneg', a), ('fadd', a, b)), b),
   (('~fadd', a, ('fadd', ('fneg', a), b)), b),

   (('~flrp@32', ('fadd(is_used_once)', a, -1.0), ('fadd(is_used_once)', a, 1.0), d), ('fadd', ('flrp', -1.0, 1.0, d), a)),
   (('~flrp@32', ('fadd(is_used_once)', a, 1.0), ('fadd(is_used_once)', a, -1.0), d), ('fadd', ('flrp', 1.0, -1.0, d), a)),
   (('~flrp@32', ('fadd(is_used_once)', a, '#b'), ('fadd(is_used_once)', a, '#c'), d), ('fadd', ('fmul', d, ('fadd', c, ('fneg', b))), ('fadd', a, b))),
]
# This section contains "late" optimizations that should be run after the
# regular optimizations have finished.  Optimizations should go here if
# they help code generation but do not necessarily produce code that is
# more easily optimizable.
late_optimizations = [
   # Most of these optimizations aren't quite safe when you get infinity or
   # Nan involved but the first one should be fine.
   (('flt', ('fadd', a, b), 0.0), ('flt', a, ('fneg', b))),
   (('flt', ('fneg', ('fadd', a, b)), 0.0), ('flt', ('fneg', a), b)),
   (('~fge', ('fadd', a, b), 0.0), ('fge', a, ('fneg', b))),
   (('~fge', ('fneg', ('fadd', a, b)), 0.0), ('fge', ('fneg', a), b)),
   (('~feq', ('fadd', a, b), 0.0), ('feq', a, ('fneg', b))),
   (('~fne', ('fadd', a, b), 0.0), ('fne', a, ('fneg', b))),

   # nir_lower_to_source_mods will collapse this, but its existence during the
   # optimization loop can prevent other optimizations.
   (('fneg', ('fneg', a)), a),

   # Subtractions get lowered during optimization, so we need to recombine them
   (('fadd', 'a', ('fneg', 'b')), ('fsub', 'a', 'b'), '!options->lower_sub'),
   (('iadd', 'a', ('ineg', 'b')), ('isub', 'a', 'b'), '!options->lower_sub'),
   (('fneg', a), ('fsub', 0.0, a), 'options->lower_negate'),
   (('ineg', a), ('isub', 0, a), 'options->lower_negate'),

   # These are duplicated from the main optimizations table.  The late
   # patterns that rearrange expressions like x - .5 < 0 to x < .5 can create
   # new patterns like these.  The patterns that compare with zero are removed
   # because they are unlikely to be created by anything in
   # late_optimizations.
   (('flt', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('flt', a, b)),
   (('flt', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('flt', b, a)),
   (('fge', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fge', a, b)),
   (('fge', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('fge', b, a)),
   (('feq', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('feq', a, b)),
   (('fne', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fne', a, b)),

   (('fge', ('fsat(is_used_once)', a), 1.0), ('fge', a, 1.0)),
   (('flt', ('fsat(is_used_once)', a), 1.0), ('flt', a, 1.0)),

   (('~fge', ('fmin(is_used_once)', ('fadd(is_used_once)', a, b), ('fadd', c, d)), 0.0), ('iand', ('fge', a, ('fneg', b)), ('fge', c, ('fneg', d)))),

   (('flt', ('fneg', a), ('fneg', b)), ('flt', b, a)),
   (('fge', ('fneg', a), ('fneg', b)), ('fge', b, a)),
   (('feq', ('fneg', a), ('fneg', b)), ('feq', b, a)),
   (('fne', ('fneg', a), ('fneg', b)), ('fne', b, a)),
   (('flt', ('fneg', a), -1.0), ('flt', 1.0, a)),
   (('flt', -1.0, ('fneg', a)), ('flt', a, 1.0)),
   (('fge', ('fneg', a), -1.0), ('fge', 1.0, a)),
   (('fge', -1.0, ('fneg', a)), ('fge', a, 1.0)),
   (('fne', ('fneg', a), -1.0), ('fne', 1.0, a)),
   (('feq', -1.0, ('fneg', a)), ('feq', a, 1.0)),

   (('iand', a, a), a),

   (('iand', ('ine(is_used_once)', 'a@32', 0), ('ine', 'b@32', 0)), ('ine', ('umin', a, b), 0)),
   (('ior',  ('ieq(is_used_once)', 'a@32', 0), ('ieq', 'b@32', 0)), ('ieq', ('umin', a, b), 0)),

   (('~fadd', ('fneg(is_used_once)', ('fsat(is_used_once)', 'a(is_not_fmul)')), 1.0), ('fsat', ('fadd', 1.0, ('fneg', a)))),

   (('fdot2', a, b), ('fdot_replicated2', a, b), 'options->fdot_replicates'),
   (('fdot3', a, b), ('fdot_replicated3', a, b), 'options->fdot_replicates'),
   (('fdot4', a, b), ('fdot_replicated4', a, b), 'options->fdot_replicates'),
   (('fdph', a, b), ('fdph_replicated', a, b), 'options->fdot_replicates'),

   (('~flrp@32', ('fadd(is_used_once)', a, b), ('fadd(is_used_once)', a, c), d), ('fadd', ('flrp', b, c, d), a)),
   (('~flrp@64', ('fadd(is_used_once)', a, b), ('fadd(is_used_once)', a, c), d), ('fadd', ('flrp', b, c, d), a)),

   (('~fadd@32', 1.0, ('fmul(is_used_once)', c, ('fadd', b, -1.0))), ('fadd', ('fadd', 1.0, ('fneg', c)), ('fmul', b, c)), 'options->lower_flrp32'),
   (('~fadd@64', 1.0, ('fmul(is_used_once)', c, ('fadd', b, -1.0))), ('fadd', ('fadd', 1.0, ('fneg', c)), ('fmul', b, c)), 'options->lower_flrp64'),

   # A similar operation could apply to any ffma(#a, b, #(-a/2)), but this
   # particular operation is common for expanding values stored in a texture
   # from [0,1] to [-1,1].
   (('~ffma@32', a,  2.0, -1.0), ('flrp', -1.0, 1.0,          a ), '!options->lower_flrp32'),
   (('~ffma@32', a, -2.0, -1.0), ('flrp', -1.0, 1.0, ('fneg', a)), '!options->lower_flrp32'),
   (('~ffma@32', a, -2.0,  1.0), ('flrp',  1.0, -1.0,          a ), '!options->lower_flrp32'),
   (('~ffma@32', a,  2.0,  1.0), ('flrp',  1.0, -1.0, ('fneg', a)), '!options->lower_flrp32'),
   (('~fadd@32', ('fmul(is_used_once)',  2.0, a), -1.0), ('flrp', -1.0, 1.0,          a ), '!options->lower_flrp32'),
   (('~fadd@32', ('fmul(is_used_once)', -2.0, a), -1.0), ('flrp', -1.0, 1.0, ('fneg', a)), '!options->lower_flrp32'),
   (('~fadd@32', ('fmul(is_used_once)', -2.0, a),  1.0), ('flrp',  1.0, -1.0,          a ), '!options->lower_flrp32'),
   (('~fadd@32', ('fmul(is_used_once)',  2.0, a),  1.0), ('flrp',  1.0, -1.0, ('fneg', a)), '!options->lower_flrp32'),

   #    a + -a*a + a*b   (1)
   #
   # Option 1: ffma(a, (b-a), a)
   #
   # Alternately, after (1):
   #
   # Option 2: ffma(a, 2, -(a*a))
   # Option 3: ffma(a, 2, (-a)*a)
   # Option 4: ffma(a, -a, (2*a))
   # Option 5: a * (2 - a)
   #
   # There are a lot of other possible combinations.
   (('~ffma@32', ('fadd', b, ('fneg', a)), a, a), ('flrp', a, b, a), '!options->lower_flrp32'),
   (('~ffma@32', a, 2.0, ('fneg', ('fmul', a, a))), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
   (('~ffma@32', a, 2.0, ('fmul', ('fneg', a), a)), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
   (('~ffma@32', a, ('fneg', a), ('fmul', 2.0, a)), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
   (('~fmul@32', a, ('fadd', 2.0, ('fneg', a))), ('flrp', a, 1.0, a), '!options->lower_flrp32'),

   # we do these late so that we don't get in the way of creating ffmas
   (('fmin', ('fadd(is_used_once)', '#c', a), ('fadd(is_used_once)', '#c', b)), ('fadd', c, ('fmin', a, b))),
   (('fmax', ('fadd(is_used_once)', '#c', a), ('fadd(is_used_once)', '#c', b)), ('fadd', c, ('fmax', a, b))),

   (('bcsel', a, 0, ('b2f32', ('inot', 'b@bool'))), ('b2f32', ('inot', ('ior', a, b)))),

   # Putting this in 'optimizations' interferes with the bcsel(a, op(b, c),
   # op(b, d)) => op(b, bcsel(a, c, d)) transformations.  I do not know why.
   (('bcsel', ('feq', ('fsqrt', 'a(is_not_negative)'), 0.0), intBitsToFloat(0x7f7fffff), ('frsq', a)),
    ('fmin', ('frsq', a), intBitsToFloat(0x7f7fffff))),

   # Things that look like DPH in the source shader may get expanded to
   # something that looks like dot(v1.xyz, v2.xyz) + v1.w by the time it gets
   # to NIR.  After FFMA is generated, this can look like:
   #
   #    fadd(ffma(v1.z, v2.z, ffma(v1.y, v2.y, fmul(v1.x, v2.x))), v1.w)
   #
   # Reassociate the last addition into the first multiplication.
   #
   # Some shaders do not use 'invariant' in vertex and (possibly) geometry
   # shader stages on some outputs that are intended to be invariant.  For
   # various reasons, this optimization may not be fully applied in all
   # shaders used for different rendering passes of the same geometry.  This
   # can result in Z-fighting artifacts (at best).  For now, disable this
   # optimization in these stages.  See bugzilla #111490.  In tessellation
   # stages applications seem to use 'precise' when necessary, so allow the
   # optimization in those stages.
   (('~fadd', ('ffma(is_used_once)', a, b, ('ffma', c, d, ('fmul', 'e(is_not_const_and_not_fsign)', 'f(is_not_const_and_not_fsign)'))), 'g(is_not_const)'),
    ('ffma', a, b, ('ffma', c, d, ('ffma', e, 'f', 'g'))), '(info->stage != MESA_SHADER_VERTEX && info->stage != MESA_SHADER_GEOMETRY) && !options->intel_vec4'),
   (('~fadd', ('ffma(is_used_once)', a, b, ('fmul', 'c(is_not_const_and_not_fsign)', 'd(is_not_const_and_not_fsign)') ), 'e(is_not_const)'),
    ('ffma', a, b, ('ffma', c, d, e)), '(info->stage != MESA_SHADER_VERTEX && info->stage != MESA_SHADER_GEOMETRY) && !options->intel_vec4'),

   # Convert *2*mp instructions to concrete *2*16 instructions.  At this point
   # any conversions that could have been removed will have been removed in
   # nir_opt_algebraic so any remaining ones are required.
   (('f2fmp', a), ('f2f16', a)),
   (('i2imp', a), ('i2i16', a)),
   (('u2ump', a), ('u2u16', a)),

   # Section 8.8 (Integer Functions) of the GLSL 4.60 spec says:
   #
   #    If bits is zero, the result will be zero.
   #
   # These prevent the next two lowerings generating incorrect results when
   # 'bits' is zero.
   (('ubfe', a, b, 0), 0),
   (('ibfe', a, b, 0), 0),

   # On Intel GPUs, BFE is a 3-source instruction.  Like all 3-source
   # instructions on Intel GPUs, it cannot have immediate values as
   # sources.  There are also limitations on source register strides.  As a
   # result, it is very easy for a 3-source instruction combined with either
   # loads of immediate values or copies from weird register strides to be
   # more expensive than the primitive instructions it represents.
   (('ubfe', a, '#b', '#c'), ('iand', ('ushr', 0xffffffff, ('ineg', c)), ('ushr', a, b)), 'options->lower_bfe_with_two_constants'),

   # b is the lowest order bit to be extracted and c is the number of bits to
   # extract.  The inner shift removes the bits above b + c by shifting left
   # 32 - (b + c).  ishl only sees the low 5 bits of the shift count, which is
   # -(b + c).  The outer shift moves the bit that was at b to bit zero.
   # After the first shift, that bit is now at b + (32 - (b + c)) or 32 - c.
   # This means that it must be shifted right by 32 - c or -c bits.
   (('ibfe', a, '#b', '#c'), ('ishr', ('ishl', a, ('ineg', ('iadd', b, c))), ('ineg', c)), 'options->lower_bfe_with_two_constants'),
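   # For example, with b = 8 and c = 8 this is ishr(ishl(a, -16), -8); the
   # shifts see only their low 5 bits, so a is shifted left by 16 and then
   # arithmetically right by 24, leaving bits 15:8 sign-extended in the result.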
   # Clean up no-op shifts that may result from the bfe lowerings.
   (('ishl', a, 0), a),
   (('ishl', a, -32), a),
   (('ishr', a, 0), a),
   (('ishr', a, -32), a),
   (('ushr', a, 0), a),
]

for op in ['fadd']:
   late_optimizations += [
      (('bcsel', a, (op + '(is_used_once)', b, c), (op, b, d)), (op, b, ('bcsel', a, c, d))),
      (('bcsel', a, (op, b, c), (op + '(is_used_once)', b, d)), (op, b, ('bcsel', a, c, d))),
   ]

for op in ['ffma']:
   late_optimizations += [
      (('bcsel', a, (op + '(is_used_once)', b, c, d), (op, b, c, e)), (op, b, c, ('bcsel', a, d, e))),
      (('bcsel', a, (op, b, c, d), (op + '(is_used_once)', b, c, e)), (op, b, c, ('bcsel', a, d, e))),

      (('bcsel', a, (op + '(is_used_once)', b, c, d), (op, b, e, d)), (op, b, ('bcsel', a, c, e), d)),
      (('bcsel', a, (op, b, c, d), (op + '(is_used_once)', b, e, d)), (op, b, ('bcsel', a, c, e), d)),
   ]
distribute_src_mods = [
   # Try to remove some spurious negations rather than pushing them down.
   (('fmul', ('fneg', a), ('fneg', b)), ('fmul', a, b)),
   (('ffma', ('fneg', a), ('fneg', b), c), ('ffma', a, b, c)),
   (('fdot_replicated2', ('fneg', a), ('fneg', b)), ('fdot_replicated2', a, b)),
   (('fdot_replicated3', ('fneg', a), ('fneg', b)), ('fdot_replicated3', a, b)),
   (('fdot_replicated4', ('fneg', a), ('fneg', b)), ('fdot_replicated4', a, b)),
   (('fneg', ('fneg', a)), a),

   (('fneg', ('ffma(is_used_once)', a, b, c)), ('ffma', ('fneg', a), b, ('fneg', c))),
   (('fneg', ('flrp(is_used_once)', a, b, c)), ('flrp', ('fneg', a), ('fneg', b), c)),
   (('fneg', ('fadd(is_used_once)', a, b)), ('fadd', ('fneg', a), ('fneg', b))),

   # Note that fmin <-> fmax.  I don't think there is a way to distribute
   # fabs() into fmin or fmax.
   (('fneg', ('fmin(is_used_once)', a, b)), ('fmax', ('fneg', a), ('fneg', b))),
   (('fneg', ('fmax(is_used_once)', a, b)), ('fmin', ('fneg', a), ('fneg', b))),

   # fdph works mostly like fdot, but to get the correct result, the negation
   # must be applied to the second source.
   (('fneg', ('fdph_replicated(is_used_once)', a, b)), ('fdph_replicated', a, ('fneg', b))),
   (('fabs', ('fdph_replicated(is_used_once)', a, b)), ('fdph_replicated', ('fabs', a), ('fabs', b))),

   (('fneg', ('fsign(is_used_once)', a)), ('fsign', ('fneg', a))),
   (('fabs', ('fsign(is_used_once)', a)), ('fsign', ('fabs', a))),
]
for op in ['fmul', 'fdot_replicated2', 'fdot_replicated3', 'fdot_replicated4']:
   distribute_src_mods.extend([
      (('fneg', (op + '(is_used_once)', a, b)), (op, ('fneg', a), b)),
      (('fabs', (op + '(is_used_once)', a, b)), (op, ('fabs', a), ('fabs', b))),
   ])
print(nir_algebraic.AlgebraicPass("nir_opt_algebraic", optimizations).render())
print(nir_algebraic.AlgebraicPass("nir_opt_algebraic_before_ffma",
                                  before_ffma_optimizations).render())
print(nir_algebraic.AlgebraicPass("nir_opt_algebraic_late",
                                  late_optimizations).render())
print(nir_algebraic.AlgebraicPass("nir_opt_algebraic_distribute_src_mods",
                                  distribute_src_mods).render())