1 #
2 # Copyright (C) 2014 Intel Corporation
3 #
4 # Permission is hereby granted, free of charge, to any person obtaining a
5 # copy of this software and associated documentation files (the "Software"),
6 # to deal in the Software without restriction, including without limitation
7 # the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 # and/or sell copies of the Software, and to permit persons to whom the
9 # Software is furnished to do so, subject to the following conditions:
10 #
11 # The above copyright notice and this permission notice (including the next
12 # paragraph) shall be included in all copies or substantial portions of the
13 # Software.
14 #
15 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 # IN THE SOFTWARE.
22 #
23 # Authors:
24 # Jason Ekstrand (jason@jlekstrand.net)
25
26 from __future__ import print_function
27
28 from collections import OrderedDict
29 import nir_algebraic
30 from nir_opcodes import type_sizes
31 import itertools
32 from math import pi
33
34 # Convenience variables
35 a = 'a'
36 b = 'b'
37 c = 'c'
38 d = 'd'
39 e = 'e'
40
41 # Written in the form (<search>, <replace>) where <search> is an expression
42 # and <replace> is either an expression or a value. An expression is
43 # defined as a tuple of the form ([~]<op>, <src0>, <src1>, <src2>, <src3>)
44 # where each source is either an expression or a value. A value can be
45 # either a numeric constant or a string representing a variable name.
46 #
47 # If the opcode in a search expression is prefixed by a '~' character, this
48 # indicates that the operation is inexact. Such operations will only get
49 # applied to SSA values that do not have the exact bit set. This should be
50 # used by any optimizations that are not bit-for-bit exact. It should not,
51 # however, be used for backend-requested lowering operations as those need to
52 # happen regardless of precision.
53 #
54 # Variable names are specified as "[#]name[@type][(cond)][.swiz]" where:
55 # "#" indicates that the given variable will only match constants,
56 # type indicates that the given variable will only match values from ALU
57 # instructions with the given output type,
58 # (cond) specifies an additional condition function (see nir_search_helpers.h),
59 # swiz is a swizzle applied to the variable (only in the <replace> expression)
60 #
61 # For constants, you have to be careful to make sure that it is the right
62 # type because Python is unaware of the source and destination types of the
63 # opcodes.
64 #
65 # All expression types can have a bit-size specified. For opcodes, this
66 # looks like "op@32", for variables it is "a@32" or "a@uint32" to specify a
67 # type and size. In the search half of the expression this indicates that it
68 # should only match that particular bit-size. In the replace half of the
69 # expression this indicates that the constructed value should have that
70 # bit-size.
71 #
72 # A special condition "many-comm-expr" can be used with expressions to note
73 # that the expression and its subexpressions have more commutative expressions
74 # than nir_replace_instr can handle. If this special condition is needed with
75 # another condition, the two can be separated by a comma (e.g.,
76 # "(many-comm-expr,is_used_once)").
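#
# As a concrete illustration of the syntax, a transform such as
#
#    (('~fmul', a, 1.0), a)
#
# reads as "an inexact multiply of anything by 1.0 may be replaced by its
# first source", while
#
#    (('fsat', a), ('fmin', ('fmax', a, 0.0), 1.0), 'options->lower_fsat')
#
# only applies when the backend sets lower_fsat. Note that float opcodes pair
# with float constants (0.0, 1.0) and integer opcodes like iadd pair with
# integer constants (0, 1); mixing them up is the constant-typing pitfall
# mentioned above.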
77
78 # based on https://web.archive.org/web/20180105155939/http://forum.devmaster.net/t/fast-and-accurate-sine-cosine/9648
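# A rough reading of the helper below, following that approximation: the
# first line folds the angle into one period and remaps it to roughly
# [-1, 1]; the second line is the parabolic approximation of the sine over
# that range; the final ffma is the extra-precision step that blends in
# 0.225 * (x * |x| - x). The phase offset c selects the function: the
# callers further down pass 0.5 for fsin and 0.75 for fcos.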
79 def lowered_sincos(c):
80 x = ('fsub', ('fmul', 2.0, ('ffract', ('fadd', ('fmul', 0.5 / pi, a), c))), 1.0)
81 x = ('fmul', ('fsub', x, ('fmul', x, ('fabs', x))), 4.0)
82 return ('ffma', ('ffma', x, ('fabs', x), ('fneg', x)), 0.225, x)
83
84 optimizations = [
85
86 (('imul', a, '#b@32(is_pos_power_of_two)'), ('ishl', a, ('find_lsb', b)), '!options->lower_bitops'),
87 (('imul', a, '#b@32(is_neg_power_of_two)'), ('ineg', ('ishl', a, ('find_lsb', ('iabs', b)))), '!options->lower_bitops'),
88 (('ishl', a, '#b@32'), ('imul', a, ('ishl', 1, b)), 'options->lower_bitops'),
89
90 (('unpack_64_2x32_split_x', ('imul_2x32_64(is_used_once)', a, b)), ('imul', a, b)),
91 (('unpack_64_2x32_split_x', ('umul_2x32_64(is_used_once)', a, b)), ('imul', a, b)),
92 (('imul_2x32_64', a, b), ('pack_64_2x32_split', ('imul', a, b), ('imul_high', a, b)), 'options->lower_mul_2x32_64'),
93 (('umul_2x32_64', a, b), ('pack_64_2x32_split', ('imul', a, b), ('umul_high', a, b)), 'options->lower_mul_2x32_64'),
94 (('udiv', a, 1), a),
95 (('idiv', a, 1), a),
96 (('umod', a, 1), 0),
97 (('imod', a, 1), 0),
98 (('udiv', a, '#b@32(is_pos_power_of_two)'), ('ushr', a, ('find_lsb', b)), '!options->lower_bitops'),
99 (('idiv', a, '#b@32(is_pos_power_of_two)'), ('imul', ('isign', a), ('ushr', ('iabs', a), ('find_lsb', b))), 'options->lower_idiv'),
100 (('idiv', a, '#b@32(is_neg_power_of_two)'), ('ineg', ('imul', ('isign', a), ('ushr', ('iabs', a), ('find_lsb', ('iabs', b))))), 'options->lower_idiv'),
101 (('umod', a, '#b(is_pos_power_of_two)'), ('iand', a, ('isub', b, 1))),
102
103 (('~fneg', ('fneg', a)), a),
104 (('ineg', ('ineg', a)), a),
105 (('fabs', ('fabs', a)), ('fabs', a)),
106 (('fabs', ('fneg', a)), ('fabs', a)),
107 (('fabs', ('u2f', a)), ('u2f', a)),
108 (('iabs', ('iabs', a)), ('iabs', a)),
109 (('iabs', ('ineg', a)), ('iabs', a)),
110 (('f2b', ('fneg', a)), ('f2b', a)),
111 (('i2b', ('ineg', a)), ('i2b', a)),
112 (('~fadd', a, 0.0), a),
113 (('iadd', a, 0), a),
114 (('usadd_4x8', a, 0), a),
115 (('usadd_4x8', a, ~0), ~0),
116 (('~fadd', ('fmul', a, b), ('fmul', a, c)), ('fmul', a, ('fadd', b, c))),
117 (('iadd', ('imul', a, b), ('imul', a, c)), ('imul', a, ('iadd', b, c))),
118 (('~fadd', ('fneg', a), a), 0.0),
119 (('iadd', ('ineg', a), a), 0),
120 (('iadd', ('ineg', a), ('iadd', a, b)), b),
121 (('iadd', a, ('iadd', ('ineg', a), b)), b),
122 (('~fadd', ('fneg', a), ('fadd', a, b)), b),
123 (('~fadd', a, ('fadd', ('fneg', a), b)), b),
124 (('fadd', ('fsat', a), ('fsat', ('fneg', a))), ('fsat', ('fabs', a))),
125 (('~fmul', a, 0.0), 0.0),
126 (('imul', a, 0), 0),
127 (('umul_unorm_4x8', a, 0), 0),
128 (('umul_unorm_4x8', a, ~0), a),
129 (('~fmul', a, 1.0), a),
130 (('imul', a, 1), a),
131 (('fmul', a, -1.0), ('fneg', a)),
132 (('imul', a, -1), ('ineg', a)),
133 # If a < 0: fsign(a)*a*a => -1*a*a => -a*a => abs(a)*a
134 # If a > 0: fsign(a)*a*a => 1*a*a => a*a => abs(a)*a
135 # If a == 0: fsign(a)*a*a => 0*0*0 => abs(0)*0
136 (('fmul', ('fsign', a), ('fmul', a, a)), ('fmul', ('fabs', a), a)),
137 (('fmul', ('fmul', ('fsign', a), a), a), ('fmul', ('fabs', a), a)),
138 (('~ffma', 0.0, a, b), b),
139 (('~ffma', a, b, 0.0), ('fmul', a, b)),
140 (('ffma', 1.0, a, b), ('fadd', a, b)),
141 (('ffma', -1.0, a, b), ('fadd', ('fneg', a), b)),
142 (('~flrp', a, b, 0.0), a),
143 (('~flrp', a, b, 1.0), b),
144 (('~flrp', a, a, b), a),
145 (('~flrp', 0.0, a, b), ('fmul', a, b)),
146
147 # flrp(a, a + b, c) => a + flrp(0, b, c) => a + (b * c)
148 (('~flrp', a, ('fadd(is_used_once)', a, b), c), ('fadd', ('fmul', b, c), a)),
149 (('~flrp@32', a, ('fadd', a, b), c), ('fadd', ('fmul', b, c), a), 'options->lower_flrp32'),
150 (('~flrp@64', a, ('fadd', a, b), c), ('fadd', ('fmul', b, c), a), 'options->lower_flrp64'),
151
152 (('~flrp@32', ('fadd', a, b), ('fadd', a, c), d), ('fadd', ('flrp', b, c, d), a), 'options->lower_flrp32'),
153 (('~flrp@64', ('fadd', a, b), ('fadd', a, c), d), ('fadd', ('flrp', b, c, d), a), 'options->lower_flrp64'),
154
155 (('~flrp@32', a, ('fmul(is_used_once)', a, b), c), ('fmul', ('flrp', 1.0, b, c), a), 'options->lower_flrp32'),
156 (('~flrp@64', a, ('fmul(is_used_once)', a, b), c), ('fmul', ('flrp', 1.0, b, c), a), 'options->lower_flrp64'),
157
158 (('~flrp', ('fmul(is_used_once)', a, b), ('fmul(is_used_once)', a, c), d), ('fmul', ('flrp', b, c, d), a)),
159
160 (('~flrp', a, b, ('b2f', 'c@1')), ('bcsel', c, b, a), 'options->lower_flrp32'),
161 (('~flrp', a, 0.0, c), ('fadd', ('fmul', ('fneg', a), c), a)),
162 (('ftrunc', a), ('bcsel', ('flt', a, 0.0), ('fneg', ('ffloor', ('fabs', a))), ('ffloor', ('fabs', a))), 'options->lower_ftrunc'),
163 (('ffloor', a), ('fsub', a, ('ffract', a)), 'options->lower_ffloor'),
164 (('fadd', a, ('fneg', ('ffract', a))), ('ffloor', a), '!options->lower_ffloor'),
165 (('ffract', a), ('fsub', a, ('ffloor', a)), 'options->lower_ffract'),
166 (('fceil', a), ('fneg', ('ffloor', ('fneg', a))), 'options->lower_fceil'),
167 (('~fadd', ('fmul', a, ('fadd', 1.0, ('fneg', ('b2f', 'c@1')))), ('fmul', b, ('b2f', c))), ('bcsel', c, b, a), 'options->lower_flrp32'),
168 (('~fadd@32', ('fmul', a, ('fadd', 1.0, ('fneg', c ) )), ('fmul', b, c )), ('flrp', a, b, c), '!options->lower_flrp32'),
169 (('~fadd@64', ('fmul', a, ('fadd', 1.0, ('fneg', c ) )), ('fmul', b, c )), ('flrp', a, b, c), '!options->lower_flrp64'),
170 # These are the same as the previous three rules, but they depend on
171 # 1-fsat(x) <=> fsat(1-x). See below.
172 (('~fadd@32', ('fmul', a, ('fsat', ('fadd', 1.0, ('fneg', c )))), ('fmul', b, ('fsat', c))), ('flrp', a, b, ('fsat', c)), '!options->lower_flrp32'),
173 (('~fadd@64', ('fmul', a, ('fsat', ('fadd', 1.0, ('fneg', c )))), ('fmul', b, ('fsat', c))), ('flrp', a, b, ('fsat', c)), '!options->lower_flrp64'),
174
175 (('~fadd', a, ('fmul', ('b2f', 'c@1'), ('fadd', b, ('fneg', a)))), ('bcsel', c, b, a), 'options->lower_flrp32'),
176 (('~fadd@32', a, ('fmul', c , ('fadd', b, ('fneg', a)))), ('flrp', a, b, c), '!options->lower_flrp32'),
177 (('~fadd@64', a, ('fmul', c , ('fadd', b, ('fneg', a)))), ('flrp', a, b, c), '!options->lower_flrp64'),
178 (('ffma', a, b, c), ('fadd', ('fmul', a, b), c), 'options->lower_ffma'),
179 (('~fadd', ('fmul', a, b), c), ('ffma', a, b, c), 'options->fuse_ffma'),
180
181 (('~fmul', ('fadd', ('iand', ('ineg', ('b2i32', 'a@bool')), ('fmul', b, c)), '#d'), '#e'),
182 ('bcsel', a, ('fmul', ('fadd', ('fmul', b, c), d), e), ('fmul', d, e))),
183
184 (('fdph', a, b), ('fdot4', ('vec4', 'a.x', 'a.y', 'a.z', 1.0), b), 'options->lower_fdph'),
185
186 (('fdot4', ('vec4', a, b, c, 1.0), d), ('fdph', ('vec3', a, b, c), d), '!options->lower_fdph'),
187 (('fdot4', ('vec4', a, 0.0, 0.0, 0.0), b), ('fmul', a, b)),
188 (('fdot4', ('vec4', a, b, 0.0, 0.0), c), ('fdot2', ('vec2', a, b), c)),
189 (('fdot4', ('vec4', a, b, c, 0.0), d), ('fdot3', ('vec3', a, b, c), d)),
190
191 (('fdot3', ('vec3', a, 0.0, 0.0), b), ('fmul', a, b)),
192 (('fdot3', ('vec3', a, b, 0.0), c), ('fdot2', ('vec2', a, b), c)),
193
194 (('fdot2', ('vec2', a, 0.0), b), ('fmul', a, b)),
195 (('fdot2', a, 1.0), ('fadd', 'a.x', 'a.y')),
196
197 # Lower fdot to fsum when it is available
198 (('fdot2', a, b), ('fsum2', ('fmul', a, b)), 'options->lower_fdot'),
199 (('fdot3', a, b), ('fsum3', ('fmul', a, b)), 'options->lower_fdot'),
200 (('fdot4', a, b), ('fsum4', ('fmul', a, b)), 'options->lower_fdot'),
201 (('fsum2', a), ('fadd', 'a.x', 'a.y'), 'options->lower_fdot'),
202
203 # If x >= 0 and x <= 1: fsat(1 - x) == 1 - fsat(x) trivially
204 # If x < 0: 1 - fsat(x) => 1 - 0 => 1 and fsat(1 - x) => fsat(> 1) => 1
205 # If x > 1: 1 - fsat(x) => 1 - 1 => 0 and fsat(1 - x) => fsat(< 0) => 0
206 (('~fadd', ('fneg(is_used_once)', ('fsat(is_used_once)', 'a(is_not_fmul)')), 1.0), ('fsat', ('fadd', 1.0, ('fneg', a)))),
207 (('~fsub', 1.0, ('fsat', a)), ('fsat', ('fsub', 1.0, a))),
208
209 # 1 - ((1 - a) * (1 - b))
210 # 1 - (1 - a - b + a*b)
211 # 1 - 1 + a + b - a*b
212 # a + b - a*b
213 # a + b*(1 - a)
214 # b*(1 - a) + 1*a
215 # flrp(b, 1, a)
216 (('~fadd@32', 1.0, ('fneg', ('fmul', ('fadd', 1.0, ('fneg', a)), ('fadd', 1.0, ('fneg', b))))),
217 ('flrp', b, 1.0, a), '!options->lower_flrp32'),
218
219 # (a * #b + #c) << #d
220 # ((a * #b) << #d) + (#c << #d)
221 # (a * (#b << #d)) + (#c << #d)
222 (('ishl', ('iadd', ('imul', a, '#b'), '#c'), '#d'),
223 ('iadd', ('imul', a, ('ishl', b, d)), ('ishl', c, d))),
224
225 # (a * #b) << #c
226 # a * (#b << #c)
227 (('ishl', ('imul', a, '#b'), '#c'), ('imul', a, ('ishl', b, c))),
228 ]
229
230 # Care must be taken here. Shifts in NIR use only the lower log2(bitsize)
231 # bits of the second source. These replacements must correctly handle the
232 # case where (b % bitsize) + (c % bitsize) >= bitsize.
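# For example, with s == 32, b == 20 and c == 20 the original pair of shifts
# produces 0, but a naively merged shift by b + c == 40 would only use the
# low five bits and shift by 40 & 31 == 8. Hence the bcsel on in_bounds
# below; ishr instead clamps the merged count to s - 1 so that large shifts
# of negative values still yield -1.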
233 for s in [8, 16, 32, 64]:
234 mask = (1 << s) - 1
235
236 ishl = "ishl@{}".format(s)
237 ishr = "ishr@{}".format(s)
238 ushr = "ushr@{}".format(s)
239
240 in_bounds = ('ult', ('iadd', ('iand', b, mask), ('iand', c, mask)), s)
241
242 optimizations.extend([
243 ((ishl, (ishl, a, '#b'), '#c'), ('bcsel', in_bounds, (ishl, a, ('iadd', b, c)), 0)),
244 ((ushr, (ushr, a, '#b'), '#c'), ('bcsel', in_bounds, (ushr, a, ('iadd', b, c)), 0)),
245
246 # To get -1 for large shifts of negative values, ishr must instead
247 # clamp the shift count to the maximum value.
248 ((ishr, (ishr, a, '#b'), '#c'),
249 (ishr, a, ('imin', ('iadd', ('iand', b, mask), ('iand', c, mask)), s - 1))),
250 ])
251
252 optimizations.extend([
253 # This is common for address calculations. Reassociating may enable the
254 # 'a<<c' to be CSE'd. It also helps architectures that have an ISHLADD
255 # instruction or a constant offset field in load / store instructions.
256 (('ishl', ('iadd', a, '#b'), '#c'), ('iadd', ('ishl', a, c), ('ishl', b, c))),
257
258 # Comparison simplifications
259 (('~inot', ('flt', a, b)), ('fge', a, b)),
260 (('~inot', ('fge', a, b)), ('flt', a, b)),
261 (('inot', ('feq', a, b)), ('fne', a, b)),
262 (('inot', ('fne', a, b)), ('feq', a, b)),
263 (('inot', ('ilt', a, b)), ('ige', a, b)),
264 (('inot', ('ult', a, b)), ('uge', a, b)),
265 (('inot', ('ige', a, b)), ('ilt', a, b)),
266 (('inot', ('uge', a, b)), ('ult', a, b)),
267 (('inot', ('ieq', a, b)), ('ine', a, b)),
268 (('inot', ('ine', a, b)), ('ieq', a, b)),
269
270 (('iand', ('feq', a, b), ('fne', a, b)), False),
271 (('iand', ('flt', a, b), ('flt', b, a)), False),
272 (('iand', ('ieq', a, b), ('ine', a, b)), False),
273 (('iand', ('ilt', a, b), ('ilt', b, a)), False),
274 (('iand', ('ult', a, b), ('ult', b, a)), False),
275
276 # This helps some shaders because, after some optimizations, they end up
277 # with patterns like (-a < -b) || (b < a). In an ideal world, this sort of
278 # matching would be handled by CSE.
279 (('flt', ('fneg', a), ('fneg', b)), ('flt', b, a)),
280 (('fge', ('fneg', a), ('fneg', b)), ('fge', b, a)),
281 (('feq', ('fneg', a), ('fneg', b)), ('feq', b, a)),
282 (('fne', ('fneg', a), ('fneg', b)), ('fne', b, a)),
283 (('flt', ('fneg', a), -1.0), ('flt', 1.0, a)),
284 (('flt', -1.0, ('fneg', a)), ('flt', a, 1.0)),
285 (('fge', ('fneg', a), -1.0), ('fge', 1.0, a)),
286 (('fge', -1.0, ('fneg', a)), ('fge', a, 1.0)),
287 (('fne', ('fneg', a), -1.0), ('fne', 1.0, a)),
288 (('feq', -1.0, ('fneg', a)), ('feq', a, 1.0)),
289
290 (('flt', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('flt', a, b)),
291 (('flt', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('flt', b, a)),
292 (('fge', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fge', a, b)),
293 (('fge', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('fge', b, a)),
294 (('feq', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('feq', a, b)),
295 (('fne', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fne', a, b)),
296
297 (('fge', ('fsat(is_used_once)', a), 1.0), ('fge', a, 1.0)),
298 (('flt', ('fsat(is_used_once)', a), 1.0), ('flt', a, 1.0)),
299 (('fge', 0.0, ('fsat(is_used_once)', a)), ('fge', 0.0, a)),
300 (('flt', 0.0, ('fsat(is_used_once)', a)), ('flt', 0.0, a)),
301
302 # 0.0 >= b2f(a)
303 # b2f(a) <= 0.0
304 # b2f(a) == 0.0 because b2f(a) can only be 0 or 1
305 # inot(a)
306 (('fge', 0.0, ('b2f', 'a@1')), ('inot', a)),
307
308 (('fge', ('fneg', ('b2f', 'a@1')), 0.0), ('inot', a)),
309
310 (('fne', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('ior', a, b)),
311 (('fne', ('fmax', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('ior', a, b)),
312 (('fne', ('bcsel', a, 1.0, ('b2f', 'b@1')) , 0.0), ('ior', a, b)),
313 (('fne', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), ('ior', a, b)),
314 (('fne', ('fmul', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('iand', a, b)),
315 (('fne', ('fmin', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('iand', a, b)),
316 (('fne', ('bcsel', a, ('b2f', 'b@1'), 0.0) , 0.0), ('iand', a, b)),
317 (('fne', ('fadd', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), 0.0), ('ixor', a, b)),
318 (('fne', ('b2f', 'a@1') , ('b2f', 'b@1') ), ('ixor', a, b)),
319 (('fne', ('fneg', ('b2f', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('ixor', a, b)),
320 (('feq', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('ior', a, b))),
321 (('feq', ('fmax', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('ior', a, b))),
322 (('feq', ('bcsel', a, 1.0, ('b2f', 'b@1')) , 0.0), ('inot', ('ior', a, b))),
323 (('feq', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), ('inot', ('ior', a, b))),
324 (('feq', ('fmul', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('iand', a, b))),
325 (('feq', ('fmin', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('iand', a, b))),
326 (('feq', ('bcsel', a, ('b2f', 'b@1'), 0.0) , 0.0), ('inot', ('iand', a, b))),
327 (('feq', ('fadd', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), 0.0), ('ieq', a, b)),
328 (('feq', ('b2f', 'a@1') , ('b2f', 'b@1') ), ('ieq', a, b)),
329 (('feq', ('fneg', ('b2f', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('ieq', a, b)),
330
331 # -(b2f(a) + b2f(b)) < 0
332 # 0 < b2f(a) + b2f(b)
333 # 0 != b2f(a) + b2f(b) b2f must be 0 or 1, so the sum is non-negative
334 # a || b
335 (('flt', ('fneg', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), 0.0), ('ior', a, b)),
336 (('flt', 0.0, ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), ('ior', a, b)),
337
338 # -(b2f(a) + b2f(b)) >= 0
339 # 0 >= b2f(a) + b2f(b)
340 # 0 == b2f(a) + b2f(b) b2f must be 0 or 1, so the sum is non-negative
341 # !(a || b)
342 (('fge', ('fneg', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), 0.0), ('inot', ('ior', a, b))),
343 (('fge', 0.0, ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), ('inot', ('ior', a, b))),
344
345 (('flt', a, ('fneg', a)), ('flt', a, 0.0)),
346 (('fge', a, ('fneg', a)), ('fge', a, 0.0)),
347
348 # Some optimizations (below) convert things like (a < b || c < b) into
349 # (min(a, c) < b). However, this interferes with the previous optimizations
350 # that try to remove comparisons with negated sums of b2f. This just
351 # breaks that apart.
352 (('flt', ('fmin', c, ('fneg', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1')))), 0.0),
353 ('ior', ('flt', c, 0.0), ('ior', a, b))),
354
355 (('~flt', ('fadd', a, b), a), ('flt', b, 0.0)),
356 (('~fge', ('fadd', a, b), a), ('fge', b, 0.0)),
357 (('~feq', ('fadd', a, b), a), ('feq', b, 0.0)),
358 (('~fne', ('fadd', a, b), a), ('fne', b, 0.0)),
359 (('~flt', ('fadd(is_used_once)', a, '#b'), '#c'), ('flt', a, ('fadd', c, ('fneg', b)))),
360 (('~flt', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('flt', ('fneg', ('fadd', c, b)), a)),
361 (('~fge', ('fadd(is_used_once)', a, '#b'), '#c'), ('fge', a, ('fadd', c, ('fneg', b)))),
362 (('~fge', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('fge', ('fneg', ('fadd', c, b)), a)),
363 (('~feq', ('fadd(is_used_once)', a, '#b'), '#c'), ('feq', a, ('fadd', c, ('fneg', b)))),
364 (('~feq', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('feq', ('fneg', ('fadd', c, b)), a)),
365 (('~fne', ('fadd(is_used_once)', a, '#b'), '#c'), ('fne', a, ('fadd', c, ('fneg', b)))),
366 (('~fne', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('fne', ('fneg', ('fadd', c, b)), a)),
367
368 # Cannot remove the addition from ilt or ige due to overflow.
369 (('ieq', ('iadd', a, b), a), ('ieq', b, 0)),
370 (('ine', ('iadd', a, b), a), ('ine', b, 0)),
371
372 # fmin(-b2f(a), b) >= 0.0
373 # -b2f(a) >= 0.0 && b >= 0.0
374 # -b2f(a) == 0.0 && b >= 0.0 -b2f can only be 0 or -1, never >0
375 # b2f(a) == 0.0 && b >= 0.0
376 # a == False && b >= 0.0
377 # !a && b >= 0.0
378 #
379 # The fge in the second replacement is not a typo. I leave the proof that
380 # "fmin(-b2f(a), b) >= 0 <=> fmin(-b2f(a), b) == 0" as an exercise for the
381 # reader.
382 (('fge', ('fmin', ('fneg', ('b2f', 'a@1')), 'b@1'), 0.0), ('iand', ('inot', a), ('fge', b, 0.0))),
383 (('feq', ('fmin', ('fneg', ('b2f', 'a@1')), 'b@1'), 0.0), ('iand', ('inot', a), ('fge', b, 0.0))),
384
385 (('feq', ('b2f', 'a@1'), 0.0), ('inot', a)),
386 (('~fne', ('b2f', 'a@1'), 0.0), a),
387 (('ieq', ('b2i', 'a@1'), 0), ('inot', a)),
388 (('ine', ('b2i', 'a@1'), 0), a),
389
390 (('fne', ('u2f', a), 0.0), ('ine', a, 0)),
391 (('feq', ('u2f', a), 0.0), ('ieq', a, 0)),
392 (('fge', ('u2f', a), 0.0), True),
393 (('fge', 0.0, ('u2f', a)), ('uge', 0, a)), # ieq instead?
394 (('flt', ('u2f', a), 0.0), False),
395 (('flt', 0.0, ('u2f', a)), ('ult', 0, a)), # ine instead?
396 (('fne', ('i2f', a), 0.0), ('ine', a, 0)),
397 (('feq', ('i2f', a), 0.0), ('ieq', a, 0)),
398 (('fge', ('i2f', a), 0.0), ('ige', a, 0)),
399 (('fge', 0.0, ('i2f', a)), ('ige', 0, a)),
400 (('flt', ('i2f', a), 0.0), ('ilt', a, 0)),
401 (('flt', 0.0, ('i2f', a)), ('ilt', 0, a)),
402
403 # 0.0 < fabs(a)
404 # fabs(a) > 0.0
405 # fabs(a) != 0.0 because fabs(a) must be >= 0
406 # a != 0.0
407 (('~flt', 0.0, ('fabs', a)), ('fne', a, 0.0)),
408
409 # -fabs(a) < 0.0
410 # fabs(a) > 0.0
411 (('~flt', ('fneg', ('fabs', a)), 0.0), ('fne', a, 0.0)),
412
413 # 0.0 >= fabs(a)
414 # 0.0 == fabs(a) because fabs(a) must be >= 0
415 # 0.0 == a
416 (('fge', 0.0, ('fabs', a)), ('feq', a, 0.0)),
417
418 # -fabs(a) >= 0.0
419 # 0.0 >= fabs(a)
420 (('fge', ('fneg', ('fabs', a)), 0.0), ('feq', a, 0.0)),
421
422 # (a >= 0.0) && (a <= 1.0) -> fsat(a) == a
423 (('iand', ('fge', a, 0.0), ('fge', 1.0, a)), ('feq', a, ('fsat', a)), '!options->lower_fsat'),
424
425 # (a < 0.0) || (a > 1.0)
426 # !(!(a < 0.0) && !(a > 1.0))
427 # !((a >= 0.0) && (a <= 1.0))
428 # !(a == fsat(a))
429 # a != fsat(a)
430 (('ior', ('flt', a, 0.0), ('flt', 1.0, a)), ('fne', a, ('fsat', a)), '!options->lower_fsat'),
431
432 (('fmax', ('b2f(is_used_once)', 'a@1'), ('b2f', 'b@1')), ('b2f', ('ior', a, b))),
433 (('fmax', ('fneg(is_used_once)', ('b2f(is_used_once)', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('fneg', ('b2f', ('ior', a, b)))),
434 (('fmin', ('b2f(is_used_once)', 'a@1'), ('b2f', 'b@1')), ('b2f', ('iand', a, b))),
435 (('fmin', ('fneg(is_used_once)', ('b2f(is_used_once)', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('fneg', ('b2f', ('iand', a, b)))),
436
437 # fmin(b2f(a), b)
438 # bcsel(a, fmin(b2f(a), b), fmin(b2f(a), b))
439 # bcsel(a, fmin(b2f(True), b), fmin(b2f(False), b))
440 # bcsel(a, fmin(1.0, b), fmin(0.0, b))
441 #
442 # Since b is a constant, constant folding will eliminate the fmin and the
443 # fmax. If b is > 1.0, the bcsel will be replaced with a b2f.
444 (('fmin', ('b2f', 'a@1'), '#b'), ('bcsel', a, ('fmin', b, 1.0), ('fmin', b, 0.0))),
445
446 (('flt', ('fadd(is_used_once)', a, ('fneg', b)), 0.0), ('flt', a, b)),
447
448 (('fge', ('fneg', ('fabs', a)), 0.0), ('feq', a, 0.0)),
449 (('~bcsel', ('flt', b, a), b, a), ('fmin', a, b)),
450 (('~bcsel', ('flt', a, b), b, a), ('fmax', a, b)),
451 (('~bcsel', ('fge', a, b), b, a), ('fmin', a, b)),
452 (('~bcsel', ('fge', b, a), b, a), ('fmax', a, b)),
453 (('bcsel', ('i2b', a), b, c), ('bcsel', ('ine', a, 0), b, c)),
454 (('bcsel', ('inot', a), b, c), ('bcsel', a, c, b)),
455 (('bcsel', a, ('bcsel', a, b, c), d), ('bcsel', a, b, d)),
456 (('bcsel', a, b, ('bcsel', a, c, d)), ('bcsel', a, b, d)),
457 (('bcsel', a, ('bcsel', b, c, d), ('bcsel(is_used_once)', b, c, 'e')), ('bcsel', b, c, ('bcsel', a, d, 'e'))),
458 (('bcsel', a, ('bcsel(is_used_once)', b, c, d), ('bcsel', b, c, 'e')), ('bcsel', b, c, ('bcsel', a, d, 'e'))),
459 (('bcsel', a, ('bcsel', b, c, d), ('bcsel(is_used_once)', b, 'e', d)), ('bcsel', b, ('bcsel', a, c, 'e'), d)),
460 (('bcsel', a, ('bcsel(is_used_once)', b, c, d), ('bcsel', b, 'e', d)), ('bcsel', b, ('bcsel', a, c, 'e'), d)),
461 (('bcsel', a, True, b), ('ior', a, b)),
462 (('bcsel', a, a, b), ('ior', a, b)),
463 (('bcsel', a, b, False), ('iand', a, b)),
464 (('bcsel', a, b, a), ('iand', a, b)),
465 (('~fmin', a, a), a),
466 (('~fmax', a, a), a),
467 (('imin', a, a), a),
468 (('imax', a, a), a),
469 (('umin', a, a), a),
470 (('umax', a, a), a),
471 (('fmax', ('fmax', a, b), b), ('fmax', a, b)),
472 (('umax', ('umax', a, b), b), ('umax', a, b)),
473 (('imax', ('imax', a, b), b), ('imax', a, b)),
474 (('fmin', ('fmin', a, b), b), ('fmin', a, b)),
475 (('umin', ('umin', a, b), b), ('umin', a, b)),
476 (('imin', ('imin', a, b), b), ('imin', a, b)),
477 (('fmax', a, ('fneg', a)), ('fabs', a)),
478 (('imax', a, ('ineg', a)), ('iabs', a)),
479 (('fmin', a, ('fneg', a)), ('fneg', ('fabs', a))),
480 (('imin', a, ('ineg', a)), ('ineg', ('iabs', a))),
481 (('fmin', a, ('fneg', ('fabs', a))), ('fneg', ('fabs', a))),
482 (('imin', a, ('ineg', ('iabs', a))), ('ineg', ('iabs', a))),
483 (('~fmin', a, ('fabs', a)), a),
484 (('imin', a, ('iabs', a)), a),
485 (('~fmax', a, ('fneg', ('fabs', a))), a),
486 (('imax', a, ('ineg', ('iabs', a))), a),
487 (('fmax', a, ('fabs', a)), ('fabs', a)),
488 (('imax', a, ('iabs', a)), ('iabs', a)),
489 (('fmax', a, ('fneg', a)), ('fabs', a)),
490 (('imax', a, ('ineg', a)), ('iabs', a)),
491 (('~fmax', ('fabs', a), 0.0), ('fabs', a)),
492 (('~fmin', ('fmax', a, 0.0), 1.0), ('fsat', a), '!options->lower_fsat'),
493 (('~fmax', ('fmin', a, 1.0), 0.0), ('fsat', a), '!options->lower_fsat'),
494 (('~fmin', ('fmax', a, -1.0), 0.0), ('fneg', ('fsat', ('fneg', a))), '!options->lower_negate && !options->lower_fsat'),
495 (('~fmax', ('fmin', a, 0.0), -1.0), ('fneg', ('fsat', ('fneg', a))), '!options->lower_negate && !options->lower_fsat'),
496 (('fsat', ('fsign', a)), ('b2f', ('flt', 0.0, a))),
497 (('fsat', ('b2f', a)), ('b2f', a)),
498 (('fsat', a), ('fmin', ('fmax', a, 0.0), 1.0), 'options->lower_fsat'),
499 (('fsat', ('fsat', a)), ('fsat', a)),
500 (('fsat', ('fneg(is_used_once)', ('fadd(is_used_once)', a, b))), ('fsat', ('fadd', ('fneg', a), ('fneg', b))), '!options->lower_negate && !options->lower_fsat'),
501 (('fsat', ('fneg(is_used_once)', ('fmul(is_used_once)', a, b))), ('fsat', ('fmul', ('fneg', a), b)), '!options->lower_negate && !options->lower_fsat'),
502 (('fsat', ('fabs(is_used_once)', ('fmul(is_used_once)', a, b))), ('fsat', ('fmul', ('fabs', a), ('fabs', b))), '!options->lower_fsat'),
503 (('fmin', ('fmax', ('fmin', ('fmax', a, b), c), b), c), ('fmin', ('fmax', a, b), c)),
504 (('imin', ('imax', ('imin', ('imax', a, b), c), b), c), ('imin', ('imax', a, b), c)),
505 (('umin', ('umax', ('umin', ('umax', a, b), c), b), c), ('umin', ('umax', a, b), c)),
506 (('fmax', ('fsat', a), '#b@32(is_zero_to_one)'), ('fsat', ('fmax', a, b))),
507 (('fmin', ('fsat', a), '#b@32(is_zero_to_one)'), ('fsat', ('fmin', a, b))),
508 (('extract_u8', ('imin', ('imax', a, 0), 0xff), 0), ('imin', ('imax', a, 0), 0xff)),
509 (('~ior', ('flt(is_used_once)', a, b), ('flt', a, c)), ('flt', a, ('fmax', b, c))),
510 (('~ior', ('flt(is_used_once)', a, c), ('flt', b, c)), ('flt', ('fmin', a, b), c)),
511 (('~ior', ('fge(is_used_once)', a, b), ('fge', a, c)), ('fge', a, ('fmin', b, c))),
512 (('~ior', ('fge(is_used_once)', a, c), ('fge', b, c)), ('fge', ('fmax', a, b), c)),
513 (('~ior', ('flt', a, '#b'), ('flt', a, '#c')), ('flt', a, ('fmax', b, c))),
514 (('~ior', ('flt', '#a', c), ('flt', '#b', c)), ('flt', ('fmin', a, b), c)),
515 (('~ior', ('fge', a, '#b'), ('fge', a, '#c')), ('fge', a, ('fmin', b, c))),
516 (('~ior', ('fge', '#a', c), ('fge', '#b', c)), ('fge', ('fmax', a, b), c)),
517 (('~iand', ('flt(is_used_once)', a, b), ('flt', a, c)), ('flt', a, ('fmin', b, c))),
518 (('~iand', ('flt(is_used_once)', a, c), ('flt', b, c)), ('flt', ('fmax', a, b), c)),
519 (('~iand', ('fge(is_used_once)', a, b), ('fge', a, c)), ('fge', a, ('fmax', b, c))),
520 (('~iand', ('fge(is_used_once)', a, c), ('fge', b, c)), ('fge', ('fmin', a, b), c)),
521 (('~iand', ('flt', a, '#b'), ('flt', a, '#c')), ('flt', a, ('fmin', b, c))),
522 (('~iand', ('flt', '#a', c), ('flt', '#b', c)), ('flt', ('fmax', a, b), c)),
523 (('~iand', ('fge', a, '#b'), ('fge', a, '#c')), ('fge', a, ('fmax', b, c))),
524 (('~iand', ('fge', '#a', c), ('fge', '#b', c)), ('fge', ('fmin', a, b), c)),
525
526 (('ior', ('ilt(is_used_once)', a, b), ('ilt', a, c)), ('ilt', a, ('imax', b, c))),
527 (('ior', ('ilt(is_used_once)', a, c), ('ilt', b, c)), ('ilt', ('imin', a, b), c)),
528 (('ior', ('ige(is_used_once)', a, b), ('ige', a, c)), ('ige', a, ('imin', b, c))),
529 (('ior', ('ige(is_used_once)', a, c), ('ige', b, c)), ('ige', ('imax', a, b), c)),
530 (('ior', ('ult(is_used_once)', a, b), ('ult', a, c)), ('ult', a, ('umax', b, c))),
531 (('ior', ('ult(is_used_once)', a, c), ('ult', b, c)), ('ult', ('umin', a, b), c)),
532 (('ior', ('uge(is_used_once)', a, b), ('uge', a, c)), ('uge', a, ('umin', b, c))),
533 (('ior', ('uge(is_used_once)', a, c), ('uge', b, c)), ('uge', ('umax', a, b), c)),
534 (('iand', ('ilt(is_used_once)', a, b), ('ilt', a, c)), ('ilt', a, ('imin', b, c))),
535 (('iand', ('ilt(is_used_once)', a, c), ('ilt', b, c)), ('ilt', ('imax', a, b), c)),
536 (('iand', ('ige(is_used_once)', a, b), ('ige', a, c)), ('ige', a, ('imax', b, c))),
537 (('iand', ('ige(is_used_once)', a, c), ('ige', b, c)), ('ige', ('imin', a, b), c)),
538 (('iand', ('ult(is_used_once)', a, b), ('ult', a, c)), ('ult', a, ('umin', b, c))),
539 (('iand', ('ult(is_used_once)', a, c), ('ult', b, c)), ('ult', ('umax', a, b), c)),
540 (('iand', ('uge(is_used_once)', a, b), ('uge', a, c)), ('uge', a, ('umax', b, c))),
541 (('iand', ('uge(is_used_once)', a, c), ('uge', b, c)), ('uge', ('umin', a, b), c)),
542
543 # These derive from the previous patterns with the application of b < 0 <=>
544 # 0 < -b. The transformation should be applied if either comparison is
545 # used once, as this ensures that the number of comparisons will not
546 # increase. The sources to the ior and iand are not symmetric, so the
547 # rules have to be duplicated to get this behavior.
548 (('~ior', ('flt(is_used_once)', 0.0, 'a@32'), ('flt', 'b@32', 0.0)), ('flt', 0.0, ('fmax', a, ('fneg', b)))),
549 (('~ior', ('flt', 0.0, 'a@32'), ('flt(is_used_once)', 'b@32', 0.0)), ('flt', 0.0, ('fmax', a, ('fneg', b)))),
550 (('~ior', ('fge(is_used_once)', 0.0, 'a@32'), ('fge', 'b@32', 0.0)), ('fge', 0.0, ('fmin', a, ('fneg', b)))),
551 (('~ior', ('fge', 0.0, 'a@32'), ('fge(is_used_once)', 'b@32', 0.0)), ('fge', 0.0, ('fmin', a, ('fneg', b)))),
552 (('~iand', ('flt(is_used_once)', 0.0, 'a@32'), ('flt', 'b@32', 0.0)), ('flt', 0.0, ('fmin', a, ('fneg', b)))),
553 (('~iand', ('flt', 0.0, 'a@32'), ('flt(is_used_once)', 'b@32', 0.0)), ('flt', 0.0, ('fmin', a, ('fneg', b)))),
554 (('~iand', ('fge(is_used_once)', 0.0, 'a@32'), ('fge', 'b@32', 0.0)), ('fge', 0.0, ('fmax', a, ('fneg', b)))),
555 (('~iand', ('fge', 0.0, 'a@32'), ('fge(is_used_once)', 'b@32', 0.0)), ('fge', 0.0, ('fmax', a, ('fneg', b)))),
556
557 # Common pattern like 'if (i == 0 || i == 1 || ...)'
558 (('ior', ('ieq', a, 0), ('ieq', a, 1)), ('uge', 1, a)),
559 (('ior', ('uge', 1, a), ('ieq', a, 2)), ('uge', 2, a)),
560 (('ior', ('uge', 2, a), ('ieq', a, 3)), ('uge', 3, a)),
561
562 # The (i2f32, ...) part is an open-coded fsign. When that is combined with
563 # the bcsel, it's basically copysign(1.0, a). There is no copysign in NIR,
564 # so emit an open-coded version of that.
565 (('bcsel@32', ('feq', a, 0.0), 1.0, ('i2f32', ('iadd', ('b2i32', ('flt', 0.0, 'a@32')), ('ineg', ('b2i32', ('flt', 'a@32', 0.0)))))),
566 ('ior', 0x3f800000, ('iand', a, 0x80000000))),
567
568 (('ior', a, ('ieq', a, False)), True),
569 (('ior', a, ('inot', a)), -1),
570
571 (('ine', ('ineg', ('b2i32', 'a@1')), ('ineg', ('b2i32', 'b@1'))), ('ine', a, b)),
572 (('b2i32', ('ine', 'a@1', 'b@1')), ('b2i32', ('ixor', a, b))),
573
574 (('iand', ('ieq', 'a@32', 0), ('ieq', 'b@32', 0)), ('ieq', ('ior', 'a@32', 'b@32'), 0), '!options->lower_bitops'),
575
576 # These patterns can result when (a < b || a < c) => (a < min(b, c))
577 # transformations occur before constant propagation and loop-unrolling.
578 (('~flt', a, ('fmax', b, a)), ('flt', a, b)),
579 (('~flt', ('fmin', a, b), a), ('flt', b, a)),
580 (('~fge', a, ('fmin', b, a)), True),
581 (('~fge', ('fmax', a, b), a), True),
582 (('~flt', a, ('fmin', b, a)), False),
583 (('~flt', ('fmax', a, b), a), False),
584 (('~fge', a, ('fmax', b, a)), ('fge', a, b)),
585 (('~fge', ('fmin', a, b), a), ('fge', b, a)),
586
587 (('ilt', a, ('imax', b, a)), ('ilt', a, b)),
588 (('ilt', ('imin', a, b), a), ('ilt', b, a)),
589 (('ige', a, ('imin', b, a)), True),
590 (('ige', ('imax', a, b), a), True),
591 (('ult', a, ('umax', b, a)), ('ult', a, b)),
592 (('ult', ('umin', a, b), a), ('ult', b, a)),
593 (('uge', a, ('umin', b, a)), True),
594 (('uge', ('umax', a, b), a), True),
595 (('ilt', a, ('imin', b, a)), False),
596 (('ilt', ('imax', a, b), a), False),
597 (('ige', a, ('imax', b, a)), ('ige', a, b)),
598 (('ige', ('imin', a, b), a), ('ige', b, a)),
599 (('ult', a, ('umin', b, a)), False),
600 (('ult', ('umax', a, b), a), False),
601 (('uge', a, ('umax', b, a)), ('uge', a, b)),
602 (('uge', ('umin', a, b), a), ('uge', b, a)),
603 (('ult', a, ('iand', b, a)), False),
604 (('ult', ('ior', a, b), a), False),
605 (('uge', a, ('iand', b, a)), True),
606 (('uge', ('ior', a, b), a), True),
607
608 (('ilt', '#a', ('imax', '#b', c)), ('ior', ('ilt', a, b), ('ilt', a, c))),
609 (('ilt', ('imin', '#a', b), '#c'), ('ior', ('ilt', a, c), ('ilt', b, c))),
610 (('ige', '#a', ('imin', '#b', c)), ('ior', ('ige', a, b), ('ige', a, c))),
611 (('ige', ('imax', '#a', b), '#c'), ('ior', ('ige', a, c), ('ige', b, c))),
612 (('ult', '#a', ('umax', '#b', c)), ('ior', ('ult', a, b), ('ult', a, c))),
613 (('ult', ('umin', '#a', b), '#c'), ('ior', ('ult', a, c), ('ult', b, c))),
614 (('uge', '#a', ('umin', '#b', c)), ('ior', ('uge', a, b), ('uge', a, c))),
615 (('uge', ('umax', '#a', b), '#c'), ('ior', ('uge', a, c), ('uge', b, c))),
616 (('ilt', '#a', ('imin', '#b', c)), ('iand', ('ilt', a, b), ('ilt', a, c))),
617 (('ilt', ('imax', '#a', b), '#c'), ('iand', ('ilt', a, c), ('ilt', b, c))),
618 (('ige', '#a', ('imax', '#b', c)), ('iand', ('ige', a, b), ('ige', a, c))),
619 (('ige', ('imin', '#a', b), '#c'), ('iand', ('ige', a, c), ('ige', b, c))),
620 (('ult', '#a', ('umin', '#b', c)), ('iand', ('ult', a, b), ('ult', a, c))),
621 (('ult', ('umax', '#a', b), '#c'), ('iand', ('ult', a, c), ('ult', b, c))),
622 (('uge', '#a', ('umax', '#b', c)), ('iand', ('uge', a, b), ('uge', a, c))),
623 (('uge', ('umin', '#a', b), '#c'), ('iand', ('uge', a, c), ('uge', b, c))),
624
625 # Thanks to sign extension, the ishr(a, b) is negative if and only if a is
626 # negative.
627 (('bcsel', ('ilt', a, 0), ('ineg', ('ishr', a, b)), ('ishr', a, b)),
628 ('iabs', ('ishr', a, b))),
629 (('iabs', ('ishr', ('iabs', a), b)), ('ishr', ('iabs', a), b)),
630
631 (('fabs', ('slt', a, b)), ('slt', a, b)),
632 (('fabs', ('sge', a, b)), ('sge', a, b)),
633 (('fabs', ('seq', a, b)), ('seq', a, b)),
634 (('fabs', ('sne', a, b)), ('sne', a, b)),
635 (('slt', a, b), ('b2f', ('flt', a, b)), 'options->lower_scmp'),
636 (('sge', a, b), ('b2f', ('fge', a, b)), 'options->lower_scmp'),
637 (('seq', a, b), ('b2f', ('feq', a, b)), 'options->lower_scmp'),
638 (('sne', a, b), ('b2f', ('fne', a, b)), 'options->lower_scmp'),
639 (('seq', ('seq', a, b), 1.0), ('seq', a, b)),
640 (('seq', ('sne', a, b), 1.0), ('sne', a, b)),
641 (('seq', ('slt', a, b), 1.0), ('slt', a, b)),
642 (('seq', ('sge', a, b), 1.0), ('sge', a, b)),
643 (('sne', ('seq', a, b), 0.0), ('seq', a, b)),
644 (('sne', ('sne', a, b), 0.0), ('sne', a, b)),
645 (('sne', ('slt', a, b), 0.0), ('slt', a, b)),
646 (('sne', ('sge', a, b), 0.0), ('sge', a, b)),
647 (('seq', ('seq', a, b), 0.0), ('sne', a, b)),
648 (('seq', ('sne', a, b), 0.0), ('seq', a, b)),
649 (('seq', ('slt', a, b), 0.0), ('sge', a, b)),
650 (('seq', ('sge', a, b), 0.0), ('slt', a, b)),
651 (('sne', ('seq', a, b), 1.0), ('sne', a, b)),
652 (('sne', ('sne', a, b), 1.0), ('seq', a, b)),
653 (('sne', ('slt', a, b), 1.0), ('sge', a, b)),
654 (('sne', ('sge', a, b), 1.0), ('slt', a, b)),
655 (('fall_equal2', a, b), ('fmin', ('seq', 'a.x', 'b.x'), ('seq', 'a.y', 'b.y')), 'options->lower_vector_cmp'),
656 (('fall_equal3', a, b), ('seq', ('fany_nequal3', a, b), 0.0), 'options->lower_vector_cmp'),
657 (('fall_equal4', a, b), ('seq', ('fany_nequal4', a, b), 0.0), 'options->lower_vector_cmp'),
658 (('fany_nequal2', a, b), ('fmax', ('sne', 'a.x', 'b.x'), ('sne', 'a.y', 'b.y')), 'options->lower_vector_cmp'),
659 (('fany_nequal3', a, b), ('fsat', ('fdot3', ('sne', a, b), ('sne', a, b))), 'options->lower_vector_cmp'),
660 (('fany_nequal4', a, b), ('fsat', ('fdot4', ('sne', a, b), ('sne', a, b))), 'options->lower_vector_cmp'),
661 (('fne', ('fneg', a), a), ('fne', a, 0.0)),
662 (('feq', ('fneg', a), a), ('feq', a, 0.0)),
663 # Emulating booleans
664 (('imul', ('b2i', 'a@1'), ('b2i', 'b@1')), ('b2i', ('iand', a, b))),
665 (('fmul', ('b2f', 'a@1'), ('b2f', 'b@1')), ('b2f', ('iand', a, b))),
666 (('fsat', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), ('b2f', ('ior', a, b))),
667 (('iand', 'a@bool32', 1.0), ('b2f', a)),
668 # True/False are ~0 and 0 in NIR. b2i of True is 1, and -1 is ~0 (True).
669 (('ineg', ('b2i32', 'a@32')), a),
670 (('flt', ('fneg', ('b2f', 'a@1')), 0), a), # Generated by TGSI KILL_IF.
671 (('flt', ('fsub', 0.0, ('b2f', 'a@1')), 0), a), # Generated by TGSI KILL_IF.
672 # Comparison with the same args. Note that these are not done for
673 # the float versions because NaN always returns false on float
674 # inequalities.
675 (('ilt', a, a), False),
676 (('ige', a, a), True),
677 (('ieq', a, a), True),
678 (('ine', a, a), False),
679 (('ult', a, a), False),
680 (('uge', a, a), True),
681 # Logical and bit operations
682 (('iand', a, a), a),
683 (('iand', a, ~0), a),
684 (('iand', a, 0), 0),
685 (('ior', a, a), a),
686 (('ior', a, 0), a),
687 (('ior', a, True), True),
688 (('ixor', a, a), 0),
689 (('ixor', a, 0), a),
690 (('inot', ('inot', a)), a),
691 (('ior', ('iand', a, b), b), b),
692 (('ior', ('ior', a, b), b), ('ior', a, b)),
693 (('iand', ('ior', a, b), b), b),
694 (('iand', ('iand', a, b), b), ('iand', a, b)),
695 # DeMorgan's Laws
696 (('iand', ('inot', a), ('inot', b)), ('inot', ('ior', a, b))),
697 (('ior', ('inot', a), ('inot', b)), ('inot', ('iand', a, b))),
698 # Shift optimizations
699 (('ishl', 0, a), 0),
700 (('ishl', a, 0), a),
701 (('ishr', 0, a), 0),
702 (('ishr', a, 0), a),
703 (('ushr', 0, a), 0),
704 (('ushr', a, 0), a),
705 (('iand', 0xff, ('ushr@32', a, 24)), ('ushr', a, 24)),
706 (('iand', 0xffff, ('ushr@32', a, 16)), ('ushr', a, 16)),
707 (('ior', ('ishl@16', a, b), ('ushr@16', a, ('iadd', 16, ('ineg', b)))), ('urol', a, b), '!options->lower_rotate'),
708 (('ior', ('ishl@16', a, b), ('ushr@16', a, ('isub', 16, b))), ('urol', a, b), '!options->lower_rotate'),
709 (('ior', ('ishl@32', a, b), ('ushr@32', a, ('iadd', 32, ('ineg', b)))), ('urol', a, b), '!options->lower_rotate'),
710 (('ior', ('ishl@32', a, b), ('ushr@32', a, ('isub', 32, b))), ('urol', a, b), '!options->lower_rotate'),
711 (('ior', ('ushr@16', a, b), ('ishl@16', a, ('iadd', 16, ('ineg', b)))), ('uror', a, b), '!options->lower_rotate'),
712 (('ior', ('ushr@16', a, b), ('ishl@16', a, ('isub', 16, b))), ('uror', a, b), '!options->lower_rotate'),
713 (('ior', ('ushr@32', a, b), ('ishl@32', a, ('iadd', 32, ('ineg', b)))), ('uror', a, b), '!options->lower_rotate'),
714 (('ior', ('ushr@32', a, b), ('ishl@32', a, ('isub', 32, b))), ('uror', a, b), '!options->lower_rotate'),
715 (('urol@16', a, b), ('ior', ('ishl', a, b), ('ushr', a, ('isub', 16, b))), 'options->lower_rotate'),
716 (('urol@32', a, b), ('ior', ('ishl', a, b), ('ushr', a, ('isub', 32, b))), 'options->lower_rotate'),
717 (('uror@16', a, b), ('ior', ('ushr', a, b), ('ishl', a, ('isub', 16, b))), 'options->lower_rotate'),
718 (('uror@32', a, b), ('ior', ('ushr', a, b), ('ishl', a, ('isub', 32, b))), 'options->lower_rotate'),
719 # Exponential/logarithmic identities
720 (('~fexp2', ('flog2', a)), a), # 2^lg2(a) = a
721 (('~flog2', ('fexp2', a)), a), # lg2(2^a) = a
722 (('fpow', a, b), ('fexp2', ('fmul', ('flog2', a), b)), 'options->lower_fpow'), # a^b = 2^(lg2(a)*b)
723 (('~fexp2', ('fmul', ('flog2', a), b)), ('fpow', a, b), '!options->lower_fpow'), # 2^(lg2(a)*b) = a^b
724 (('~fexp2', ('fadd', ('fmul', ('flog2', a), b), ('fmul', ('flog2', c), d))),
725 ('~fmul', ('fpow', a, b), ('fpow', c, d)), '!options->lower_fpow'), # 2^(lg2(a) * b + lg2(c) * d) = a^b * c^d
726 (('~fexp2', ('fmul', ('flog2', a), 2.0)), ('fmul', a, a)),
727 (('~fexp2', ('fmul', ('flog2', a), 4.0)), ('fmul', ('fmul', a, a), ('fmul', a, a))),
728 (('~fpow', a, 1.0), a),
729 (('~fpow', a, 2.0), ('fmul', a, a)),
730 (('~fpow', a, 4.0), ('fmul', ('fmul', a, a), ('fmul', a, a))),
731 (('~fpow', 2.0, a), ('fexp2', a)),
732 (('~fpow', ('fpow', a, 2.2), 0.454545), a),
733 (('~fpow', ('fabs', ('fpow', a, 2.2)), 0.454545), ('fabs', a)),
734 (('~fsqrt', ('fexp2', a)), ('fexp2', ('fmul', 0.5, a))),
735 (('~frcp', ('fexp2', a)), ('fexp2', ('fneg', a))),
736 (('~frsq', ('fexp2', a)), ('fexp2', ('fmul', -0.5, a))),
737 (('~flog2', ('fsqrt', a)), ('fmul', 0.5, ('flog2', a))),
738 (('~flog2', ('frcp', a)), ('fneg', ('flog2', a))),
739 (('~flog2', ('frsq', a)), ('fmul', -0.5, ('flog2', a))),
740 (('~flog2', ('fpow', a, b)), ('fmul', b, ('flog2', a))),
741 (('~fmul', ('fexp2(is_used_once)', a), ('fexp2(is_used_once)', b)), ('fexp2', ('fadd', a, b))),
742 (('bcsel', ('flt', a, 0.0), 0.0, ('fsqrt', a)), ('fsqrt', ('fmax', a, 0.0))),
743 # Division and reciprocal
744 (('~fdiv', 1.0, a), ('frcp', a)),
745 (('fdiv', a, b), ('fmul', a, ('frcp', b)), 'options->lower_fdiv'),
746 (('~frcp', ('frcp', a)), a),
747 (('~frcp', ('fsqrt', a)), ('frsq', a)),
748 (('fsqrt', a), ('frcp', ('frsq', a)), 'options->lower_fsqrt'),
749 (('~frcp', ('frsq', a)), ('fsqrt', a), '!options->lower_fsqrt'),
750 # Trig
751 (('fsin', a), lowered_sincos(0.5), 'options->lower_sincos'),
752 (('fcos', a), lowered_sincos(0.75), 'options->lower_sincos'),
753 # Boolean simplifications
754 (('i2b32(is_used_by_if)', a), ('ine32', a, 0)),
755 (('i2b1(is_used_by_if)', a), ('ine', a, 0)),
756 (('ieq', a, True), a),
757 (('ine(is_not_used_by_if)', a, True), ('inot', a)),
758 (('ine', a, False), a),
759 (('ieq(is_not_used_by_if)', a, False), ('inot', 'a')),
760 (('bcsel', a, True, False), a),
761 (('bcsel', a, False, True), ('inot', a)),
762 (('bcsel@32', a, 1.0, 0.0), ('b2f', a)),
763 (('bcsel@32', a, 0.0, 1.0), ('b2f', ('inot', a))),
764 (('bcsel@32', a, -1.0, -0.0), ('fneg', ('b2f', a))),
765 (('bcsel@32', a, -0.0, -1.0), ('fneg', ('b2f', ('inot', a)))),
766 (('bcsel', True, b, c), b),
767 (('bcsel', False, b, c), c),
768 (('bcsel', a, ('b2f(is_used_once)', 'b@32'), ('b2f', 'c@32')), ('b2f', ('bcsel', a, b, c))),
769
770 (('bcsel', a, b, b), b),
771 (('~fcsel', a, b, b), b),
772
773 # D3D Boolean emulation
774 (('bcsel', a, -1, 0), ('ineg', ('b2i', 'a@1'))),
775 (('bcsel', a, 0, -1), ('ineg', ('b2i', ('inot', a)))),
776 (('iand', ('ineg', ('b2i', 'a@1')), ('ineg', ('b2i', 'b@1'))),
777 ('ineg', ('b2i', ('iand', a, b)))),
778 (('ior', ('ineg', ('b2i','a@1')), ('ineg', ('b2i', 'b@1'))),
779 ('ineg', ('b2i', ('ior', a, b)))),
780 (('ieq', ('ineg', ('b2i', 'a@1')), 0), ('inot', a)),
781 (('ieq', ('ineg', ('b2i', 'a@1')), -1), a),
782 (('ine', ('ineg', ('b2i', 'a@1')), 0), a),
783 (('ine', ('ineg', ('b2i', 'a@1')), -1), ('inot', a)),
784 (('iand', ('ineg', ('b2i', a)), 1.0), ('b2f', a)),
785 (('iand', ('ineg', ('b2i', a)), 1), ('b2i', a)),
786
787 # SM5 32-bit shifts are defined to use the 5 least significant bits
788 (('ishl', 'a@32', ('iand', 31, b)), ('ishl', a, b)),
789 (('ishr', 'a@32', ('iand', 31, b)), ('ishr', a, b)),
790 (('ushr', 'a@32', ('iand', 31, b)), ('ushr', a, b)),
791
792 # Conversions
793 (('i2b32', ('b2i', 'a@32')), a),
794 (('f2i', ('ftrunc', a)), ('f2i', a)),
795 (('f2u', ('ftrunc', a)), ('f2u', a)),
796 (('i2b', ('ineg', a)), ('i2b', a)),
797 (('i2b', ('iabs', a)), ('i2b', a)),
798 (('inot', ('f2b1', a)), ('feq', a, 0.0)),
799
800 # Ironically, mark these as imprecise because removing the conversions may
801 # preserve more precision than doing the conversions (e.g.,
802 # uint(float(0x81818181u)) == 0x81818200).
803 (('~f2i32', ('i2f', 'a@32')), a),
804 (('~f2i32', ('u2f', 'a@32')), a),
805 (('~f2u32', ('i2f', 'a@32')), a),
806 (('~f2u32', ('u2f', 'a@32')), a),
807
808 (('ffloor', 'a(is_integral)'), a),
809 (('fceil', 'a(is_integral)'), a),
810 (('ftrunc', 'a(is_integral)'), a),
811 # fract(x) = x - floor(x), so fract(NaN) = NaN
812 (('~ffract', 'a(is_integral)'), 0.0),
813 (('fabs', 'a(is_not_negative)'), a),
814 (('iabs', 'a(is_not_negative)'), a),
815 (('fsat', 'a(is_not_positive)'), 0.0),
816
817 # Section 5.4.1 (Conversion and Scalar Constructors) of the GLSL 4.60 spec
818 # says:
819 #
820 # It is undefined to convert a negative floating-point value to an
821 # uint.
822 #
823 # Assuming that (uint)some_float behaves like (uint)(int)some_float allows
824 # some optimizations in the i965 backend to proceed.
825 (('ige', ('f2u', a), b), ('ige', ('f2i', a), b)),
826 (('ige', b, ('f2u', a)), ('ige', b, ('f2i', a))),
827 (('ilt', ('f2u', a), b), ('ilt', ('f2i', a), b)),
828 (('ilt', b, ('f2u', a)), ('ilt', b, ('f2i', a))),
829
830 (('~fmin', 'a(is_not_negative)', 1.0), ('fsat', a), '!options->lower_fsat'),
831
832 # The result of the multiply must be in [-1, 0], so the result of the ffma
833 # must be in [0, 1].
834 (('flt', ('fadd', ('fmul', ('fsat', a), ('fneg', ('fsat', a))), 1.0), 0.0), False),
835 (('flt', ('fadd', ('fneg', ('fmul', ('fsat', a), ('fsat', a))), 1.0), 0.0), False),
836 (('fmax', ('fadd', ('fmul', ('fsat', a), ('fneg', ('fsat', a))), 1.0), 0.0), ('fadd', ('fmul', ('fsat', a), ('fneg', ('fsat', a))), 1.0)),
837 (('fmax', ('fadd', ('fneg', ('fmul', ('fsat', a), ('fsat', a))), 1.0), 0.0), ('fadd', ('fneg', ('fmul', ('fsat', a), ('fsat', a))), 1.0)),
838
839 (('fne', 'a(is_not_zero)', 0.0), True),
840 (('feq', 'a(is_not_zero)', 0.0), False),
841
842 # In this chart, + means value > 0 and - means value < 0.
843 #
844 # + >= + -> unknown 0 >= + -> false - >= + -> false
845 # + >= 0 -> true 0 >= 0 -> true - >= 0 -> false
846 # + >= - -> true 0 >= - -> true - >= - -> unknown
847 #
848 # Using grouping conceptually similar to a Karnaugh map...
849 #
850 # (+ >= 0, + >= -, 0 >= 0, 0 >= -) == (is_not_negative >= is_not_positive) -> true
851 # (0 >= +, - >= +) == (is_not_positive >= gt_zero) -> false
852 # (- >= +, - >= 0) == (lt_zero >= is_not_negative) -> false
853 #
854 # The flt / ilt cases just invert the expected result.
855 #
856 # The results expecting true must be marked imprecise. The results
857 # expecting false are fine because NaN compared >= or < anything is false.
858
859 (('~fge', 'a(is_not_negative)', 'b(is_not_positive)'), True),
860 (('fge', 'a(is_not_positive)', 'b(is_gt_zero)'), False),
861 (('fge', 'a(is_lt_zero)', 'b(is_not_negative)'), False),
862
863 (('flt', 'a(is_not_negative)', 'b(is_not_positive)'), False),
864 (('~flt', 'a(is_not_positive)', 'b(is_gt_zero)'), True),
865 (('~flt', 'a(is_lt_zero)', 'b(is_not_negative)'), True),
866
867 (('ine', 'a(is_not_zero)', 0), True),
868 (('ieq', 'a(is_not_zero)', 0), False),
869
870 (('ige', 'a(is_not_negative)', 'b(is_not_positive)'), True),
871 (('ige', 'a(is_not_positive)', 'b(is_gt_zero)'), False),
872 (('ige', 'a(is_lt_zero)', 'b(is_not_negative)'), False),
873
874 (('ilt', 'a(is_not_negative)', 'b(is_not_positive)'), False),
875 (('ilt', 'a(is_not_positive)', 'b(is_gt_zero)'), True),
876 (('ilt', 'a(is_lt_zero)', 'b(is_not_negative)'), True),
877
878 (('ult', 0, 'a(is_gt_zero)'), True),
879
880 # Packing and then unpacking does nothing
881 (('unpack_64_2x32_split_x', ('pack_64_2x32_split', a, b)), a),
882 (('unpack_64_2x32_split_y', ('pack_64_2x32_split', a, b)), b),
883 (('pack_64_2x32_split', ('unpack_64_2x32_split_x', a),
884 ('unpack_64_2x32_split_y', a)), a),
885
886 # Comparing two halves of an unpack separately. While this optimization
887 # should be correct for non-constant values, it's less obvious that it's
888 # useful in that case. For constant values, the pack will fold and we're
889 # guaranteed to reduce the whole tree to one instruction.
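# For example, when b and c are literal constants, the replacement's
# ('pack_32_2x16_split', b, c) constant-folds into a single 32-bit immediate,
# so the two 16-bit compares and the iand collapse into one ieq.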
890 (('iand', ('ieq', ('unpack_32_2x16_split_x', a), '#b'),
891 ('ieq', ('unpack_32_2x16_split_y', a), '#c')),
892 ('ieq', a, ('pack_32_2x16_split', b, c))),
893
894 # Byte extraction
895 (('ushr', 'a@16', 8), ('extract_u8', a, 1), '!options->lower_extract_byte'),
896 (('ushr', 'a@32', 24), ('extract_u8', a, 3), '!options->lower_extract_byte'),
897 (('ushr', 'a@64', 56), ('extract_u8', a, 7), '!options->lower_extract_byte'),
898 (('ishr', 'a@16', 8), ('extract_i8', a, 1), '!options->lower_extract_byte'),
899 (('ishr', 'a@32', 24), ('extract_i8', a, 3), '!options->lower_extract_byte'),
900 (('ishr', 'a@64', 56), ('extract_i8', a, 7), '!options->lower_extract_byte'),
901 (('iand', 0xff, a), ('extract_u8', a, 0), '!options->lower_extract_byte'),
902
903 # Useless masking before unpacking
904 (('unpack_half_2x16_split_x', ('iand', a, 0xffff)), ('unpack_half_2x16_split_x', a)),
905 (('unpack_32_2x16_split_x', ('iand', a, 0xffff)), ('unpack_32_2x16_split_x', a)),
906 (('unpack_64_2x32_split_x', ('iand', a, 0xffffffff)), ('unpack_64_2x32_split_x', a)),
907 (('unpack_half_2x16_split_y', ('iand', a, 0xffff0000)), ('unpack_half_2x16_split_y', a)),
908 (('unpack_32_2x16_split_y', ('iand', a, 0xffff0000)), ('unpack_32_2x16_split_y', a)),
909 (('unpack_64_2x32_split_y', ('iand', a, 0xffffffff00000000)), ('unpack_64_2x32_split_y', a)),
910 ])
911
912 # After the ('extract_u8', a, 0) pattern, above, triggers, there will be
913 # patterns like those below.
914 for op in ('ushr', 'ishr'):
915 optimizations.extend([(('extract_u8', (op, 'a@16', 8), 0), ('extract_u8', a, 1))])
916 optimizations.extend([(('extract_u8', (op, 'a@32', 8 * i), 0), ('extract_u8', a, i)) for i in range(1, 4)])
917 optimizations.extend([(('extract_u8', (op, 'a@64', 8 * i), 0), ('extract_u8', a, i)) for i in range(1, 8)])
918
919 optimizations.extend([(('extract_u8', ('extract_u16', a, 1), 0), ('extract_u8', a, 2))])
920
921 # After the ('extract_[iu]8', a, 3) patterns, above, trigger, there will be
922 # patterns like those below.
923 for op in ('extract_u8', 'extract_i8'):
924 optimizations.extend([((op, ('ishl', 'a@16', 8), 1), (op, a, 0))])
925 optimizations.extend([((op, ('ishl', 'a@32', 24 - 8 * i), 3), (op, a, i)) for i in range(2, -1, -1)])
926 optimizations.extend([((op, ('ishl', 'a@64', 56 - 8 * i), 7), (op, a, i)) for i in range(6, -1, -1)])
927
928 optimizations.extend([
929 # Word extraction
930 (('ushr', ('ishl', 'a@32', 16), 16), ('extract_u16', a, 0), '!options->lower_extract_word'),
931 (('ushr', 'a@32', 16), ('extract_u16', a, 1), '!options->lower_extract_word'),
932 (('ishr', ('ishl', 'a@32', 16), 16), ('extract_i16', a, 0), '!options->lower_extract_word'),
933 (('ishr', 'a@32', 16), ('extract_i16', a, 1), '!options->lower_extract_word'),
934 (('iand', 0xffff, a), ('extract_u16', a, 0), '!options->lower_extract_word'),
935
936 # Subtracts
937 (('~fsub', a, ('fsub', 0.0, b)), ('fadd', a, b)),
938 (('isub', a, ('isub', 0, b)), ('iadd', a, b)),
939 (('isub', ('iadd', a, b), b), a),
940 (('~fsub', ('fadd', a, b), b), a),
941 (('ussub_4x8', a, 0), a),
942 (('ussub_4x8', a, ~0), 0),
943 (('fsub', a, b), ('fadd', a, ('fneg', b)), 'options->lower_sub'),
944 (('isub', a, b), ('iadd', a, ('ineg', b)), 'options->lower_sub'),
945 (('fneg', a), ('fsub', 0.0, a), 'options->lower_negate'),
946 (('ineg', a), ('isub', 0, a), 'options->lower_negate'),
947 (('~fadd', a, ('fsub', 0.0, b)), ('fsub', a, b)),
948 (('iadd', a, ('isub', 0, b)), ('isub', a, b)),
949 (('fabs', ('fsub', 0.0, a)), ('fabs', a)),
950 (('iabs', ('isub', 0, a)), ('iabs', a)),
951
952 # Propagate negation up multiplication chains
953 (('fmul(is_used_by_non_fsat)', ('fneg', a), b), ('fneg', ('fmul', a, b))),
954 (('imul', ('ineg', a), b), ('ineg', ('imul', a, b))),
955
956 # Propagate constants up multiplication chains
957 (('~fmul(is_used_once)', ('fmul(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('fmul', ('fmul', a, c), b)),
958 (('imul(is_used_once)', ('imul(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('imul', ('imul', a, c), b)),
959 (('~fadd(is_used_once)', ('fadd(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('fadd', ('fadd', a, c), b)),
960 (('iadd(is_used_once)', ('iadd(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('iadd', ('iadd', a, c), b)),
961
962 # Reassociate constants in add/mul chains so they can be folded together.
963 # For now, we mostly only handle cases where the constants are separated by
964 # a single non-constant. We could do better eventually.
965 (('~fmul', '#a', ('fmul', 'b(is_not_const)', '#c')), ('fmul', ('fmul', a, c), b)),
966 (('imul', '#a', ('imul', 'b(is_not_const)', '#c')), ('imul', ('imul', a, c), b)),
967 (('~fadd', '#a', ('fadd', 'b(is_not_const)', '#c')), ('fadd', ('fadd', a, c), b)),
968 (('~fadd', '#a', ('fneg', ('fadd', 'b(is_not_const)', '#c'))), ('fadd', ('fadd', a, ('fneg', c)), ('fneg', b))),
969 (('iadd', '#a', ('iadd', 'b(is_not_const)', '#c')), ('iadd', ('iadd', a, c), b)),
970
971 # Drop mul-div by the same value when there's no wrapping.
972 (('idiv', ('imul(no_signed_wrap)', a, b), b), a),
973
974 # By definition...
975 (('bcsel', ('ige', ('find_lsb', a), 0), ('find_lsb', a), -1), ('find_lsb', a)),
976 (('bcsel', ('ige', ('ifind_msb', a), 0), ('ifind_msb', a), -1), ('ifind_msb', a)),
977 (('bcsel', ('ige', ('ufind_msb', a), 0), ('ufind_msb', a), -1), ('ufind_msb', a)),
978
979 (('bcsel', ('ine', a, 0), ('find_lsb', a), -1), ('find_lsb', a)),
980 (('bcsel', ('ine', a, 0), ('ifind_msb', a), -1), ('ifind_msb', a)),
981 (('bcsel', ('ine', a, 0), ('ufind_msb', a), -1), ('ufind_msb', a)),
982
983 (('bcsel', ('ine', a, -1), ('ifind_msb', a), -1), ('ifind_msb', a)),
984
985 # Misc. lowering
986 (('fmod@16', a, b), ('fsub', a, ('fmul', b, ('ffloor', ('fdiv', a, b)))), 'options->lower_fmod'),
987 (('fmod@32', a, b), ('fsub', a, ('fmul', b, ('ffloor', ('fdiv', a, b)))), 'options->lower_fmod'),
988 (('frem', a, b), ('fsub', a, ('fmul', b, ('ftrunc', ('fdiv', a, b)))), 'options->lower_fmod'),
989 (('uadd_carry@32', a, b), ('b2i', ('ult', ('iadd', a, b), a)), 'options->lower_uadd_carry'),
990 (('usub_borrow@32', a, b), ('b2i', ('ult', a, b)), 'options->lower_usub_borrow'),
991
992 (('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
993 ('bcsel', ('ult', 31, 'bits'), 'insert',
994 ('bfi', ('bfm', 'bits', 'offset'), 'insert', 'base')),
995 'options->lower_bitfield_insert'),
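# The [iu]hadd / [iu]rhadd lowerings below use the usual overflow-free
# averaging identities: (a & b) + ((a ^ b) >> 1) for the halving add and
# (a | b) - ((a ^ b) >> 1) for the rounding halving add.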
996 (('ihadd', a, b), ('iadd', ('iand', a, b), ('ishr', ('ixor', a, b), 1)), 'options->lower_hadd'),
997 (('uhadd', a, b), ('iadd', ('iand', a, b), ('ushr', ('ixor', a, b), 1)), 'options->lower_hadd'),
998 (('irhadd', a, b), ('isub', ('ior', a, b), ('ishr', ('ixor', a, b), 1)), 'options->lower_hadd'),
999 (('urhadd', a, b), ('isub', ('ior', a, b), ('ushr', ('ixor', a, b), 1)), 'options->lower_hadd'),
1000 (('uadd_sat', a, b), ('bcsel', ('ult', ('iadd', a, b), a), -1, ('iadd', a, b)), 'options->lower_add_sat'),
1001 (('usub_sat', a, b), ('bcsel', ('ult', a, b), 0, ('isub', a, b)), 'options->lower_add_sat'),
1002
1003 # Alternative lowering that doesn't rely on bfi.
1004 (('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
1005 ('bcsel', ('ult', 31, 'bits'),
1006 'insert',
1007 (('ior',
1008 ('iand', 'base', ('inot', ('ishl', ('isub', ('ishl', 1, 'bits'), 1), 'offset'))),
1009 ('iand', ('ishl', 'insert', 'offset'), ('ishl', ('isub', ('ishl', 1, 'bits'), 1), 'offset'))))),
1010 'options->lower_bitfield_insert_to_shifts'),
1011
1012 # Alternative lowering that uses bitfield_select.
1013 (('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
1014 ('bcsel', ('ult', 31, 'bits'), 'insert',
1015 ('bitfield_select', ('bfm', 'bits', 'offset'), ('ishl', 'insert', 'offset'), 'base')),
1016 'options->lower_bitfield_insert_to_bitfield_select'),
1017
1018 (('ibitfield_extract', 'value', 'offset', 'bits'),
1019 ('bcsel', ('ult', 31, 'bits'), 'value',
1020 ('ibfe', 'value', 'offset', 'bits')),
1021 'options->lower_bitfield_extract'),
1022
1023 (('ubitfield_extract', 'value', 'offset', 'bits'),
1024 ('bcsel', ('ult', 31, 'bits'), 'value',
1025 ('ubfe', 'value', 'offset', 'bits')),
1026 'options->lower_bitfield_extract'),
1027
1028 # Note that these opcodes are defined to only use the five least significant bits of 'offset' and 'bits'
1029 (('ubfe', 'value', 'offset', ('iand', 31, 'bits')), ('ubfe', 'value', 'offset', 'bits')),
1030 (('ubfe', 'value', ('iand', 31, 'offset'), 'bits'), ('ubfe', 'value', 'offset', 'bits')),
1031 (('ibfe', 'value', 'offset', ('iand', 31, 'bits')), ('ibfe', 'value', 'offset', 'bits')),
1032 (('ibfe', 'value', ('iand', 31, 'offset'), 'bits'), ('ibfe', 'value', 'offset', 'bits')),
1033 (('bfm', 'bits', ('iand', 31, 'offset')), ('bfm', 'bits', 'offset')),
1034 (('bfm', ('iand', 31, 'bits'), 'offset'), ('bfm', 'bits', 'offset')),
1035
1036 (('ibitfield_extract', 'value', 'offset', 'bits'),
1037 ('bcsel', ('ieq', 0, 'bits'),
1038 0,
1039 ('ishr',
1040 ('ishl', 'value', ('isub', ('isub', 32, 'bits'), 'offset')),
1041 ('isub', 32, 'bits'))),
1042 'options->lower_bitfield_extract_to_shifts'),
1043
1044 (('ubitfield_extract', 'value', 'offset', 'bits'),
1045 ('iand',
1046 ('ushr', 'value', 'offset'),
1047 ('bcsel', ('ieq', 'bits', 32),
1048 0xffffffff,
1049 ('isub', ('ishl', 1, 'bits'), 1))),
1050 'options->lower_bitfield_extract_to_shifts'),
1051
1052 (('ifind_msb', 'value'),
1053 ('ufind_msb', ('bcsel', ('ilt', 'value', 0), ('inot', 'value'), 'value')),
1054 'options->lower_ifind_msb'),
1055
1056 (('find_lsb', 'value'),
1057 ('ufind_msb', ('iand', 'value', ('ineg', 'value'))),
1058 'options->lower_find_lsb'),
1059
1060 (('extract_i8', a, 'b@32'),
1061 ('ishr', ('ishl', a, ('imul', ('isub', 3, b), 8)), 24),
1062 'options->lower_extract_byte'),
1063
1064 (('extract_u8', a, 'b@32'),
1065 ('iand', ('ushr', a, ('imul', b, 8)), 0xff),
1066 'options->lower_extract_byte'),
1067
1068 (('extract_i16', a, 'b@32'),
1069 ('ishr', ('ishl', a, ('imul', ('isub', 1, b), 16)), 16),
1070 'options->lower_extract_word'),
1071
1072 (('extract_u16', a, 'b@32'),
1073 ('iand', ('ushr', a, ('imul', b, 16)), 0xffff),
1074 'options->lower_extract_word'),
1075
1076 (('pack_unorm_2x16', 'v'),
1077 ('pack_uvec2_to_uint',
1078 ('f2u32', ('fround_even', ('fmul', ('fsat', 'v'), 65535.0)))),
1079 'options->lower_pack_unorm_2x16'),
1080
1081 (('pack_unorm_4x8', 'v'),
1082 ('pack_uvec4_to_uint',
1083 ('f2u32', ('fround_even', ('fmul', ('fsat', 'v'), 255.0)))),
1084 'options->lower_pack_unorm_4x8'),
1085
1086 (('pack_snorm_2x16', 'v'),
1087 ('pack_uvec2_to_uint',
1088 ('f2i32', ('fround_even', ('fmul', ('fmin', 1.0, ('fmax', -1.0, 'v')), 32767.0)))),
1089 'options->lower_pack_snorm_2x16'),
1090
1091 (('pack_snorm_4x8', 'v'),
1092 ('pack_uvec4_to_uint',
1093 ('f2i32', ('fround_even', ('fmul', ('fmin', 1.0, ('fmax', -1.0, 'v')), 127.0)))),
1094 'options->lower_pack_snorm_4x8'),
1095
1096 (('unpack_unorm_2x16', 'v'),
1097 ('fdiv', ('u2f32', ('vec2', ('extract_u16', 'v', 0),
1098 ('extract_u16', 'v', 1))),
1099 65535.0),
1100 'options->lower_unpack_unorm_2x16'),
1101
1102 (('unpack_unorm_4x8', 'v'),
1103 ('fdiv', ('u2f32', ('vec4', ('extract_u8', 'v', 0),
1104 ('extract_u8', 'v', 1),
1105 ('extract_u8', 'v', 2),
1106 ('extract_u8', 'v', 3))),
1107 255.0),
1108 'options->lower_unpack_unorm_4x8'),
1109
1110 (('unpack_snorm_2x16', 'v'),
1111 ('fmin', 1.0, ('fmax', -1.0, ('fdiv', ('i2f', ('vec2', ('extract_i16', 'v', 0),
1112 ('extract_i16', 'v', 1))),
1113 32767.0))),
1114 'options->lower_unpack_snorm_2x16'),
1115
1116 (('unpack_snorm_4x8', 'v'),
1117 ('fmin', 1.0, ('fmax', -1.0, ('fdiv', ('i2f', ('vec4', ('extract_i8', 'v', 0),
1118 ('extract_i8', 'v', 1),
1119 ('extract_i8', 'v', 2),
1120 ('extract_i8', 'v', 3))),
1121 127.0))),
1122 'options->lower_unpack_snorm_4x8'),
1123
1124 (('isign', a), ('imin', ('imax', a, -1), 1), 'options->lower_isign'),
1125 (('fsign', a), ('fsub', ('b2f', ('flt', 0.0, a)), ('b2f', ('flt', a, 0.0))), 'options->lower_fsign'),
1126 ])
1127
1128 # bit_size dependent lowerings
1129 for bit_size in [8, 16, 32, 64]:
1130 # convenience constants
1131 intmax = (1 << (bit_size - 1)) - 1
1132 intmin = 1 << (bit_size - 1)
1133
1134 optimizations += [
1135 (('iadd_sat@' + str(bit_size), a, b),
1136 ('bcsel', ('ige', b, 1), ('bcsel', ('ilt', ('iadd', a, b), a), intmax, ('iadd', a, b)),
1137 ('bcsel', ('ilt', a, ('iadd', a, b)), intmin, ('iadd', a, b))), 'options->lower_add_sat'),
1138 (('isub_sat@' + str(bit_size), a, b),
1139 ('bcsel', ('ilt', b, 0), ('bcsel', ('ilt', ('isub', a, b), a), intmax, ('isub', a, b)),
1140 ('bcsel', ('ilt', a, ('isub', a, b)), intmin, ('isub', a, b))), 'options->lower_add_sat'),
1141 ]
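
# Quick, self-contained sanity sketch of the saturating-add lowering above
# (illustrative only, never used by the pass generator; the helper name
# _sat_add8 exists only in this sketch).  It emulates the 8-bit pattern in
# plain Python and compares it against a clamped reference.
def _sat_add8(x, y):
   s = ((x + y + 128) & 0xff) - 128   # wrapping signed 8-bit add
   if y >= 1:
      return 127 if s < x else s      # positive b: overflowed iff a + b < a
   else:
      return -128 if x < s else s     # non-positive b: underflowed iff a < a + b
for _x, _y in ((100, 100), (-100, -100), (5, -7), (127, 1), (-128, -1)):
   assert _sat_add8(_x, _y) == max(-128, min(127, _x + _y))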
1142
1143 invert = OrderedDict([('feq', 'fne'), ('fne', 'feq'), ('fge', 'flt'), ('flt', 'fge')])
1144
1145 for left, right in itertools.combinations_with_replacement(invert.keys(), 2):
1146 optimizations.append((('inot', ('ior(is_used_once)', (left, a, b), (right, c, d))),
1147 ('iand', (invert[left], a, b), (invert[right], c, d))))
1148 optimizations.append((('inot', ('iand(is_used_once)', (left, a, b), (right, c, d))),
1149 ('ior', (invert[left], a, b), (invert[right], c, d))))
1150
1151 # Optimize x2bN(b2x(x)) -> x
1152 for size in type_sizes('bool'):
1153 aN = 'a@' + str(size)
1154 f2bN = 'f2b' + str(size)
1155 i2bN = 'i2b' + str(size)
1156 optimizations.append(((f2bN, ('b2f', aN)), a))
1157 optimizations.append(((i2bN, ('b2i', aN)), a))
1158
1159 # Optimize x2yN(b2x(x)) -> b2y(x)
1160 for x, y in itertools.product(['f', 'u', 'i'], ['f', 'u', 'i']):
1161 if x != 'f' and y != 'f' and x != y:
1162 continue
1163
1164 b2x = 'b2f' if x == 'f' else 'b2i'
1165 b2y = 'b2f' if y == 'f' else 'b2i'
1166 x2yN = '{}2{}'.format(x, y)
1167 optimizations.append(((x2yN, (b2x, a)), (b2y, a)))
1168
1169 # Optimize away x2xN(a@N)
1170 for t in ['int', 'uint', 'float']:
1171 for N in type_sizes(t):
1172 x2xN = '{0}2{0}{1}'.format(t[0], N)
1173 aN = 'a@{0}'.format(N)
1174 optimizations.append(((x2xN, aN), a))
1175
1176 # Optimize x2xN(y2yM(a@P)) -> y2yN(a) for integers
1177 # In particular, we can optimize away everything except upcast of downcast and
1178 # upcasts where the type differs from the other cast
1179 for N, M in itertools.product(type_sizes('uint'), type_sizes('uint')):
1180 if N < M:
1181 # The outer cast is a down-cast. It doesn't matter what the size of the
1182       # argument of the inner cast is because we'll never be in the upcast
1183 # of downcast case. Regardless of types, we'll always end up with y2yN
1184 # in the end.
1185 for x, y in itertools.product(['i', 'u'], ['i', 'u']):
1186 x2xN = '{0}2{0}{1}'.format(x, N)
1187 y2yM = '{0}2{0}{1}'.format(y, M)
1188 y2yN = '{0}2{0}{1}'.format(y, N)
1189 optimizations.append(((x2xN, (y2yM, a)), (y2yN, a)))
1190 elif N > M:
1191 # If the outer cast is an up-cast, we have to be more careful about the
1192 # size of the argument of the inner cast and with types. In this case,
1193       # the type is always the type of the up-cast which is given by the
1194 # outer cast.
1195 for P in type_sizes('uint'):
1196 # We can't optimize away up-cast of down-cast.
1197 if M < P:
1198 continue
1199
1200          # Because we're doing an up-cast of an up-cast, the types always have
1201          # to match between the two casts
1202 for x in ['i', 'u']:
1203 x2xN = '{0}2{0}{1}'.format(x, N)
1204 x2xM = '{0}2{0}{1}'.format(x, M)
1205 aP = 'a@{0}'.format(P)
1206 optimizations.append(((x2xN, (x2xM, aP)), (x2xN, a)))
1207 else:
1208 # The N == M case is handled by other optimizations
1209 pass
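
# Tiny illustration of the cases the loop above leaves alone (plain Python
# integers standing in for NIR values; illustrative only, not part of the
# generated pass): an up-cast of a down-cast is not a no-op, and mixing
# signedness between two up-casts changes the result.
assert (0x12345678 & 0xffff) != 0x12345678   # u2u32(u2u16(a@32)) != a
_a8 = -128                                   # an 8-bit value with the sign bit set
assert (_a8 & 0xffff) == 0xff80              # i2i16(a@8) sign-extends
assert (_a8 & 0xff) == 0x0080                # u2u16(a@8) zero-extends
# so u2u32(i2i16(a@8)) is 0x0000ff80, matching neither i2i32(a@8) nor u2u32(a@8).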
1210
1211 # Optimize comparisons with up-casts
1212 for t in ['int', 'uint', 'float']:
1213 for N, M in itertools.product(type_sizes(t), repeat=2):
1214 if N == 1 or N >= M:
1215 continue
1216
1217 x2xM = '{0}2{0}{1}'.format(t[0], M)
1218 x2xN = '{0}2{0}{1}'.format(t[0], N)
1219 aN = 'a@' + str(N)
1220 bN = 'b@' + str(N)
1221 xeq = 'feq' if t == 'float' else 'ieq'
1222 xne = 'fne' if t == 'float' else 'ine'
1223 xge = '{0}ge'.format(t[0])
1224 xlt = '{0}lt'.format(t[0])
1225
1226 # Up-casts are lossless so for correctly signed comparisons of
1227       # up-casted values we can do the comparison at the larger of the two
1228 # original sizes and drop one or both of the casts. (We have
1229 # optimizations to drop the no-op casts which this may generate.)
1230 for P in type_sizes(t):
1231 if P == 1 or P > N:
1232 continue
1233
1234 bP = 'b@' + str(P)
1235 optimizations += [
1236 ((xeq, (x2xM, aN), (x2xM, bP)), (xeq, a, (x2xN, b))),
1237 ((xne, (x2xM, aN), (x2xM, bP)), (xne, a, (x2xN, b))),
1238 ((xge, (x2xM, aN), (x2xM, bP)), (xge, a, (x2xN, b))),
1239 ((xlt, (x2xM, aN), (x2xM, bP)), (xlt, a, (x2xN, b))),
1240 ((xge, (x2xM, bP), (x2xM, aN)), (xge, (x2xN, b), a)),
1241 ((xlt, (x2xM, bP), (x2xM, aN)), (xlt, (x2xN, b), a)),
1242 ]
1243
1244 # The next bit doesn't work on floats because the range checks would
1245 # get way too complicated.
1246 if t in ['int', 'uint']:
1247 if t == 'int':
1248 xN_min = -(1 << (N - 1))
1249 xN_max = (1 << (N - 1)) - 1
1250 elif t == 'uint':
1251 xN_min = 0
1252 xN_max = (1 << N) - 1
1253 else:
1254 assert False
1255
1256 # If we're up-casting and comparing to a constant, we can unfold
1257 # the comparison into a comparison with the shrunk down constant
1258 # and a check that the constant fits in the smaller bit size.
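         # For example, with N == 8 and b == 300 (which does not fit in int8),
         # ilt(i2i32(a@8), 300) unfolds to
         # iand(ilt(-128, 300), ior(ilt(127, 300), ilt(a, i2i8(300)))),
         # and the two constant comparisons fold to true, so the whole thing
         # folds to true, as it should for any 8-bit 'a'.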
1259 optimizations += [
1260 ((xeq, (x2xM, aN), '#b'),
1261 ('iand', (xeq, a, (x2xN, b)), (xeq, (x2xM, (x2xN, b)), b))),
1262 ((xne, (x2xM, aN), '#b'),
1263 ('ior', (xne, a, (x2xN, b)), (xne, (x2xM, (x2xN, b)), b))),
1264 ((xlt, (x2xM, aN), '#b'),
1265 ('iand', (xlt, xN_min, b),
1266 ('ior', (xlt, xN_max, b), (xlt, a, (x2xN, b))))),
1267 ((xlt, '#a', (x2xM, bN)),
1268 ('iand', (xlt, a, xN_max),
1269 ('ior', (xlt, a, xN_min), (xlt, (x2xN, a), b)))),
1270 ((xge, (x2xM, aN), '#b'),
1271 ('iand', (xge, xN_max, b),
1272 ('ior', (xge, xN_min, b), (xge, a, (x2xN, b))))),
1273 ((xge, '#a', (x2xM, bN)),
1274 ('iand', (xge, a, xN_min),
1275 ('ior', (xge, a, xN_max), (xge, (x2xN, a), b)))),
1276 ]
1277
1278 def fexp2i(exp, bits):
1279 # We assume that exp is already in the right range.
1280 if bits == 16:
1281 return ('i2i16', ('ishl', ('iadd', exp, 15), 10))
1282 elif bits == 32:
1283 return ('ishl', ('iadd', exp, 127), 23)
1284 elif bits == 64:
1285 return ('pack_64_2x32_split', 0, ('ishl', ('iadd', exp, 1023), 20))
1286 else:
1287 assert False
1288
1289 def ldexp(f, exp, bits):
1290 # First, we clamp exp to a reasonable range. The maximum possible range
1291 # for a normal exponent is [-126, 127] and, throwing in denormals, you get
1292 # a maximum range of [-149, 127]. This means that we can potentially have
1293 # a swing of +-276. If you start with FLT_MAX, you actually have to do
1294 # ldexp(FLT_MAX, -278) to get it to flush all the way to zero. The GLSL
1295 # spec, on the other hand, only requires that we handle an exponent value
1296 # in the range [-126, 128]. This implementation is *mostly* correct; it
1297 # handles a range on exp of [-252, 254] which allows you to create any
1298 # value (including denorms if the hardware supports it) and to adjust the
1299 # exponent of any normal value to anything you want.
1300 if bits == 16:
1301 exp = ('imin', ('imax', exp, -28), 30)
1302 elif bits == 32:
1303 exp = ('imin', ('imax', exp, -252), 254)
1304 elif bits == 64:
1305 exp = ('imin', ('imax', exp, -2044), 2046)
1306 else:
1307 assert False
1308
1309 # Now we compute two powers of 2, one for exp/2 and one for exp-exp/2.
1310    # (We use ishr, which rounds exp/2 toward negative infinity, but the split is
1311    # still exact since the second exponent is exp - exp/2.)  While the spec
1312 # technically defines ldexp as f * 2.0^exp, simply multiplying once doesn't
1313 # work with denormals and doesn't allow for the full swing in exponents
1314 # that you can get with normalized values. Instead, we create two powers
1315 # of two and multiply by them each in turn. That way the effective range
1316 # of our exponent is doubled.
1317 pow2_1 = fexp2i(('ishr', exp, 1), bits)
1318 pow2_2 = fexp2i(('isub', exp, ('ishr', exp, 1)), bits)
1319 return ('fmul', ('fmul', f, pow2_1), pow2_2)
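
# Quick sanity sketch of the 32-bit path above (illustrative only, not used by
# the pass generator; assumes IEEE-754 binary32 floats): (e + 127) << 23 is
# exactly the bit pattern of 2.0**e for normal exponents, and splitting a
# clamped exp into exp >> 1 and exp - (exp >> 1) keeps both halves in that
# normal range.
import struct
for _e in (-126, -1, 0, 1, 127):
   assert struct.unpack('<f', struct.pack('<I', (_e + 127) << 23))[0] == 2.0 ** _e
assert all(-126 <= (_e >> 1) <= 127 and -126 <= _e - (_e >> 1) <= 127
           for _e in range(-252, 255))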
1320
1321 optimizations += [
1322 (('ldexp@16', 'x', 'exp'), ldexp('x', 'exp', 16), 'options->lower_ldexp'),
1323 (('ldexp@32', 'x', 'exp'), ldexp('x', 'exp', 32), 'options->lower_ldexp'),
1324 (('ldexp@64', 'x', 'exp'), ldexp('x', 'exp', 64), 'options->lower_ldexp'),
1325 ]
1326
1327 # Unreal Engine 4 demo applications open-code bitfieldReverse()
1328 def bitfield_reverse(u):
1329 step1 = ('ior', ('ishl', u, 16), ('ushr', u, 16))
1330 step2 = ('ior', ('ishl', ('iand', step1, 0x00ff00ff), 8), ('ushr', ('iand', step1, 0xff00ff00), 8))
1331 step3 = ('ior', ('ishl', ('iand', step2, 0x0f0f0f0f), 4), ('ushr', ('iand', step2, 0xf0f0f0f0), 4))
1332 step4 = ('ior', ('ishl', ('iand', step3, 0x33333333), 2), ('ushr', ('iand', step3, 0xcccccccc), 2))
1333 step5 = ('ior(many-comm-expr)', ('ishl', ('iand', step4, 0x55555555), 1), ('ushr', ('iand', step4, 0xaaaaaaaa), 1))
1334
1335 return step5
1336
1337 optimizations += [(bitfield_reverse('x@32'), ('bitfield_reverse', 'x'), '!options->lower_bitfield_reverse')]
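
# Quick self-check that the mask-and-shift sequence above really reverses a
# 32-bit word (illustrative only, not used by the pass generator; the helper
# _py_bitrev32 exists only in this sketch).
def _py_bitrev32(u):
   u = ((u << 16) | (u >> 16)) & 0xffffffff
   u = ((u & 0x00ff00ff) << 8) | ((u & 0xff00ff00) >> 8)
   u = ((u & 0x0f0f0f0f) << 4) | ((u & 0xf0f0f0f0) >> 4)
   u = ((u & 0x33333333) << 2) | ((u & 0xcccccccc) >> 2)
   return ((u & 0x55555555) << 1) | ((u & 0xaaaaaaaa) >> 1)
assert _py_bitrev32(0x00000001) == 0x80000000
assert _py_bitrev32(0x80000001) == 0x80000001
assert _py_bitrev32(0xf0000000) == 0x0000000f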
1338
1339 # For any float comparison operation, "cmp", if you have "a == a && a cmp b"
1340 # then the "a == a" is redundant because it's equivalent to "a is not NaN"
1341 # and, if a is a NaN, the second comparison will fail anyway.
1342 for op in ['flt', 'fge', 'feq']:
1343 optimizations += [
1344 (('iand', ('feq', a, a), (op, a, b)), (op, a, b)),
1345 (('iand', ('feq', a, a), (op, b, a)), (op, b, a)),
1346 ]
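
# (Illustrative reminder, not used by the pass generator: NaN compares false
# with everything, itself included, so once 'a == a' is false the paired
# comparison would have been false as well.)
_nan = float('nan')
assert not (_nan == _nan) and not (_nan < 1.0) and not (_nan >= 1.0)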
1347
1348 # Add optimizations to handle the case where the result of a ternary is
1349 # compared to a constant. This way we can take things like
1350 #
1351 # (a ? 0 : 1) > 0
1352 #
1353 # and turn it into
1354 #
1355 # a ? (0 > 0) : (1 > 0)
1356 #
1357 # which constant folding will eat for lunch. The resulting ternary will
1358 # further get cleaned up by the boolean reductions above and we will be
1359 # left with just the original variable "a".
1360 for op in ['flt', 'fge', 'feq', 'fne',
1361 'ilt', 'ige', 'ieq', 'ine', 'ult', 'uge']:
1362 optimizations += [
1363 ((op, ('bcsel', 'a', '#b', '#c'), '#d'),
1364 ('bcsel', 'a', (op, 'b', 'd'), (op, 'c', 'd'))),
1365 ((op, '#d', ('bcsel', a, '#b', '#c')),
1366 ('bcsel', 'a', (op, 'd', 'b'), (op, 'd', 'c'))),
1367 ]
1368
1369
1370 # For example, this converts things like
1371 #
1372 # 1 + mix(0, a - 1, condition)
1373 #
1374 # into
1375 #
1376 # mix(1, (a-1)+1, condition)
1377 #
1378 # Other optimizations will rearrange the constants.
1379 for op in ['fadd', 'fmul', 'iadd', 'imul']:
1380 optimizations += [
1381 ((op, ('bcsel(is_used_once)', a, '#b', c), '#d'), ('bcsel', a, (op, b, d), (op, c, d)))
1382 ]
1383
1384 # For derivatives in compute shaders, GLSL_NV_compute_shader_derivatives
1385 # states:
1386 #
1387 # If neither layout qualifier is specified, derivatives in compute shaders
1388 # return zero, which is consistent with the handling of built-in texture
1389 # functions like texture() in GLSL 4.50 compute shaders.
1390 for op in ['fddx', 'fddx_fine', 'fddx_coarse',
1391 'fddy', 'fddy_fine', 'fddy_coarse']:
1392 optimizations += [
1393 ((op, 'a'), 0.0, 'info->stage == MESA_SHADER_COMPUTE && info->cs.derivative_group == DERIVATIVE_GROUP_NONE')
1394 ]
1395
1396 # Some optimizations for ir3-specific instructions.
1397 optimizations += [
1398 # 'al * bl': If either 'al' or 'bl' is zero, return zero.
1399 (('umul_low', '#a(is_lower_half_zero)', 'b'), (0)),
1400 # '(ah * bl) << 16 + c': If either 'ah' or 'bl' is zero, return 'c'.
1401 (('imadsh_mix16', '#a@32(is_lower_half_zero)', 'b@32', 'c@32'), ('c')),
1402 (('imadsh_mix16', 'a@32', '#b@32(is_upper_half_zero)', 'c@32'), ('c')),
1403 ]
1404
1405 # This section contains "late" optimizations that should be run before
1406 # creating ffmas and calling regular optimizations for the final time.
1407 # Optimizations should go here if they help code generation but conflict
1408 # with the regular optimizations.
1409 before_ffma_optimizations = [
1410 # Propagate constants down multiplication chains
1411 (('~fmul(is_used_once)', ('fmul(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('fmul', ('fmul', a, c), b)),
1412 (('imul(is_used_once)', ('imul(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('imul', ('imul', a, c), b)),
1413 (('~fadd(is_used_once)', ('fadd(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('fadd', ('fadd', a, c), b)),
1414 (('iadd(is_used_once)', ('iadd(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('iadd', ('iadd', a, c), b)),
1415
1416 (('~fadd', ('fmul', a, b), ('fmul', a, c)), ('fmul', a, ('fadd', b, c))),
1417 (('iadd', ('imul', a, b), ('imul', a, c)), ('imul', a, ('iadd', b, c))),
1418 (('~fadd', ('fneg', a), a), 0.0),
1419 (('iadd', ('ineg', a), a), 0),
1420 (('iadd', ('ineg', a), ('iadd', a, b)), b),
1421 (('iadd', a, ('iadd', ('ineg', a), b)), b),
1422 (('~fadd', ('fneg', a), ('fadd', a, b)), b),
1423 (('~fadd', a, ('fadd', ('fneg', a), b)), b),
1424
1425 (('~flrp@32', ('fadd(is_used_once)', a, -1.0), ('fadd(is_used_once)', a, 1.0), d), ('fadd', ('flrp', -1.0, 1.0, d), a)),
1426 (('~flrp@32', ('fadd(is_used_once)', a, 1.0), ('fadd(is_used_once)', a, -1.0), d), ('fadd', ('flrp', 1.0, -1.0, d), a)),
1427 (('~flrp@32', ('fadd(is_used_once)', a, '#b'), ('fadd(is_used_once)', a, '#c'), d), ('fadd', ('fmul', d, ('fadd', c, ('fneg', b))), ('fadd', a, b))),
1428 ]
1429
1430 # This section contains "late" optimizations that should be run after the
1431 # regular optimizations have finished. Optimizations should go here if
1432 # they help code generation but do not necessarily produce code that is
1433 # more easily optimizable.
1434 late_optimizations = [
1435 # Most of these optimizations aren't quite safe when you get infinity or
1436    # NaN involved, but the first one should be fine.
1437 (('flt', ('fadd', a, b), 0.0), ('flt', a, ('fneg', b))),
1438 (('flt', ('fneg', ('fadd', a, b)), 0.0), ('flt', ('fneg', a), b)),
1439 (('~fge', ('fadd', a, b), 0.0), ('fge', a, ('fneg', b))),
1440 (('~fge', ('fneg', ('fadd', a, b)), 0.0), ('fge', ('fneg', a), b)),
1441 (('~feq', ('fadd', a, b), 0.0), ('feq', a, ('fneg', b))),
1442 (('~fne', ('fadd', a, b), 0.0), ('fne', a, ('fneg', b))),
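   # For example, with a = +inf and b = -inf, fge(fadd(a, b), 0.0) compares NaN
   # and is false, while fge(a, fneg(b)) is inf >= inf and is true; flt agrees
   # on those inputs because inf < inf is false either way.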
1443
1444 # nir_lower_to_source_mods will collapse this, but its existence during the
1445 # optimization loop can prevent other optimizations.
1446 (('fneg', ('fneg', a)), a),
1447
1448 # These are duplicated from the main optimizations table. The late
1449 # patterns that rearrange expressions like x - .5 < 0 to x < .5 can create
1450 # new patterns like these. The patterns that compare with zero are removed
1451    # because they are unlikely to be created by anything in
1452 # late_optimizations.
1453 (('flt', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('flt', a, b)),
1454 (('flt', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('flt', b, a)),
1455 (('fge', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fge', a, b)),
1456 (('fge', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('fge', b, a)),
1457 (('feq', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('feq', a, b)),
1458 (('fne', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fne', a, b)),
1459
1460 (('fge', ('fsat(is_used_once)', a), 1.0), ('fge', a, 1.0)),
1461 (('flt', ('fsat(is_used_once)', a), 1.0), ('flt', a, 1.0)),
1462
1463 (('~fge', ('fmin(is_used_once)', ('fadd(is_used_once)', a, b), ('fadd', c, d)), 0.0), ('iand', ('fge', a, ('fneg', b)), ('fge', c, ('fneg', d)))),
1464
1465 (('flt', ('fneg', a), ('fneg', b)), ('flt', b, a)),
1466 (('fge', ('fneg', a), ('fneg', b)), ('fge', b, a)),
1467 (('feq', ('fneg', a), ('fneg', b)), ('feq', b, a)),
1468 (('fne', ('fneg', a), ('fneg', b)), ('fne', b, a)),
1469 (('flt', ('fneg', a), -1.0), ('flt', 1.0, a)),
1470 (('flt', -1.0, ('fneg', a)), ('flt', a, 1.0)),
1471 (('fge', ('fneg', a), -1.0), ('fge', 1.0, a)),
1472 (('fge', -1.0, ('fneg', a)), ('fge', a, 1.0)),
1473 (('fne', ('fneg', a), -1.0), ('fne', 1.0, a)),
1474 (('feq', -1.0, ('fneg', a)), ('feq', a, 1.0)),
1475
1476 (('ior', a, a), a),
1477 (('iand', a, a), a),
1478
1479 (('~fadd', ('fneg(is_used_once)', ('fsat(is_used_once)', 'a(is_not_fmul)')), 1.0), ('fsat', ('fadd', 1.0, ('fneg', a)))),
1480
1481 (('fdot2', a, b), ('fdot_replicated2', a, b), 'options->fdot_replicates'),
1482 (('fdot3', a, b), ('fdot_replicated3', a, b), 'options->fdot_replicates'),
1483 (('fdot4', a, b), ('fdot_replicated4', a, b), 'options->fdot_replicates'),
1484 (('fdph', a, b), ('fdph_replicated', a, b), 'options->fdot_replicates'),
1485
1486 (('~flrp@32', ('fadd(is_used_once)', a, b), ('fadd(is_used_once)', a, c), d), ('fadd', ('flrp', b, c, d), a)),
1487 (('~flrp@64', ('fadd(is_used_once)', a, b), ('fadd(is_used_once)', a, c), d), ('fadd', ('flrp', b, c, d), a)),
1488
1489 (('~fadd@32', 1.0, ('fmul(is_used_once)', c , ('fadd', b, -1.0 ))), ('fadd', ('fadd', 1.0, ('fneg', c)), ('fmul', b, c)), 'options->lower_flrp32'),
1490 (('~fadd@64', 1.0, ('fmul(is_used_once)', c , ('fadd', b, -1.0 ))), ('fadd', ('fadd', 1.0, ('fneg', c)), ('fmul', b, c)), 'options->lower_flrp64'),
1491
1492 # A similar operation could apply to any ffma(#a, b, #(-a/2)), but this
1493 # particular operation is common for expanding values stored in a texture
1494 # from [0,1] to [-1,1].
1495 (('~ffma@32', a, 2.0, -1.0), ('flrp', -1.0, 1.0, a ), '!options->lower_flrp32'),
1496 (('~ffma@32', a, -2.0, -1.0), ('flrp', -1.0, 1.0, ('fneg', a)), '!options->lower_flrp32'),
1497 (('~ffma@32', a, -2.0, 1.0), ('flrp', 1.0, -1.0, a ), '!options->lower_flrp32'),
1498 (('~ffma@32', a, 2.0, 1.0), ('flrp', 1.0, -1.0, ('fneg', a)), '!options->lower_flrp32'),
1499 (('~fadd@32', ('fmul(is_used_once)', 2.0, a), -1.0), ('flrp', -1.0, 1.0, a ), '!options->lower_flrp32'),
1500 (('~fadd@32', ('fmul(is_used_once)', -2.0, a), -1.0), ('flrp', -1.0, 1.0, ('fneg', a)), '!options->lower_flrp32'),
1501 (('~fadd@32', ('fmul(is_used_once)', -2.0, a), 1.0), ('flrp', 1.0, -1.0, a ), '!options->lower_flrp32'),
1502 (('~fadd@32', ('fmul(is_used_once)', 2.0, a), 1.0), ('flrp', 1.0, -1.0, ('fneg', a)), '!options->lower_flrp32'),
1503
1504 # flrp(a, b, a)
1505 # a*(1-a) + b*a
1506 # a + -a*a + a*b (1)
1507 # a + a*(b - a)
1508 # Option 1: ffma(a, (b-a), a)
1509 #
1510 # Alternately, after (1):
1511 # a*(1+b) + -a*a
1512 # a*((1+b) + -a)
1513 #
1514 # Let b=1
1515 #
1516 # Option 2: ffma(a, 2, -(a*a))
1517 # Option 3: ffma(a, 2, (-a)*a)
1518    # Option 4: ffma(a, -a, (2*a))
1519 # Option 5: a * (2 - a)
1520 #
1521 # There are a lot of other possible combinations.
1522 (('~ffma@32', ('fadd', b, ('fneg', a)), a, a), ('flrp', a, b, a), '!options->lower_flrp32'),
1523 (('~ffma@32', a, 2.0, ('fneg', ('fmul', a, a))), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
1524 (('~ffma@32', a, 2.0, ('fmul', ('fneg', a), a)), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
1525 (('~ffma@32', a, ('fneg', a), ('fmul', 2.0, a)), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
1526 (('~fmul@32', a, ('fadd', 2.0, ('fneg', a))), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
1527
1528 # we do these late so that we don't get in the way of creating ffmas
1529 (('fmin', ('fadd(is_used_once)', '#c', a), ('fadd(is_used_once)', '#c', b)), ('fadd', c, ('fmin', a, b))),
1530 (('fmax', ('fadd(is_used_once)', '#c', a), ('fadd(is_used_once)', '#c', b)), ('fadd', c, ('fmax', a, b))),
1531
1532 (('bcsel', a, 0, ('b2f32', ('inot', 'b@bool'))), ('b2f32', ('inot', ('ior', a, b)))),
1533
1534 # Things that look like DPH in the source shader may get expanded to
1535 # something that looks like dot(v1.xyz, v2.xyz) + v1.w by the time it gets
1536 # to NIR. After FFMA is generated, this can look like:
1537 #
1538 # fadd(ffma(v1.z, v2.z, ffma(v1.y, v2.y, fmul(v1.x, v2.x))), v1.w)
1539 #
1540 # Reassociate the last addition into the first multiplication.
1541 #
1542 # Some shaders do not use 'invariant' in vertex and (possibly) geometry
1543 # shader stages on some outputs that are intended to be invariant. For
1544 # various reasons, this optimization may not be fully applied in all
1545 # shaders used for different rendering passes of the same geometry. This
1546 # can result in Z-fighting artifacts (at best). For now, disable this
1547 # optimization in these stages. See bugzilla #111490. In tessellation
1548 # stages applications seem to use 'precise' when necessary, so allow the
1549 # optimization in those stages.
1550 (('~fadd', ('ffma(is_used_once)', a, b, ('ffma', c, d, ('fmul', 'e(is_not_const_and_not_fsign)', 'f(is_not_const_and_not_fsign)'))), 'g(is_not_const)'),
1551 ('ffma', a, b, ('ffma', c, d, ('ffma', e, 'f', 'g'))), '(info->stage != MESA_SHADER_VERTEX && info->stage != MESA_SHADER_GEOMETRY) && !options->intel_vec4'),
1552 (('~fadd', ('ffma(is_used_once)', a, b, ('fmul', 'c(is_not_const_and_not_fsign)', 'd(is_not_const_and_not_fsign)') ), 'e(is_not_const)'),
1553 ('ffma', a, b, ('ffma', c, d, e)), '(info->stage != MESA_SHADER_VERTEX && info->stage != MESA_SHADER_GEOMETRY) && !options->intel_vec4'),
1554 ]
1555
1556 print(nir_algebraic.AlgebraicPass("nir_opt_algebraic", optimizations).render())
1557 print(nir_algebraic.AlgebraicPass("nir_opt_algebraic_before_ffma",
1558 before_ffma_optimizations).render())
1559 print(nir_algebraic.AlgebraicPass("nir_opt_algebraic_late",
1560 late_optimizations).render())