nir/algebraic: a & ~(a >> 31) -> imax(a, 0)
[mesa.git] src/compiler/nir/nir_opt_algebraic.py
#
# Copyright (C) 2014 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
#    Jason Ekstrand (jason@jlekstrand.net)

from __future__ import print_function

from collections import OrderedDict
import nir_algebraic
from nir_opcodes import type_sizes
import itertools
import struct
from math import pi

# Convenience variables
a = 'a'
b = 'b'
c = 'c'
d = 'd'
e = 'e'

# Written in the form (<search>, <replace>) where <search> is an expression
# and <replace> is either an expression or a value.  An expression is
# defined as a tuple of the form ([~]<op>, <src0>, <src1>, <src2>, <src3>)
# where each source is either an expression or a value.  A value can be
# either a numeric constant or a string representing a variable name.
#
# If the opcode in a search expression is prefixed by a '~' character, this
# indicates that the operation is inexact.  Such operations will only get
# applied to SSA values that do not have the exact bit set.  This should be
# used by any optimizations that are not bit-for-bit exact.  It should not,
# however, be used for backend-requested lowering operations as those need to
# happen regardless of precision.
#
# Variable names are specified as "[#]name[@type][(cond)][.swiz]" where:
# "#" indicates that the given variable will only match constants,
# type indicates that the given variable will only match values from ALU
#    instructions with the given output type,
# (cond) specifies an additional condition function (see nir_search_helpers.h),
# swiz is a swizzle applied to the variable (only in the <replace> expression)
#
# For constants, you have to be careful to make sure that it is the right
# type because python is unaware of the source and destination types of the
# opcodes.
#
# All expression types can have a bit-size specified.  For opcodes, this
# looks like "op@32", for variables it is "a@32" or "a@uint32" to specify a
# type and size.  In the search half of the expression this indicates that it
# should only match that particular bit-size.  In the replace half of the
# expression this indicates that the constructed value should have that
# bit-size.
#
# If the opcode in a replacement expression is prefixed by a '!' character,
# this indicates that the new expression will be marked exact.
#
# A special condition "many-comm-expr" can be used with expressions to note
# that the expression and its subexpressions have more commutative expressions
# than nir_replace_instr can handle.  If this special condition is needed with
# another condition, the two can be separated by a comma (e.g.,
# "(many-comm-expr,is_used_once)").

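# As a purely illustrative (and hypothetical) example of this syntax, a rule
# written as
#
#    (('~fadd@32', ('fmul(is_used_once)', a, '#b'), a),
#     ('fmul', a, ('fadd', b, 1.0)))
#
# would match only inexact 32-bit fadds whose first source is a single-use
# fmul with a constant second source, and would rewrite a*b + a as
# a*(b + 1.0).  It is not one of the rules below.
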
# based on https://web.archive.org/web/20180105155939/http://forum.devmaster.net/t/fast-and-accurate-sine-cosine/9648
def lowered_sincos(c):
   x = ('fsub', ('fmul', 2.0, ('ffract', ('fadd', ('fmul', 0.5 / pi, a), c))), 1.0)
   x = ('fmul', ('fsub', x, ('fmul', x, ('fabs', x))), 4.0)
   return ('ffma', ('ffma', x, ('fabs', x), ('fneg', x)), 0.225, x)
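# Roughly: the first two lines above map the angle onto a phase x in
# [-1, 1) (the offset c -- 0.5 for fsin, 0.75 for fcos below -- selects
# sine or cosine), the parabola 4*x*(1 - |x|) approximates one period of
# sine, and the final ffma blends in 0.225*(x*|x| - x) to reduce the
# parabola's peak error.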

def intBitsToFloat(i):
   return struct.unpack('!f', struct.pack('!I', i))[0]

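# For example, intBitsToFloat(0x3f800000) == 1.0.
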
optimizations = [

   (('imul', a, '#b@32(is_pos_power_of_two)'), ('ishl', a, ('find_lsb', b)), '!options->lower_bitops'),
   (('imul', a, '#b@32(is_neg_power_of_two)'), ('ineg', ('ishl', a, ('find_lsb', ('iabs', b)))), '!options->lower_bitops'),
   (('ishl', a, '#b@32'), ('imul', a, ('ishl', 1, b)), 'options->lower_bitops'),

   (('unpack_64_2x32_split_x', ('imul_2x32_64(is_used_once)', a, b)), ('imul', a, b)),
   (('unpack_64_2x32_split_x', ('umul_2x32_64(is_used_once)', a, b)), ('imul', a, b)),
   (('imul_2x32_64', a, b), ('pack_64_2x32_split', ('imul', a, b), ('imul_high', a, b)), 'options->lower_mul_2x32_64'),
   (('umul_2x32_64', a, b), ('pack_64_2x32_split', ('imul', a, b), ('umul_high', a, b)), 'options->lower_mul_2x32_64'),
   (('udiv', a, 1), a),
   (('idiv', a, 1), a),
   (('umod', a, 1), 0),
   (('imod', a, 1), 0),
   (('udiv', a, '#b@32(is_pos_power_of_two)'), ('ushr', a, ('find_lsb', b)), '!options->lower_bitops'),
   (('idiv', a, '#b@32(is_pos_power_of_two)'), ('imul', ('isign', a), ('ushr', ('iabs', a), ('find_lsb', b))), 'options->lower_idiv'),
   (('idiv', a, '#b@32(is_neg_power_of_two)'), ('ineg', ('imul', ('isign', a), ('ushr', ('iabs', a), ('find_lsb', ('iabs', b))))), 'options->lower_idiv'),
   (('umod', a, '#b(is_pos_power_of_two)'), ('iand', a, ('isub', b, 1))),
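   # The umod rule above is the usual mask trick: for a power-of-two b such
   # as 8, the unsigned remainder a % 8 equals a & 7.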

   (('~fneg', ('fneg', a)), a),
   (('ineg', ('ineg', a)), a),
   (('fabs', ('fabs', a)), ('fabs', a)),
   (('fabs', ('fneg', a)), ('fabs', a)),
   (('fabs', ('u2f', a)), ('u2f', a)),
   (('iabs', ('iabs', a)), ('iabs', a)),
   (('iabs', ('ineg', a)), ('iabs', a)),
   (('f2b', ('fneg', a)), ('f2b', a)),
   (('i2b', ('ineg', a)), ('i2b', a)),
   (('~fadd', a, 0.0), a),
   (('iadd', a, 0), a),
   (('usadd_4x8', a, 0), a),
   (('usadd_4x8', a, ~0), ~0),
   (('~fadd', ('fmul', a, b), ('fmul', a, c)), ('fmul', a, ('fadd', b, c))),
   (('iadd', ('imul', a, b), ('imul', a, c)), ('imul', a, ('iadd', b, c))),
   (('~fadd', ('fneg', a), a), 0.0),
   (('iadd', ('ineg', a), a), 0),
   (('iadd', ('ineg', a), ('iadd', a, b)), b),
   (('iadd', a, ('iadd', ('ineg', a), b)), b),
   (('~fadd', ('fneg', a), ('fadd', a, b)), b),
   (('~fadd', a, ('fadd', ('fneg', a), b)), b),
   (('fadd', ('fsat', a), ('fsat', ('fneg', a))), ('fsat', ('fabs', a))),
   (('~fmul', a, 0.0), 0.0),
   (('imul', a, 0), 0),
   (('umul_unorm_4x8', a, 0), 0),
   (('umul_unorm_4x8', a, ~0), a),
   (('~fmul', a, 1.0), a),
   (('imul', a, 1), a),
   (('fmul', a, -1.0), ('fneg', a)),
   (('imul', a, -1), ('ineg', a)),
   # If a < 0: fsign(a)*a*a => -1*a*a => -a*a => abs(a)*a
   # If a > 0: fsign(a)*a*a => 1*a*a => a*a => abs(a)*a
   # If a == 0: fsign(a)*a*a => 0*0*0 => abs(0)*0
   (('fmul', ('fsign', a), ('fmul', a, a)), ('fmul', ('fabs', a), a)),
   (('fmul', ('fmul', ('fsign', a), a), a), ('fmul', ('fabs', a), a)),
   (('~ffma', 0.0, a, b), b),
   (('~ffma', a, b, 0.0), ('fmul', a, b)),
   (('ffma', 1.0, a, b), ('fadd', a, b)),
   (('ffma', -1.0, a, b), ('fadd', ('fneg', a), b)),
   (('~flrp', a, b, 0.0), a),
   (('~flrp', a, b, 1.0), b),
   (('~flrp', a, a, b), a),
   (('~flrp', 0.0, a, b), ('fmul', a, b)),

   # flrp(a, a + b, c) => a + flrp(0, b, c) => a + (b * c)
   (('~flrp', a, ('fadd(is_used_once)', a, b), c), ('fadd', ('fmul', b, c), a)),
   (('~flrp@32', a, ('fadd', a, b), c), ('fadd', ('fmul', b, c), a), 'options->lower_flrp32'),
   (('~flrp@64', a, ('fadd', a, b), c), ('fadd', ('fmul', b, c), a), 'options->lower_flrp64'),

   (('~flrp@32', ('fadd', a, b), ('fadd', a, c), d), ('fadd', ('flrp', b, c, d), a), 'options->lower_flrp32'),
   (('~flrp@64', ('fadd', a, b), ('fadd', a, c), d), ('fadd', ('flrp', b, c, d), a), 'options->lower_flrp64'),

   (('~flrp@32', a, ('fmul(is_used_once)', a, b), c), ('fmul', ('flrp', 1.0, b, c), a), 'options->lower_flrp32'),
   (('~flrp@64', a, ('fmul(is_used_once)', a, b), c), ('fmul', ('flrp', 1.0, b, c), a), 'options->lower_flrp64'),

   (('~flrp', ('fmul(is_used_once)', a, b), ('fmul(is_used_once)', a, c), d), ('fmul', ('flrp', b, c, d), a)),

   (('~flrp', a, b, ('b2f', 'c@1')), ('bcsel', c, b, a), 'options->lower_flrp32'),
   (('~flrp', a, 0.0, c), ('fadd', ('fmul', ('fneg', a), c), a)),
   (('ftrunc', a), ('bcsel', ('flt', a, 0.0), ('fneg', ('ffloor', ('fabs', a))), ('ffloor', ('fabs', a))), 'options->lower_ftrunc'),
   (('ffloor', a), ('fsub', a, ('ffract', a)), 'options->lower_ffloor'),
   (('fadd', a, ('fneg', ('ffract', a))), ('ffloor', a), '!options->lower_ffloor'),
   (('ffract', a), ('fsub', a, ('ffloor', a)), 'options->lower_ffract'),
   (('fceil', a), ('fneg', ('ffloor', ('fneg', a))), 'options->lower_fceil'),
   (('~fadd', ('fmul', a, ('fadd', 1.0, ('fneg', ('b2f', 'c@1')))), ('fmul', b, ('b2f', c))), ('bcsel', c, b, a), 'options->lower_flrp32'),
   (('~fadd@32', ('fmul', a, ('fadd', 1.0, ('fneg', c ) )), ('fmul', b, c )), ('flrp', a, b, c), '!options->lower_flrp32'),
   (('~fadd@64', ('fmul', a, ('fadd', 1.0, ('fneg', c ) )), ('fmul', b, c )), ('flrp', a, b, c), '!options->lower_flrp64'),
   # These are the same as the previous three rules, but it depends on
   # 1-fsat(x) <=> fsat(1-x).  See below.
   (('~fadd@32', ('fmul', a, ('fsat', ('fadd', 1.0, ('fneg', c )))), ('fmul', b, ('fsat', c))), ('flrp', a, b, ('fsat', c)), '!options->lower_flrp32'),
   (('~fadd@64', ('fmul', a, ('fsat', ('fadd', 1.0, ('fneg', c )))), ('fmul', b, ('fsat', c))), ('flrp', a, b, ('fsat', c)), '!options->lower_flrp64'),

   (('~fadd', a, ('fmul', ('b2f', 'c@1'), ('fadd', b, ('fneg', a)))), ('bcsel', c, b, a), 'options->lower_flrp32'),
   (('~fadd@32', a, ('fmul', c , ('fadd', b, ('fneg', a)))), ('flrp', a, b, c), '!options->lower_flrp32'),
   (('~fadd@64', a, ('fmul', c , ('fadd', b, ('fneg', a)))), ('flrp', a, b, c), '!options->lower_flrp64'),
   (('ffma', a, b, c), ('fadd', ('fmul', a, b), c), 'options->lower_ffma'),
   (('~fadd', ('fmul', a, b), c), ('ffma', a, b, c), 'options->fuse_ffma'),

   (('~fmul', ('fadd', ('iand', ('ineg', ('b2i32', 'a@bool')), ('fmul', b, c)), '#d'), '#e'),
    ('bcsel', a, ('fmul', ('fadd', ('fmul', b, c), d), e), ('fmul', d, e))),

   (('fdph', a, b), ('fdot4', ('vec4', 'a.x', 'a.y', 'a.z', 1.0), b), 'options->lower_fdph'),

   (('fdot4', ('vec4', a, b, c, 1.0), d), ('fdph', ('vec3', a, b, c), d), '!options->lower_fdph'),
   (('fdot4', ('vec4', a, 0.0, 0.0, 0.0), b), ('fmul', a, b)),
   (('fdot4', ('vec4', a, b, 0.0, 0.0), c), ('fdot2', ('vec2', a, b), c)),
   (('fdot4', ('vec4', a, b, c, 0.0), d), ('fdot3', ('vec3', a, b, c), d)),

   (('fdot3', ('vec3', a, 0.0, 0.0), b), ('fmul', a, b)),
   (('fdot3', ('vec3', a, b, 0.0), c), ('fdot2', ('vec2', a, b), c)),

   (('fdot2', ('vec2', a, 0.0), b), ('fmul', a, b)),
   (('fdot2', a, 1.0), ('fadd', 'a.x', 'a.y')),

   # Lower fdot to fsum when it is available
   (('fdot2', a, b), ('fsum2', ('fmul', a, b)), 'options->lower_fdot'),
   (('fdot3', a, b), ('fsum3', ('fmul', a, b)), 'options->lower_fdot'),
   (('fdot4', a, b), ('fsum4', ('fmul', a, b)), 'options->lower_fdot'),
   (('fsum2', a), ('fadd', 'a.x', 'a.y'), 'options->lower_fdot'),

   # If x >= 0 and x <= 1: fsat(1 - x) == 1 - fsat(x) trivially
   # If x < 0: 1 - fsat(x) => 1 - 0 => 1 and fsat(1 - x) => fsat(> 1) => 1
   # If x > 1: 1 - fsat(x) => 1 - 1 => 0 and fsat(1 - x) => fsat(< 0) => 0
   (('~fadd', ('fneg(is_used_once)', ('fsat(is_used_once)', 'a(is_not_fmul)')), 1.0), ('fsat', ('fadd', 1.0, ('fneg', a)))),

   # 1 - ((1 - a) * (1 - b))
   # 1 - (1 - a - b + a*b)
   # 1 - 1 + a + b - a*b
   # a + b - a*b
   # a + b*(1 - a)
   # b*(1 - a) + 1*a
   # flrp(b, 1, a)
   (('~fadd@32', 1.0, ('fneg', ('fmul', ('fadd', 1.0, ('fneg', a)), ('fadd', 1.0, ('fneg', b))))),
    ('flrp', b, 1.0, a), '!options->lower_flrp32'),

   # (a * #b + #c) << #d
   # ((a * #b) << #d) + (#c << #d)
   # (a * (#b << #d)) + (#c << #d)
   (('ishl', ('iadd', ('imul', a, '#b'), '#c'), '#d'),
    ('iadd', ('imul', a, ('ishl', b, d)), ('ishl', c, d))),

   # (a * #b) << #c
   # a * (#b << #c)
   (('ishl', ('imul', a, '#b'), '#c'), ('imul', a, ('ishl', b, c))),
]

# Care must be taken here.  Shifts in NIR use only the lower log2(bitsize)
# bits of the second source.  These replacements must correctly handle the
# case where (b % bitsize) + (c % bitsize) >= bitsize.
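# For example, with s == 32 and b == c == 20, ishl(ishl(a, 20), 20) must
# produce 0, but naively combining the shifts would compute
# ishl(a, (20 + 20) & 31) == ishl(a, 8); the in_bounds bcsel below guards
# against that.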
for s in [8, 16, 32, 64]:
   mask = (1 << s) - 1

   ishl = "ishl@{}".format(s)
   ishr = "ishr@{}".format(s)
   ushr = "ushr@{}".format(s)

   in_bounds = ('ult', ('iadd', ('iand', b, mask), ('iand', c, mask)), s)

   optimizations.extend([
       ((ishl, (ishl, a, '#b'), '#c'), ('bcsel', in_bounds, (ishl, a, ('iadd', b, c)), 0)),
       ((ushr, (ushr, a, '#b'), '#c'), ('bcsel', in_bounds, (ushr, a, ('iadd', b, c)), 0)),

       # To get -1 for large shifts of negative values, ishr must instead
       # clamp the shift count to the maximum value.
       ((ishr, (ishr, a, '#b'), '#c'),
        (ishr, a, ('imin', ('iadd', ('iand', b, mask), ('iand', c, mask)), s - 1))),
   ])

optimizations.extend([
   # This is common for address calculations.  Reassociating may enable the
   # 'a<<c' to be CSE'd.  It also helps architectures that have an ISHLADD
   # instruction or a constant offset field in load / store instructions.
   (('ishl', ('iadd', a, '#b'), '#c'), ('iadd', ('ishl', a, c), ('ishl', b, c))),

   # Comparison simplifications
   (('~inot', ('flt', a, b)), ('fge', a, b)),
   (('~inot', ('fge', a, b)), ('flt', a, b)),
   (('inot', ('feq', a, b)), ('fne', a, b)),
   (('inot', ('fne', a, b)), ('feq', a, b)),
   (('inot', ('ilt', a, b)), ('ige', a, b)),
   (('inot', ('ult', a, b)), ('uge', a, b)),
   (('inot', ('ige', a, b)), ('ilt', a, b)),
   (('inot', ('uge', a, b)), ('ult', a, b)),
   (('inot', ('ieq', a, b)), ('ine', a, b)),
   (('inot', ('ine', a, b)), ('ieq', a, b)),

   (('iand', ('feq', a, b), ('fne', a, b)), False),
   (('iand', ('flt', a, b), ('flt', b, a)), False),
   (('iand', ('ieq', a, b), ('ine', a, b)), False),
   (('iand', ('ilt', a, b), ('ilt', b, a)), False),
   (('iand', ('ult', a, b), ('ult', b, a)), False),

   # This helps some shaders because, after some optimizations, they end up
   # with patterns like (-a < -b) || (b < a).  In an ideal world, this sort of
   # matching would be handled by CSE.
   (('flt', ('fneg', a), ('fneg', b)), ('flt', b, a)),
   (('fge', ('fneg', a), ('fneg', b)), ('fge', b, a)),
   (('feq', ('fneg', a), ('fneg', b)), ('feq', b, a)),
   (('fne', ('fneg', a), ('fneg', b)), ('fne', b, a)),
   (('flt', ('fneg', a), -1.0), ('flt', 1.0, a)),
   (('flt', -1.0, ('fneg', a)), ('flt', a, 1.0)),
   (('fge', ('fneg', a), -1.0), ('fge', 1.0, a)),
   (('fge', -1.0, ('fneg', a)), ('fge', a, 1.0)),
   (('fne', ('fneg', a), -1.0), ('fne', 1.0, a)),
   (('feq', -1.0, ('fneg', a)), ('feq', a, 1.0)),

   (('flt', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('flt', a, b)),
   (('flt', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('flt', b, a)),
   (('fge', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fge', a, b)),
   (('fge', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('fge', b, a)),
   (('feq', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('feq', a, b)),
   (('fne', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fne', a, b)),

   (('fge', ('fsat(is_used_once)', a), 1.0), ('fge', a, 1.0)),
   (('flt', ('fsat(is_used_once)', a), 1.0), ('flt', a, 1.0)),
   (('fge', 0.0, ('fsat(is_used_once)', a)), ('fge', 0.0, a)),
   (('flt', 0.0, ('fsat(is_used_once)', a)), ('flt', 0.0, a)),

   # 0.0 >= b2f(a)
   # b2f(a) <= 0.0
   # b2f(a) == 0.0 because b2f(a) can only be 0 or 1
   # inot(a)
   (('fge', 0.0, ('b2f', 'a@1')), ('inot', a)),

   (('fge', ('fneg', ('b2f', 'a@1')), 0.0), ('inot', a)),

   (('fne', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('ior', a, b)),
   (('fne', ('fmax', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('ior', a, b)),
   (('fne', ('bcsel', a, 1.0, ('b2f', 'b@1')) , 0.0), ('ior', a, b)),
   (('fne', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), ('ior', a, b)),
   (('fne', ('fmul', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('iand', a, b)),
   (('fne', ('fmin', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('iand', a, b)),
   (('fne', ('bcsel', a, ('b2f', 'b@1'), 0.0) , 0.0), ('iand', a, b)),
   (('fne', ('fadd', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), 0.0), ('ixor', a, b)),
   (('fne', ('b2f', 'a@1') , ('b2f', 'b@1') ), ('ixor', a, b)),
   (('fne', ('fneg', ('b2f', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('ixor', a, b)),
   (('feq', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('ior', a, b))),
   (('feq', ('fmax', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('ior', a, b))),
   (('feq', ('bcsel', a, 1.0, ('b2f', 'b@1')) , 0.0), ('inot', ('ior', a, b))),
   (('feq', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), ('inot', ('ior', a, b))),
   (('feq', ('fmul', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('iand', a, b))),
   (('feq', ('fmin', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('iand', a, b))),
   (('feq', ('bcsel', a, ('b2f', 'b@1'), 0.0) , 0.0), ('inot', ('iand', a, b))),
   (('feq', ('fadd', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), 0.0), ('ieq', a, b)),
   (('feq', ('b2f', 'a@1') , ('b2f', 'b@1') ), ('ieq', a, b)),
   (('feq', ('fneg', ('b2f', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('ieq', a, b)),

   # -(b2f(a) + b2f(b)) < 0
   # 0 < b2f(a) + b2f(b)
   # 0 != b2f(a) + b2f(b)       b2f must be 0 or 1, so the sum is non-negative
   # a || b
   (('flt', ('fneg', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), 0.0), ('ior', a, b)),
   (('flt', 0.0, ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), ('ior', a, b)),

   # -(b2f(a) + b2f(b)) >= 0
   # 0 >= b2f(a) + b2f(b)
   # 0 == b2f(a) + b2f(b)       b2f must be 0 or 1, so the sum is non-negative
   # !(a || b)
   (('fge', ('fneg', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), 0.0), ('inot', ('ior', a, b))),
   (('fge', 0.0, ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), ('inot', ('ior', a, b))),

   (('flt', a, ('fneg', a)), ('flt', a, 0.0)),
   (('fge', a, ('fneg', a)), ('fge', a, 0.0)),

   # Some optimizations (below) convert things like (a < b || c < b) into
   # (min(a, c) < b).  However, this interferes with the previous optimizations
   # that try to remove comparisons with negated sums of b2f.  This just
   # breaks that apart.
   (('flt', ('fmin', c, ('fneg', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1')))), 0.0),
    ('ior', ('flt', c, 0.0), ('ior', a, b))),

   (('~flt', ('fadd', a, b), a), ('flt', b, 0.0)),
   (('~fge', ('fadd', a, b), a), ('fge', b, 0.0)),
   (('~feq', ('fadd', a, b), a), ('feq', b, 0.0)),
   (('~fne', ('fadd', a, b), a), ('fne', b, 0.0)),
   (('~flt', ('fadd(is_used_once)', a, '#b'), '#c'), ('flt', a, ('fadd', c, ('fneg', b)))),
   (('~flt', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('flt', ('fneg', ('fadd', c, b)), a)),
   (('~fge', ('fadd(is_used_once)', a, '#b'), '#c'), ('fge', a, ('fadd', c, ('fneg', b)))),
   (('~fge', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('fge', ('fneg', ('fadd', c, b)), a)),
   (('~feq', ('fadd(is_used_once)', a, '#b'), '#c'), ('feq', a, ('fadd', c, ('fneg', b)))),
   (('~feq', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('feq', ('fneg', ('fadd', c, b)), a)),
   (('~fne', ('fadd(is_used_once)', a, '#b'), '#c'), ('fne', a, ('fadd', c, ('fneg', b)))),
   (('~fne', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('fne', ('fneg', ('fadd', c, b)), a)),

   # Cannot remove the addition from ilt or ige due to overflow.
   (('ieq', ('iadd', a, b), a), ('ieq', b, 0)),
   (('ine', ('iadd', a, b), a), ('ine', b, 0)),

   # fmin(-b2f(a), b) >= 0.0
   # -b2f(a) >= 0.0 && b >= 0.0
   # -b2f(a) == 0.0 && b >= 0.0    -b2f can only be 0 or -1, never >0
   # b2f(a) == 0.0 && b >= 0.0
   # a == False && b >= 0.0
   # !a && b >= 0.0
   #
   # The fge in the second replacement is not a typo.  I leave the proof that
   # "fmin(-b2f(a), b) >= 0 <=> fmin(-b2f(a), b) == 0" as an exercise for the
   # reader.
   (('fge', ('fmin', ('fneg', ('b2f', 'a@1')), 'b@1'), 0.0), ('iand', ('inot', a), ('fge', b, 0.0))),
   (('feq', ('fmin', ('fneg', ('b2f', 'a@1')), 'b@1'), 0.0), ('iand', ('inot', a), ('fge', b, 0.0))),

   (('feq', ('b2f', 'a@1'), 0.0), ('inot', a)),
   (('~fne', ('b2f', 'a@1'), 0.0), a),
   (('ieq', ('b2i', 'a@1'), 0), ('inot', a)),
   (('ine', ('b2i', 'a@1'), 0), a),

   (('fne', ('u2f', a), 0.0), ('ine', a, 0)),
   (('feq', ('u2f', a), 0.0), ('ieq', a, 0)),
   (('fge', ('u2f', a), 0.0), True),
   (('fge', 0.0, ('u2f', a)), ('uge', 0, a)), # ieq instead?
   (('flt', ('u2f', a), 0.0), False),
   (('flt', 0.0, ('u2f', a)), ('ult', 0, a)), # ine instead?
   (('fne', ('i2f', a), 0.0), ('ine', a, 0)),
   (('feq', ('i2f', a), 0.0), ('ieq', a, 0)),
   (('fge', ('i2f', a), 0.0), ('ige', a, 0)),
   (('fge', 0.0, ('i2f', a)), ('ige', 0, a)),
   (('flt', ('i2f', a), 0.0), ('ilt', a, 0)),
   (('flt', 0.0, ('i2f', a)), ('ilt', 0, a)),

   # 0.0 < fabs(a)
   # fabs(a) > 0.0
   # fabs(a) != 0.0 because fabs(a) must be >= 0
   # a != 0.0
   (('~flt', 0.0, ('fabs', a)), ('fne', a, 0.0)),

   # -fabs(a) < 0.0
   # fabs(a) > 0.0
   (('~flt', ('fneg', ('fabs', a)), 0.0), ('fne', a, 0.0)),

   # 0.0 >= fabs(a)
   # 0.0 == fabs(a)   because fabs(a) must be >= 0
   # 0.0 == a
   (('fge', 0.0, ('fabs', a)), ('feq', a, 0.0)),

   # -fabs(a) >= 0.0
   # 0.0 >= fabs(a)
   (('fge', ('fneg', ('fabs', a)), 0.0), ('feq', a, 0.0)),

   # (a >= 0.0) && (a <= 1.0) -> fsat(a) == a
   (('iand', ('fge', a, 0.0), ('fge', 1.0, a)), ('feq', a, ('fsat', a)), '!options->lower_fsat'),

   # (a < 0.0) || (a > 1.0)
   # !(!(a < 0.0) && !(a > 1.0))
   # !((a >= 0.0) && (a <= 1.0))
   # !(a == fsat(a))
   # a != fsat(a)
   (('ior', ('flt', a, 0.0), ('flt', 1.0, a)), ('fne', a, ('fsat', a)), '!options->lower_fsat'),

   (('fmax', ('b2f(is_used_once)', 'a@1'), ('b2f', 'b@1')), ('b2f', ('ior', a, b))),
   (('fmax', ('fneg(is_used_once)', ('b2f(is_used_once)', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('fneg', ('b2f', ('ior', a, b)))),
   (('fmin', ('b2f(is_used_once)', 'a@1'), ('b2f', 'b@1')), ('b2f', ('iand', a, b))),
   (('fmin', ('fneg(is_used_once)', ('b2f(is_used_once)', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('fneg', ('b2f', ('iand', a, b)))),

   # fmin(b2f(a), b)
   # bcsel(a, fmin(b2f(a), b), fmin(b2f(a), b))
   # bcsel(a, fmin(b2f(True), b), fmin(b2f(False), b))
   # bcsel(a, fmin(1.0, b), fmin(0.0, b))
   #
   # Since b is a constant, constant folding will eliminate both fmin
   # operations.  If b is > 1.0, the bcsel will be replaced with a b2f.
   (('fmin', ('b2f', 'a@1'), '#b'), ('bcsel', a, ('fmin', b, 1.0), ('fmin', b, 0.0))),

   (('flt', ('fadd(is_used_once)', a, ('fneg', b)), 0.0), ('flt', a, b)),

   (('fge', ('fneg', ('fabs', a)), 0.0), ('feq', a, 0.0)),
   (('~bcsel', ('flt', b, a), b, a), ('fmin', a, b)),
   (('~bcsel', ('flt', a, b), b, a), ('fmax', a, b)),
   (('~bcsel', ('fge', a, b), b, a), ('fmin', a, b)),
   (('~bcsel', ('fge', b, a), b, a), ('fmax', a, b)),
   (('bcsel', ('i2b', a), b, c), ('bcsel', ('ine', a, 0), b, c)),
   (('bcsel', ('inot', a), b, c), ('bcsel', a, c, b)),
   (('bcsel', a, ('bcsel', a, b, c), d), ('bcsel', a, b, d)),
   (('bcsel', a, b, ('bcsel', a, c, d)), ('bcsel', a, b, d)),
   (('bcsel', a, ('bcsel', b, c, d), ('bcsel(is_used_once)', b, c, 'e')), ('bcsel', b, c, ('bcsel', a, d, 'e'))),
   (('bcsel', a, ('bcsel(is_used_once)', b, c, d), ('bcsel', b, c, 'e')), ('bcsel', b, c, ('bcsel', a, d, 'e'))),
   (('bcsel', a, ('bcsel', b, c, d), ('bcsel(is_used_once)', b, 'e', d)), ('bcsel', b, ('bcsel', a, c, 'e'), d)),
   (('bcsel', a, ('bcsel(is_used_once)', b, c, d), ('bcsel', b, 'e', d)), ('bcsel', b, ('bcsel', a, c, 'e'), d)),
   (('bcsel', a, True, b), ('ior', a, b)),
   (('bcsel', a, a, b), ('ior', a, b)),
   (('bcsel', a, b, False), ('iand', a, b)),
   (('bcsel', a, b, a), ('iand', a, b)),
   (('~fmin', a, a), a),
   (('~fmax', a, a), a),
   (('imin', a, a), a),
   (('imax', a, a), a),
   (('umin', a, a), a),
   (('umax', a, a), a),
   (('fmax', ('fmax', a, b), b), ('fmax', a, b)),
   (('umax', ('umax', a, b), b), ('umax', a, b)),
   (('imax', ('imax', a, b), b), ('imax', a, b)),
   (('fmin', ('fmin', a, b), b), ('fmin', a, b)),
   (('umin', ('umin', a, b), b), ('umin', a, b)),
   (('imin', ('imin', a, b), b), ('imin', a, b)),
   (('iand@32', a, ('inot', ('ishr', a, 31))), ('imax', a, 0)),
   (('fmax', a, ('fneg', a)), ('fabs', a)),
   (('imax', a, ('ineg', a)), ('iabs', a)),
   (('fmin', a, ('fneg', a)), ('fneg', ('fabs', a))),
   (('imin', a, ('ineg', a)), ('ineg', ('iabs', a))),
   (('fmin', a, ('fneg', ('fabs', a))), ('fneg', ('fabs', a))),
   (('imin', a, ('ineg', ('iabs', a))), ('ineg', ('iabs', a))),
   (('~fmin', a, ('fabs', a)), a),
   (('imin', a, ('iabs', a)), a),
   (('~fmax', a, ('fneg', ('fabs', a))), a),
   (('imax', a, ('ineg', ('iabs', a))), a),
   (('fmax', a, ('fabs', a)), ('fabs', a)),
   (('imax', a, ('iabs', a)), ('iabs', a)),
   (('fmax', a, ('fneg', a)), ('fabs', a)),
   (('imax', a, ('ineg', a)), ('iabs', a)),
   (('~fmax', ('fabs', a), 0.0), ('fabs', a)),
   (('~fmin', ('fmax', a, 0.0), 1.0), ('fsat', a), '!options->lower_fsat'),
   (('~fmax', ('fmin', a, 1.0), 0.0), ('fsat', a), '!options->lower_fsat'),
   (('~fmin', ('fmax', a, -1.0), 0.0), ('fneg', ('fsat', ('fneg', a))), '!options->lower_fsat'),
   (('~fmax', ('fmin', a, 0.0), -1.0), ('fneg', ('fsat', ('fneg', a))), '!options->lower_fsat'),
   (('fsat', ('fsign', a)), ('b2f', ('flt', 0.0, a))),
   (('fsat', ('b2f', a)), ('b2f', a)),
   (('fsat', a), ('fmin', ('fmax', a, 0.0), 1.0), 'options->lower_fsat'),
   (('fsat', ('fsat', a)), ('fsat', a)),
   (('fsat', ('fneg(is_used_once)', ('fadd(is_used_once)', a, b))), ('fsat', ('fadd', ('fneg', a), ('fneg', b))), '!options->lower_fsat'),
   (('fsat', ('fneg(is_used_once)', ('fmul(is_used_once)', a, b))), ('fsat', ('fmul', ('fneg', a), b)), '!options->lower_fsat'),
   (('fsat', ('fabs(is_used_once)', ('fmul(is_used_once)', a, b))), ('fsat', ('fmul', ('fabs', a), ('fabs', b))), '!options->lower_fsat'),
   (('fmin', ('fmax', ('fmin', ('fmax', a, b), c), b), c), ('fmin', ('fmax', a, b), c)),
   (('imin', ('imax', ('imin', ('imax', a, b), c), b), c), ('imin', ('imax', a, b), c)),
   (('umin', ('umax', ('umin', ('umax', a, b), c), b), c), ('umin', ('umax', a, b), c)),
   (('fmax', ('fsat', a), '#b@32(is_zero_to_one)'), ('fsat', ('fmax', a, b))),
   (('fmin', ('fsat', a), '#b@32(is_zero_to_one)'), ('fsat', ('fmin', a, b))),
   (('extract_u8', ('imin', ('imax', a, 0), 0xff), 0), ('imin', ('imax', a, 0), 0xff)),
   (('~ior', ('flt(is_used_once)', a, b), ('flt', a, c)), ('flt', a, ('fmax', b, c))),
   (('~ior', ('flt(is_used_once)', a, c), ('flt', b, c)), ('flt', ('fmin', a, b), c)),
   (('~ior', ('fge(is_used_once)', a, b), ('fge', a, c)), ('fge', a, ('fmin', b, c))),
   (('~ior', ('fge(is_used_once)', a, c), ('fge', b, c)), ('fge', ('fmax', a, b), c)),
   (('~ior', ('flt', a, '#b'), ('flt', a, '#c')), ('flt', a, ('fmax', b, c))),
   (('~ior', ('flt', '#a', c), ('flt', '#b', c)), ('flt', ('fmin', a, b), c)),
   (('~ior', ('fge', a, '#b'), ('fge', a, '#c')), ('fge', a, ('fmin', b, c))),
   (('~ior', ('fge', '#a', c), ('fge', '#b', c)), ('fge', ('fmax', a, b), c)),
   (('~iand', ('flt(is_used_once)', a, b), ('flt', a, c)), ('flt', a, ('fmin', b, c))),
   (('~iand', ('flt(is_used_once)', a, c), ('flt', b, c)), ('flt', ('fmax', a, b), c)),
   (('~iand', ('fge(is_used_once)', a, b), ('fge', a, c)), ('fge', a, ('fmax', b, c))),
   (('~iand', ('fge(is_used_once)', a, c), ('fge', b, c)), ('fge', ('fmin', a, b), c)),
   (('~iand', ('flt', a, '#b'), ('flt', a, '#c')), ('flt', a, ('fmin', b, c))),
   (('~iand', ('flt', '#a', c), ('flt', '#b', c)), ('flt', ('fmax', a, b), c)),
   (('~iand', ('fge', a, '#b'), ('fge', a, '#c')), ('fge', a, ('fmax', b, c))),
   (('~iand', ('fge', '#a', c), ('fge', '#b', c)), ('fge', ('fmin', a, b), c)),

   (('ior', ('ilt(is_used_once)', a, b), ('ilt', a, c)), ('ilt', a, ('imax', b, c))),
   (('ior', ('ilt(is_used_once)', a, c), ('ilt', b, c)), ('ilt', ('imin', a, b), c)),
   (('ior', ('ige(is_used_once)', a, b), ('ige', a, c)), ('ige', a, ('imin', b, c))),
   (('ior', ('ige(is_used_once)', a, c), ('ige', b, c)), ('ige', ('imax', a, b), c)),
   (('ior', ('ult(is_used_once)', a, b), ('ult', a, c)), ('ult', a, ('umax', b, c))),
   (('ior', ('ult(is_used_once)', a, c), ('ult', b, c)), ('ult', ('umin', a, b), c)),
   (('ior', ('uge(is_used_once)', a, b), ('uge', a, c)), ('uge', a, ('umin', b, c))),
   (('ior', ('uge(is_used_once)', a, c), ('uge', b, c)), ('uge', ('umax', a, b), c)),
   (('iand', ('ilt(is_used_once)', a, b), ('ilt', a, c)), ('ilt', a, ('imin', b, c))),
   (('iand', ('ilt(is_used_once)', a, c), ('ilt', b, c)), ('ilt', ('imax', a, b), c)),
   (('iand', ('ige(is_used_once)', a, b), ('ige', a, c)), ('ige', a, ('imax', b, c))),
   (('iand', ('ige(is_used_once)', a, c), ('ige', b, c)), ('ige', ('imin', a, b), c)),
   (('iand', ('ult(is_used_once)', a, b), ('ult', a, c)), ('ult', a, ('umin', b, c))),
   (('iand', ('ult(is_used_once)', a, c), ('ult', b, c)), ('ult', ('umax', a, b), c)),
   (('iand', ('uge(is_used_once)', a, b), ('uge', a, c)), ('uge', a, ('umax', b, c))),
   (('iand', ('uge(is_used_once)', a, c), ('uge', b, c)), ('uge', ('umin', a, b), c)),

   # These derive from the previous patterns with the application of b < 0 <=>
   # 0 < -b.  The transformation should be applied if either comparison is
   # used once as this ensures that the number of comparisons will not
   # increase.  The sources to the ior and iand are not symmetric, so the
   # rules have to be duplicated to get this behavior.
   (('~ior', ('flt(is_used_once)', 0.0, 'a@32'), ('flt', 'b@32', 0.0)), ('flt', 0.0, ('fmax', a, ('fneg', b)))),
   (('~ior', ('flt', 0.0, 'a@32'), ('flt(is_used_once)', 'b@32', 0.0)), ('flt', 0.0, ('fmax', a, ('fneg', b)))),
   (('~ior', ('fge(is_used_once)', 0.0, 'a@32'), ('fge', 'b@32', 0.0)), ('fge', 0.0, ('fmin', a, ('fneg', b)))),
   (('~ior', ('fge', 0.0, 'a@32'), ('fge(is_used_once)', 'b@32', 0.0)), ('fge', 0.0, ('fmin', a, ('fneg', b)))),
   (('~iand', ('flt(is_used_once)', 0.0, 'a@32'), ('flt', 'b@32', 0.0)), ('flt', 0.0, ('fmin', a, ('fneg', b)))),
   (('~iand', ('flt', 0.0, 'a@32'), ('flt(is_used_once)', 'b@32', 0.0)), ('flt', 0.0, ('fmin', a, ('fneg', b)))),
   (('~iand', ('fge(is_used_once)', 0.0, 'a@32'), ('fge', 'b@32', 0.0)), ('fge', 0.0, ('fmax', a, ('fneg', b)))),
   (('~iand', ('fge', 0.0, 'a@32'), ('fge(is_used_once)', 'b@32', 0.0)), ('fge', 0.0, ('fmax', a, ('fneg', b)))),

   # Common pattern like 'if (i == 0 || i == 1 || ...)'
   (('ior', ('ieq', a, 0), ('ieq', a, 1)), ('uge', 1, a)),
   (('ior', ('uge', 1, a), ('ieq', a, 2)), ('uge', 2, a)),
   (('ior', ('uge', 2, a), ('ieq', a, 3)), ('uge', 3, a)),
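   # These work because the comparisons are unsigned: a negative i would
   # compare as a huge unsigned value, so, e.g., ('uge', 1, a) is true
   # exactly when a is 0 or 1.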

   # The (i2f32, ...) part is an open-coded fsign.  When that is combined with
   # the bcsel, it's basically copysign(1.0, a).  There is no copysign in NIR,
   # so emit an open-coded version of that.
   (('bcsel@32', ('feq', a, 0.0), 1.0, ('i2f32', ('iadd', ('b2i32', ('flt', 0.0, 'a@32')), ('ineg', ('b2i32', ('flt', 'a@32', 0.0)))))),
    ('ior', 0x3f800000, ('iand', a, 0x80000000))),
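   # (0x3f800000 is the bit pattern of 1.0f and 0x80000000 is the sign bit,
   # so the replacement simply ORs the bits of 1.0 with the sign bit of a.)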

   (('ior', a, ('ieq', a, False)), True),
   (('ior', a, ('inot', a)), -1),

   (('ine', ('ineg', ('b2i32', 'a@1')), ('ineg', ('b2i32', 'b@1'))), ('ine', a, b)),
   (('b2i32', ('ine', 'a@1', 'b@1')), ('b2i32', ('ixor', a, b))),

   (('iand', ('ieq', 'a@32', 0), ('ieq', 'b@32', 0)), ('ieq', ('ior', 'a@32', 'b@32'), 0), '!options->lower_bitops'),

   # These patterns can result when (a < b || a < c) => (a < min(b, c))
   # transformations occur before constant propagation and loop-unrolling.
   (('~flt', a, ('fmax', b, a)), ('flt', a, b)),
   (('~flt', ('fmin', a, b), a), ('flt', b, a)),
   (('~fge', a, ('fmin', b, a)), True),
   (('~fge', ('fmax', a, b), a), True),
   (('~flt', a, ('fmin', b, a)), False),
   (('~flt', ('fmax', a, b), a), False),
   (('~fge', a, ('fmax', b, a)), ('fge', a, b)),
   (('~fge', ('fmin', a, b), a), ('fge', b, a)),

   (('ilt', a, ('imax', b, a)), ('ilt', a, b)),
   (('ilt', ('imin', a, b), a), ('ilt', b, a)),
   (('ige', a, ('imin', b, a)), True),
   (('ige', ('imax', a, b), a), True),
   (('ult', a, ('umax', b, a)), ('ult', a, b)),
   (('ult', ('umin', a, b), a), ('ult', b, a)),
   (('uge', a, ('umin', b, a)), True),
   (('uge', ('umax', a, b), a), True),
   (('ilt', a, ('imin', b, a)), False),
   (('ilt', ('imax', a, b), a), False),
   (('ige', a, ('imax', b, a)), ('ige', a, b)),
   (('ige', ('imin', a, b), a), ('ige', b, a)),
   (('ult', a, ('umin', b, a)), False),
   (('ult', ('umax', a, b), a), False),
   (('uge', a, ('umax', b, a)), ('uge', a, b)),
   (('uge', ('umin', a, b), a), ('uge', b, a)),
   (('ult', a, ('iand', b, a)), False),
   (('ult', ('ior', a, b), a), False),
   (('uge', a, ('iand', b, a)), True),
   (('uge', ('ior', a, b), a), True),

   (('ilt', '#a', ('imax', '#b', c)), ('ior', ('ilt', a, b), ('ilt', a, c))),
   (('ilt', ('imin', '#a', b), '#c'), ('ior', ('ilt', a, c), ('ilt', b, c))),
   (('ige', '#a', ('imin', '#b', c)), ('ior', ('ige', a, b), ('ige', a, c))),
   (('ige', ('imax', '#a', b), '#c'), ('ior', ('ige', a, c), ('ige', b, c))),
   (('ult', '#a', ('umax', '#b', c)), ('ior', ('ult', a, b), ('ult', a, c))),
   (('ult', ('umin', '#a', b), '#c'), ('ior', ('ult', a, c), ('ult', b, c))),
   (('uge', '#a', ('umin', '#b', c)), ('ior', ('uge', a, b), ('uge', a, c))),
   (('uge', ('umax', '#a', b), '#c'), ('ior', ('uge', a, c), ('uge', b, c))),
   (('ilt', '#a', ('imin', '#b', c)), ('iand', ('ilt', a, b), ('ilt', a, c))),
   (('ilt', ('imax', '#a', b), '#c'), ('iand', ('ilt', a, c), ('ilt', b, c))),
   (('ige', '#a', ('imax', '#b', c)), ('iand', ('ige', a, b), ('ige', a, c))),
   (('ige', ('imin', '#a', b), '#c'), ('iand', ('ige', a, c), ('ige', b, c))),
   (('ult', '#a', ('umin', '#b', c)), ('iand', ('ult', a, b), ('ult', a, c))),
   (('ult', ('umax', '#a', b), '#c'), ('iand', ('ult', a, c), ('ult', b, c))),
   (('uge', '#a', ('umax', '#b', c)), ('iand', ('uge', a, b), ('uge', a, c))),
   (('uge', ('umin', '#a', b), '#c'), ('iand', ('uge', a, c), ('uge', b, c))),

   # Thanks to sign extension, the ishr(a, b) is negative if and only if a is
   # negative.
   (('bcsel', ('ilt', a, 0), ('ineg', ('ishr', a, b)), ('ishr', a, b)),
    ('iabs', ('ishr', a, b))),
   (('iabs', ('ishr', ('iabs', a), b)), ('ishr', ('iabs', a), b)),

   (('fabs', ('slt', a, b)), ('slt', a, b)),
   (('fabs', ('sge', a, b)), ('sge', a, b)),
   (('fabs', ('seq', a, b)), ('seq', a, b)),
   (('fabs', ('sne', a, b)), ('sne', a, b)),
   (('slt', a, b), ('b2f', ('flt', a, b)), 'options->lower_scmp'),
   (('sge', a, b), ('b2f', ('fge', a, b)), 'options->lower_scmp'),
   (('seq', a, b), ('b2f', ('feq', a, b)), 'options->lower_scmp'),
   (('sne', a, b), ('b2f', ('fne', a, b)), 'options->lower_scmp'),
   (('seq', ('seq', a, b), 1.0), ('seq', a, b)),
   (('seq', ('sne', a, b), 1.0), ('sne', a, b)),
   (('seq', ('slt', a, b), 1.0), ('slt', a, b)),
   (('seq', ('sge', a, b), 1.0), ('sge', a, b)),
   (('sne', ('seq', a, b), 0.0), ('seq', a, b)),
   (('sne', ('sne', a, b), 0.0), ('sne', a, b)),
   (('sne', ('slt', a, b), 0.0), ('slt', a, b)),
   (('sne', ('sge', a, b), 0.0), ('sge', a, b)),
   (('seq', ('seq', a, b), 0.0), ('sne', a, b)),
   (('seq', ('sne', a, b), 0.0), ('seq', a, b)),
   (('seq', ('slt', a, b), 0.0), ('sge', a, b)),
   (('seq', ('sge', a, b), 0.0), ('slt', a, b)),
   (('sne', ('seq', a, b), 1.0), ('sne', a, b)),
   (('sne', ('sne', a, b), 1.0), ('seq', a, b)),
   (('sne', ('slt', a, b), 1.0), ('sge', a, b)),
   (('sne', ('sge', a, b), 1.0), ('slt', a, b)),
   (('fall_equal2', a, b), ('fmin', ('seq', 'a.x', 'b.x'), ('seq', 'a.y', 'b.y')), 'options->lower_vector_cmp'),
   (('fall_equal3', a, b), ('seq', ('fany_nequal3', a, b), 0.0), 'options->lower_vector_cmp'),
   (('fall_equal4', a, b), ('seq', ('fany_nequal4', a, b), 0.0), 'options->lower_vector_cmp'),
   (('fany_nequal2', a, b), ('fmax', ('sne', 'a.x', 'b.x'), ('sne', 'a.y', 'b.y')), 'options->lower_vector_cmp'),
   (('fany_nequal3', a, b), ('fsat', ('fdot3', ('sne', a, b), ('sne', a, b))), 'options->lower_vector_cmp'),
   (('fany_nequal4', a, b), ('fsat', ('fdot4', ('sne', a, b), ('sne', a, b))), 'options->lower_vector_cmp'),
   (('fne', ('fneg', a), a), ('fne', a, 0.0)),
   (('feq', ('fneg', a), a), ('feq', a, 0.0)),
   # Emulating booleans
   (('imul', ('b2i', 'a@1'), ('b2i', 'b@1')), ('b2i', ('iand', a, b))),
   (('fmul', ('b2f', 'a@1'), ('b2f', 'b@1')), ('b2f', ('iand', a, b))),
   (('fsat', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), ('b2f', ('ior', a, b))),
   (('iand', 'a@bool32', 1.0), ('b2f', a)),
   # True/False are ~0 and 0 in NIR.  b2i of True is 1, and -1 is ~0 (True).
   (('ineg', ('b2i32', 'a@32')), a),
   (('flt', ('fneg', ('b2f', 'a@1')), 0), a), # Generated by TGSI KILL_IF.
   # Comparison with the same args.  Note that these are not done for
   # the float versions because NaN always returns false on float
   # inequalities.
   (('ilt', a, a), False),
   (('ige', a, a), True),
   (('ieq', a, a), True),
   (('ine', a, a), False),
   (('ult', a, a), False),
   (('uge', a, a), True),
   # Logical and bit operations
   (('iand', a, a), a),
   (('iand', a, ~0), a),
   (('iand', a, 0), 0),
   (('ior', a, a), a),
   (('ior', a, 0), a),
   (('ior', a, True), True),
   (('ixor', a, a), 0),
   (('ixor', a, 0), a),
   (('inot', ('inot', a)), a),
   (('ior', ('iand', a, b), b), b),
   (('ior', ('ior', a, b), b), ('ior', a, b)),
   (('iand', ('ior', a, b), b), b),
   (('iand', ('iand', a, b), b), ('iand', a, b)),
   # DeMorgan's Laws
   (('iand', ('inot', a), ('inot', b)), ('inot', ('ior', a, b))),
   (('ior', ('inot', a), ('inot', b)), ('inot', ('iand', a, b))),
   # Shift optimizations
   (('ishl', 0, a), 0),
   (('ishl', a, 0), a),
   (('ishr', 0, a), 0),
   (('ishr', a, 0), a),
   (('ushr', 0, a), 0),
   (('ushr', a, 0), a),
   (('iand', 0xff, ('ushr@32', a, 24)), ('ushr', a, 24)),
   (('iand', 0xffff, ('ushr@32', a, 16)), ('ushr', a, 16)),
   (('ior', ('ishl@16', a, b), ('ushr@16', a, ('iadd', 16, ('ineg', b)))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ishl@16', a, b), ('ushr@16', a, ('isub', 16, b))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ishl@32', a, b), ('ushr@32', a, ('iadd', 32, ('ineg', b)))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ishl@32', a, b), ('ushr@32', a, ('isub', 32, b))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@16', a, b), ('ishl@16', a, ('iadd', 16, ('ineg', b)))), ('uror', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@16', a, b), ('ishl@16', a, ('isub', 16, b))), ('uror', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@32', a, b), ('ishl@32', a, ('iadd', 32, ('ineg', b)))), ('uror', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@32', a, b), ('ishl@32', a, ('isub', 32, b))), ('uror', a, b), '!options->lower_rotate'),
   (('urol@16', a, b), ('ior', ('ishl', a, b), ('ushr', a, ('isub', 16, b))), 'options->lower_rotate'),
   (('urol@32', a, b), ('ior', ('ishl', a, b), ('ushr', a, ('isub', 32, b))), 'options->lower_rotate'),
   (('uror@16', a, b), ('ior', ('ushr', a, b), ('ishl', a, ('isub', 16, b))), 'options->lower_rotate'),
   (('uror@32', a, b), ('ior', ('ushr', a, b), ('ishl', a, ('isub', 32, b))), 'options->lower_rotate'),
   # Exponential/logarithmic identities
   (('~fexp2', ('flog2', a)), a), # 2^lg2(a) = a
   (('~flog2', ('fexp2', a)), a), # lg2(2^a) = a
   (('fpow', a, b), ('fexp2', ('fmul', ('flog2', a), b)), 'options->lower_fpow'), # a^b = 2^(lg2(a)*b)
   (('~fexp2', ('fmul', ('flog2', a), b)), ('fpow', a, b), '!options->lower_fpow'), # 2^(lg2(a)*b) = a^b
   (('~fexp2', ('fadd', ('fmul', ('flog2', a), b), ('fmul', ('flog2', c), d))),
    ('~fmul', ('fpow', a, b), ('fpow', c, d)), '!options->lower_fpow'), # 2^(lg2(a)*b + lg2(c)*d) = a^b * c^d
   (('~fexp2', ('fmul', ('flog2', a), 2.0)), ('fmul', a, a)),
   (('~fexp2', ('fmul', ('flog2', a), 4.0)), ('fmul', ('fmul', a, a), ('fmul', a, a))),
   (('~fpow', a, 1.0), a),
   (('~fpow', a, 2.0), ('fmul', a, a)),
   (('~fpow', a, 4.0), ('fmul', ('fmul', a, a), ('fmul', a, a))),
   (('~fpow', 2.0, a), ('fexp2', a)),
   (('~fpow', ('fpow', a, 2.2), 0.454545), a),
   (('~fpow', ('fabs', ('fpow', a, 2.2)), 0.454545), ('fabs', a)),
   (('~fsqrt', ('fexp2', a)), ('fexp2', ('fmul', 0.5, a))),
   (('~frcp', ('fexp2', a)), ('fexp2', ('fneg', a))),
   (('~frsq', ('fexp2', a)), ('fexp2', ('fmul', -0.5, a))),
   (('~flog2', ('fsqrt', a)), ('fmul', 0.5, ('flog2', a))),
   (('~flog2', ('frcp', a)), ('fneg', ('flog2', a))),
   (('~flog2', ('frsq', a)), ('fmul', -0.5, ('flog2', a))),
   (('~flog2', ('fpow', a, b)), ('fmul', b, ('flog2', a))),
   (('~fmul', ('fexp2(is_used_once)', a), ('fexp2(is_used_once)', b)), ('fexp2', ('fadd', a, b))),
   (('bcsel', ('flt', a, 0.0), 0.0, ('fsqrt', a)), ('fsqrt', ('fmax', a, 0.0))),
   # Division and reciprocal
   (('~fdiv', 1.0, a), ('frcp', a)),
   (('fdiv', a, b), ('fmul', a, ('frcp', b)), 'options->lower_fdiv'),
   (('~frcp', ('frcp', a)), a),
   (('~frcp', ('fsqrt', a)), ('frsq', a)),
   (('fsqrt', a), ('frcp', ('frsq', a)), 'options->lower_fsqrt'),
   (('~frcp', ('frsq', a)), ('fsqrt', a), '!options->lower_fsqrt'),
   # Trig
   (('fsin', a), lowered_sincos(0.5), 'options->lower_sincos'),
   (('fcos', a), lowered_sincos(0.75), 'options->lower_sincos'),
   # Boolean simplifications
   (('i2b32(is_used_by_if)', a), ('ine32', a, 0)),
   (('i2b1(is_used_by_if)', a), ('ine', a, 0)),
   (('ieq', a, True), a),
   (('ine(is_not_used_by_if)', a, True), ('inot', a)),
   (('ine', a, False), a),
   (('ieq(is_not_used_by_if)', a, False), ('inot', 'a')),
   (('bcsel', a, True, False), a),
   (('bcsel', a, False, True), ('inot', a)),
   (('bcsel@32', a, 1.0, 0.0), ('b2f', a)),
   (('bcsel@32', a, 0.0, 1.0), ('b2f', ('inot', a))),
   (('bcsel@32', a, -1.0, -0.0), ('fneg', ('b2f', a))),
   (('bcsel@32', a, -0.0, -1.0), ('fneg', ('b2f', ('inot', a)))),
   (('bcsel', True, b, c), b),
   (('bcsel', False, b, c), c),
   (('bcsel', a, ('b2f(is_used_once)', 'b@32'), ('b2f', 'c@32')), ('b2f', ('bcsel', a, b, c))),

   (('bcsel', a, b, b), b),
   (('~fcsel', a, b, b), b),

   # D3D Boolean emulation
   (('bcsel', a, -1, 0), ('ineg', ('b2i', 'a@1'))),
   (('bcsel', a, 0, -1), ('ineg', ('b2i', ('inot', a)))),
   (('iand', ('ineg', ('b2i', 'a@1')), ('ineg', ('b2i', 'b@1'))),
    ('ineg', ('b2i', ('iand', a, b)))),
   (('ior', ('ineg', ('b2i','a@1')), ('ineg', ('b2i', 'b@1'))),
    ('ineg', ('b2i', ('ior', a, b)))),
   (('ieq', ('ineg', ('b2i', 'a@1')), 0), ('inot', a)),
   (('ieq', ('ineg', ('b2i', 'a@1')), -1), a),
   (('ine', ('ineg', ('b2i', 'a@1')), 0), a),
   (('ine', ('ineg', ('b2i', 'a@1')), -1), ('inot', a)),
   (('iand', ('ineg', ('b2i', a)), 1.0), ('b2f', a)),
   (('iand', ('ineg', ('b2i', a)), 1), ('b2i', a)),

   # SM5 32-bit shifts are defined to use the 5 least significant bits
   (('ishl', 'a@32', ('iand', 31, b)), ('ishl', a, b)),
   (('ishr', 'a@32', ('iand', 31, b)), ('ishr', a, b)),
   (('ushr', 'a@32', ('iand', 31, b)), ('ushr', a, b)),

   # Conversions
   (('i2b32', ('b2i', 'a@32')), a),
   (('f2i', ('ftrunc', a)), ('f2i', a)),
   (('f2u', ('ftrunc', a)), ('f2u', a)),
   (('i2b', ('ineg', a)), ('i2b', a)),
   (('i2b', ('iabs', a)), ('i2b', a)),
   (('inot', ('f2b1', a)), ('feq', a, 0.0)),

   # Ironically, mark these as imprecise because removing the conversions may
   # preserve more precision than doing the conversions (e.g.,
   # uint(float(0x81818181u)) == 0x81818200).
   (('~f2i32', ('i2f', 'a@32')), a),
   (('~f2i32', ('u2f', 'a@32')), a),
   (('~f2u32', ('i2f', 'a@32')), a),
   (('~f2u32', ('u2f', 'a@32')), a),

   (('ffloor', 'a(is_integral)'), a),
   (('fceil', 'a(is_integral)'), a),
   (('ftrunc', 'a(is_integral)'), a),
   # fract(x) = x - floor(x), so fract(NaN) = NaN
   (('~ffract', 'a(is_integral)'), 0.0),
   (('fabs', 'a(is_not_negative)'), a),
   (('iabs', 'a(is_not_negative)'), a),
   (('fsat', 'a(is_not_positive)'), 0.0),

   # Section 5.4.1 (Conversion and Scalar Constructors) of the GLSL 4.60 spec
   # says:
   #
   #    It is undefined to convert a negative floating-point value to an
   #    uint.
   #
   # Assuming that (uint)some_float behaves like (uint)(int)some_float allows
   # some optimizations in the i965 backend to proceed.
   (('ige', ('f2u', a), b), ('ige', ('f2i', a), b)),
   (('ige', b, ('f2u', a)), ('ige', b, ('f2i', a))),
   (('ilt', ('f2u', a), b), ('ilt', ('f2i', a), b)),
   (('ilt', b, ('f2u', a)), ('ilt', b, ('f2i', a))),

   (('~fmin', 'a(is_not_negative)', 1.0), ('fsat', a), '!options->lower_fsat'),

   # The result of the multiply must be in [-1, 0], so the result of the ffma
   # must be in [0, 1].
   (('flt', ('fadd', ('fmul', ('fsat', a), ('fneg', ('fsat', a))), 1.0), 0.0), False),
   (('flt', ('fadd', ('fneg', ('fmul', ('fsat', a), ('fsat', a))), 1.0), 0.0), False),
   (('fmax', ('fadd', ('fmul', ('fsat', a), ('fneg', ('fsat', a))), 1.0), 0.0), ('fadd', ('fmul', ('fsat', a), ('fneg', ('fsat', a))), 1.0)),
   (('fmax', ('fadd', ('fneg', ('fmul', ('fsat', a), ('fsat', a))), 1.0), 0.0), ('fadd', ('fneg', ('fmul', ('fsat', a), ('fsat', a))), 1.0)),

   (('fne', 'a(is_not_zero)', 0.0), True),
   (('feq', 'a(is_not_zero)', 0.0), False),

   # In this chart, + means value > 0 and - means value < 0.
   #
   # + >= + -> unknown  0 >= + -> false    - >= + -> false
   # + >= 0 -> true     0 >= 0 -> true     - >= 0 -> false
   # + >= - -> true     0 >= - -> true     - >= - -> unknown
   #
   # Using grouping conceptually similar to a Karnaugh map...
   #
   # (+ >= 0, + >= -, 0 >= 0, 0 >= -) == (is_not_negative >= is_not_positive) -> true
   # (0 >= +, - >= +) == (is_not_positive >= gt_zero) -> false
   # (- >= +, - >= 0) == (lt_zero >= is_not_negative) -> false
   #
   # The flt / ilt cases just invert the expected result.
   #
   # The results expecting true must be marked imprecise.  The results
   # expecting false are fine because NaN compared >= or < anything is false.

   (('~fge', 'a(is_not_negative)', 'b(is_not_positive)'), True),
   (('fge', 'a(is_not_positive)', 'b(is_gt_zero)'), False),
   (('fge', 'a(is_lt_zero)', 'b(is_not_negative)'), False),

   (('flt', 'a(is_not_negative)', 'b(is_not_positive)'), False),
   (('~flt', 'a(is_not_positive)', 'b(is_gt_zero)'), True),
   (('~flt', 'a(is_lt_zero)', 'b(is_not_negative)'), True),

   (('ine', 'a(is_not_zero)', 0), True),
   (('ieq', 'a(is_not_zero)', 0), False),

   (('ige', 'a(is_not_negative)', 'b(is_not_positive)'), True),
   (('ige', 'a(is_not_positive)', 'b(is_gt_zero)'), False),
   (('ige', 'a(is_lt_zero)', 'b(is_not_negative)'), False),

   (('ilt', 'a(is_not_negative)', 'b(is_not_positive)'), False),
   (('ilt', 'a(is_not_positive)', 'b(is_gt_zero)'), True),
   (('ilt', 'a(is_lt_zero)', 'b(is_not_negative)'), True),

   (('ult', 0, 'a(is_gt_zero)'), True),

   # Packing and then unpacking does nothing
   (('unpack_64_2x32_split_x', ('pack_64_2x32_split', a, b)), a),
   (('unpack_64_2x32_split_y', ('pack_64_2x32_split', a, b)), b),
   (('pack_64_2x32_split', ('unpack_64_2x32_split_x', a),
     ('unpack_64_2x32_split_y', a)), a),

   # Comparing two halves of an unpack separately.  While this optimization
   # should be correct for non-constant values, it's less obvious that it's
   # useful in that case.  For constant values, the pack will fold and we're
   # guaranteed to reduce the whole tree to one instruction.
   (('iand', ('ieq', ('unpack_32_2x16_split_x', a), '#b'),
             ('ieq', ('unpack_32_2x16_split_y', a), '#c')),
    ('ieq', a, ('pack_32_2x16_split', b, c))),

   # Byte extraction
   (('ushr', 'a@16',  8), ('extract_u8', a, 1), '!options->lower_extract_byte'),
   (('ushr', 'a@32', 24), ('extract_u8', a, 3), '!options->lower_extract_byte'),
   (('ushr', 'a@64', 56), ('extract_u8', a, 7), '!options->lower_extract_byte'),
   (('ishr', 'a@16',  8), ('extract_i8', a, 1), '!options->lower_extract_byte'),
   (('ishr', 'a@32', 24), ('extract_i8', a, 3), '!options->lower_extract_byte'),
   (('ishr', 'a@64', 56), ('extract_i8', a, 7), '!options->lower_extract_byte'),
   (('iand', 0xff, a), ('extract_u8', a, 0), '!options->lower_extract_byte'),

   # Useless masking before unpacking
   (('unpack_half_2x16_split_x', ('iand', a, 0xffff)), ('unpack_half_2x16_split_x', a)),
   (('unpack_32_2x16_split_x', ('iand', a, 0xffff)), ('unpack_32_2x16_split_x', a)),
   (('unpack_64_2x32_split_x', ('iand', a, 0xffffffff)), ('unpack_64_2x32_split_x', a)),
   (('unpack_half_2x16_split_y', ('iand', a, 0xffff0000)), ('unpack_half_2x16_split_y', a)),
   (('unpack_32_2x16_split_y', ('iand', a, 0xffff0000)), ('unpack_32_2x16_split_y', a)),
   (('unpack_64_2x32_split_y', ('iand', a, 0xffffffff00000000)), ('unpack_64_2x32_split_y', a)),
])

# After the ('extract_u8', a, 0) pattern, above, triggers, there will be
# patterns like those below.
for op in ('ushr', 'ishr'):
   optimizations.extend([(('extract_u8', (op, 'a@16', 8), 0), ('extract_u8', a, 1))])
   optimizations.extend([(('extract_u8', (op, 'a@32', 8 * i), 0), ('extract_u8', a, i)) for i in range(1, 4)])
   optimizations.extend([(('extract_u8', (op, 'a@64', 8 * i), 0), ('extract_u8', a, i)) for i in range(1, 8)])

optimizations.extend([(('extract_u8', ('extract_u16', a, 1), 0), ('extract_u8', a, 2))])

# After the ('extract_[iu]8', a, 3) patterns, above, trigger, there will be
# patterns like those below.
for op in ('extract_u8', 'extract_i8'):
   optimizations.extend([((op, ('ishl', 'a@16', 8), 1), (op, a, 0))])
   optimizations.extend([((op, ('ishl', 'a@32', 24 - 8 * i), 3), (op, a, i)) for i in range(2, -1, -1)])
   optimizations.extend([((op, ('ishl', 'a@64', 56 - 8 * i), 7), (op, a, i)) for i in range(6, -1, -1)])

optimizations.extend([
   # Word extraction
   (('ushr', ('ishl', 'a@32', 16), 16), ('extract_u16', a, 0), '!options->lower_extract_word'),
   (('ushr', 'a@32', 16), ('extract_u16', a, 1), '!options->lower_extract_word'),
   (('ishr', ('ishl', 'a@32', 16), 16), ('extract_i16', a, 0), '!options->lower_extract_word'),
   (('ishr', 'a@32', 16), ('extract_i16', a, 1), '!options->lower_extract_word'),
   (('iand', 0xffff, a), ('extract_u16', a, 0), '!options->lower_extract_word'),

   # Subtracts
   (('ussub_4x8', a, 0), a),
   (('ussub_4x8', a, ~0), 0),
   # Lower all Subtractions first - they can get recombined later
   (('fsub', a, b), ('fadd', a, ('fneg', b))),
   (('isub', a, b), ('iadd', a, ('ineg', b))),

   # Propagate negation up multiplication chains
   (('fmul(is_used_by_non_fsat)', ('fneg', a), b), ('fneg', ('fmul', a, b))),
   (('imul', ('ineg', a), b), ('ineg', ('imul', a, b))),

   # Propagate constants up multiplication chains
   (('~fmul(is_used_once)', ('fmul(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('fmul', ('fmul', a, c), b)),
   (('imul(is_used_once)', ('imul(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('imul', ('imul', a, c), b)),
   (('~fadd(is_used_once)', ('fadd(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('fadd', ('fadd', a, c), b)),
   (('iadd(is_used_once)', ('iadd(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('iadd', ('iadd', a, c), b)),

   # Reassociate constants in add/mul chains so they can be folded together.
   # For now, we mostly only handle cases where the constants are separated by
   # a single non-constant.  We could do better eventually.
   (('~fmul', '#a', ('fmul', 'b(is_not_const)', '#c')), ('fmul', ('fmul', a, c), b)),
   (('imul', '#a', ('imul', 'b(is_not_const)', '#c')), ('imul', ('imul', a, c), b)),
   (('~fadd', '#a', ('fadd', 'b(is_not_const)', '#c')), ('fadd', ('fadd', a, c), b)),
   (('~fadd', '#a', ('fneg', ('fadd', 'b(is_not_const)', '#c'))), ('fadd', ('fadd', a, ('fneg', c)), ('fneg', b))),
   (('iadd', '#a', ('iadd', 'b(is_not_const)', '#c')), ('iadd', ('iadd', a, c), b)),

   # Drop mul-div by the same value when there's no wrapping.
   (('idiv', ('imul(no_signed_wrap)', a, b), b), a),

   # By definition...
   (('bcsel', ('ige', ('find_lsb', a), 0), ('find_lsb', a), -1), ('find_lsb', a)),
   (('bcsel', ('ige', ('ifind_msb', a), 0), ('ifind_msb', a), -1), ('ifind_msb', a)),
   (('bcsel', ('ige', ('ufind_msb', a), 0), ('ufind_msb', a), -1), ('ufind_msb', a)),

   (('bcsel', ('ine', a, 0), ('find_lsb', a), -1), ('find_lsb', a)),
   (('bcsel', ('ine', a, 0), ('ifind_msb', a), -1), ('ifind_msb', a)),
   (('bcsel', ('ine', a, 0), ('ufind_msb', a), -1), ('ufind_msb', a)),

   (('bcsel', ('ine', a, -1), ('ifind_msb', a), -1), ('ifind_msb', a)),

   # Misc. lowering
   (('fmod@16', a, b), ('fsub', a, ('fmul', b, ('ffloor', ('fdiv', a, b)))), 'options->lower_fmod'),
   (('fmod@32', a, b), ('fsub', a, ('fmul', b, ('ffloor', ('fdiv', a, b)))), 'options->lower_fmod'),
   (('frem', a, b), ('fsub', a, ('fmul', b, ('ftrunc', ('fdiv', a, b)))), 'options->lower_fmod'),
   (('uadd_carry@32', a, b), ('b2i', ('ult', ('iadd', a, b), a)), 'options->lower_uadd_carry'),
   (('usub_borrow@32', a, b), ('b2i', ('ult', a, b)), 'options->lower_usub_borrow'),
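   # The uadd_carry lowering detects unsigned wraparound: the 32-bit sum is
   # less (unsigned) than an input exactly when it wrapped, e.g.
   # 0xffffffff + 1 == 0.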

   (('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
    ('bcsel', ('ult', 31, 'bits'), 'insert',
              ('bfi', ('bfm', 'bits', 'offset'), 'insert', 'base')),
    'options->lower_bitfield_insert'),
   (('ihadd', a, b), ('iadd', ('iand', a, b), ('ishr', ('ixor', a, b), 1)), 'options->lower_hadd'),
   (('uhadd', a, b), ('iadd', ('iand', a, b), ('ushr', ('ixor', a, b), 1)), 'options->lower_hadd'),
   (('irhadd', a, b), ('isub', ('ior', a, b), ('ishr', ('ixor', a, b), 1)), 'options->lower_hadd'),
   (('urhadd', a, b), ('isub', ('ior', a, b), ('ushr', ('ixor', a, b), 1)), 'options->lower_hadd'),
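   # The hadd/rhadd lowerings average without overflowing the addition:
   # a + b == 2*(a & b) + (a ^ b) == 2*(a | b) - (a ^ b), so the round-down
   # average is (a & b) + ((a ^ b) >> 1) and the round-up average is
   # (a | b) - ((a ^ b) >> 1).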
   (('uadd_sat', a, b), ('bcsel', ('ult', ('iadd', a, b), a), -1, ('iadd', a, b)), 'options->lower_add_sat'),
   (('usub_sat', a, b), ('bcsel', ('ult', a, b), 0, ('isub', a, b)), 'options->lower_add_sat'),

   # Alternative lowering that doesn't rely on bfi.
   (('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
    ('bcsel', ('ult', 31, 'bits'),
     'insert',
     (('ior',
       ('iand', 'base', ('inot', ('ishl', ('isub', ('ishl', 1, 'bits'), 1), 'offset'))),
       ('iand', ('ishl', 'insert', 'offset'), ('ishl', ('isub', ('ishl', 1, 'bits'), 1), 'offset'))))),
    'options->lower_bitfield_insert_to_shifts'),
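   # ('ishl', ('isub', ('ishl', 1, 'bits'), 1), 'offset') builds the field
   # mask ((1 << bits) - 1) << offset; 'base' is cleared where the mask is
   # set and the shifted 'insert' is merged in there.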

   # Alternative lowering that uses bitfield_select.
   (('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
    ('bcsel', ('ult', 31, 'bits'), 'insert',
     ('bitfield_select', ('bfm', 'bits', 'offset'), ('ishl', 'insert', 'offset'), 'base')),
    'options->lower_bitfield_insert_to_bitfield_select'),

   (('ibitfield_extract', 'value', 'offset', 'bits'),
    ('bcsel', ('ult', 31, 'bits'), 'value',
     ('ibfe', 'value', 'offset', 'bits')),
    'options->lower_bitfield_extract'),

   (('ubitfield_extract', 'value', 'offset', 'bits'),
    ('bcsel', ('ult', 31, 'bits'), 'value',
     ('ubfe', 'value', 'offset', 'bits')),
    'options->lower_bitfield_extract'),

   # Note that these opcodes are defined to only use the five least
   # significant bits of 'offset' and 'bits', so a masking iand with 31 on
   # either source is redundant.
   (('ubfe', 'value', 'offset', ('iand', 31, 'bits')), ('ubfe', 'value', 'offset', 'bits')),
   (('ubfe', 'value', ('iand', 31, 'offset'), 'bits'), ('ubfe', 'value', 'offset', 'bits')),
   (('ibfe', 'value', 'offset', ('iand', 31, 'bits')), ('ibfe', 'value', 'offset', 'bits')),
   (('ibfe', 'value', ('iand', 31, 'offset'), 'bits'), ('ibfe', 'value', 'offset', 'bits')),
   (('bfm', 'bits', ('iand', 31, 'offset')), ('bfm', 'bits', 'offset')),
   (('bfm', ('iand', 31, 'bits'), 'offset'), ('bfm', 'bits', 'offset')),

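   # Sign-extending extract via shifts: shift the field up so its top bit
   # lands in bit 31, then arithmetic-shift back down.  E.g. offset=4, bits=8:
   # value << 20 moves the field to bits [24,31] and ishr by 24 sign-extends
   # it into place.  bits == 0 must return 0 explicitly since the general form
   # would shift by 32, which is undefined.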
   (('ibitfield_extract', 'value', 'offset', 'bits'),
    ('bcsel', ('ieq', 0, 'bits'),
     0,
     ('ishr',
      ('ishl', 'value', ('isub', ('isub', 32, 'bits'), 'offset')),
      ('isub', 32, 'bits'))),
    'options->lower_bitfield_extract_to_shifts'),

   (('ubitfield_extract', 'value', 'offset', 'bits'),
    ('iand',
     ('ushr', 'value', 'offset'),
     ('bcsel', ('ieq', 'bits', 32),
      0xffffffff,
      ('isub', ('ishl', 1, 'bits'), 1))),
    'options->lower_bitfield_extract_to_shifts'),

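   # On negative inputs, ifind_msb looks for the highest 0 bit, which is the
   # highest 1 bit of the complement, so the sign handling folds into a single
   # ufind_msb.  find_lsb uses the two's-complement identity value & -value to
   # isolate the lowest set bit; the MSB of that one-bit value is the LSB of
   # the original.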
   (('ifind_msb', 'value'),
    ('ufind_msb', ('bcsel', ('ilt', 'value', 0), ('inot', 'value'), 'value')),
    'options->lower_ifind_msb'),

   (('find_lsb', 'value'),
    ('ufind_msb', ('iand', 'value', ('ineg', 'value'))),
    'options->lower_find_lsb'),

   (('extract_i8', a, 'b@32'),
    ('ishr', ('ishl', a, ('imul', ('isub', 3, b), 8)), 24),
    'options->lower_extract_byte'),

   (('extract_u8', a, 'b@32'),
    ('iand', ('ushr', a, ('imul', b, 8)), 0xff),
    'options->lower_extract_byte'),

   (('extract_i16', a, 'b@32'),
    ('ishr', ('ishl', a, ('imul', ('isub', 1, b), 16)), 16),
    'options->lower_extract_word'),

   (('extract_u16', a, 'b@32'),
    ('iand', ('ushr', a, ('imul', b, 16)), 0xffff),
    'options->lower_extract_word'),

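   # Pack/unpack lowerings: unorm maps [0,1] onto the full unsigned range via
   # round(fsat(v) * (2^N - 1)); snorm maps [-1,1] onto the signed range via
   # round(clamp(v, -1, 1) * (2^(N-1) - 1)).  Unpacking divides back out and,
   # for snorm, re-clamps because the most negative integer maps slightly
   # below -1.0.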
   (('pack_unorm_2x16', 'v'),
    ('pack_uvec2_to_uint',
     ('f2u32', ('fround_even', ('fmul', ('fsat', 'v'), 65535.0)))),
    'options->lower_pack_unorm_2x16'),

   (('pack_unorm_4x8', 'v'),
    ('pack_uvec4_to_uint',
     ('f2u32', ('fround_even', ('fmul', ('fsat', 'v'), 255.0)))),
    'options->lower_pack_unorm_4x8'),

   (('pack_snorm_2x16', 'v'),
    ('pack_uvec2_to_uint',
     ('f2i32', ('fround_even', ('fmul', ('fmin', 1.0, ('fmax', -1.0, 'v')), 32767.0)))),
    'options->lower_pack_snorm_2x16'),

   (('pack_snorm_4x8', 'v'),
    ('pack_uvec4_to_uint',
     ('f2i32', ('fround_even', ('fmul', ('fmin', 1.0, ('fmax', -1.0, 'v')), 127.0)))),
    'options->lower_pack_snorm_4x8'),

   (('unpack_unorm_2x16', 'v'),
    ('fdiv', ('u2f32', ('vec2', ('extract_u16', 'v', 0),
                                ('extract_u16', 'v', 1))),
             65535.0),
    'options->lower_unpack_unorm_2x16'),

   (('unpack_unorm_4x8', 'v'),
    ('fdiv', ('u2f32', ('vec4', ('extract_u8', 'v', 0),
                                ('extract_u8', 'v', 1),
                                ('extract_u8', 'v', 2),
                                ('extract_u8', 'v', 3))),
             255.0),
    'options->lower_unpack_unorm_4x8'),

   (('unpack_snorm_2x16', 'v'),
    ('fmin', 1.0, ('fmax', -1.0, ('fdiv', ('i2f', ('vec2', ('extract_i16', 'v', 0),
                                                           ('extract_i16', 'v', 1))),
                                          32767.0))),
    'options->lower_unpack_snorm_2x16'),

   (('unpack_snorm_4x8', 'v'),
    ('fmin', 1.0, ('fmax', -1.0, ('fdiv', ('i2f', ('vec4', ('extract_i8', 'v', 0),
                                                           ('extract_i8', 'v', 1),
                                                           ('extract_i8', 'v', 2),
                                                           ('extract_i8', 'v', 3))),
                                          127.0))),
    'options->lower_unpack_snorm_4x8'),

   (('pack_half_2x16_split', 'a@32', 'b@32'),
    ('ior', ('ishl', ('u2u32', ('f2f16', b)), 16), ('u2u32', ('f2f16', a))),
    'options->lower_pack_half_2x16_split'),

   (('unpack_half_2x16_split_x', 'a@32'),
    ('f2f32', ('u2u16', a)),
    'options->lower_unpack_half_2x16_split'),

   (('unpack_half_2x16_split_y', 'a@32'),
    ('f2f32', ('u2u16', ('ushr', a, 16))),
    'options->lower_unpack_half_2x16_split'),

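   # isign clamps to [-1, 1] with imin/imax; fsign computes
   # b2f(0.0 < a) - b2f(a < 0.0), which yields -1.0, 0.0, or 1.0 from a pair
   # of comparisons instead of a dedicated sign opcode.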
   (('isign', a), ('imin', ('imax', a, -1), 1), 'options->lower_isign'),
   (('fsign', a), ('fsub', ('b2f', ('flt', 0.0, a)), ('b2f', ('flt', a, 0.0))), 'options->lower_fsign'),

   # Address/offset calculations:
   # Drivers supporting imul24 should use the nir_lower_amul() pass; this
   # rule converts everyone else to imul:
   (('amul', a, b), ('imul', a, b), '!options->has_imul24'),

   (('imad24_ir3', a, b, 0), ('imul24', a, b)),
   (('imad24_ir3', a, 0, c), (c)),
   (('imad24_ir3', a, 1, c), ('iadd', a, c)),

   # If the first two srcs are const, crack apart the imad so constant folding
   # can clean up the imul:
   # TODO: ffma should probably get a similar rule.
   (('imad24_ir3', '#a', '#b', c), ('iadd', ('imul', a, b), c)),

   # These will turn 24b address/offset calculations back into 32b shifts, but
   # it should be safe to get back some of the bits of precision that we
   # already decided were not necessary:
   (('imul24', a, '#b@32(is_pos_power_of_two)'), ('ishl', a, ('find_lsb', b)), '!options->lower_bitops'),
   (('imul24', a, '#b@32(is_neg_power_of_two)'), ('ineg', ('ishl', a, ('find_lsb', ('iabs', b)))), '!options->lower_bitops'),
   (('imul24', a, 0), (0)),
])

# bit_size dependent lowerings
for bit_size in [8, 16, 32, 64]:
   # convenience constants
   intmax = (1 << (bit_size - 1)) - 1
   intmin = 1 << (bit_size - 1)

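   # For iadd_sat, a positive b can only overflow upward, detected by the
   # wrapped sum coming out less than a, and a non-positive b only downward,
   # detected by the sum coming out greater than a; isub_sat is analogous
   # with the sign of b flipped.  intmin is written as the unsigned pattern
   # 1 << (bit_size - 1), which has the same bits as the most negative signed
   # value at this width.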
   optimizations += [
      (('iadd_sat@' + str(bit_size), a, b),
       ('bcsel', ('ige', b, 1), ('bcsel', ('ilt', ('iadd', a, b), a), intmax, ('iadd', a, b)),
        ('bcsel', ('ilt', a, ('iadd', a, b)), intmin, ('iadd', a, b))), 'options->lower_add_sat'),
      (('isub_sat@' + str(bit_size), a, b),
       ('bcsel', ('ilt', b, 0), ('bcsel', ('ilt', ('isub', a, b), a), intmax, ('isub', a, b)),
        ('bcsel', ('ilt', a, ('isub', a, b)), intmin, ('isub', a, b))), 'options->lower_add_sat'),
   ]

invert = OrderedDict([('feq', 'fne'), ('fne', 'feq'), ('fge', 'flt'), ('flt', 'fge')])

for left, right in itertools.combinations_with_replacement(invert.keys(), 2):
   optimizations.append((('inot', ('ior(is_used_once)', (left, a, b), (right, c, d))),
                         ('iand', (invert[left], a, b), (invert[right], c, d))))
   optimizations.append((('inot', ('iand(is_used_once)', (left, a, b), (right, c, d))),
                         ('ior', (invert[left], a, b), (invert[right], c, d))))

# Optimize x2bN(b2x(x)) -> x
for size in type_sizes('bool'):
   aN = 'a@' + str(size)
   f2bN = 'f2b' + str(size)
   i2bN = 'i2b' + str(size)
   optimizations.append(((f2bN, ('b2f', aN)), a))
   optimizations.append(((i2bN, ('b2i', aN)), a))

# Optimize x2yN(b2x(x)) -> b2y
for x, y in itertools.product(['f', 'u', 'i'], ['f', 'u', 'i']):
   if x != 'f' and y != 'f' and x != y:
      continue

   b2x = 'b2f' if x == 'f' else 'b2i'
   b2y = 'b2f' if y == 'f' else 'b2i'
   x2yN = '{}2{}'.format(x, y)
   optimizations.append(((x2yN, (b2x, a)), (b2y, a)))

# Optimize away x2xN(a@N)
for t in ['int', 'uint', 'float']:
   for N in type_sizes(t):
      x2xN = '{0}2{0}{1}'.format(t[0], N)
      aN = 'a@{0}'.format(N)
      optimizations.append(((x2xN, aN), a))

# Optimize x2xN(y2yM(a@P)) -> y2yN(a) for integers
# In particular, we can optimize away everything except up-cast of down-cast
# and up-casts where the type differs from the other cast
for N, M in itertools.product(type_sizes('uint'), type_sizes('uint')):
   if N < M:
      # The outer cast is a down-cast.  It doesn't matter what the size of the
      # argument of the inner cast is because we'll never be in the up-cast of
      # down-cast case.  Regardless of types, we'll always end up with y2yN
      # in the end.
      for x, y in itertools.product(['i', 'u'], ['i', 'u']):
         x2xN = '{0}2{0}{1}'.format(x, N)
         y2yM = '{0}2{0}{1}'.format(y, M)
         y2yN = '{0}2{0}{1}'.format(y, N)
         optimizations.append(((x2xN, (y2yM, a)), (y2yN, a)))
   elif N > M:
      # If the outer cast is an up-cast, we have to be more careful about the
      # size of the argument of the inner cast and with types.  In this case,
      # the type is always the type of the up-cast, which is given by the
      # outer cast.
      for P in type_sizes('uint'):
         # We can't optimize away up-cast of down-cast.
         if M < P:
            continue

         # Because we're doing an up-cast of an up-cast, the types always have
         # to match between the two casts.
         for x in ['i', 'u']:
            x2xN = '{0}2{0}{1}'.format(x, N)
            x2xM = '{0}2{0}{1}'.format(x, M)
            aP = 'a@{0}'.format(P)
            optimizations.append(((x2xN, (x2xM, aP)), (x2xN, a)))
   else:
      # The N == M case is handled by other optimizations
      pass
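# For example, u2u8(i2i16(a@32)) becomes i2i8(a) (a down-cast keeps only the
# low bits regardless of the inner cast), and i2i32(i2i16(a@8)) becomes
# i2i32(a) (the matching sign-extension is done in one step), but
# u2u32(i2i16(a@8)) cannot be reduced to a single cast.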

# Optimize comparisons with up-casts
for t in ['int', 'uint', 'float']:
   for N, M in itertools.product(type_sizes(t), repeat=2):
      if N == 1 or N >= M:
         continue

      x2xM = '{0}2{0}{1}'.format(t[0], M)
      x2xN = '{0}2{0}{1}'.format(t[0], N)
      aN = 'a@' + str(N)
      bN = 'b@' + str(N)
      xeq = 'feq' if t == 'float' else 'ieq'
      xne = 'fne' if t == 'float' else 'ine'
      xge = '{0}ge'.format(t[0])
      xlt = '{0}lt'.format(t[0])

      # Up-casts are lossless so for correctly signed comparisons of
      # up-casted values we can do the comparison at the largest of the two
      # original sizes and drop one or both of the casts.  (We have
      # optimizations to drop the no-op casts which this may generate.)
      for P in type_sizes(t):
         if P == 1 or P > N:
            continue

         bP = 'b@' + str(P)
         optimizations += [
            ((xeq, (x2xM, aN), (x2xM, bP)), (xeq, a, (x2xN, b))),
            ((xne, (x2xM, aN), (x2xM, bP)), (xne, a, (x2xN, b))),
            ((xge, (x2xM, aN), (x2xM, bP)), (xge, a, (x2xN, b))),
            ((xlt, (x2xM, aN), (x2xM, bP)), (xlt, a, (x2xN, b))),
            ((xge, (x2xM, bP), (x2xM, aN)), (xge, (x2xN, b), a)),
            ((xlt, (x2xM, bP), (x2xM, aN)), (xlt, (x2xN, b), a)),
         ]

      # The next bit doesn't work on floats because the range checks would
      # get way too complicated.
      if t in ['int', 'uint']:
         if t == 'int':
            xN_min = -(1 << (N - 1))
            xN_max = (1 << (N - 1)) - 1
         elif t == 'uint':
            xN_min = 0
            xN_max = (1 << N) - 1
         else:
            assert False

         # If we're up-casting and comparing to a constant, we can unfold
         # the comparison into a comparison with the shrunk down constant
         # and a check that the constant fits in the smaller bit size.
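         # For example, with unsigned N=16, M=32: u2u32(a@16) == 0x12345678
         # becomes (a == 0x5678) && (0x00005678 == 0x12345678); the second
         # conjunct constant-folds to false, eliminating the comparison
         # entirely.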
         optimizations += [
            ((xeq, (x2xM, aN), '#b'),
             ('iand', (xeq, a, (x2xN, b)), (xeq, (x2xM, (x2xN, b)), b))),
            ((xne, (x2xM, aN), '#b'),
             ('ior', (xne, a, (x2xN, b)), (xne, (x2xM, (x2xN, b)), b))),
            ((xlt, (x2xM, aN), '#b'),
             ('iand', (xlt, xN_min, b),
              ('ior', (xlt, xN_max, b), (xlt, a, (x2xN, b))))),
            ((xlt, '#a', (x2xM, bN)),
             ('iand', (xlt, a, xN_max),
              ('ior', (xlt, a, xN_min), (xlt, (x2xN, a), b)))),
            ((xge, (x2xM, aN), '#b'),
             ('iand', (xge, xN_max, b),
              ('ior', (xge, xN_min, b), (xge, a, (x2xN, b))))),
            ((xge, '#a', (x2xM, bN)),
             ('iand', (xge, a, xN_min),
              ('ior', (xge, a, xN_max), (xge, (x2xN, a), b)))),
         ]

def fexp2i(exp, bits):
   # We assume that exp is already in the right range.
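   # This builds 2.0^exp directly from its IEEE bit pattern by placing the
   # biased exponent in the exponent field: bias 15/127/1023 and mantissa
   # width 10/23/52 for 16/32/64 bits.  (The 64-bit case shifts within the
   # high dword, hence 20 rather than 52.)  For example, at 32 bits exp=0
   # gives (0 + 127) << 23 == 0x3f800000, which is 1.0f.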
   if bits == 16:
      return ('i2i16', ('ishl', ('iadd', exp, 15), 10))
   elif bits == 32:
      return ('ishl', ('iadd', exp, 127), 23)
   elif bits == 64:
      return ('pack_64_2x32_split', 0, ('ishl', ('iadd', exp, 1023), 20))
   else:
      assert False

def ldexp(f, exp, bits):
   # First, we clamp exp to a reasonable range.  The maximum possible range
   # for a normal exponent is [-126, 127] and, throwing in denormals, you get
   # a maximum range of [-149, 127].  This means that we can potentially have
   # a swing of +-276.  If you start with FLT_MAX, you actually have to do
   # ldexp(FLT_MAX, -278) to get it to flush all the way to zero.  The GLSL
   # spec, on the other hand, only requires that we handle an exponent value
   # in the range [-126, 128].  This implementation is *mostly* correct; it
   # handles a range on exp of [-252, 254] which allows you to create any
   # value (including denorms if the hardware supports it) and to adjust the
   # exponent of any normal value to anything you want.
   if bits == 16:
      exp = ('imin', ('imax', exp, -28), 30)
   elif bits == 32:
      exp = ('imin', ('imax', exp, -252), 254)
   elif bits == 64:
      exp = ('imin', ('imax', exp, -2044), 2046)
   else:
      assert False

   # Now we compute two powers of 2, one for exp/2 and one for exp-exp/2.
   # (We use ishr which isn't the same for -1, but the -1 case still works
   # since we use exp-exp/2 as the second exponent.)  While the spec
   # technically defines ldexp as f * 2.0^exp, simply multiplying once doesn't
   # work with denormals and doesn't allow for the full swing in exponents
   # that you can get with normalized values.  Instead, we create two powers
   # of two and multiply by them each in turn.  That way the effective range
   # of our exponent is doubled.
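   # For example, at 32 bits exp=254 splits into 127 + 127 (two multiplies by
   # 2.0^127) and exp=-252 splits into -126 + -126; each half stays within
   # the [-126, 127] range that fexp2i can represent as a normal float.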
   pow2_1 = fexp2i(('ishr', exp, 1), bits)
   pow2_2 = fexp2i(('isub', exp, ('ishr', exp, 1)), bits)
   return ('fmul', ('fmul', f, pow2_1), pow2_2)

optimizations += [
   (('ldexp@16', 'x', 'exp'), ldexp('x', 'exp', 16), 'options->lower_ldexp'),
   (('ldexp@32', 'x', 'exp'), ldexp('x', 'exp', 32), 'options->lower_ldexp'),
   (('ldexp@64', 'x', 'exp'), ldexp('x', 'exp', 64), 'options->lower_ldexp'),
]

# Unreal Engine 4 demo applications open-code bitfieldReverse()
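# The pattern is the classic O(log n) reversal network: swap the 16-bit
# halves, then the bytes within each half, then nibbles, bit pairs, and
# finally adjacent bits.  Recognizing the whole shift/mask chain lets us
# replace it with a single bitfield_reverse instruction where the backend
# supports one.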
def bitfield_reverse(u):
   step1 = ('ior', ('ishl', u, 16), ('ushr', u, 16))
   step2 = ('ior', ('ishl', ('iand', step1, 0x00ff00ff), 8), ('ushr', ('iand', step1, 0xff00ff00), 8))
   step3 = ('ior', ('ishl', ('iand', step2, 0x0f0f0f0f), 4), ('ushr', ('iand', step2, 0xf0f0f0f0), 4))
   step4 = ('ior', ('ishl', ('iand', step3, 0x33333333), 2), ('ushr', ('iand', step3, 0xcccccccc), 2))
   step5 = ('ior(many-comm-expr)', ('ishl', ('iand', step4, 0x55555555), 1), ('ushr', ('iand', step4, 0xaaaaaaaa), 1))

   return step5

optimizations += [(bitfield_reverse('x@32'), ('bitfield_reverse', 'x'), '!options->lower_bitfield_reverse')]

# For any float comparison operation, "cmp", if you have "a == a && a cmp b"
# then the "a == a" is redundant because it's equivalent to "a is not NaN"
# and, if a is a NaN, then the second comparison will fail anyway.
for op in ['flt', 'fge', 'feq']:
   optimizations += [
      (('iand', ('feq', a, a), (op, a, b)), ('!' + op, a, b)),
      (('iand', ('feq', a, a), (op, b, a)), ('!' + op, b, a)),
   ]

# Add optimizations to handle the case where the result of a ternary is
# compared to a constant.  This way we can take things like
#
#    (a ? 0 : 1) > 0
#
# and turn it into
#
#    a ? (0 > 0) : (1 > 0)
#
# which constant folding will eat for lunch.  The resulting ternary will
# further get cleaned up by the boolean reductions above and we will be
# left with just the original variable "a".
for op in ['flt', 'fge', 'feq', 'fne',
           'ilt', 'ige', 'ieq', 'ine', 'ult', 'uge']:
   optimizations += [
      ((op, ('bcsel', 'a', '#b', '#c'), '#d'),
       ('bcsel', 'a', (op, 'b', 'd'), (op, 'c', 'd'))),
      ((op, '#d', ('bcsel', a, '#b', '#c')),
       ('bcsel', 'a', (op, 'd', 'b'), (op, 'd', 'c'))),
   ]


# These rules distribute an operation over a bcsel with constant sources so
# that later passes can fold the constants.  For example, this converts
#
#    1 + mix(0, a - 1, condition)
#
# into
#
#    mix(1, (a-1)+1, condition)
#
# Other optimizations will rearrange the constants.
for op in ['fadd', 'fmul', 'iadd', 'imul']:
   optimizations += [
      ((op, ('bcsel(is_used_once)', a, '#b', c), '#d'), ('bcsel', a, (op, b, d), (op, c, d)))
   ]

# For derivatives in compute shaders, GLSL_NV_compute_shader_derivatives
# states:
#
#    If neither layout qualifier is specified, derivatives in compute shaders
#    return zero, which is consistent with the handling of built-in texture
#    functions like texture() in GLSL 4.50 compute shaders.
for op in ['fddx', 'fddx_fine', 'fddx_coarse',
           'fddy', 'fddy_fine', 'fddy_coarse']:
   optimizations += [
      ((op, 'a'), 0.0, 'info->stage == MESA_SHADER_COMPUTE && info->cs.derivative_group == DERIVATIVE_GROUP_NONE')
   ]

# Some optimizations for ir3-specific instructions.
optimizations += [
   # 'al * bl': If either 'al' or 'bl' is zero, return zero.
   (('umul_low', '#a(is_lower_half_zero)', 'b'), (0)),
   # '(ah * bl) << 16 + c': If either 'ah' or 'bl' is zero, return 'c'.
   (('imadsh_mix16', '#a@32(is_lower_half_zero)', 'b@32', 'c@32'), ('c')),
   (('imadsh_mix16', 'a@32', '#b@32(is_upper_half_zero)', 'c@32'), ('c')),
]

# These kinds of sequences can occur after nir_opt_peephole_select.
#
# NOTE: fadd is not handled here because that gets in the way of ffma
# generation in the i965 driver.  Instead, fadd and ffma are handled in
# late_optimizations.

for op in ['flrp']:
   optimizations += [
      (('bcsel', a, (op + '(is_used_once)', b, c, d), (op, b, c, e)), (op, b, c, ('bcsel', a, d, e))),
      (('bcsel', a, (op, b, c, d), (op + '(is_used_once)', b, c, e)), (op, b, c, ('bcsel', a, d, e))),
      (('bcsel', a, (op + '(is_used_once)', b, c, d), (op, b, e, d)), (op, b, ('bcsel', a, c, e), d)),
      (('bcsel', a, (op, b, c, d), (op + '(is_used_once)', b, e, d)), (op, b, ('bcsel', a, c, e), d)),
      (('bcsel', a, (op + '(is_used_once)', b, c, d), (op, e, c, d)), (op, ('bcsel', a, b, e), c, d)),
      (('bcsel', a, (op, b, c, d), (op + '(is_used_once)', e, c, d)), (op, ('bcsel', a, b, e), c, d)),
   ]

for op in ['fmul', 'iadd', 'imul', 'iand', 'ior', 'ixor', 'fmin', 'fmax', 'imin', 'imax', 'umin', 'umax']:
   optimizations += [
      (('bcsel', a, (op + '(is_used_once)', b, c), (op, b, 'd(is_not_const)')), (op, b, ('bcsel', a, c, d))),
      (('bcsel', a, (op + '(is_used_once)', b, 'c(is_not_const)'), (op, b, d)), (op, b, ('bcsel', a, c, d))),
      (('bcsel', a, (op, b, 'c(is_not_const)'), (op + '(is_used_once)', b, d)), (op, b, ('bcsel', a, c, d))),
      (('bcsel', a, (op, b, c), (op + '(is_used_once)', b, 'd(is_not_const)')), (op, b, ('bcsel', a, c, d))),
   ]

for op in ['fpow']:
   optimizations += [
      (('bcsel', a, (op + '(is_used_once)', b, c), (op, b, d)), (op, b, ('bcsel', a, c, d))),
      (('bcsel', a, (op, b, c), (op + '(is_used_once)', b, d)), (op, b, ('bcsel', a, c, d))),
      (('bcsel', a, (op + '(is_used_once)', b, c), (op, d, c)), (op, ('bcsel', a, b, d), c)),
      (('bcsel', a, (op, b, c), (op + '(is_used_once)', d, c)), (op, ('bcsel', a, b, d), c)),
   ]

for op in ['frcp', 'frsq', 'fsqrt', 'fexp2', 'flog2', 'fsign', 'fsin', 'fcos']:
   optimizations += [
      (('bcsel', a, (op + '(is_used_once)', b), (op, c)), (op, ('bcsel', a, b, c))),
      (('bcsel', a, (op, b), (op + '(is_used_once)', c)), (op, ('bcsel', a, b, c))),
   ]

# This section contains "late" optimizations that should be run before
# creating ffmas and calling regular optimizations for the final time.
# Optimizations should go here if they help code generation and conflict
# with the regular optimizations.
before_ffma_optimizations = [
   # Propagate constants down multiplication chains
   (('~fmul(is_used_once)', ('fmul(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('fmul', ('fmul', a, c), b)),
   (('imul(is_used_once)', ('imul(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('imul', ('imul', a, c), b)),
   (('~fadd(is_used_once)', ('fadd(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('fadd', ('fadd', a, c), b)),
   (('iadd(is_used_once)', ('iadd(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('iadd', ('iadd', a, c), b)),

   (('~fadd', ('fmul', a, b), ('fmul', a, c)), ('fmul', a, ('fadd', b, c))),
   (('iadd', ('imul', a, b), ('imul', a, c)), ('imul', a, ('iadd', b, c))),
   (('~fadd', ('fneg', a), a), 0.0),
   (('iadd', ('ineg', a), a), 0),
   (('iadd', ('ineg', a), ('iadd', a, b)), b),
   (('iadd', a, ('iadd', ('ineg', a), b)), b),
   (('~fadd', ('fneg', a), ('fadd', a, b)), b),
   (('~fadd', a, ('fadd', ('fneg', a), b)), b),

   (('~flrp@32', ('fadd(is_used_once)', a, -1.0), ('fadd(is_used_once)', a, 1.0), d), ('fadd', ('flrp', -1.0, 1.0, d), a)),
   (('~flrp@32', ('fadd(is_used_once)', a, 1.0), ('fadd(is_used_once)', a, -1.0), d), ('fadd', ('flrp', 1.0, -1.0, d), a)),
   (('~flrp@32', ('fadd(is_used_once)', a, '#b'), ('fadd(is_used_once)', a, '#c'), d), ('fadd', ('fmul', d, ('fadd', c, ('fneg', b))), ('fadd', a, b))),
]

# This section contains "late" optimizations that should be run after the
# regular optimizations have finished.  Optimizations should go here if
# they help code generation but do not necessarily produce code that is
# more easily optimizable.
late_optimizations = [
   # Most of these optimizations aren't quite safe when you get infinity or
   # NaN involved, but the first one should be fine.
   (('flt', ('fadd', a, b), 0.0), ('flt', a, ('fneg', b))),
   (('flt', ('fneg', ('fadd', a, b)), 0.0), ('flt', ('fneg', a), b)),
   (('~fge', ('fadd', a, b), 0.0), ('fge', a, ('fneg', b))),
   (('~fge', ('fneg', ('fadd', a, b)), 0.0), ('fge', ('fneg', a), b)),
   (('~feq', ('fadd', a, b), 0.0), ('feq', a, ('fneg', b))),
   (('~fne', ('fadd', a, b), 0.0), ('fne', a, ('fneg', b))),

   # nir_lower_to_source_mods will collapse this, but its existence during the
   # optimization loop can prevent other optimizations.
   (('fneg', ('fneg', a)), a),

   # Subtractions get lowered during optimization, so we need to recombine them
   (('fadd', 'a', ('fneg', 'b')), ('fsub', 'a', 'b'), '!options->lower_sub'),
   (('iadd', 'a', ('ineg', 'b')), ('isub', 'a', 'b'), '!options->lower_sub'),
   (('fneg', a), ('fsub', 0.0, a), 'options->lower_negate'),
   (('ineg', a), ('isub', 0, a), 'options->lower_negate'),

   # These are duplicated from the main optimizations table.  The late
   # patterns that rearrange expressions like x - .5 < 0 to x < .5 can create
   # new patterns like these.  The patterns that compare with zero are removed
   # because they are unlikely to be created by anything in
   # late_optimizations.
   (('flt', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('flt', a, b)),
   (('flt', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('flt', b, a)),
   (('fge', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fge', a, b)),
   (('fge', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('fge', b, a)),
   (('feq', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('feq', a, b)),
   (('fne', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fne', a, b)),

   (('fge', ('fsat(is_used_once)', a), 1.0), ('fge', a, 1.0)),
   (('flt', ('fsat(is_used_once)', a), 1.0), ('flt', a, 1.0)),

   (('~fge', ('fmin(is_used_once)', ('fadd(is_used_once)', a, b), ('fadd', c, d)), 0.0), ('iand', ('fge', a, ('fneg', b)), ('fge', c, ('fneg', d)))),

   (('flt', ('fneg', a), ('fneg', b)), ('flt', b, a)),
   (('fge', ('fneg', a), ('fneg', b)), ('fge', b, a)),
   (('feq', ('fneg', a), ('fneg', b)), ('feq', b, a)),
   (('fne', ('fneg', a), ('fneg', b)), ('fne', b, a)),
   (('flt', ('fneg', a), -1.0), ('flt', 1.0, a)),
   (('flt', -1.0, ('fneg', a)), ('flt', a, 1.0)),
   (('fge', ('fneg', a), -1.0), ('fge', 1.0, a)),
   (('fge', -1.0, ('fneg', a)), ('fge', a, 1.0)),
   (('fne', ('fneg', a), -1.0), ('fne', 1.0, a)),
   (('feq', -1.0, ('fneg', a)), ('feq', a, 1.0)),

   (('ior', a, a), a),
   (('iand', a, a), a),

   (('~fadd', ('fneg(is_used_once)', ('fsat(is_used_once)', 'a(is_not_fmul)')), 1.0), ('fsat', ('fadd', 1.0, ('fneg', a)))),

   (('fdot2', a, b), ('fdot_replicated2', a, b), 'options->fdot_replicates'),
   (('fdot3', a, b), ('fdot_replicated3', a, b), 'options->fdot_replicates'),
   (('fdot4', a, b), ('fdot_replicated4', a, b), 'options->fdot_replicates'),
   (('fdph', a, b), ('fdph_replicated', a, b), 'options->fdot_replicates'),

   (('~flrp@32', ('fadd(is_used_once)', a, b), ('fadd(is_used_once)', a, c), d), ('fadd', ('flrp', b, c, d), a)),
   (('~flrp@64', ('fadd(is_used_once)', a, b), ('fadd(is_used_once)', a, c), d), ('fadd', ('flrp', b, c, d), a)),

   (('~fadd@32', 1.0, ('fmul(is_used_once)', c, ('fadd', b, -1.0))), ('fadd', ('fadd', 1.0, ('fneg', c)), ('fmul', b, c)), 'options->lower_flrp32'),
   (('~fadd@64', 1.0, ('fmul(is_used_once)', c, ('fadd', b, -1.0))), ('fadd', ('fadd', 1.0, ('fneg', c)), ('fmul', b, c)), 'options->lower_flrp64'),

   # A similar operation could apply to any ffma(#a, b, #(-a/2)), but this
   # particular operation is common for expanding values stored in a texture
   # from [0,1] to [-1,1].
   (('~ffma@32', a, 2.0, -1.0), ('flrp', -1.0, 1.0, a), '!options->lower_flrp32'),
   (('~ffma@32', a, -2.0, -1.0), ('flrp', -1.0, 1.0, ('fneg', a)), '!options->lower_flrp32'),
   (('~ffma@32', a, -2.0, 1.0), ('flrp', 1.0, -1.0, a), '!options->lower_flrp32'),
   (('~ffma@32', a, 2.0, 1.0), ('flrp', 1.0, -1.0, ('fneg', a)), '!options->lower_flrp32'),
   (('~fadd@32', ('fmul(is_used_once)', 2.0, a), -1.0), ('flrp', -1.0, 1.0, a), '!options->lower_flrp32'),
   (('~fadd@32', ('fmul(is_used_once)', -2.0, a), -1.0), ('flrp', -1.0, 1.0, ('fneg', a)), '!options->lower_flrp32'),
   (('~fadd@32', ('fmul(is_used_once)', -2.0, a), 1.0), ('flrp', 1.0, -1.0, a), '!options->lower_flrp32'),
   (('~fadd@32', ('fmul(is_used_once)', 2.0, a), 1.0), ('flrp', 1.0, -1.0, ('fneg', a)), '!options->lower_flrp32'),

   # flrp(a, b, a)
   # a*(1-a) + b*a
   # a + -a*a + a*b (1)
   # a + a*(b - a)
   # Option 1: ffma(a, (b-a), a)
   #
   # Alternately, after (1):
   # a*(1+b) + -a*a
   # a*((1+b) + -a)
   #
   # Let b=1
   #
   # Option 2: ffma(a, 2, -(a*a))
   # Option 3: ffma(a, 2, (-a)*a)
   # Option 4: ffma(a, -a, (2*a))
   # Option 5: a * (2 - a)
   #
   # There are a lot of other possible combinations.
   (('~ffma@32', ('fadd', b, ('fneg', a)), a, a), ('flrp', a, b, a), '!options->lower_flrp32'),
   (('~ffma@32', a, 2.0, ('fneg', ('fmul', a, a))), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
   (('~ffma@32', a, 2.0, ('fmul', ('fneg', a), a)), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
   (('~ffma@32', a, ('fneg', a), ('fmul', 2.0, a)), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
   (('~fmul@32', a, ('fadd', 2.0, ('fneg', a))), ('flrp', a, 1.0, a), '!options->lower_flrp32'),

   # we do these late so that we don't get in the way of creating ffmas
   (('fmin', ('fadd(is_used_once)', '#c', a), ('fadd(is_used_once)', '#c', b)), ('fadd', c, ('fmin', a, b))),
   (('fmax', ('fadd(is_used_once)', '#c', a), ('fadd(is_used_once)', '#c', b)), ('fadd', c, ('fmax', a, b))),

   (('bcsel', a, 0, ('b2f32', ('inot', 'b@bool'))), ('b2f32', ('inot', ('ior', a, b)))),

   # Putting this in 'optimizations' interferes with the bcsel(a, op(b, c),
   # op(b, d)) => op(b, bcsel(a, c, d)) transformations.  I do not know why.
   (('bcsel', ('feq', ('fsqrt', 'a(is_not_negative)'), 0.0), intBitsToFloat(0x7f7fffff), ('frsq', a)),
    ('fmin', ('frsq', a), intBitsToFloat(0x7f7fffff))),

   # Things that look like DPH in the source shader may get expanded to
   # something that looks like dot(v1.xyz, v2.xyz) + v1.w by the time it gets
   # to NIR.  After FFMA is generated, this can look like:
   #
   #    fadd(ffma(v1.z, v2.z, ffma(v1.y, v2.y, fmul(v1.x, v2.x))), v1.w)
   #
   # Reassociate the last addition into the first multiplication.
   #
   # Some shaders do not use 'invariant' in vertex and (possibly) geometry
   # shader stages on some outputs that are intended to be invariant.  For
   # various reasons, this optimization may not be fully applied in all
   # shaders used for different rendering passes of the same geometry.  This
   # can result in Z-fighting artifacts (at best).  For now, disable this
   # optimization in these stages.  See bugzilla #111490.  In tessellation
   # stages applications seem to use 'precise' when necessary, so allow the
   # optimization in those stages.
   (('~fadd', ('ffma(is_used_once)', a, b, ('ffma', c, d, ('fmul', 'e(is_not_const_and_not_fsign)', 'f(is_not_const_and_not_fsign)'))), 'g(is_not_const)'),
    ('ffma', a, b, ('ffma', c, d, ('ffma', e, 'f', 'g'))), '(info->stage != MESA_SHADER_VERTEX && info->stage != MESA_SHADER_GEOMETRY) && !options->intel_vec4'),
   (('~fadd', ('ffma(is_used_once)', a, b, ('fmul', 'c(is_not_const_and_not_fsign)', 'd(is_not_const_and_not_fsign)')), 'e(is_not_const)'),
    ('ffma', a, b, ('ffma', c, d, e)), '(info->stage != MESA_SHADER_VERTEX && info->stage != MESA_SHADER_GEOMETRY) && !options->intel_vec4'),
]

for op in ['fadd']:
   late_optimizations += [
      (('bcsel', a, (op + '(is_used_once)', b, c), (op, b, d)), (op, b, ('bcsel', a, c, d))),
      (('bcsel', a, (op, b, c), (op + '(is_used_once)', b, d)), (op, b, ('bcsel', a, c, d))),
   ]

for op in ['ffma']:
   late_optimizations += [
      (('bcsel', a, (op + '(is_used_once)', b, c, d), (op, b, c, e)), (op, b, c, ('bcsel', a, d, e))),
      (('bcsel', a, (op, b, c, d), (op + '(is_used_once)', b, c, e)), (op, b, c, ('bcsel', a, d, e))),

      (('bcsel', a, (op + '(is_used_once)', b, c, d), (op, b, e, d)), (op, b, ('bcsel', a, c, e), d)),
      (('bcsel', a, (op, b, c, d), (op + '(is_used_once)', b, e, d)), (op, b, ('bcsel', a, c, e), d)),
   ]

print(nir_algebraic.AlgebraicPass("nir_opt_algebraic", optimizations).render())
print(nir_algebraic.AlgebraicPass("nir_opt_algebraic_before_ffma",
                                  before_ffma_optimizations).render())
print(nir_algebraic.AlgebraicPass("nir_opt_algebraic_late",
                                  late_optimizations).render())