#
# Copyright (C) 2014 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
#    Jason Ekstrand (jason@jlekstrand.net)

from __future__ import print_function

from collections import OrderedDict
import nir_algebraic
from nir_opcodes import type_sizes
import itertools
from math import pi

# Convenience variables
a = 'a'
b = 'b'
c = 'c'
d = 'd'
e = 'e'

# Written in the form (<search>, <replace>) where <search> is an expression
# and <replace> is either an expression or a value. An expression is
# defined as a tuple of the form ([~]<op>, <src0>, <src1>, <src2>, <src3>)
# where each source is either an expression or a value. A value can be
# either a numeric constant or a string representing a variable name.
#
# If the opcode in a search expression is prefixed by a '~' character, this
# indicates that the operation is inexact. Such operations will only get
# applied to SSA values that do not have the exact bit set. This should be
# used by any optimizations that are not bit-for-bit exact. It should not,
# however, be used for backend-requested lowering operations as those need to
# happen regardless of precision.
#
# Variable names are specified as "[#]name[@type][(cond)][.swiz]" where:
# "#" indicates that the given variable will only match constants,
# type indicates that the given variable will only match values from ALU
#    instructions with the given output type,
# (cond) specifies an additional condition function (see nir_search_helpers.h),
# swiz is a swizzle applied to the variable (only in the <replace> expression)
#
# For constants, you have to be careful to make sure that it is the right
# type because python is unaware of the source and destination types of the
# opcodes.
#
# All expression types can have a bit-size specified. For opcodes, this
# looks like "op@32", for variables it is "a@32" or "a@uint32" to specify a
# type and size. In the search half of the expression this indicates that it
# should only match that particular bit-size. In the replace half of the
# expression this indicates that the constructed value should have that
# bit-size.
#
# A special condition "many-comm-expr" can be used with expressions to note
# that the expression and its subexpressions have more commutative expressions
# than nir_replace_instr can handle. If this special condition is needed with
# another condition, the two can be separated by a comma (e.g.,
# "(many-comm-expr,is_used_once)").

# based on https://web.archive.org/web/20180105155939/http://forum.devmaster.net/t/fast-and-accurate-sine-cosine/9648
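# A rough summary of the linked derivation: the 0.5/pi scale plus the phase
# offset c (0.5 for sine, 0.75 for cosine) map the angle onto x in [-1, 1];
# 4*x*(1 - |x|) is a parabola approximating one period of sine; the final
# ffma applies the "extra precision" step y + 0.225*(y*|y| - y) from the
# same source.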
def lowered_sincos(c):
   x = ('fsub', ('fmul', 2.0, ('ffract', ('fadd', ('fmul', 0.5 / pi, a), c))), 1.0)
   x = ('fmul', ('fsub', x, ('fmul', x, ('fabs', x))), 4.0)
   return ('ffma', ('ffma', x, ('fabs', x), ('fneg', x)), 0.225, x)

optimizations = [

   (('imul', a, '#b@32(is_pos_power_of_two)'), ('ishl', a, ('find_lsb', b)), '!options->lower_bitops'),
   (('imul', a, '#b@32(is_neg_power_of_two)'), ('ineg', ('ishl', a, ('find_lsb', ('iabs', b)))), '!options->lower_bitops'),
   (('ishl', a, '#b@32'), ('imul', a, ('ishl', 1, b)), 'options->lower_bitops'),

   (('unpack_64_2x32_split_x', ('imul_2x32_64(is_used_once)', a, b)), ('imul', a, b)),
   (('unpack_64_2x32_split_x', ('umul_2x32_64(is_used_once)', a, b)), ('imul', a, b)),
   (('imul_2x32_64', a, b), ('pack_64_2x32_split', ('imul', a, b), ('imul_high', a, b)), 'options->lower_mul_2x32_64'),
   (('umul_2x32_64', a, b), ('pack_64_2x32_split', ('imul', a, b), ('umul_high', a, b)), 'options->lower_mul_2x32_64'),
   (('udiv', a, 1), a),
   (('idiv', a, 1), a),
   (('umod', a, 1), 0),
   (('imod', a, 1), 0),
   (('udiv', a, '#b@32(is_pos_power_of_two)'), ('ushr', a, ('find_lsb', b)), '!options->lower_bitops'),
   (('idiv', a, '#b@32(is_pos_power_of_two)'), ('imul', ('isign', a), ('ushr', ('iabs', a), ('find_lsb', b))), 'options->lower_idiv'),
   (('idiv', a, '#b@32(is_neg_power_of_two)'), ('ineg', ('imul', ('isign', a), ('ushr', ('iabs', a), ('find_lsb', ('iabs', b))))), 'options->lower_idiv'),
   (('umod', a, '#b(is_pos_power_of_two)'), ('iand', a, ('isub', b, 1))),

   (('~fneg', ('fneg', a)), a),
   (('ineg', ('ineg', a)), a),
   (('fabs', ('fabs', a)), ('fabs', a)),
   (('fabs', ('fneg', a)), ('fabs', a)),
   (('fabs', ('u2f', a)), ('u2f', a)),
   (('iabs', ('iabs', a)), ('iabs', a)),
   (('iabs', ('ineg', a)), ('iabs', a)),
   (('f2b', ('fneg', a)), ('f2b', a)),
   (('i2b', ('ineg', a)), ('i2b', a)),
   (('~fadd', a, 0.0), a),
   (('iadd', a, 0), a),
   (('usadd_4x8', a, 0), a),
   (('usadd_4x8', a, ~0), ~0),
   (('~fadd', ('fmul', a, b), ('fmul', a, c)), ('fmul', a, ('fadd', b, c))),
   (('iadd', ('imul', a, b), ('imul', a, c)), ('imul', a, ('iadd', b, c))),
   (('~fadd', ('fneg', a), a), 0.0),
   (('iadd', ('ineg', a), a), 0),
   (('iadd', ('ineg', a), ('iadd', a, b)), b),
   (('iadd', a, ('iadd', ('ineg', a), b)), b),
   (('~fadd', ('fneg', a), ('fadd', a, b)), b),
   (('~fadd', a, ('fadd', ('fneg', a), b)), b),
   (('fadd', ('fsat', a), ('fsat', ('fneg', a))), ('fsat', ('fabs', a))),
   (('~fmul', a, 0.0), 0.0),
   (('imul', a, 0), 0),
   (('umul_unorm_4x8', a, 0), 0),
   (('umul_unorm_4x8', a, ~0), a),
   (('~fmul', a, 1.0), a),
   (('imul', a, 1), a),
   (('fmul', a, -1.0), ('fneg', a)),
   (('imul', a, -1), ('ineg', a)),
   # If a < 0: fsign(a)*a*a => -1*a*a => -a*a => abs(a)*a
   # If a > 0: fsign(a)*a*a => 1*a*a => a*a => abs(a)*a
   # If a == 0: fsign(a)*a*a => 0*0*0 => abs(0)*0
   (('fmul', ('fsign', a), ('fmul', a, a)), ('fmul', ('fabs', a), a)),
   (('fmul', ('fmul', ('fsign', a), a), a), ('fmul', ('fabs', a), a)),
   (('~ffma', 0.0, a, b), b),
   (('~ffma', a, b, 0.0), ('fmul', a, b)),
   (('ffma', 1.0, a, b), ('fadd', a, b)),
   (('ffma', -1.0, a, b), ('fadd', ('fneg', a), b)),
   (('~flrp', a, b, 0.0), a),
   (('~flrp', a, b, 1.0), b),
   (('~flrp', a, a, b), a),
   (('~flrp', 0.0, a, b), ('fmul', a, b)),

   # flrp(a, a + b, c) => a + flrp(0, b, c) => a + (b * c)
   (('~flrp', a, ('fadd(is_used_once)', a, b), c), ('fadd', ('fmul', b, c), a)),
   (('~flrp@32', a, ('fadd', a, b), c), ('fadd', ('fmul', b, c), a), 'options->lower_flrp32'),
   (('~flrp@64', a, ('fadd', a, b), c), ('fadd', ('fmul', b, c), a), 'options->lower_flrp64'),

   (('~flrp@32', ('fadd', a, b), ('fadd', a, c), d), ('fadd', ('flrp', b, c, d), a), 'options->lower_flrp32'),
   (('~flrp@64', ('fadd', a, b), ('fadd', a, c), d), ('fadd', ('flrp', b, c, d), a), 'options->lower_flrp64'),

   (('~flrp@32', a, ('fmul(is_used_once)', a, b), c), ('fmul', ('flrp', 1.0, b, c), a), 'options->lower_flrp32'),
   (('~flrp@64', a, ('fmul(is_used_once)', a, b), c), ('fmul', ('flrp', 1.0, b, c), a), 'options->lower_flrp64'),

   (('~flrp', ('fmul(is_used_once)', a, b), ('fmul(is_used_once)', a, c), d), ('fmul', ('flrp', b, c, d), a)),

   (('~flrp', a, b, ('b2f', 'c@1')), ('bcsel', c, b, a), 'options->lower_flrp32'),
   (('~flrp', a, 0.0, c), ('fadd', ('fmul', ('fneg', a), c), a)),
   (('ftrunc', a), ('bcsel', ('flt', a, 0.0), ('fneg', ('ffloor', ('fabs', a))), ('ffloor', ('fabs', a))), 'options->lower_ftrunc'),
   (('ffloor', a), ('fsub', a, ('ffract', a)), 'options->lower_ffloor'),
   (('fadd', a, ('fneg', ('ffract', a))), ('ffloor', a), '!options->lower_ffloor'),
   (('ffract', a), ('fsub', a, ('ffloor', a)), 'options->lower_ffract'),
   (('fceil', a), ('fneg', ('ffloor', ('fneg', a))), 'options->lower_fceil'),
   (('~fadd', ('fmul', a, ('fadd', 1.0, ('fneg', ('b2f', 'c@1')))), ('fmul', b, ('b2f', c))), ('bcsel', c, b, a), 'options->lower_flrp32'),
   (('~fadd@32', ('fmul', a, ('fadd', 1.0, ('fneg', c ) )), ('fmul', b, c )), ('flrp', a, b, c), '!options->lower_flrp32'),
   (('~fadd@64', ('fmul', a, ('fadd', 1.0, ('fneg', c ) )), ('fmul', b, c )), ('flrp', a, b, c), '!options->lower_flrp64'),
   # These are the same as the previous three rules, but they depend on
   # 1-fsat(x) <=> fsat(1-x). See below.
   (('~fadd@32', ('fmul', a, ('fsat', ('fadd', 1.0, ('fneg', c )))), ('fmul', b, ('fsat', c))), ('flrp', a, b, ('fsat', c)), '!options->lower_flrp32'),
   (('~fadd@64', ('fmul', a, ('fsat', ('fadd', 1.0, ('fneg', c )))), ('fmul', b, ('fsat', c))), ('flrp', a, b, ('fsat', c)), '!options->lower_flrp64'),

   (('~fadd', a, ('fmul', ('b2f', 'c@1'), ('fadd', b, ('fneg', a)))), ('bcsel', c, b, a), 'options->lower_flrp32'),
   (('~fadd@32', a, ('fmul', c , ('fadd', b, ('fneg', a)))), ('flrp', a, b, c), '!options->lower_flrp32'),
   (('~fadd@64', a, ('fmul', c , ('fadd', b, ('fneg', a)))), ('flrp', a, b, c), '!options->lower_flrp64'),
   (('ffma', a, b, c), ('fadd', ('fmul', a, b), c), 'options->lower_ffma'),
   (('~fadd', ('fmul', a, b), c), ('ffma', a, b, c), 'options->fuse_ffma'),

   (('~fmul', ('fadd', ('iand', ('ineg', ('b2i32', 'a@bool')), ('fmul', b, c)), '#d'), '#e'),
    ('bcsel', a, ('fmul', ('fadd', ('fmul', b, c), d), e), ('fmul', d, e))),

   (('fdph', a, b), ('fdot4', ('vec4', 'a.x', 'a.y', 'a.z', 1.0), b), 'options->lower_fdph'),

   (('fdot4', ('vec4', a, b, c, 1.0), d), ('fdph', ('vec3', a, b, c), d), '!options->lower_fdph'),
   (('fdot4', ('vec4', a, 0.0, 0.0, 0.0), b), ('fmul', a, b)),
   (('fdot4', ('vec4', a, b, 0.0, 0.0), c), ('fdot2', ('vec2', a, b), c)),
   (('fdot4', ('vec4', a, b, c, 0.0), d), ('fdot3', ('vec3', a, b, c), d)),

   (('fdot3', ('vec3', a, 0.0, 0.0), b), ('fmul', a, b)),
   (('fdot3', ('vec3', a, b, 0.0), c), ('fdot2', ('vec2', a, b), c)),

   (('fdot2', ('vec2', a, 0.0), b), ('fmul', a, b)),
   (('fdot2', a, 1.0), ('fadd', 'a.x', 'a.y')),

   # Lower fdot to fsum when it is available
   (('fdot2', a, b), ('fsum2', ('fmul', a, b)), 'options->lower_fdot'),
   (('fdot3', a, b), ('fsum3', ('fmul', a, b)), 'options->lower_fdot'),
   (('fdot4', a, b), ('fsum4', ('fmul', a, b)), 'options->lower_fdot'),
   (('fsum2', a), ('fadd', 'a.x', 'a.y'), 'options->lower_fdot'),

   # If x >= 0 and x <= 1: fsat(1 - x) == 1 - fsat(x) trivially
   # If x < 0: 1 - fsat(x) => 1 - 0 => 1 and fsat(1 - x) => fsat(> 1) => 1
   # If x > 1: 1 - fsat(x) => 1 - 1 => 0 and fsat(1 - x) => fsat(< 0) => 0
   (('~fadd', ('fneg(is_used_once)', ('fsat(is_used_once)', 'a(is_not_fmul)')), 1.0), ('fsat', ('fadd', 1.0, ('fneg', a)))),

   # 1 - ((1 - a) * (1 - b))
   # 1 - (1 - a - b + a*b)
   # 1 - 1 + a + b - a*b
   # a + b - a*b
   # a + b*(1 - a)
   # b*(1 - a) + 1*a
   # flrp(b, 1, a)
   (('~fadd@32', 1.0, ('fneg', ('fmul', ('fadd', 1.0, ('fneg', a)), ('fadd', 1.0, ('fneg', b))))),
    ('flrp', b, 1.0, a), '!options->lower_flrp32'),

   # (a * #b + #c) << #d
   # ((a * #b) << #d) + (#c << #d)
   # (a * (#b << #d)) + (#c << #d)
   (('ishl', ('iadd', ('imul', a, '#b'), '#c'), '#d'),
    ('iadd', ('imul', a, ('ishl', b, d)), ('ishl', c, d))),

   # (a * #b) << #c
   # a * (#b << #c)
   (('ishl', ('imul', a, '#b'), '#c'), ('imul', a, ('ishl', b, c))),
]

# Care must be taken here. Shifts in NIR use only the lower log2(bitsize)
# bits of the second source. These replacements must correctly handle the
# case where (b % bitsize) + (c % bitsize) >= bitsize.
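#
# For example, with bitsize 32, combining ishl(ishl(a, 20), 20) into a single
# ishl(a, 40) would actually shift by 40 & 31 = 8; the correct result of the
# original pair is 0, which is what the bcsel below produces.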
for s in [8, 16, 32, 64]:
   mask = (1 << s) - 1

   ishl = "ishl@{}".format(s)
   ishr = "ishr@{}".format(s)
   ushr = "ushr@{}".format(s)

   in_bounds = ('ult', ('iadd', ('iand', b, mask), ('iand', c, mask)), s)

   optimizations.extend([
       ((ishl, (ishl, a, '#b'), '#c'), ('bcsel', in_bounds, (ishl, a, ('iadd', b, c)), 0)),
       ((ushr, (ushr, a, '#b'), '#c'), ('bcsel', in_bounds, (ushr, a, ('iadd', b, c)), 0)),

       # To get -1 for large shifts of negative values, ishr must instead
       # clamp the shift count to the maximum value.
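       # (For s == 32, ishr(ishr(a, 16), 16) on a negative a must produce -1,
       # so the combined count is clamped to 31 rather than wrapped to 0.)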
       ((ishr, (ishr, a, '#b'), '#c'),
        (ishr, a, ('imin', ('iadd', ('iand', b, mask), ('iand', c, mask)), s - 1))),
   ])

optimizations.extend([
   # This is common for address calculations. Reassociating may enable the
   # 'a<<c' to be CSE'd. It also helps architectures that have an ISHLADD
   # instruction or a constant offset field in load / store instructions.
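   # (For example, (i + 1) << 4 and (i + 2) << 4 both contain i << 4 after
   # this rewrite, so the shift can be CSE'd and each constant term can fold
   # into an offset field.)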
   (('ishl', ('iadd', a, '#b'), '#c'), ('iadd', ('ishl', a, c), ('ishl', b, c))),

   # Comparison simplifications
   (('~inot', ('flt', a, b)), ('fge', a, b)),
   (('~inot', ('fge', a, b)), ('flt', a, b)),
   (('inot', ('feq', a, b)), ('fne', a, b)),
   (('inot', ('fne', a, b)), ('feq', a, b)),
   (('inot', ('ilt', a, b)), ('ige', a, b)),
   (('inot', ('ult', a, b)), ('uge', a, b)),
   (('inot', ('ige', a, b)), ('ilt', a, b)),
   (('inot', ('uge', a, b)), ('ult', a, b)),
   (('inot', ('ieq', a, b)), ('ine', a, b)),
   (('inot', ('ine', a, b)), ('ieq', a, b)),

   (('iand', ('feq', a, b), ('fne', a, b)), False),
   (('iand', ('flt', a, b), ('flt', b, a)), False),
   (('iand', ('ieq', a, b), ('ine', a, b)), False),
   (('iand', ('ilt', a, b), ('ilt', b, a)), False),
   (('iand', ('ult', a, b), ('ult', b, a)), False),

   # This helps some shaders because, after some optimizations, they end up
   # with patterns like (-a < -b) || (b < a). In an ideal world, this sort of
   # matching would be handled by CSE.
   (('flt', ('fneg', a), ('fneg', b)), ('flt', b, a)),
   (('fge', ('fneg', a), ('fneg', b)), ('fge', b, a)),
   (('feq', ('fneg', a), ('fneg', b)), ('feq', b, a)),
   (('fne', ('fneg', a), ('fneg', b)), ('fne', b, a)),
   (('flt', ('fneg', a), -1.0), ('flt', 1.0, a)),
   (('flt', -1.0, ('fneg', a)), ('flt', a, 1.0)),
   (('fge', ('fneg', a), -1.0), ('fge', 1.0, a)),
   (('fge', -1.0, ('fneg', a)), ('fge', a, 1.0)),
   (('fne', ('fneg', a), -1.0), ('fne', 1.0, a)),
   (('feq', -1.0, ('fneg', a)), ('feq', a, 1.0)),

   (('flt', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('flt', a, b)),
   (('flt', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('flt', b, a)),
   (('fge', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fge', a, b)),
   (('fge', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('fge', b, a)),
   (('feq', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('feq', a, b)),
   (('fne', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fne', a, b)),

   (('fge', ('fsat(is_used_once)', a), 1.0), ('fge', a, 1.0)),
   (('flt', ('fsat(is_used_once)', a), 1.0), ('flt', a, 1.0)),
   (('fge', 0.0, ('fsat(is_used_once)', a)), ('fge', 0.0, a)),
   (('flt', 0.0, ('fsat(is_used_once)', a)), ('flt', 0.0, a)),

   # 0.0 >= b2f(a)
   # b2f(a) <= 0.0
   # b2f(a) == 0.0 because b2f(a) can only be 0 or 1
   # inot(a)
   (('fge', 0.0, ('b2f', 'a@1')), ('inot', a)),

   (('fge', ('fneg', ('b2f', 'a@1')), 0.0), ('inot', a)),

   (('fne', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('ior', a, b)),
   (('fne', ('fmax', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('ior', a, b)),
   (('fne', ('bcsel', a, 1.0, ('b2f', 'b@1')) , 0.0), ('ior', a, b)),
   (('fne', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), ('ior', a, b)),
   (('fne', ('fmul', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('iand', a, b)),
   (('fne', ('fmin', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('iand', a, b)),
   (('fne', ('bcsel', a, ('b2f', 'b@1'), 0.0) , 0.0), ('iand', a, b)),
   (('fne', ('fadd', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), 0.0), ('ixor', a, b)),
   (('fne', ('b2f', 'a@1') , ('b2f', 'b@1') ), ('ixor', a, b)),
   (('fne', ('fneg', ('b2f', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('ixor', a, b)),
   (('feq', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('ior', a, b))),
   (('feq', ('fmax', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('ior', a, b))),
   (('feq', ('bcsel', a, 1.0, ('b2f', 'b@1')) , 0.0), ('inot', ('ior', a, b))),
   (('feq', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), ('inot', ('ior', a, b))),
   (('feq', ('fmul', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('iand', a, b))),
   (('feq', ('fmin', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('iand', a, b))),
   (('feq', ('bcsel', a, ('b2f', 'b@1'), 0.0) , 0.0), ('inot', ('iand', a, b))),
   (('feq', ('fadd', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), 0.0), ('ieq', a, b)),
   (('feq', ('b2f', 'a@1') , ('b2f', 'b@1') ), ('ieq', a, b)),
   (('feq', ('fneg', ('b2f', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('ieq', a, b)),

   # -(b2f(a) + b2f(b)) < 0
   # 0 < b2f(a) + b2f(b)
   # 0 != b2f(a) + b2f(b)      b2f must be 0 or 1, so the sum is non-negative
   # a || b
   (('flt', ('fneg', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), 0.0), ('ior', a, b)),
   (('flt', 0.0, ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), ('ior', a, b)),

   # -(b2f(a) + b2f(b)) >= 0
   # 0 >= b2f(a) + b2f(b)
   # 0 == b2f(a) + b2f(b)      b2f must be 0 or 1, so the sum is non-negative
   # !(a || b)
   (('fge', ('fneg', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), 0.0), ('inot', ('ior', a, b))),
   (('fge', 0.0, ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), ('inot', ('ior', a, b))),

   (('flt', a, ('fneg', a)), ('flt', a, 0.0)),
   (('fge', a, ('fneg', a)), ('fge', a, 0.0)),

   # Some optimizations (below) convert things like (a < b || c < b) into
   # (min(a, c) < b). However, this interferes with the previous optimizations
   # that try to remove comparisons with negated sums of b2f. This just
   # breaks that apart.
   (('flt', ('fmin', c, ('fneg', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1')))), 0.0),
    ('ior', ('flt', c, 0.0), ('ior', a, b))),

   (('~flt', ('fadd', a, b), a), ('flt', b, 0.0)),
   (('~fge', ('fadd', a, b), a), ('fge', b, 0.0)),
   (('~feq', ('fadd', a, b), a), ('feq', b, 0.0)),
   (('~fne', ('fadd', a, b), a), ('fne', b, 0.0)),
   (('~flt', ('fadd(is_used_once)', a, '#b'), '#c'), ('flt', a, ('fadd', c, ('fneg', b)))),
   (('~flt', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('flt', ('fneg', ('fadd', c, b)), a)),
   (('~fge', ('fadd(is_used_once)', a, '#b'), '#c'), ('fge', a, ('fadd', c, ('fneg', b)))),
   (('~fge', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('fge', ('fneg', ('fadd', c, b)), a)),
   (('~feq', ('fadd(is_used_once)', a, '#b'), '#c'), ('feq', a, ('fadd', c, ('fneg', b)))),
   (('~feq', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('feq', ('fneg', ('fadd', c, b)), a)),
   (('~fne', ('fadd(is_used_once)', a, '#b'), '#c'), ('fne', a, ('fadd', c, ('fneg', b)))),
   (('~fne', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('fne', ('fneg', ('fadd', c, b)), a)),

   # Cannot remove the addition from ilt or ige due to overflow.
   (('ieq', ('iadd', a, b), a), ('ieq', b, 0)),
   (('ine', ('iadd', a, b), a), ('ine', b, 0)),

   # fmin(-b2f(a), b) >= 0.0
   # -b2f(a) >= 0.0 && b >= 0.0
   # -b2f(a) == 0.0 && b >= 0.0    -b2f can only be 0 or -1, never >0
   # b2f(a) == 0.0 && b >= 0.0
   # a == False && b >= 0.0
   # !a && b >= 0.0
   #
   # The fge in the second replacement is not a typo. I leave the proof that
   # "fmin(-b2f(a), b) >= 0 <=> fmin(-b2f(a), b) == 0" as an exercise for the
   # reader.
   (('fge', ('fmin', ('fneg', ('b2f', 'a@1')), 'b@1'), 0.0), ('iand', ('inot', a), ('fge', b, 0.0))),
   (('feq', ('fmin', ('fneg', ('b2f', 'a@1')), 'b@1'), 0.0), ('iand', ('inot', a), ('fge', b, 0.0))),

   (('feq', ('b2f', 'a@1'), 0.0), ('inot', a)),
   (('~fne', ('b2f', 'a@1'), 0.0), a),
   (('ieq', ('b2i', 'a@1'), 0), ('inot', a)),
   (('ine', ('b2i', 'a@1'), 0), a),

   (('fne', ('u2f', a), 0.0), ('ine', a, 0)),
   (('feq', ('u2f', a), 0.0), ('ieq', a, 0)),
   (('fge', ('u2f', a), 0.0), True),
   (('fge', 0.0, ('u2f', a)), ('uge', 0, a)), # ieq instead?
   (('flt', ('u2f', a), 0.0), False),
   (('flt', 0.0, ('u2f', a)), ('ult', 0, a)), # ine instead?
   (('fne', ('i2f', a), 0.0), ('ine', a, 0)),
   (('feq', ('i2f', a), 0.0), ('ieq', a, 0)),
   (('fge', ('i2f', a), 0.0), ('ige', a, 0)),
   (('fge', 0.0, ('i2f', a)), ('ige', 0, a)),
   (('flt', ('i2f', a), 0.0), ('ilt', a, 0)),
   (('flt', 0.0, ('i2f', a)), ('ilt', 0, a)),

   # 0.0 < fabs(a)
   # fabs(a) > 0.0
   # fabs(a) != 0.0 because fabs(a) must be >= 0
   # a != 0.0
   (('~flt', 0.0, ('fabs', a)), ('fne', a, 0.0)),

   # -fabs(a) < 0.0
   # fabs(a) > 0.0
   (('~flt', ('fneg', ('fabs', a)), 0.0), ('fne', a, 0.0)),

   # 0.0 >= fabs(a)
   # 0.0 == fabs(a) because fabs(a) must be >= 0
   # 0.0 == a
   (('fge', 0.0, ('fabs', a)), ('feq', a, 0.0)),

   # -fabs(a) >= 0.0
   # 0.0 >= fabs(a)
   (('fge', ('fneg', ('fabs', a)), 0.0), ('feq', a, 0.0)),

   # (a >= 0.0) && (a <= 1.0) -> fsat(a) == a
   (('iand', ('fge', a, 0.0), ('fge', 1.0, a)), ('feq', a, ('fsat', a)), '!options->lower_fsat'),

   # (a < 0.0) || (a > 1.0)
   # !(!(a < 0.0) && !(a > 1.0))
   # !((a >= 0.0) && (a <= 1.0))
   # !(a == fsat(a))
   # a != fsat(a)
   (('ior', ('flt', a, 0.0), ('flt', 1.0, a)), ('fne', a, ('fsat', a)), '!options->lower_fsat'),

   (('fmax', ('b2f(is_used_once)', 'a@1'), ('b2f', 'b@1')), ('b2f', ('ior', a, b))),
   (('fmax', ('fneg(is_used_once)', ('b2f(is_used_once)', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('fneg', ('b2f', ('ior', a, b)))),
   (('fmin', ('b2f(is_used_once)', 'a@1'), ('b2f', 'b@1')), ('b2f', ('iand', a, b))),
   (('fmin', ('fneg(is_used_once)', ('b2f(is_used_once)', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('fneg', ('b2f', ('iand', a, b)))),

   # fmin(b2f(a), b)
   # bcsel(a, fmin(b2f(a), b), fmin(b2f(a), b))
   # bcsel(a, fmin(b2f(True), b), fmin(b2f(False), b))
   # bcsel(a, fmin(1.0, b), fmin(0.0, b))
   #
   # Since b is a constant, constant folding will eliminate both fmin
   # operations. If b is > 1.0, the bcsel will be replaced with a b2f.
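   # (E.g., with b == 2.0 the replacement folds to bcsel(a, 1.0, 0.0), which
   # the boolean rules below turn into b2f(a).)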
   (('fmin', ('b2f', 'a@1'), '#b'), ('bcsel', a, ('fmin', b, 1.0), ('fmin', b, 0.0))),

   (('flt', ('fadd(is_used_once)', a, ('fneg', b)), 0.0), ('flt', a, b)),

   (('fge', ('fneg', ('fabs', a)), 0.0), ('feq', a, 0.0)),
   (('~bcsel', ('flt', b, a), b, a), ('fmin', a, b)),
   (('~bcsel', ('flt', a, b), b, a), ('fmax', a, b)),
   (('~bcsel', ('fge', a, b), b, a), ('fmin', a, b)),
   (('~bcsel', ('fge', b, a), b, a), ('fmax', a, b)),
   (('bcsel', ('i2b', a), b, c), ('bcsel', ('ine', a, 0), b, c)),
   (('bcsel', ('inot', a), b, c), ('bcsel', a, c, b)),
   (('bcsel', a, ('bcsel', a, b, c), d), ('bcsel', a, b, d)),
   (('bcsel', a, b, ('bcsel', a, c, d)), ('bcsel', a, b, d)),
   (('bcsel', a, ('bcsel', b, c, d), ('bcsel(is_used_once)', b, c, 'e')), ('bcsel', b, c, ('bcsel', a, d, 'e'))),
   (('bcsel', a, ('bcsel(is_used_once)', b, c, d), ('bcsel', b, c, 'e')), ('bcsel', b, c, ('bcsel', a, d, 'e'))),
   (('bcsel', a, ('bcsel', b, c, d), ('bcsel(is_used_once)', b, 'e', d)), ('bcsel', b, ('bcsel', a, c, 'e'), d)),
   (('bcsel', a, ('bcsel(is_used_once)', b, c, d), ('bcsel', b, 'e', d)), ('bcsel', b, ('bcsel', a, c, 'e'), d)),
   (('bcsel', a, True, b), ('ior', a, b)),
   (('bcsel', a, a, b), ('ior', a, b)),
   (('bcsel', a, b, False), ('iand', a, b)),
   (('bcsel', a, b, a), ('iand', a, b)),
   (('~fmin', a, a), a),
   (('~fmax', a, a), a),
   (('imin', a, a), a),
   (('imax', a, a), a),
   (('umin', a, a), a),
   (('umax', a, a), a),
   (('fmax', ('fmax', a, b), b), ('fmax', a, b)),
   (('umax', ('umax', a, b), b), ('umax', a, b)),
   (('imax', ('imax', a, b), b), ('imax', a, b)),
   (('fmin', ('fmin', a, b), b), ('fmin', a, b)),
   (('umin', ('umin', a, b), b), ('umin', a, b)),
   (('imin', ('imin', a, b), b), ('imin', a, b)),
   (('fmax', a, ('fneg', a)), ('fabs', a)),
   (('imax', a, ('ineg', a)), ('iabs', a)),
   (('fmin', a, ('fneg', a)), ('fneg', ('fabs', a))),
   (('imin', a, ('ineg', a)), ('ineg', ('iabs', a))),
   (('fmin', a, ('fneg', ('fabs', a))), ('fneg', ('fabs', a))),
   (('imin', a, ('ineg', ('iabs', a))), ('ineg', ('iabs', a))),
   (('~fmin', a, ('fabs', a)), a),
   (('imin', a, ('iabs', a)), a),
   (('~fmax', a, ('fneg', ('fabs', a))), a),
   (('imax', a, ('ineg', ('iabs', a))), a),
   (('fmax', a, ('fabs', a)), ('fabs', a)),
   (('imax', a, ('iabs', a)), ('iabs', a)),
   (('fmax', a, ('fneg', a)), ('fabs', a)),
   (('imax', a, ('ineg', a)), ('iabs', a)),
   (('~fmax', ('fabs', a), 0.0), ('fabs', a)),
   (('~fmin', ('fmax', a, 0.0), 1.0), ('fsat', a), '!options->lower_fsat'),
   (('~fmax', ('fmin', a, 1.0), 0.0), ('fsat', a), '!options->lower_fsat'),
   (('~fmin', ('fmax', a, -1.0), 0.0), ('fneg', ('fsat', ('fneg', a))), '!options->lower_fsat'),
   (('~fmax', ('fmin', a, 0.0), -1.0), ('fneg', ('fsat', ('fneg', a))), '!options->lower_fsat'),
   (('fsat', ('fsign', a)), ('b2f', ('flt', 0.0, a))),
   (('fsat', ('b2f', a)), ('b2f', a)),
   (('fsat', a), ('fmin', ('fmax', a, 0.0), 1.0), 'options->lower_fsat'),
   (('fsat', ('fsat', a)), ('fsat', a)),
   (('fsat', ('fneg(is_used_once)', ('fadd(is_used_once)', a, b))), ('fsat', ('fadd', ('fneg', a), ('fneg', b))), '!options->lower_fsat'),
   (('fsat', ('fneg(is_used_once)', ('fmul(is_used_once)', a, b))), ('fsat', ('fmul', ('fneg', a), b)), '!options->lower_fsat'),
   (('fsat', ('fabs(is_used_once)', ('fmul(is_used_once)', a, b))), ('fsat', ('fmul', ('fabs', a), ('fabs', b))), '!options->lower_fsat'),
   (('fmin', ('fmax', ('fmin', ('fmax', a, b), c), b), c), ('fmin', ('fmax', a, b), c)),
   (('imin', ('imax', ('imin', ('imax', a, b), c), b), c), ('imin', ('imax', a, b), c)),
   (('umin', ('umax', ('umin', ('umax', a, b), c), b), c), ('umin', ('umax', a, b), c)),
   (('fmax', ('fsat', a), '#b@32(is_zero_to_one)'), ('fsat', ('fmax', a, b))),
   (('fmin', ('fsat', a), '#b@32(is_zero_to_one)'), ('fsat', ('fmin', a, b))),
   (('extract_u8', ('imin', ('imax', a, 0), 0xff), 0), ('imin', ('imax', a, 0), 0xff)),
   (('~ior', ('flt(is_used_once)', a, b), ('flt', a, c)), ('flt', a, ('fmax', b, c))),
   (('~ior', ('flt(is_used_once)', a, c), ('flt', b, c)), ('flt', ('fmin', a, b), c)),
   (('~ior', ('fge(is_used_once)', a, b), ('fge', a, c)), ('fge', a, ('fmin', b, c))),
   (('~ior', ('fge(is_used_once)', a, c), ('fge', b, c)), ('fge', ('fmax', a, b), c)),
   (('~ior', ('flt', a, '#b'), ('flt', a, '#c')), ('flt', a, ('fmax', b, c))),
   (('~ior', ('flt', '#a', c), ('flt', '#b', c)), ('flt', ('fmin', a, b), c)),
   (('~ior', ('fge', a, '#b'), ('fge', a, '#c')), ('fge', a, ('fmin', b, c))),
   (('~ior', ('fge', '#a', c), ('fge', '#b', c)), ('fge', ('fmax', a, b), c)),
   (('~iand', ('flt(is_used_once)', a, b), ('flt', a, c)), ('flt', a, ('fmin', b, c))),
   (('~iand', ('flt(is_used_once)', a, c), ('flt', b, c)), ('flt', ('fmax', a, b), c)),
   (('~iand', ('fge(is_used_once)', a, b), ('fge', a, c)), ('fge', a, ('fmax', b, c))),
   (('~iand', ('fge(is_used_once)', a, c), ('fge', b, c)), ('fge', ('fmin', a, b), c)),
   (('~iand', ('flt', a, '#b'), ('flt', a, '#c')), ('flt', a, ('fmin', b, c))),
   (('~iand', ('flt', '#a', c), ('flt', '#b', c)), ('flt', ('fmax', a, b), c)),
   (('~iand', ('fge', a, '#b'), ('fge', a, '#c')), ('fge', a, ('fmax', b, c))),
   (('~iand', ('fge', '#a', c), ('fge', '#b', c)), ('fge', ('fmin', a, b), c)),

   (('ior', ('ilt(is_used_once)', a, b), ('ilt', a, c)), ('ilt', a, ('imax', b, c))),
   (('ior', ('ilt(is_used_once)', a, c), ('ilt', b, c)), ('ilt', ('imin', a, b), c)),
   (('ior', ('ige(is_used_once)', a, b), ('ige', a, c)), ('ige', a, ('imin', b, c))),
   (('ior', ('ige(is_used_once)', a, c), ('ige', b, c)), ('ige', ('imax', a, b), c)),
   (('ior', ('ult(is_used_once)', a, b), ('ult', a, c)), ('ult', a, ('umax', b, c))),
   (('ior', ('ult(is_used_once)', a, c), ('ult', b, c)), ('ult', ('umin', a, b), c)),
   (('ior', ('uge(is_used_once)', a, b), ('uge', a, c)), ('uge', a, ('umin', b, c))),
   (('ior', ('uge(is_used_once)', a, c), ('uge', b, c)), ('uge', ('umax', a, b), c)),
   (('iand', ('ilt(is_used_once)', a, b), ('ilt', a, c)), ('ilt', a, ('imin', b, c))),
   (('iand', ('ilt(is_used_once)', a, c), ('ilt', b, c)), ('ilt', ('imax', a, b), c)),
   (('iand', ('ige(is_used_once)', a, b), ('ige', a, c)), ('ige', a, ('imax', b, c))),
   (('iand', ('ige(is_used_once)', a, c), ('ige', b, c)), ('ige', ('imin', a, b), c)),
   (('iand', ('ult(is_used_once)', a, b), ('ult', a, c)), ('ult', a, ('umin', b, c))),
   (('iand', ('ult(is_used_once)', a, c), ('ult', b, c)), ('ult', ('umax', a, b), c)),
   (('iand', ('uge(is_used_once)', a, b), ('uge', a, c)), ('uge', a, ('umax', b, c))),
   (('iand', ('uge(is_used_once)', a, c), ('uge', b, c)), ('uge', ('umin', a, b), c)),

   # These derive from the previous patterns with the application of b < 0 <=>
   # 0 < -b. The transformation should be applied if either comparison is
   # used once as this ensures that the number of comparisons will not
   # increase. The sources to the ior and iand are not symmetric, so the
   # rules have to be duplicated to get this behavior.
   (('~ior', ('flt(is_used_once)', 0.0, 'a@32'), ('flt', 'b@32', 0.0)), ('flt', 0.0, ('fmax', a, ('fneg', b)))),
   (('~ior', ('flt', 0.0, 'a@32'), ('flt(is_used_once)', 'b@32', 0.0)), ('flt', 0.0, ('fmax', a, ('fneg', b)))),
   (('~ior', ('fge(is_used_once)', 0.0, 'a@32'), ('fge', 'b@32', 0.0)), ('fge', 0.0, ('fmin', a, ('fneg', b)))),
   (('~ior', ('fge', 0.0, 'a@32'), ('fge(is_used_once)', 'b@32', 0.0)), ('fge', 0.0, ('fmin', a, ('fneg', b)))),
   (('~iand', ('flt(is_used_once)', 0.0, 'a@32'), ('flt', 'b@32', 0.0)), ('flt', 0.0, ('fmin', a, ('fneg', b)))),
   (('~iand', ('flt', 0.0, 'a@32'), ('flt(is_used_once)', 'b@32', 0.0)), ('flt', 0.0, ('fmin', a, ('fneg', b)))),
   (('~iand', ('fge(is_used_once)', 0.0, 'a@32'), ('fge', 'b@32', 0.0)), ('fge', 0.0, ('fmax', a, ('fneg', b)))),
   (('~iand', ('fge', 0.0, 'a@32'), ('fge(is_used_once)', 'b@32', 0.0)), ('fge', 0.0, ('fmax', a, ('fneg', b)))),

   # Common pattern like 'if (i == 0 || i == 1 || ...)'
   (('ior', ('ieq', a, 0), ('ieq', a, 1)), ('uge', 1, a)),
   (('ior', ('uge', 1, a), ('ieq', a, 2)), ('uge', 2, a)),
   (('ior', ('uge', 2, a), ('ieq', a, 3)), ('uge', 3, a)),

   # The (i2f32, ...) part is an open-coded fsign. When that is combined with
   # the bcsel, it's basically copysign(1.0, a). There is no copysign in NIR,
   # so emit an open-coded version of that.
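   # The replacement works at the bit level: 0x3f800000 is the IEEE-754
   # encoding of 1.0f, so OR-ing in a's sign bit (a & 0x80000000) yields
   # +/-1.0 with the sign of a.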
   (('bcsel@32', ('feq', a, 0.0), 1.0, ('i2f32', ('iadd', ('b2i32', ('flt', 0.0, 'a@32')), ('ineg', ('b2i32', ('flt', 'a@32', 0.0)))))),
    ('ior', 0x3f800000, ('iand', a, 0x80000000))),

   (('ior', a, ('ieq', a, False)), True),
   (('ior', a, ('inot', a)), -1),

   (('ine', ('ineg', ('b2i32', 'a@1')), ('ineg', ('b2i32', 'b@1'))), ('ine', a, b)),
   (('b2i32', ('ine', 'a@1', 'b@1')), ('b2i32', ('ixor', a, b))),

   (('iand', ('ieq', 'a@32', 0), ('ieq', 'b@32', 0)), ('ieq', ('ior', 'a@32', 'b@32'), 0), '!options->lower_bitops'),

   # These patterns can result when (a < b || a < c) => (a < min(b, c))
   # transformations occur before constant propagation and loop-unrolling.
   (('~flt', a, ('fmax', b, a)), ('flt', a, b)),
   (('~flt', ('fmin', a, b), a), ('flt', b, a)),
   (('~fge', a, ('fmin', b, a)), True),
   (('~fge', ('fmax', a, b), a), True),
   (('~flt', a, ('fmin', b, a)), False),
   (('~flt', ('fmax', a, b), a), False),
   (('~fge', a, ('fmax', b, a)), ('fge', a, b)),
   (('~fge', ('fmin', a, b), a), ('fge', b, a)),

   (('ilt', a, ('imax', b, a)), ('ilt', a, b)),
   (('ilt', ('imin', a, b), a), ('ilt', b, a)),
   (('ige', a, ('imin', b, a)), True),
   (('ige', ('imax', a, b), a), True),
   (('ult', a, ('umax', b, a)), ('ult', a, b)),
   (('ult', ('umin', a, b), a), ('ult', b, a)),
   (('uge', a, ('umin', b, a)), True),
   (('uge', ('umax', a, b), a), True),
   (('ilt', a, ('imin', b, a)), False),
   (('ilt', ('imax', a, b), a), False),
   (('ige', a, ('imax', b, a)), ('ige', a, b)),
   (('ige', ('imin', a, b), a), ('ige', b, a)),
   (('ult', a, ('umin', b, a)), False),
   (('ult', ('umax', a, b), a), False),
   (('uge', a, ('umax', b, a)), ('uge', a, b)),
   (('uge', ('umin', a, b), a), ('uge', b, a)),
   (('ult', a, ('iand', b, a)), False),
   (('ult', ('ior', a, b), a), False),
   (('uge', a, ('iand', b, a)), True),
   (('uge', ('ior', a, b), a), True),

   (('ilt', '#a', ('imax', '#b', c)), ('ior', ('ilt', a, b), ('ilt', a, c))),
   (('ilt', ('imin', '#a', b), '#c'), ('ior', ('ilt', a, c), ('ilt', b, c))),
   (('ige', '#a', ('imin', '#b', c)), ('ior', ('ige', a, b), ('ige', a, c))),
   (('ige', ('imax', '#a', b), '#c'), ('ior', ('ige', a, c), ('ige', b, c))),
   (('ult', '#a', ('umax', '#b', c)), ('ior', ('ult', a, b), ('ult', a, c))),
   (('ult', ('umin', '#a', b), '#c'), ('ior', ('ult', a, c), ('ult', b, c))),
   (('uge', '#a', ('umin', '#b', c)), ('ior', ('uge', a, b), ('uge', a, c))),
   (('uge', ('umax', '#a', b), '#c'), ('ior', ('uge', a, c), ('uge', b, c))),
   (('ilt', '#a', ('imin', '#b', c)), ('iand', ('ilt', a, b), ('ilt', a, c))),
   (('ilt', ('imax', '#a', b), '#c'), ('iand', ('ilt', a, c), ('ilt', b, c))),
   (('ige', '#a', ('imax', '#b', c)), ('iand', ('ige', a, b), ('ige', a, c))),
   (('ige', ('imin', '#a', b), '#c'), ('iand', ('ige', a, c), ('ige', b, c))),
   (('ult', '#a', ('umin', '#b', c)), ('iand', ('ult', a, b), ('ult', a, c))),
   (('ult', ('umax', '#a', b), '#c'), ('iand', ('ult', a, c), ('ult', b, c))),
   (('uge', '#a', ('umax', '#b', c)), ('iand', ('uge', a, b), ('uge', a, c))),
   (('uge', ('umin', '#a', b), '#c'), ('iand', ('uge', a, c), ('uge', b, c))),

   # Thanks to sign extension, the ishr(a, b) is negative if and only if a is
   # negative.
   (('bcsel', ('ilt', a, 0), ('ineg', ('ishr', a, b)), ('ishr', a, b)),
    ('iabs', ('ishr', a, b))),
   (('iabs', ('ishr', ('iabs', a), b)), ('ishr', ('iabs', a), b)),

   (('fabs', ('slt', a, b)), ('slt', a, b)),
   (('fabs', ('sge', a, b)), ('sge', a, b)),
   (('fabs', ('seq', a, b)), ('seq', a, b)),
   (('fabs', ('sne', a, b)), ('sne', a, b)),
   (('slt', a, b), ('b2f', ('flt', a, b)), 'options->lower_scmp'),
   (('sge', a, b), ('b2f', ('fge', a, b)), 'options->lower_scmp'),
   (('seq', a, b), ('b2f', ('feq', a, b)), 'options->lower_scmp'),
   (('sne', a, b), ('b2f', ('fne', a, b)), 'options->lower_scmp'),
   (('seq', ('seq', a, b), 1.0), ('seq', a, b)),
   (('seq', ('sne', a, b), 1.0), ('sne', a, b)),
   (('seq', ('slt', a, b), 1.0), ('slt', a, b)),
   (('seq', ('sge', a, b), 1.0), ('sge', a, b)),
   (('sne', ('seq', a, b), 0.0), ('seq', a, b)),
   (('sne', ('sne', a, b), 0.0), ('sne', a, b)),
   (('sne', ('slt', a, b), 0.0), ('slt', a, b)),
   (('sne', ('sge', a, b), 0.0), ('sge', a, b)),
   (('seq', ('seq', a, b), 0.0), ('sne', a, b)),
   (('seq', ('sne', a, b), 0.0), ('seq', a, b)),
   (('seq', ('slt', a, b), 0.0), ('sge', a, b)),
   (('seq', ('sge', a, b), 0.0), ('slt', a, b)),
   (('sne', ('seq', a, b), 1.0), ('sne', a, b)),
   (('sne', ('sne', a, b), 1.0), ('seq', a, b)),
   (('sne', ('slt', a, b), 1.0), ('sge', a, b)),
   (('sne', ('sge', a, b), 1.0), ('slt', a, b)),
   (('fall_equal2', a, b), ('fmin', ('seq', 'a.x', 'b.x'), ('seq', 'a.y', 'b.y')), 'options->lower_vector_cmp'),
   (('fall_equal3', a, b), ('seq', ('fany_nequal3', a, b), 0.0), 'options->lower_vector_cmp'),
   (('fall_equal4', a, b), ('seq', ('fany_nequal4', a, b), 0.0), 'options->lower_vector_cmp'),
   (('fany_nequal2', a, b), ('fmax', ('sne', 'a.x', 'b.x'), ('sne', 'a.y', 'b.y')), 'options->lower_vector_cmp'),
   (('fany_nequal3', a, b), ('fsat', ('fdot3', ('sne', a, b), ('sne', a, b))), 'options->lower_vector_cmp'),
   (('fany_nequal4', a, b), ('fsat', ('fdot4', ('sne', a, b), ('sne', a, b))), 'options->lower_vector_cmp'),
   (('fne', ('fneg', a), a), ('fne', a, 0.0)),
   (('feq', ('fneg', a), a), ('feq', a, 0.0)),
   # Emulating booleans
   (('imul', ('b2i', 'a@1'), ('b2i', 'b@1')), ('b2i', ('iand', a, b))),
   (('fmul', ('b2f', 'a@1'), ('b2f', 'b@1')), ('b2f', ('iand', a, b))),
   (('fsat', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), ('b2f', ('ior', a, b))),
   (('iand', 'a@bool32', 1.0), ('b2f', a)),
   # True/False are ~0 and 0 in NIR. b2i of True is 1, and -1 is ~0 (True).
   (('ineg', ('b2i32', 'a@32')), a),
   (('flt', ('fneg', ('b2f', 'a@1')), 0), a), # Generated by TGSI KILL_IF.
   # Comparison with the same args. Note that these are not done for
   # the float versions because NaN always returns false on float
   # inequalities.
   (('ilt', a, a), False),
   (('ige', a, a), True),
   (('ieq', a, a), True),
   (('ine', a, a), False),
   (('ult', a, a), False),
   (('uge', a, a), True),
   # Logical and bit operations
   (('iand', a, a), a),
   (('iand', a, ~0), a),
   (('iand', a, 0), 0),
   (('ior', a, a), a),
   (('ior', a, 0), a),
   (('ior', a, True), True),
   (('ixor', a, a), 0),
   (('ixor', a, 0), a),
   (('inot', ('inot', a)), a),
   (('ior', ('iand', a, b), b), b),
   (('ior', ('ior', a, b), b), ('ior', a, b)),
   (('iand', ('ior', a, b), b), b),
   (('iand', ('iand', a, b), b), ('iand', a, b)),
   # DeMorgan's Laws
   (('iand', ('inot', a), ('inot', b)), ('inot', ('ior', a, b))),
   (('ior', ('inot', a), ('inot', b)), ('inot', ('iand', a, b))),
   # Shift optimizations
   (('ishl', 0, a), 0),
   (('ishl', a, 0), a),
   (('ishr', 0, a), 0),
   (('ishr', a, 0), a),
   (('ushr', 0, a), 0),
   (('ushr', a, 0), a),
   (('iand', 0xff, ('ushr@32', a, 24)), ('ushr', a, 24)),
   (('iand', 0xffff, ('ushr@32', a, 16)), ('ushr', a, 16)),
   (('ior', ('ishl@16', a, b), ('ushr@16', a, ('iadd', 16, ('ineg', b)))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ishl@16', a, b), ('ushr@16', a, ('isub', 16, b))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ishl@32', a, b), ('ushr@32', a, ('iadd', 32, ('ineg', b)))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ishl@32', a, b), ('ushr@32', a, ('isub', 32, b))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@16', a, b), ('ishl@16', a, ('iadd', 16, ('ineg', b)))), ('uror', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@16', a, b), ('ishl@16', a, ('isub', 16, b))), ('uror', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@32', a, b), ('ishl@32', a, ('iadd', 32, ('ineg', b)))), ('uror', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@32', a, b), ('ishl@32', a, ('isub', 32, b))), ('uror', a, b), '!options->lower_rotate'),
   (('urol@16', a, b), ('ior', ('ishl', a, b), ('ushr', a, ('isub', 16, b))), 'options->lower_rotate'),
   (('urol@32', a, b), ('ior', ('ishl', a, b), ('ushr', a, ('isub', 32, b))), 'options->lower_rotate'),
   (('uror@16', a, b), ('ior', ('ushr', a, b), ('ishl', a, ('isub', 16, b))), 'options->lower_rotate'),
   (('uror@32', a, b), ('ior', ('ushr', a, b), ('ishl', a, ('isub', 32, b))), 'options->lower_rotate'),
   # Exponential/logarithmic identities
   (('~fexp2', ('flog2', a)), a), # 2^lg2(a) = a
   (('~flog2', ('fexp2', a)), a), # lg2(2^a) = a
   (('fpow', a, b), ('fexp2', ('fmul', ('flog2', a), b)), 'options->lower_fpow'), # a^b = 2^(lg2(a)*b)
   (('~fexp2', ('fmul', ('flog2', a), b)), ('fpow', a, b), '!options->lower_fpow'), # 2^(lg2(a)*b) = a^b
   (('~fexp2', ('fadd', ('fmul', ('flog2', a), b), ('fmul', ('flog2', c), d))),
    ('~fmul', ('fpow', a, b), ('fpow', c, d)), '!options->lower_fpow'), # 2^(lg2(a)*b + lg2(c)*d) = a^b * c^d
   (('~fexp2', ('fmul', ('flog2', a), 2.0)), ('fmul', a, a)),
   (('~fexp2', ('fmul', ('flog2', a), 4.0)), ('fmul', ('fmul', a, a), ('fmul', a, a))),
   (('~fpow', a, 1.0), a),
   (('~fpow', a, 2.0), ('fmul', a, a)),
   (('~fpow', a, 4.0), ('fmul', ('fmul', a, a), ('fmul', a, a))),
   (('~fpow', 2.0, a), ('fexp2', a)),
   (('~fpow', ('fpow', a, 2.2), 0.454545), a),
   (('~fpow', ('fabs', ('fpow', a, 2.2)), 0.454545), ('fabs', a)),
   (('~fsqrt', ('fexp2', a)), ('fexp2', ('fmul', 0.5, a))),
   (('~frcp', ('fexp2', a)), ('fexp2', ('fneg', a))),
   (('~frsq', ('fexp2', a)), ('fexp2', ('fmul', -0.5, a))),
   (('~flog2', ('fsqrt', a)), ('fmul', 0.5, ('flog2', a))),
   (('~flog2', ('frcp', a)), ('fneg', ('flog2', a))),
   (('~flog2', ('frsq', a)), ('fmul', -0.5, ('flog2', a))),
   (('~flog2', ('fpow', a, b)), ('fmul', b, ('flog2', a))),
   (('~fmul', ('fexp2(is_used_once)', a), ('fexp2(is_used_once)', b)), ('fexp2', ('fadd', a, b))),
   (('bcsel', ('flt', a, 0.0), 0.0, ('fsqrt', a)), ('fsqrt', ('fmax', a, 0.0))),
   # Division and reciprocal
   (('~fdiv', 1.0, a), ('frcp', a)),
   (('fdiv', a, b), ('fmul', a, ('frcp', b)), 'options->lower_fdiv'),
   (('~frcp', ('frcp', a)), a),
   (('~frcp', ('fsqrt', a)), ('frsq', a)),
   (('fsqrt', a), ('frcp', ('frsq', a)), 'options->lower_fsqrt'),
   (('~frcp', ('frsq', a)), ('fsqrt', a), '!options->lower_fsqrt'),
   # Trig
   (('fsin', a), lowered_sincos(0.5), 'options->lower_sincos'),
   (('fcos', a), lowered_sincos(0.75), 'options->lower_sincos'),
   # Boolean simplifications
   (('i2b32(is_used_by_if)', a), ('ine32', a, 0)),
   (('i2b1(is_used_by_if)', a), ('ine', a, 0)),
   (('ieq', a, True), a),
   (('ine(is_not_used_by_if)', a, True), ('inot', a)),
   (('ine', a, False), a),
   (('ieq(is_not_used_by_if)', a, False), ('inot', 'a')),
   (('bcsel', a, True, False), a),
   (('bcsel', a, False, True), ('inot', a)),
   (('bcsel@32', a, 1.0, 0.0), ('b2f', a)),
   (('bcsel@32', a, 0.0, 1.0), ('b2f', ('inot', a))),
   (('bcsel@32', a, -1.0, -0.0), ('fneg', ('b2f', a))),
   (('bcsel@32', a, -0.0, -1.0), ('fneg', ('b2f', ('inot', a)))),
   (('bcsel', True, b, c), b),
   (('bcsel', False, b, c), c),
   (('bcsel', a, ('b2f(is_used_once)', 'b@32'), ('b2f', 'c@32')), ('b2f', ('bcsel', a, b, c))),

   (('bcsel', a, b, b), b),
   (('~fcsel', a, b, b), b),

   # D3D Boolean emulation
   (('bcsel', a, -1, 0), ('ineg', ('b2i', 'a@1'))),
   (('bcsel', a, 0, -1), ('ineg', ('b2i', ('inot', a)))),
   (('iand', ('ineg', ('b2i', 'a@1')), ('ineg', ('b2i', 'b@1'))),
    ('ineg', ('b2i', ('iand', a, b)))),
   (('ior', ('ineg', ('b2i','a@1')), ('ineg', ('b2i', 'b@1'))),
    ('ineg', ('b2i', ('ior', a, b)))),
   (('ieq', ('ineg', ('b2i', 'a@1')), 0), ('inot', a)),
   (('ieq', ('ineg', ('b2i', 'a@1')), -1), a),
   (('ine', ('ineg', ('b2i', 'a@1')), 0), a),
   (('ine', ('ineg', ('b2i', 'a@1')), -1), ('inot', a)),
   (('iand', ('ineg', ('b2i', a)), 1.0), ('b2f', a)),
   (('iand', ('ineg', ('b2i', a)), 1), ('b2i', a)),

   # SM5 32-bit shifts are defined to use the 5 least significant bits
   (('ishl', 'a@32', ('iand', 31, b)), ('ishl', a, b)),
   (('ishr', 'a@32', ('iand', 31, b)), ('ishr', a, b)),
   (('ushr', 'a@32', ('iand', 31, b)), ('ushr', a, b)),

   # Conversions
   (('i2b32', ('b2i', 'a@32')), a),
   (('f2i', ('ftrunc', a)), ('f2i', a)),
   (('f2u', ('ftrunc', a)), ('f2u', a)),
   (('i2b', ('ineg', a)), ('i2b', a)),
   (('i2b', ('iabs', a)), ('i2b', a)),
   (('inot', ('f2b1', a)), ('feq', a, 0.0)),

   # Ironically, mark these as imprecise because removing the conversions may
   # preserve more precision than doing the conversions (e.g.,
   # uint(float(0x81818181u)) == 0x81818200).
   (('~f2i32', ('i2f', 'a@32')), a),
   (('~f2i32', ('u2f', 'a@32')), a),
   (('~f2u32', ('i2f', 'a@32')), a),
   (('~f2u32', ('u2f', 'a@32')), a),

   (('ffloor', 'a(is_integral)'), a),
   (('fceil', 'a(is_integral)'), a),
   (('ftrunc', 'a(is_integral)'), a),
   # fract(x) = x - floor(x), so fract(NaN) = NaN
   (('~ffract', 'a(is_integral)'), 0.0),
   (('fabs', 'a(is_not_negative)'), a),
   (('iabs', 'a(is_not_negative)'), a),
   (('fsat', 'a(is_not_positive)'), 0.0),

   # Section 5.4.1 (Conversion and Scalar Constructors) of the GLSL 4.60 spec
   # says:
   #
   #    It is undefined to convert a negative floating-point value to an
   #    uint.
   #
   # Assuming that (uint)some_float behaves like (uint)(int)some_float allows
   # some optimizations in the i965 backend to proceed.
   (('ige', ('f2u', a), b), ('ige', ('f2i', a), b)),
   (('ige', b, ('f2u', a)), ('ige', b, ('f2i', a))),
   (('ilt', ('f2u', a), b), ('ilt', ('f2i', a), b)),
   (('ilt', b, ('f2u', a)), ('ilt', b, ('f2i', a))),

   (('~fmin', 'a(is_not_negative)', 1.0), ('fsat', a), '!options->lower_fsat'),

   # The result of the multiply must be in [-1, 0], so the result of the ffma
   # must be in [0, 1].
   (('flt', ('fadd', ('fmul', ('fsat', a), ('fneg', ('fsat', a))), 1.0), 0.0), False),
   (('flt', ('fadd', ('fneg', ('fmul', ('fsat', a), ('fsat', a))), 1.0), 0.0), False),
   (('fmax', ('fadd', ('fmul', ('fsat', a), ('fneg', ('fsat', a))), 1.0), 0.0), ('fadd', ('fmul', ('fsat', a), ('fneg', ('fsat', a))), 1.0)),
   (('fmax', ('fadd', ('fneg', ('fmul', ('fsat', a), ('fsat', a))), 1.0), 0.0), ('fadd', ('fneg', ('fmul', ('fsat', a), ('fsat', a))), 1.0)),

   (('fne', 'a(is_not_zero)', 0.0), True),
   (('feq', 'a(is_not_zero)', 0.0), False),

   # In this chart, + means value > 0 and - means value < 0.
   #
   # + >= + -> unknown   0 >= + -> false    - >= + -> false
   # + >= 0 -> true      0 >= 0 -> true     - >= 0 -> false
   # + >= - -> true      0 >= - -> true     - >= - -> unknown
   #
   # Using grouping conceptually similar to a Karnaugh map...
   #
   # (+ >= 0, + >= -, 0 >= 0, 0 >= -) == (is_not_negative >= is_not_positive) -> true
   # (0 >= +, - >= +) == (is_not_positive >= gt_zero) -> false
   # (- >= +, - >= 0) == (lt_zero >= is_not_negative) -> false
   #
   # The flt / ilt cases just invert the expected result.
   #
   # The results expecting true must be marked imprecise. The results
   # expecting false are fine because NaN compared >= or < anything is false.

   (('~fge', 'a(is_not_negative)', 'b(is_not_positive)'), True),
   (('fge', 'a(is_not_positive)', 'b(is_gt_zero)'), False),
   (('fge', 'a(is_lt_zero)', 'b(is_not_negative)'), False),

   (('flt', 'a(is_not_negative)', 'b(is_not_positive)'), False),
   (('~flt', 'a(is_not_positive)', 'b(is_gt_zero)'), True),
   (('~flt', 'a(is_lt_zero)', 'b(is_not_negative)'), True),

   (('ine', 'a(is_not_zero)', 0), True),
   (('ieq', 'a(is_not_zero)', 0), False),

   (('ige', 'a(is_not_negative)', 'b(is_not_positive)'), True),
   (('ige', 'a(is_not_positive)', 'b(is_gt_zero)'), False),
   (('ige', 'a(is_lt_zero)', 'b(is_not_negative)'), False),

   (('ilt', 'a(is_not_negative)', 'b(is_not_positive)'), False),
   (('ilt', 'a(is_not_positive)', 'b(is_gt_zero)'), True),
   (('ilt', 'a(is_lt_zero)', 'b(is_not_negative)'), True),

   (('ult', 0, 'a(is_gt_zero)'), True),

   # Packing and then unpacking does nothing
   (('unpack_64_2x32_split_x', ('pack_64_2x32_split', a, b)), a),
   (('unpack_64_2x32_split_y', ('pack_64_2x32_split', a, b)), b),
   (('pack_64_2x32_split', ('unpack_64_2x32_split_x', a),
                           ('unpack_64_2x32_split_y', a)), a),

   # Comparing two halves of an unpack separately. While this optimization
   # should be correct for non-constant values, it's less obvious that it's
   # useful in that case. For constant values, the pack will fold and we're
   # guaranteed to reduce the whole tree to one instruction.
   (('iand', ('ieq', ('unpack_32_2x16_split_x', a), '#b'),
             ('ieq', ('unpack_32_2x16_split_y', a), '#c')),
    ('ieq', a, ('pack_32_2x16_split', b, c))),

   # Byte extraction
   (('ushr', 'a@16', 8), ('extract_u8', a, 1), '!options->lower_extract_byte'),
   (('ushr', 'a@32', 24), ('extract_u8', a, 3), '!options->lower_extract_byte'),
   (('ushr', 'a@64', 56), ('extract_u8', a, 7), '!options->lower_extract_byte'),
   (('ishr', 'a@16', 8), ('extract_i8', a, 1), '!options->lower_extract_byte'),
   (('ishr', 'a@32', 24), ('extract_i8', a, 3), '!options->lower_extract_byte'),
   (('ishr', 'a@64', 56), ('extract_i8', a, 7), '!options->lower_extract_byte'),
   (('iand', 0xff, a), ('extract_u8', a, 0), '!options->lower_extract_byte'),

   # Useless masking before unpacking
   (('unpack_half_2x16_split_x', ('iand', a, 0xffff)), ('unpack_half_2x16_split_x', a)),
   (('unpack_32_2x16_split_x', ('iand', a, 0xffff)), ('unpack_32_2x16_split_x', a)),
   (('unpack_64_2x32_split_x', ('iand', a, 0xffffffff)), ('unpack_64_2x32_split_x', a)),
   (('unpack_half_2x16_split_y', ('iand', a, 0xffff0000)), ('unpack_half_2x16_split_y', a)),
   (('unpack_32_2x16_split_y', ('iand', a, 0xffff0000)), ('unpack_32_2x16_split_y', a)),
   (('unpack_64_2x32_split_y', ('iand', a, 0xffffffff00000000)), ('unpack_64_2x32_split_y', a)),
])

# After the ('extract_u8', a, 0) pattern, above, triggers, there will be
# patterns like those below.
for op in ('ushr', 'ishr'):
   optimizations.extend([(('extract_u8', (op, 'a@16', 8), 0), ('extract_u8', a, 1))])
   optimizations.extend([(('extract_u8', (op, 'a@32', 8 * i), 0), ('extract_u8', a, i)) for i in range(1, 4)])
   optimizations.extend([(('extract_u8', (op, 'a@64', 8 * i), 0), ('extract_u8', a, i)) for i in range(1, 8)])
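# (With op == 'ushr' and i == 2, for example, this adds the rule
# extract_u8(ushr(a@32, 16), 0) -> extract_u8(a, 2).)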
916
917 optimizations.extend([(('extract_u8', ('extract_u16', a, 1), 0), ('extract_u8', a, 2))])
918
919 # After the ('extract_[iu]8', a, 3) patterns, above, trigger, there will be
920 # patterns like those below.
921 for op in ('extract_u8', 'extract_i8'):
922 optimizations.extend([((op, ('ishl', 'a@16', 8), 1), (op, a, 0))])
923 optimizations.extend([((op, ('ishl', 'a@32', 24 - 8 * i), 3), (op, a, i)) for i in range(2, -1, -1)])
924 optimizations.extend([((op, ('ishl', 'a@64', 56 - 8 * i), 7), (op, a, i)) for i in range(6, -1, -1)])
925
926 optimizations.extend([
927 # Word extraction
928 (('ushr', ('ishl', 'a@32', 16), 16), ('extract_u16', a, 0), '!options->lower_extract_word'),
929 (('ushr', 'a@32', 16), ('extract_u16', a, 1), '!options->lower_extract_word'),
930 (('ishr', ('ishl', 'a@32', 16), 16), ('extract_i16', a, 0), '!options->lower_extract_word'),
931 (('ishr', 'a@32', 16), ('extract_i16', a, 1), '!options->lower_extract_word'),
932 (('iand', 0xffff, a), ('extract_u16', a, 0), '!options->lower_extract_word'),
933
934 # Subtracts
935 (('ussub_4x8', a, 0), a),
936 (('ussub_4x8', a, ~0), 0),
937 # Lower all Subtractions first - they can get recombined later
938 (('fsub', a, b), ('fadd', a, ('fneg', b))),
939 (('isub', a, b), ('iadd', a, ('ineg', b))),
940
941 # Propagate negation up multiplication chains
942 (('fmul(is_used_by_non_fsat)', ('fneg', a), b), ('fneg', ('fmul', a, b))),
943 (('imul', ('ineg', a), b), ('ineg', ('imul', a, b))),
944
945 # Propagate constants up multiplication chains
946 (('~fmul(is_used_once)', ('fmul(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('fmul', ('fmul', a, c), b)),
947 (('imul(is_used_once)', ('imul(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('imul', ('imul', a, c), b)),
948 (('~fadd(is_used_once)', ('fadd(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('fadd', ('fadd', a, c), b)),
949 (('iadd(is_used_once)', ('iadd(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('iadd', ('iadd', a, c), b)),
950
951 # Reassociate constants in add/mul chains so they can be folded together.
952 # For now, we mostly only handle cases where the constants are separated by
953 # a single non-constant. We could do better eventually.
954 (('~fmul', '#a', ('fmul', 'b(is_not_const)', '#c')), ('fmul', ('fmul', a, c), b)),
955 (('imul', '#a', ('imul', 'b(is_not_const)', '#c')), ('imul', ('imul', a, c), b)),
956 (('~fadd', '#a', ('fadd', 'b(is_not_const)', '#c')), ('fadd', ('fadd', a, c), b)),
957 (('~fadd', '#a', ('fneg', ('fadd', 'b(is_not_const)', '#c'))), ('fadd', ('fadd', a, ('fneg', c)), ('fneg', b))),
958 (('iadd', '#a', ('iadd', 'b(is_not_const)', '#c')), ('iadd', ('iadd', a, c), b)),
959
960 # Drop mul-div by the same value when there's no wrapping.
961 (('idiv', ('imul(no_signed_wrap)', a, b), b), a),
962
963 # By definition...
964 (('bcsel', ('ige', ('find_lsb', a), 0), ('find_lsb', a), -1), ('find_lsb', a)),
965 (('bcsel', ('ige', ('ifind_msb', a), 0), ('ifind_msb', a), -1), ('ifind_msb', a)),
966 (('bcsel', ('ige', ('ufind_msb', a), 0), ('ufind_msb', a), -1), ('ufind_msb', a)),
967
968 (('bcsel', ('ine', a, 0), ('find_lsb', a), -1), ('find_lsb', a)),
969 (('bcsel', ('ine', a, 0), ('ifind_msb', a), -1), ('ifind_msb', a)),
970 (('bcsel', ('ine', a, 0), ('ufind_msb', a), -1), ('ufind_msb', a)),
971
972 (('bcsel', ('ine', a, -1), ('ifind_msb', a), -1), ('ifind_msb', a)),
973
974 # Misc. lowering
975 (('fmod@16', a, b), ('fsub', a, ('fmul', b, ('ffloor', ('fdiv', a, b)))), 'options->lower_fmod'),
976 (('fmod@32', a, b), ('fsub', a, ('fmul', b, ('ffloor', ('fdiv', a, b)))), 'options->lower_fmod'),
977 (('frem', a, b), ('fsub', a, ('fmul', b, ('ftrunc', ('fdiv', a, b)))), 'options->lower_fmod'),
978 (('uadd_carry@32', a, b), ('b2i', ('ult', ('iadd', a, b), a)), 'options->lower_uadd_carry'),
979 (('usub_borrow@32', a, b), ('b2i', ('ult', a, b)), 'options->lower_usub_borrow'),
980
981 (('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
982 ('bcsel', ('ult', 31, 'bits'), 'insert',
983 ('bfi', ('bfm', 'bits', 'offset'), 'insert', 'base')),
984 'options->lower_bitfield_insert'),
985 (('ihadd', a, b), ('iadd', ('iand', a, b), ('ishr', ('ixor', a, b), 1)), 'options->lower_hadd'),
986 (('uhadd', a, b), ('iadd', ('iand', a, b), ('ushr', ('ixor', a, b), 1)), 'options->lower_hadd'),
987 (('irhadd', a, b), ('isub', ('ior', a, b), ('ishr', ('ixor', a, b), 1)), 'options->lower_hadd'),
988 (('urhadd', a, b), ('isub', ('ior', a, b), ('ushr', ('ixor', a, b), 1)), 'options->lower_hadd'),
989 (('uadd_sat', a, b), ('bcsel', ('ult', ('iadd', a, b), a), -1, ('iadd', a, b)), 'options->lower_add_sat'),
990 (('usub_sat', a, b), ('bcsel', ('ult', a, b), 0, ('isub', a, b)), 'options->lower_add_sat'),
991
992 # Alternative lowering that doesn't rely on bfi.
993 (('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
994 ('bcsel', ('ult', 31, 'bits'),
995 'insert',
996 (('ior',
997 ('iand', 'base', ('inot', ('ishl', ('isub', ('ishl', 1, 'bits'), 1), 'offset'))),
998 ('iand', ('ishl', 'insert', 'offset'), ('ishl', ('isub', ('ishl', 1, 'bits'), 1), 'offset'))))),
999 'options->lower_bitfield_insert_to_shifts'),
1000
1001 # Alternative lowering that uses bitfield_select.
1002 (('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
1003 ('bcsel', ('ult', 31, 'bits'), 'insert',
1004 ('bitfield_select', ('bfm', 'bits', 'offset'), ('ishl', 'insert', 'offset'), 'base')),
1005 'options->lower_bitfield_insert_to_bitfield_select'),
1006
1007 (('ibitfield_extract', 'value', 'offset', 'bits'),
1008 ('bcsel', ('ult', 31, 'bits'), 'value',
1009 ('ibfe', 'value', 'offset', 'bits')),
1010 'options->lower_bitfield_extract'),
1011
1012 (('ubitfield_extract', 'value', 'offset', 'bits'),
1013 ('bcsel', ('ult', 31, 'bits'), 'value',
1014 ('ubfe', 'value', 'offset', 'bits')),
1015 'options->lower_bitfield_extract'),
1016
1017 # Note that these opcodes are defined to only use the five least significant bits of 'offset' and 'bits'
1018 (('ubfe', 'value', 'offset', ('iand', 31, 'bits')), ('ubfe', 'value', 'offset', 'bits')),
1019 (('ubfe', 'value', ('iand', 31, 'offset'), 'bits'), ('ubfe', 'value', 'offset', 'bits')),
1020 (('ibfe', 'value', 'offset', ('iand', 31, 'bits')), ('ibfe', 'value', 'offset', 'bits')),
1021 (('ibfe', 'value', ('iand', 31, 'offset'), 'bits'), ('ibfe', 'value', 'offset', 'bits')),
1022 (('bfm', 'bits', ('iand', 31, 'offset')), ('bfm', 'bits', 'offset')),
1023 (('bfm', ('iand', 31, 'bits'), 'offset'), ('bfm', 'bits', 'offset')),
1024
1025 (('ibitfield_extract', 'value', 'offset', 'bits'),
1026 ('bcsel', ('ieq', 0, 'bits'),
1027 0,
1028 ('ishr',
1029 ('ishl', 'value', ('isub', ('isub', 32, 'bits'), 'offset')),
1030 ('isub', 32, 'bits'))),
1031 'options->lower_bitfield_extract_to_shifts'),
1032
1033 (('ubitfield_extract', 'value', 'offset', 'bits'),
1034 ('iand',
1035 ('ushr', 'value', 'offset'),
1036 ('bcsel', ('ieq', 'bits', 32),
1037 0xffffffff,
1038 ('isub', ('ishl', 1, 'bits'), 1))),
1039 'options->lower_bitfield_extract_to_shifts'),
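   # The bits == 32 bcsel above guards the mask computation: ('ishl', 1, 32)
   # is not defined to produce 2**32, so the all-ones mask is supplied
   # directly for the full-width case.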
1040
1041 (('ifind_msb', 'value'),
1042 ('ufind_msb', ('bcsel', ('ilt', 'value', 0), ('inot', 'value'), 'value')),
1043 'options->lower_ifind_msb'),
1044
1045 (('find_lsb', 'value'),
1046 ('ufind_msb', ('iand', 'value', ('ineg', 'value'))),
1047 'options->lower_find_lsb'),
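   # This works because a & -a isolates the lowest set bit, e.g.
   # 0b01100 & -0b01100 == 0b00100, so the MSB of the result is a's LSB.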
1048
1049 (('extract_i8', a, 'b@32'),
1050 ('ishr', ('ishl', a, ('imul', ('isub', 3, b), 8)), 24),
1051 'options->lower_extract_byte'),
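   # For example, extract_i8(0x00000080, 0) shifts the byte to the top,
   # 0x80000000, and the arithmetic shift right by 24 sign-extends it to
   # 0xffffff80, i.e. -128.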
1052
1053 (('extract_u8', a, 'b@32'),
1054 ('iand', ('ushr', a, ('imul', b, 8)), 0xff),
1055 'options->lower_extract_byte'),
1056
1057 (('extract_i16', a, 'b@32'),
1058 ('ishr', ('ishl', a, ('imul', ('isub', 1, b), 16)), 16),
1059 'options->lower_extract_word'),
1060
1061 (('extract_u16', a, 'b@32'),
1062 ('iand', ('ushr', a, ('imul', b, 16)), 0xffff),
1063 'options->lower_extract_word'),
1064
1065 (('pack_unorm_2x16', 'v'),
1066 ('pack_uvec2_to_uint',
1067 ('f2u32', ('fround_even', ('fmul', ('fsat', 'v'), 65535.0)))),
1068 'options->lower_pack_unorm_2x16'),
1069
1070 (('pack_unorm_4x8', 'v'),
1071 ('pack_uvec4_to_uint',
1072 ('f2u32', ('fround_even', ('fmul', ('fsat', 'v'), 255.0)))),
1073 'options->lower_pack_unorm_4x8'),
1074
1075 (('pack_snorm_2x16', 'v'),
1076 ('pack_uvec2_to_uint',
1077 ('f2i32', ('fround_even', ('fmul', ('fmin', 1.0, ('fmax', -1.0, 'v')), 32767.0)))),
1078 'options->lower_pack_snorm_2x16'),
1079
1080 (('pack_snorm_4x8', 'v'),
1081 ('pack_uvec4_to_uint',
1082 ('f2i32', ('fround_even', ('fmul', ('fmin', 1.0, ('fmax', -1.0, 'v')), 127.0)))),
1083 'options->lower_pack_snorm_4x8'),
1084
1085 (('unpack_unorm_2x16', 'v'),
1086 ('fdiv', ('u2f32', ('vec2', ('extract_u16', 'v', 0),
1087 ('extract_u16', 'v', 1))),
1088 65535.0),
1089 'options->lower_unpack_unorm_2x16'),
1090
1091 (('unpack_unorm_4x8', 'v'),
1092 ('fdiv', ('u2f32', ('vec4', ('extract_u8', 'v', 0),
1093 ('extract_u8', 'v', 1),
1094 ('extract_u8', 'v', 2),
1095 ('extract_u8', 'v', 3))),
1096 255.0),
1097 'options->lower_unpack_unorm_4x8'),
1098
1099 (('unpack_snorm_2x16', 'v'),
1100 ('fmin', 1.0, ('fmax', -1.0, ('fdiv', ('i2f', ('vec2', ('extract_i16', 'v', 0),
1101 ('extract_i16', 'v', 1))),
1102 32767.0))),
1103 'options->lower_unpack_snorm_2x16'),
1104
1105 (('unpack_snorm_4x8', 'v'),
1106 ('fmin', 1.0, ('fmax', -1.0, ('fdiv', ('i2f', ('vec4', ('extract_i8', 'v', 0),
1107 ('extract_i8', 'v', 1),
1108 ('extract_i8', 'v', 2),
1109 ('extract_i8', 'v', 3))),
1110 127.0))),
1111 'options->lower_unpack_snorm_4x8'),
1112
1113 (('isign', a), ('imin', ('imax', a, -1), 1), 'options->lower_isign'),
1114 (('fsign', a), ('fsub', ('b2f', ('flt', 0.0, a)), ('b2f', ('flt', a, 0.0))), 'options->lower_fsign'),
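   # The fsign lowering evaluates to b2f(0.0 < a) - b2f(a < 0.0): 1.0 - 0.0
   # for positive a, 0.0 - 1.0 for negative a, and 0.0 - 0.0 for zero.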
1115
1116 # Address/offset calculations:
1117    # Drivers supporting imul24 should use the nir_lower_amul() pass; this
1118    # rule converts everyone else to imul:
1119 (('amul', a, b), ('imul', a, b), '!options->has_imul24'),
1120
1121 (('imad24_ir3', a, b, 0), ('imul24', a, b)),
1122 (('imad24_ir3', a, 0, c), (c)),
1123 (('imad24_ir3', a, 1, c), ('iadd', a, c)),
1124
1125    # If the first two srcs are const, crack apart the imad so constant
1126    # folding can clean up the imul:
1127    # TODO: ffma should probably get a similar rule:
1128 (('imad24_ir3', '#a', '#b', c), ('iadd', ('imul', a, b), c)),
1129
1130 # These will turn 24b address/offset calc back into 32b shifts, but
1131 # it should be safe to get back some of the bits of precision that we
1132    # already decided were not necessary:
1133 (('imul24', a, '#b@32(is_pos_power_of_two)'), ('ishl', a, ('find_lsb', b)), '!options->lower_bitops'),
1134 (('imul24', a, '#b@32(is_neg_power_of_two)'), ('ineg', ('ishl', a, ('find_lsb', ('iabs', b)))), '!options->lower_bitops'),
1135 (('imul24', a, 0), (0)),
1136 ])
1137
1138 # bit_size dependent lowerings
1139 for bit_size in [8, 16, 32, 64]:
1140 # convenience constants
1141 intmax = (1 << (bit_size - 1)) - 1
1142 intmin = 1 << (bit_size - 1)
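   # Note that intmin is the unsigned bit pattern of the most negative value
   # (e.g. 0x80 for 8 bits); constants here are bit patterns, so this is
   # INT_MIN at the given bit size.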
1143
1144 optimizations += [
1145 (('iadd_sat@' + str(bit_size), a, b),
1146 ('bcsel', ('ige', b, 1), ('bcsel', ('ilt', ('iadd', a, b), a), intmax, ('iadd', a, b)),
1147 ('bcsel', ('ilt', a, ('iadd', a, b)), intmin, ('iadd', a, b))), 'options->lower_add_sat'),
1148 (('isub_sat@' + str(bit_size), a, b),
1149 ('bcsel', ('ilt', b, 0), ('bcsel', ('ilt', ('isub', a, b), a), intmax, ('isub', a, b)),
1150 ('bcsel', ('ilt', a, ('isub', a, b)), intmin, ('isub', a, b))), 'options->lower_add_sat'),
1151 ]
1152
1153 invert = OrderedDict([('feq', 'fne'), ('fne', 'feq'), ('fge', 'flt'), ('flt', 'fge')])
1154
1155 for left, right in itertools.combinations_with_replacement(invert.keys(), 2):
1156 optimizations.append((('inot', ('ior(is_used_once)', (left, a, b), (right, c, d))),
1157 ('iand', (invert[left], a, b), (invert[right], c, d))))
1158 optimizations.append((('inot', ('iand(is_used_once)', (left, a, b), (right, c, d))),
1159 ('ior', (invert[left], a, b), (invert[right], c, d))))
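# For example, the pairs above generate De Morgan rules such as
# inot(ior(feq(a, b), flt(c, d))) -> iand(fne(a, b), fge(c, d)).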
1160
1161 # Optimize x2bN(b2x(x)) -> x
1162 for size in type_sizes('bool'):
1163 aN = 'a@' + str(size)
1164 f2bN = 'f2b' + str(size)
1165 i2bN = 'i2b' + str(size)
1166 optimizations.append(((f2bN, ('b2f', aN)), a))
1167 optimizations.append(((i2bN, ('b2i', aN)), a))
1168
1169 # Optimize x2yN(b2x(x)) -> b2y(x)
1170 for x, y in itertools.product(['f', 'u', 'i'], ['f', 'u', 'i']):
1171 if x != 'f' and y != 'f' and x != y:
1172 continue
1173
1174 b2x = 'b2f' if x == 'f' else 'b2i'
1175 b2y = 'b2f' if y == 'f' else 'b2i'
1176 x2yN = '{}2{}'.format(x, y)
1177 optimizations.append(((x2yN, (b2x, a)), (b2y, a)))
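# For example, this rewrites ('i2f', ('b2i', a)) as ('b2f', a).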
1178
1179 # Optimize away x2xN(a@N)
1180 for t in ['int', 'uint', 'float']:
1181 for N in type_sizes(t):
1182 x2xN = '{0}2{0}{1}'.format(t[0], N)
1183 aN = 'a@{0}'.format(N)
1184 optimizations.append(((x2xN, aN), a))
1185
1186 # Optimize x2xN(y2yM(a@P)) -> y2yN(a) for integers
1187 # In particular, we can optimize away everything except up-cast of down-cast and
1188 # up-casts where the type differs from the other cast.
1189 for N, M in itertools.product(type_sizes('uint'), type_sizes('uint')):
1190 if N < M:
1191 # The outer cast is a down-cast. It doesn't matter what the size of the
1192       # argument of the inner cast is because we'll never be in the up-cast
1193       # of down-cast case. Regardless of types, we'll always end up with y2yN
1194 # in the end.
1195 for x, y in itertools.product(['i', 'u'], ['i', 'u']):
1196 x2xN = '{0}2{0}{1}'.format(x, N)
1197 y2yM = '{0}2{0}{1}'.format(y, M)
1198 y2yN = '{0}2{0}{1}'.format(y, N)
1199 optimizations.append(((x2xN, (y2yM, a)), (y2yN, a)))
1200 elif N > M:
1201 # If the outer cast is an up-cast, we have to be more careful about the
1202 # size of the argument of the inner cast and with types. In this case,
1203       # the type is always the type of the up-cast, which is given by the
1204       # outer cast.
1205 for P in type_sizes('uint'):
1206 # We can't optimize away up-cast of down-cast.
1207 if M < P:
1208 continue
1209
1210          # Because we're doing an up-cast of an up-cast, the types always have
1211          # to match between the two casts.
1212 for x in ['i', 'u']:
1213 x2xN = '{0}2{0}{1}'.format(x, N)
1214 x2xM = '{0}2{0}{1}'.format(x, M)
1215 aP = 'a@{0}'.format(P)
1216 optimizations.append(((x2xN, (x2xM, aP)), (x2xN, a)))
1217 else:
1218 # The N == M case is handled by other optimizations
1219 pass
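# For example, the loop above generates ('u2u16', ('u2u32', a)) -> ('u2u16', a)
# for the down-cast case and ('i2i64', ('i2i32', 'a@16')) -> ('i2i64', a) for
# up-cast of up-cast.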
1220
1221 # Optimize comparisons with up-casts
1222 for t in ['int', 'uint', 'float']:
1223 for N, M in itertools.product(type_sizes(t), repeat=2):
1224 if N == 1 or N >= M:
1225 continue
1226
1227 x2xM = '{0}2{0}{1}'.format(t[0], M)
1228 x2xN = '{0}2{0}{1}'.format(t[0], N)
1229 aN = 'a@' + str(N)
1230 bN = 'b@' + str(N)
1231 xeq = 'feq' if t == 'float' else 'ieq'
1232 xne = 'fne' if t == 'float' else 'ine'
1233 xge = '{0}ge'.format(t[0])
1234 xlt = '{0}lt'.format(t[0])
1235
1236 # Up-casts are lossless so for correctly signed comparisons of
1237 # up-casted values we can do the comparison at the largest of the two
1238 # original sizes and drop one or both of the casts. (We have
1239 # optimizations to drop the no-op casts which this may generate.)
1240 for P in type_sizes(t):
1241 if P == 1 or P > N:
1242 continue
1243
1244 bP = 'b@' + str(P)
1245 optimizations += [
1246 ((xeq, (x2xM, aN), (x2xM, bP)), (xeq, a, (x2xN, b))),
1247 ((xne, (x2xM, aN), (x2xM, bP)), (xne, a, (x2xN, b))),
1248 ((xge, (x2xM, aN), (x2xM, bP)), (xge, a, (x2xN, b))),
1249 ((xlt, (x2xM, aN), (x2xM, bP)), (xlt, a, (x2xN, b))),
1250 ((xge, (x2xM, bP), (x2xM, aN)), (xge, (x2xN, b), a)),
1251 ((xlt, (x2xM, bP), (x2xM, aN)), (xlt, (x2xN, b), a)),
1252 ]
1253
1254 # The next bit doesn't work on floats because the range checks would
1255 # get way too complicated.
1256 if t in ['int', 'uint']:
1257 if t == 'int':
1258 xN_min = -(1 << (N - 1))
1259 xN_max = (1 << (N - 1)) - 1
1260 elif t == 'uint':
1261 xN_min = 0
1262 xN_max = (1 << N) - 1
1263 else:
1264 assert False
1265
1266 # If we're up-casting and comparing to a constant, we can unfold
1267 # the comparison into a comparison with the shrunk down constant
1268 # and a check that the constant fits in the smaller bit size.
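         # For example, ('ieq', ('u2u32', 'a@8'), 500) checks both that
         # ('ieq', a, ('u2u8', 500)) and that u2u8(500) round-trips back to
         # 500; since u2u8(500) == 244 the second check is constant false and
         # the whole comparison folds to false.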
1269 optimizations += [
1270 ((xeq, (x2xM, aN), '#b'),
1271 ('iand', (xeq, a, (x2xN, b)), (xeq, (x2xM, (x2xN, b)), b))),
1272 ((xne, (x2xM, aN), '#b'),
1273 ('ior', (xne, a, (x2xN, b)), (xne, (x2xM, (x2xN, b)), b))),
1274 ((xlt, (x2xM, aN), '#b'),
1275 ('iand', (xlt, xN_min, b),
1276 ('ior', (xlt, xN_max, b), (xlt, a, (x2xN, b))))),
1277 ((xlt, '#a', (x2xM, bN)),
1278 ('iand', (xlt, a, xN_max),
1279 ('ior', (xlt, a, xN_min), (xlt, (x2xN, a), b)))),
1280 ((xge, (x2xM, aN), '#b'),
1281 ('iand', (xge, xN_max, b),
1282 ('ior', (xge, xN_min, b), (xge, a, (x2xN, b))))),
1283 ((xge, '#a', (x2xM, bN)),
1284 ('iand', (xge, a, xN_min),
1285 ('ior', (xge, a, xN_max), (xge, (x2xN, a), b)))),
1286 ]
1287
1288 def fexp2i(exp, bits):
1289 # We assume that exp is already in the right range.
1290 if bits == 16:
1291 return ('i2i16', ('ishl', ('iadd', exp, 15), 10))
1292 elif bits == 32:
1293 return ('ishl', ('iadd', exp, 127), 23)
1294 elif bits == 64:
1295 return ('pack_64_2x32_split', 0, ('ishl', ('iadd', exp, 1023), 20))
1296 else:
1297 assert False
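# As a sanity check, fexp2i(3, 32) builds (3 + 127) << 23 == 0x41000000, the
# IEEE-754 bit pattern of 2.0**3 == 8.0.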
1298
1299 def ldexp(f, exp, bits):
1300 # First, we clamp exp to a reasonable range. The maximum possible range
1301 # for a normal exponent is [-126, 127] and, throwing in denormals, you get
1302 # a maximum range of [-149, 127]. This means that we can potentially have
1303 # a swing of +-276. If you start with FLT_MAX, you actually have to do
1304 # ldexp(FLT_MAX, -278) to get it to flush all the way to zero. The GLSL
1305 # spec, on the other hand, only requires that we handle an exponent value
1306 # in the range [-126, 128]. This implementation is *mostly* correct; it
1307 # handles a range on exp of [-252, 254] which allows you to create any
1308 # value (including denorms if the hardware supports it) and to adjust the
1309 # exponent of any normal value to anything you want.
1310 if bits == 16:
1311 exp = ('imin', ('imax', exp, -28), 30)
1312 elif bits == 32:
1313 exp = ('imin', ('imax', exp, -252), 254)
1314 elif bits == 64:
1315 exp = ('imin', ('imax', exp, -2044), 2046)
1316 else:
1317 assert False
1318
1319 # Now we compute two powers of 2, one for exp/2 and one for exp-exp/2.
1320 # (We use ishr which isn't the same for -1, but the -1 case still works
1321 # since we use exp-exp/2 as the second exponent.) While the spec
1322 # technically defines ldexp as f * 2.0^exp, simply multiplying once doesn't
1323 # work with denormals and doesn't allow for the full swing in exponents
1324 # that you can get with normalized values. Instead, we create two powers
1325 # of two and multiply by them each in turn. That way the effective range
1326 # of our exponent is doubled.
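   # For example, with exp == 254 both factors become fexp2i(127, 32), i.e.
   # 2.0**127, so the two multiplies compute f * 2**254 even though 2**254
   # itself is not representable.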
1327 pow2_1 = fexp2i(('ishr', exp, 1), bits)
1328 pow2_2 = fexp2i(('isub', exp, ('ishr', exp, 1)), bits)
1329 return ('fmul', ('fmul', f, pow2_1), pow2_2)
1330
1331 optimizations += [
1332 (('ldexp@16', 'x', 'exp'), ldexp('x', 'exp', 16), 'options->lower_ldexp'),
1333 (('ldexp@32', 'x', 'exp'), ldexp('x', 'exp', 32), 'options->lower_ldexp'),
1334 (('ldexp@64', 'x', 'exp'), ldexp('x', 'exp', 64), 'options->lower_ldexp'),
1335 ]
1336
1337 # Unreal Engine 4 demo applications open-code bitfieldReverse()
1338 def bitfield_reverse(u):
1339 step1 = ('ior', ('ishl', u, 16), ('ushr', u, 16))
1340 step2 = ('ior', ('ishl', ('iand', step1, 0x00ff00ff), 8), ('ushr', ('iand', step1, 0xff00ff00), 8))
1341 step3 = ('ior', ('ishl', ('iand', step2, 0x0f0f0f0f), 4), ('ushr', ('iand', step2, 0xf0f0f0f0), 4))
1342 step4 = ('ior', ('ishl', ('iand', step3, 0x33333333), 2), ('ushr', ('iand', step3, 0xcccccccc), 2))
1343 step5 = ('ior(many-comm-expr)', ('ishl', ('iand', step4, 0x55555555), 1), ('ushr', ('iand', step4, 0xaaaaaaaa), 1))
1344
1345 return step5
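# Each step swaps progressively smaller fields: 16-bit halves, bytes, nibbles,
# bit-pairs, then single bits; e.g. u == 0x00000001 ends up as 0x80000000.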
1346
1347 optimizations += [(bitfield_reverse('x@32'), ('bitfield_reverse', 'x'), '!options->lower_bitfield_reverse')]
1348
1349 # For any float comparison operation, "cmp", if you have "a == a && a cmp b"
1350 # then the "a == a" is redundant because it's equivalent to "a is not NaN"
1351 # and, if a is a NaN, then the second comparison will fail anyway.
1352 for op in ['flt', 'fge', 'feq']:
1353 optimizations += [
1354 (('iand', ('feq', a, a), (op, a, b)), (op, a, b)),
1355 (('iand', ('feq', a, a), (op, b, a)), (op, b, a)),
1356 ]
1357
1358 # Add optimizations to handle the case where the result of a ternary is
1359 # compared to a constant. This way we can take things like
1360 #
1361 # (a ? 0 : 1) > 0
1362 #
1363 # and turn it into
1364 #
1365 # a ? (0 > 0) : (1 > 0)
1366 #
1367 # which constant folding will eat for lunch. The resulting ternary will
1368 # further get cleaned up by the boolean reductions above and we will be
1369 # left with just the original variable "a".
1370 for op in ['flt', 'fge', 'feq', 'fne',
1371 'ilt', 'ige', 'ieq', 'ine', 'ult', 'uge']:
1372 optimizations += [
1373 ((op, ('bcsel', 'a', '#b', '#c'), '#d'),
1374 ('bcsel', 'a', (op, 'b', 'd'), (op, 'c', 'd'))),
1375 ((op, '#d', ('bcsel', a, '#b', '#c')),
1376 ('bcsel', 'a', (op, 'd', 'b'), (op, 'd', 'c'))),
1377 ]
1378
1379
1380 # For example, this converts things like
1381 #
1382 # 1 + mix(0, a - 1, condition)
1383 #
1384 # into
1385 #
1386 # mix(1, (a-1)+1, condition)
1387 #
1388 # Other optimizations will rearrange the constants.
1389 for op in ['fadd', 'fmul', 'iadd', 'imul']:
1390 optimizations += [
1391 ((op, ('bcsel(is_used_once)', a, '#b', c), '#d'), ('bcsel', a, (op, b, d), (op, c, d)))
1392 ]
1393
1394 # For derivatives in compute shaders, GLSL_NV_compute_shader_derivatives
1395 # states:
1396 #
1397 # If neither layout qualifier is specified, derivatives in compute shaders
1398 # return zero, which is consistent with the handling of built-in texture
1399 # functions like texture() in GLSL 4.50 compute shaders.
1400 for op in ['fddx', 'fddx_fine', 'fddx_coarse',
1401 'fddy', 'fddy_fine', 'fddy_coarse']:
1402 optimizations += [
1403 ((op, 'a'), 0.0, 'info->stage == MESA_SHADER_COMPUTE && info->cs.derivative_group == DERIVATIVE_GROUP_NONE')
1404 ]
1405
1406 # Some optimizations for ir3-specific instructions.
1407 optimizations += [
1408 # 'al * bl': If either 'al' or 'bl' is zero, return zero.
1409 (('umul_low', '#a(is_lower_half_zero)', 'b'), (0)),
1410 # '(ah * bl) << 16 + c': If either 'ah' or 'bl' is zero, return 'c'.
1411 (('imadsh_mix16', '#a@32(is_lower_half_zero)', 'b@32', 'c@32'), ('c')),
1412 (('imadsh_mix16', 'a@32', '#b@32(is_upper_half_zero)', 'c@32'), ('c')),
1413 ]
1414
1415 # This section contains "late" optimizations that should be run before
1416 # creating ffmas and calling regular optimizations for the final time.
1417 # Optimizations should go here if they help code generation and conflict
1418 # with the regular optimizations.
1419 before_ffma_optimizations = [
1420 # Propagate constants down multiplication chains
1421 (('~fmul(is_used_once)', ('fmul(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('fmul', ('fmul', a, c), b)),
1422 (('imul(is_used_once)', ('imul(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('imul', ('imul', a, c), b)),
1423 (('~fadd(is_used_once)', ('fadd(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('fadd', ('fadd', a, c), b)),
1424 (('iadd(is_used_once)', ('iadd(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('iadd', ('iadd', a, c), b)),
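   # e.g. ('imul', ('imul', a, 4), b) becomes ('imul', ('imul', a, b), 4), so
   # repeated application floats all constants to the outermost multiply,
   # where they can be folded together.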
1425
1426 (('~fadd', ('fmul', a, b), ('fmul', a, c)), ('fmul', a, ('fadd', b, c))),
1427 (('iadd', ('imul', a, b), ('imul', a, c)), ('imul', a, ('iadd', b, c))),
1428 (('~fadd', ('fneg', a), a), 0.0),
1429 (('iadd', ('ineg', a), a), 0),
1430 (('iadd', ('ineg', a), ('iadd', a, b)), b),
1431 (('iadd', a, ('iadd', ('ineg', a), b)), b),
1432 (('~fadd', ('fneg', a), ('fadd', a, b)), b),
1433 (('~fadd', a, ('fadd', ('fneg', a), b)), b),
1434
1435 (('~flrp@32', ('fadd(is_used_once)', a, -1.0), ('fadd(is_used_once)', a, 1.0), d), ('fadd', ('flrp', -1.0, 1.0, d), a)),
1436 (('~flrp@32', ('fadd(is_used_once)', a, 1.0), ('fadd(is_used_once)', a, -1.0), d), ('fadd', ('flrp', 1.0, -1.0, d), a)),
1437 (('~flrp@32', ('fadd(is_used_once)', a, '#b'), ('fadd(is_used_once)', a, '#c'), d), ('fadd', ('fmul', d, ('fadd', c, ('fneg', b))), ('fadd', a, b))),
1438 ]
1439
1440 # This section contains "late" optimizations that should be run after the
1441 # regular optimizations have finished. Optimizations should go here if
1442 # they help code generation but do not necessarily produce code that is
1443 # more easily optimizable.
1444 late_optimizations = [
1445 # Most of these optimizations aren't quite safe when you get infinity or
1446    # NaN involved, but the first one should be fine.
1447 (('flt', ('fadd', a, b), 0.0), ('flt', a, ('fneg', b))),
1448 (('flt', ('fneg', ('fadd', a, b)), 0.0), ('flt', ('fneg', a), b)),
1449 (('~fge', ('fadd', a, b), 0.0), ('fge', a, ('fneg', b))),
1450 (('~fge', ('fneg', ('fadd', a, b)), 0.0), ('fge', ('fneg', a), b)),
1451 (('~feq', ('fadd', a, b), 0.0), ('feq', a, ('fneg', b))),
1452 (('~fne', ('fadd', a, b), 0.0), ('fne', a, ('fneg', b))),
1453
1454 # nir_lower_to_source_mods will collapse this, but its existence during the
1455 # optimization loop can prevent other optimizations.
1456 (('fneg', ('fneg', a)), a),
1457
1458 # Subtractions get lowered during optimization, so we need to recombine them
1459 (('fadd', 'a', ('fneg', 'b')), ('fsub', 'a', 'b'), '!options->lower_sub'),
1460 (('iadd', 'a', ('ineg', 'b')), ('isub', 'a', 'b'), '!options->lower_sub'),
1461 (('fneg', a), ('fsub', 0.0, a), 'options->lower_negate'),
1462 (('ineg', a), ('isub', 0, a), 'options->lower_negate'),
1463
1464 # These are duplicated from the main optimizations table. The late
1465 # patterns that rearrange expressions like x - .5 < 0 to x < .5 can create
1466 # new patterns like these. The patterns that compare with zero are removed
1467    # because they are unlikely to be created by anything in
1468 # late_optimizations.
1469 (('flt', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('flt', a, b)),
1470 (('flt', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('flt', b, a)),
1471 (('fge', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fge', a, b)),
1472 (('fge', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('fge', b, a)),
1473 (('feq', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('feq', a, b)),
1474 (('fne', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fne', a, b)),
1475
1476 (('fge', ('fsat(is_used_once)', a), 1.0), ('fge', a, 1.0)),
1477 (('flt', ('fsat(is_used_once)', a), 1.0), ('flt', a, 1.0)),
1478
1479 (('~fge', ('fmin(is_used_once)', ('fadd(is_used_once)', a, b), ('fadd', c, d)), 0.0), ('iand', ('fge', a, ('fneg', b)), ('fge', c, ('fneg', d)))),
1480
1481 (('flt', ('fneg', a), ('fneg', b)), ('flt', b, a)),
1482 (('fge', ('fneg', a), ('fneg', b)), ('fge', b, a)),
1483 (('feq', ('fneg', a), ('fneg', b)), ('feq', b, a)),
1484 (('fne', ('fneg', a), ('fneg', b)), ('fne', b, a)),
1485 (('flt', ('fneg', a), -1.0), ('flt', 1.0, a)),
1486 (('flt', -1.0, ('fneg', a)), ('flt', a, 1.0)),
1487 (('fge', ('fneg', a), -1.0), ('fge', 1.0, a)),
1488 (('fge', -1.0, ('fneg', a)), ('fge', a, 1.0)),
1489 (('fne', ('fneg', a), -1.0), ('fne', 1.0, a)),
1490 (('feq', -1.0, ('fneg', a)), ('feq', a, 1.0)),
1491
1492 (('ior', a, a), a),
1493 (('iand', a, a), a),
1494
1495 (('~fadd', ('fneg(is_used_once)', ('fsat(is_used_once)', 'a(is_not_fmul)')), 1.0), ('fsat', ('fadd', 1.0, ('fneg', a)))),
1496
1497 (('fdot2', a, b), ('fdot_replicated2', a, b), 'options->fdot_replicates'),
1498 (('fdot3', a, b), ('fdot_replicated3', a, b), 'options->fdot_replicates'),
1499 (('fdot4', a, b), ('fdot_replicated4', a, b), 'options->fdot_replicates'),
1500 (('fdph', a, b), ('fdph_replicated', a, b), 'options->fdot_replicates'),
1501
1502 (('~flrp@32', ('fadd(is_used_once)', a, b), ('fadd(is_used_once)', a, c), d), ('fadd', ('flrp', b, c, d), a)),
1503 (('~flrp@64', ('fadd(is_used_once)', a, b), ('fadd(is_used_once)', a, c), d), ('fadd', ('flrp', b, c, d), a)),
1504
1505 (('~fadd@32', 1.0, ('fmul(is_used_once)', c , ('fadd', b, -1.0 ))), ('fadd', ('fadd', 1.0, ('fneg', c)), ('fmul', b, c)), 'options->lower_flrp32'),
1506 (('~fadd@64', 1.0, ('fmul(is_used_once)', c , ('fadd', b, -1.0 ))), ('fadd', ('fadd', 1.0, ('fneg', c)), ('fmul', b, c)), 'options->lower_flrp64'),
1507
1508 # A similar operation could apply to any ffma(#a, b, #(-a/2)), but this
1509 # particular operation is common for expanding values stored in a texture
1510 # from [0,1] to [-1,1].
1511 (('~ffma@32', a, 2.0, -1.0), ('flrp', -1.0, 1.0, a ), '!options->lower_flrp32'),
1512 (('~ffma@32', a, -2.0, -1.0), ('flrp', -1.0, 1.0, ('fneg', a)), '!options->lower_flrp32'),
1513 (('~ffma@32', a, -2.0, 1.0), ('flrp', 1.0, -1.0, a ), '!options->lower_flrp32'),
1514 (('~ffma@32', a, 2.0, 1.0), ('flrp', 1.0, -1.0, ('fneg', a)), '!options->lower_flrp32'),
1515 (('~fadd@32', ('fmul(is_used_once)', 2.0, a), -1.0), ('flrp', -1.0, 1.0, a ), '!options->lower_flrp32'),
1516 (('~fadd@32', ('fmul(is_used_once)', -2.0, a), -1.0), ('flrp', -1.0, 1.0, ('fneg', a)), '!options->lower_flrp32'),
1517 (('~fadd@32', ('fmul(is_used_once)', -2.0, a), 1.0), ('flrp', 1.0, -1.0, a ), '!options->lower_flrp32'),
1518 (('~fadd@32', ('fmul(is_used_once)', 2.0, a), 1.0), ('flrp', 1.0, -1.0, ('fneg', a)), '!options->lower_flrp32'),
1519
1520 # flrp(a, b, a)
1521 # a*(1-a) + b*a
1522 # a + -a*a + a*b (1)
1523 # a + a*(b - a)
1524 # Option 1: ffma(a, (b-a), a)
1525 #
1526 # Alternately, after (1):
1527 # a*(1+b) + -a*a
1528 # a*((1+b) + -a)
1529 #
1530 # Let b=1
1531 #
1532 # Option 2: ffma(a, 2, -(a*a))
1533 # Option 3: ffma(a, 2, (-a)*a)
1534    # Option 4: ffma(a, -a, (2*a))
1535 # Option 5: a * (2 - a)
1536 #
1537 # There are a lot of other possible combinations.
1538 (('~ffma@32', ('fadd', b, ('fneg', a)), a, a), ('flrp', a, b, a), '!options->lower_flrp32'),
1539 (('~ffma@32', a, 2.0, ('fneg', ('fmul', a, a))), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
1540 (('~ffma@32', a, 2.0, ('fmul', ('fneg', a), a)), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
1541 (('~ffma@32', a, ('fneg', a), ('fmul', 2.0, a)), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
1542 (('~fmul@32', a, ('fadd', 2.0, ('fneg', a))), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
1543
1544 # we do these late so that we don't get in the way of creating ffmas
1545 (('fmin', ('fadd(is_used_once)', '#c', a), ('fadd(is_used_once)', '#c', b)), ('fadd', c, ('fmin', a, b))),
1546 (('fmax', ('fadd(is_used_once)', '#c', a), ('fadd(is_used_once)', '#c', b)), ('fadd', c, ('fmax', a, b))),
1547
1548 (('bcsel', a, 0, ('b2f32', ('inot', 'b@bool'))), ('b2f32', ('inot', ('ior', a, b)))),
1549
1550 # Things that look like DPH in the source shader may get expanded to
1551 # something that looks like dot(v1.xyz, v2.xyz) + v1.w by the time it gets
1552 # to NIR. After FFMA is generated, this can look like:
1553 #
1554 # fadd(ffma(v1.z, v2.z, ffma(v1.y, v2.y, fmul(v1.x, v2.x))), v1.w)
1555 #
1556 # Reassociate the last addition into the first multiplication.
1557 #
1558 # Some shaders do not use 'invariant' in vertex and (possibly) geometry
1559 # shader stages on some outputs that are intended to be invariant. For
1560 # various reasons, this optimization may not be fully applied in all
1561 # shaders used for different rendering passes of the same geometry. This
1562 # can result in Z-fighting artifacts (at best). For now, disable this
1563 # optimization in these stages. See bugzilla #111490. In tessellation
1564 # stages applications seem to use 'precise' when necessary, so allow the
1565 # optimization in those stages.
1566 (('~fadd', ('ffma(is_used_once)', a, b, ('ffma', c, d, ('fmul', 'e(is_not_const_and_not_fsign)', 'f(is_not_const_and_not_fsign)'))), 'g(is_not_const)'),
1567 ('ffma', a, b, ('ffma', c, d, ('ffma', e, 'f', 'g'))), '(info->stage != MESA_SHADER_VERTEX && info->stage != MESA_SHADER_GEOMETRY) && !options->intel_vec4'),
1568 (('~fadd', ('ffma(is_used_once)', a, b, ('fmul', 'c(is_not_const_and_not_fsign)', 'd(is_not_const_and_not_fsign)') ), 'e(is_not_const)'),
1569 ('ffma', a, b, ('ffma', c, d, e)), '(info->stage != MESA_SHADER_VERTEX && info->stage != MESA_SHADER_GEOMETRY) && !options->intel_vec4'),
1570 ]
1571
1572 print(nir_algebraic.AlgebraicPass("nir_opt_algebraic", optimizations).render())
1573 print(nir_algebraic.AlgebraicPass("nir_opt_algebraic_before_ffma",
1574 before_ffma_optimizations).render())
1575 print(nir_algebraic.AlgebraicPass("nir_opt_algebraic_late",
1576 late_optimizations).render())