nir/algebraic: i2f(f2i()) -> trunc()
src/compiler/nir/nir_opt_algebraic.py
#
# Copyright (C) 2014 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
#    Jason Ekstrand (jason@jlekstrand.net)

from __future__ import print_function

from collections import OrderedDict
import nir_algebraic
from nir_opcodes import type_sizes
import itertools
import struct
from math import pi

# Convenience variables
a = 'a'
b = 'b'
c = 'c'
d = 'd'
e = 'e'

# Written in the form (<search>, <replace>) where <search> is an expression
# and <replace> is either an expression or a value. An expression is
# defined as a tuple of the form ([~]<op>, <src0>, <src1>, <src2>, <src3>)
# where each source is either an expression or a value. A value can be
# either a numeric constant or a string representing a variable name.
#
# If the opcode in a search expression is prefixed by a '~' character, this
# indicates that the operation is inexact. Such operations will only get
# applied to SSA values that do not have the exact bit set. This should be
# used by any optimizations that are not bit-for-bit exact. It should not,
# however, be used for backend-requested lowering operations as those need to
# happen regardless of precision.
#
# Variable names are specified as "[#]name[@type][(cond)][.swiz]" where:
# "#" indicates that the given variable will only match constants,
# type indicates that the given variable will only match values from ALU
#    instructions with the given output type,
# (cond) specifies an additional condition function (see nir_search_helpers.h),
# swiz is a swizzle applied to the variable (only in the <replace> expression)
#
# For constants, you have to be careful to make sure that it is the right
# type because python is unaware of the source and destination types of the
# opcodes.
#
# All expression types can have a bit-size specified. For opcodes, this
# looks like "op@32", for variables it is "a@32" or "a@uint32" to specify a
# type and size. In the search half of the expression this indicates that it
# should only match that particular bit-size. In the replace half of the
# expression this indicates that the constructed value should have that
# bit-size.
#
# If the opcode in a replacement expression is prefixed by a '!' character,
# this indicates that the new expression will be marked exact.
#
# A special condition "many-comm-expr" can be used with expressions to note
# that the expression and its subexpressions have more commutative expressions
# than nir_replace_instr can handle. If this special condition is needed with
# another condition, the two can be separated by a comma (e.g.,
# "(many-comm-expr,is_used_once)").

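# For example, the rule
#
#    (('~fadd', ('fmul', a, b), c), ('ffma', a, b, c), 'options->fuse_ffma')
#
# (which appears below) matches an inexact fadd whose first source is an
# fmul, and rewrites the whole tree to a single ffma, but only when the
# backend sets options->fuse_ffma.
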
# based on https://web.archive.org/web/20180105155939/http://forum.devmaster.net/t/fast-and-accurate-sine-cosine/9648
def lowered_sincos(c):
   x = ('fsub', ('fmul', 2.0, ('ffract', ('fadd', ('fmul', 0.5 / pi, a), c))), 1.0)
   x = ('fmul', ('fsub', x, ('fmul', x, ('fabs', x))), 4.0)
   return ('ffma', ('ffma', x, ('fabs', x), ('fneg', x)), 0.225, x)
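# lowered_sincos(c) returns an expression tree, not a number: 'c' is a phase
# offset in turns (fractions of a full period). The fsin lowering below uses
# lowered_sincos(0.5) and the fcos lowering uses lowered_sincos(0.75), i.e.
# cos(x) = sin(x + pi/2) is expressed as an extra quarter turn.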

def intBitsToFloat(i):
   return struct.unpack('!f', struct.pack('!I', i))[0]
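# e.g. intBitsToFloat(0x3f800000) == 1.0. This reinterprets a 32-bit integer
# bit pattern as an IEEE-754 single, which is useful when reasoning about
# rules below that operate on floats at the bit level.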

optimizations = [

   (('imul', a, '#b@32(is_pos_power_of_two)'), ('ishl', a, ('find_lsb', b)), '!options->lower_bitops'),
   (('imul', a, '#b@32(is_neg_power_of_two)'), ('ineg', ('ishl', a, ('find_lsb', ('iabs', b)))), '!options->lower_bitops'),
   (('ishl', a, '#b@32'), ('imul', a, ('ishl', 1, b)), 'options->lower_bitops'),

   (('unpack_64_2x32_split_x', ('imul_2x32_64(is_used_once)', a, b)), ('imul', a, b)),
   (('unpack_64_2x32_split_x', ('umul_2x32_64(is_used_once)', a, b)), ('imul', a, b)),
   (('imul_2x32_64', a, b), ('pack_64_2x32_split', ('imul', a, b), ('imul_high', a, b)), 'options->lower_mul_2x32_64'),
   (('umul_2x32_64', a, b), ('pack_64_2x32_split', ('imul', a, b), ('umul_high', a, b)), 'options->lower_mul_2x32_64'),
   (('udiv', a, 1), a),
   (('idiv', a, 1), a),
   (('umod', a, 1), 0),
   (('imod', a, 1), 0),
   (('udiv', a, '#b@32(is_pos_power_of_two)'), ('ushr', a, ('find_lsb', b)), '!options->lower_bitops'),
   (('idiv', a, '#b@32(is_pos_power_of_two)'), ('imul', ('isign', a), ('ushr', ('iabs', a), ('find_lsb', b))), 'options->lower_idiv'),
   (('idiv', a, '#b@32(is_neg_power_of_two)'), ('ineg', ('imul', ('isign', a), ('ushr', ('iabs', a), ('find_lsb', ('iabs', b))))), 'options->lower_idiv'),
   (('umod', a, '#b(is_pos_power_of_two)'), ('iand', a, ('isub', b, 1))),
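   # A couple of concrete instances of the power-of-two rules above:
   # a * 8 becomes a << 3 (find_lsb(8) == 3), and a % 16 (unsigned)
   # becomes a & 15 (b - 1 == 15).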

   (('~fneg', ('fneg', a)), a),
   (('ineg', ('ineg', a)), a),
   (('fabs', ('fabs', a)), ('fabs', a)),
   (('fabs', ('fneg', a)), ('fabs', a)),
   (('fabs', ('u2f', a)), ('u2f', a)),
   (('iabs', ('iabs', a)), ('iabs', a)),
   (('iabs', ('ineg', a)), ('iabs', a)),
   (('f2b', ('fneg', a)), ('f2b', a)),
   (('i2b', ('ineg', a)), ('i2b', a)),
   (('~fadd', a, 0.0), a),
   (('iadd', a, 0), a),
   (('usadd_4x8', a, 0), a),
   (('usadd_4x8', a, ~0), ~0),
   (('~fadd', ('fmul', a, b), ('fmul', a, c)), ('fmul', a, ('fadd', b, c))),
   (('iadd', ('imul', a, b), ('imul', a, c)), ('imul', a, ('iadd', b, c))),
   (('~fadd', ('fneg', a), a), 0.0),
   (('iadd', ('ineg', a), a), 0),
   (('iadd', ('ineg', a), ('iadd', a, b)), b),
   (('iadd', a, ('iadd', ('ineg', a), b)), b),
   (('~fadd', ('fneg', a), ('fadd', a, b)), b),
   (('~fadd', a, ('fadd', ('fneg', a), b)), b),
   (('fadd', ('fsat', a), ('fsat', ('fneg', a))), ('fsat', ('fabs', a))),
   (('~fmul', a, 0.0), 0.0),
   (('imul', a, 0), 0),
   (('umul_unorm_4x8', a, 0), 0),
   (('umul_unorm_4x8', a, ~0), a),
   (('~fmul', a, 1.0), a),
   (('imul', a, 1), a),
   (('fmul', a, -1.0), ('fneg', a)),
   (('imul', a, -1), ('ineg', a)),
   # If a < 0: fsign(a)*a*a => -1*a*a => -a*a => abs(a)*a
   # If a > 0: fsign(a)*a*a => 1*a*a => a*a => abs(a)*a
   # If a == 0: fsign(a)*a*a => 0*0*0 => abs(0)*0
   (('fmul', ('fsign', a), ('fmul', a, a)), ('fmul', ('fabs', a), a)),
   (('fmul', ('fmul', ('fsign', a), a), a), ('fmul', ('fabs', a), a)),
   (('~ffma', 0.0, a, b), b),
   (('~ffma', a, b, 0.0), ('fmul', a, b)),
   (('ffma', 1.0, a, b), ('fadd', a, b)),
   (('ffma', -1.0, a, b), ('fadd', ('fneg', a), b)),
   (('~flrp', a, b, 0.0), a),
   (('~flrp', a, b, 1.0), b),
   (('~flrp', a, a, b), a),
   (('~flrp', 0.0, a, b), ('fmul', a, b)),

   # flrp(a, a + b, c) => a + flrp(0, b, c) => a + (b * c)
   (('~flrp', a, ('fadd(is_used_once)', a, b), c), ('fadd', ('fmul', b, c), a)),
   (('~flrp@32', a, ('fadd', a, b), c), ('fadd', ('fmul', b, c), a), 'options->lower_flrp32'),
   (('~flrp@64', a, ('fadd', a, b), c), ('fadd', ('fmul', b, c), a), 'options->lower_flrp64'),

   (('~flrp@32', ('fadd', a, b), ('fadd', a, c), d), ('fadd', ('flrp', b, c, d), a), 'options->lower_flrp32'),
   (('~flrp@64', ('fadd', a, b), ('fadd', a, c), d), ('fadd', ('flrp', b, c, d), a), 'options->lower_flrp64'),

   (('~flrp@32', a, ('fmul(is_used_once)', a, b), c), ('fmul', ('flrp', 1.0, b, c), a), 'options->lower_flrp32'),
   (('~flrp@64', a, ('fmul(is_used_once)', a, b), c), ('fmul', ('flrp', 1.0, b, c), a), 'options->lower_flrp64'),

   (('~flrp', ('fmul(is_used_once)', a, b), ('fmul(is_used_once)', a, c), d), ('fmul', ('flrp', b, c, d), a)),

   (('~flrp', a, b, ('b2f', 'c@1')), ('bcsel', c, b, a), 'options->lower_flrp32'),
   (('~flrp', a, 0.0, c), ('fadd', ('fmul', ('fneg', a), c), a)),
   (('ftrunc', a), ('bcsel', ('flt', a, 0.0), ('fneg', ('ffloor', ('fabs', a))), ('ffloor', ('fabs', a))), 'options->lower_ftrunc'),
   (('ffloor', a), ('fsub', a, ('ffract', a)), 'options->lower_ffloor'),
   (('fadd', a, ('fneg', ('ffract', a))), ('ffloor', a), '!options->lower_ffloor'),
   (('ffract', a), ('fsub', a, ('ffloor', a)), 'options->lower_ffract'),
   (('fceil', a), ('fneg', ('ffloor', ('fneg', a))), 'options->lower_fceil'),
   (('~fadd', ('fmul', a, ('fadd', 1.0, ('fneg', ('b2f', 'c@1')))), ('fmul', b, ('b2f', c))), ('bcsel', c, b, a), 'options->lower_flrp32'),
   (('~fadd@32', ('fmul', a, ('fadd', 1.0, ('fneg', c))), ('fmul', b, c)), ('flrp', a, b, c), '!options->lower_flrp32'),
   (('~fadd@64', ('fmul', a, ('fadd', 1.0, ('fneg', c))), ('fmul', b, c)), ('flrp', a, b, c), '!options->lower_flrp64'),
   # These are the same as the previous two rules, but they depend on
   # 1-fsat(x) <=> fsat(1-x). See below.
   (('~fadd@32', ('fmul', a, ('fsat', ('fadd', 1.0, ('fneg', c)))), ('fmul', b, ('fsat', c))), ('flrp', a, b, ('fsat', c)), '!options->lower_flrp32'),
   (('~fadd@64', ('fmul', a, ('fsat', ('fadd', 1.0, ('fneg', c)))), ('fmul', b, ('fsat', c))), ('flrp', a, b, ('fsat', c)), '!options->lower_flrp64'),

   (('~fadd', a, ('fmul', ('b2f', 'c@1'), ('fadd', b, ('fneg', a)))), ('bcsel', c, b, a), 'options->lower_flrp32'),
   (('~fadd@32', a, ('fmul', c, ('fadd', b, ('fneg', a)))), ('flrp', a, b, c), '!options->lower_flrp32'),
   (('~fadd@64', a, ('fmul', c, ('fadd', b, ('fneg', a)))), ('flrp', a, b, c), '!options->lower_flrp64'),
   (('ffma', a, b, c), ('fadd', ('fmul', a, b), c), 'options->lower_ffma'),
   (('~fadd', ('fmul', a, b), c), ('ffma', a, b, c), 'options->fuse_ffma'),

   (('~fmul', ('fadd', ('iand', ('ineg', ('b2i32', 'a@bool')), ('fmul', b, c)), '#d'), '#e'),
    ('bcsel', a, ('fmul', ('fadd', ('fmul', b, c), d), e), ('fmul', d, e))),

   (('fdph', a, b), ('fdot4', ('vec4', 'a.x', 'a.y', 'a.z', 1.0), b), 'options->lower_fdph'),

   (('fdot4', ('vec4', a, b, c, 1.0), d), ('fdph', ('vec3', a, b, c), d), '!options->lower_fdph'),
   (('fdot4', ('vec4', a, 0.0, 0.0, 0.0), b), ('fmul', a, b)),
   (('fdot4', ('vec4', a, b, 0.0, 0.0), c), ('fdot2', ('vec2', a, b), c)),
   (('fdot4', ('vec4', a, b, c, 0.0), d), ('fdot3', ('vec3', a, b, c), d)),

   (('fdot3', ('vec3', a, 0.0, 0.0), b), ('fmul', a, b)),
   (('fdot3', ('vec3', a, b, 0.0), c), ('fdot2', ('vec2', a, b), c)),

   (('fdot2', ('vec2', a, 0.0), b), ('fmul', a, b)),
   (('fdot2', a, 1.0), ('fadd', 'a.x', 'a.y')),

   # Lower fdot to fsum when it is available
   (('fdot2', a, b), ('fsum2', ('fmul', a, b)), 'options->lower_fdot'),
   (('fdot3', a, b), ('fsum3', ('fmul', a, b)), 'options->lower_fdot'),
   (('fdot4', a, b), ('fsum4', ('fmul', a, b)), 'options->lower_fdot'),
   (('fsum2', a), ('fadd', 'a.x', 'a.y'), 'options->lower_fdot'),

   # If x >= 0 and x <= 1: fsat(1 - x) == 1 - fsat(x) trivially
   # If x < 0: 1 - fsat(x) => 1 - 0 => 1 and fsat(1 - x) => fsat(> 1) => 1
   # If x > 1: 1 - fsat(x) => 1 - 1 => 0 and fsat(1 - x) => fsat(< 0) => 0
   (('~fadd', ('fneg(is_used_once)', ('fsat(is_used_once)', 'a(is_not_fmul)')), 1.0), ('fsat', ('fadd', 1.0, ('fneg', a)))),

   # 1 - ((1 - a) * (1 - b))
   # 1 - (1 - a - b + a*b)
   # 1 - 1 + a + b - a*b
   # a + b - a*b
   # a + b*(1 - a)
   # b*(1 - a) + 1*a
   # flrp(b, 1, a)
   (('~fadd@32', 1.0, ('fneg', ('fmul', ('fadd', 1.0, ('fneg', a)), ('fadd', 1.0, ('fneg', b))))),
    ('flrp', b, 1.0, a), '!options->lower_flrp32'),

   # (a * #b + #c) << #d
   # ((a * #b) << #d) + (#c << #d)
   # (a * (#b << #d)) + (#c << #d)
   (('ishl', ('iadd', ('imul', a, '#b'), '#c'), '#d'),
    ('iadd', ('imul', a, ('ishl', b, d)), ('ishl', c, d))),

   # (a * #b) << #c
   # a * (#b << #c)
   (('ishl', ('imul', a, '#b'), '#c'), ('imul', a, ('ishl', b, c))),
]

# Care must be taken here. Shifts in NIR use only the lower log2(bitsize)
# bits of the second source. These replacements must correctly handle the
# case where (b % bitsize) + (c % bitsize) >= bitsize; a worked example
# follows the loop below.
for s in [8, 16, 32, 64]:
   mask = (1 << s) - 1

   ishl = "ishl@{}".format(s)
   ishr = "ishr@{}".format(s)
   ushr = "ushr@{}".format(s)

   in_bounds = ('ult', ('iadd', ('iand', b, mask), ('iand', c, mask)), s)

   optimizations.extend([
      ((ishl, (ishl, a, '#b'), '#c'), ('bcsel', in_bounds, (ishl, a, ('iadd', b, c)), 0)),
      ((ushr, (ushr, a, '#b'), '#c'), ('bcsel', in_bounds, (ushr, a, ('iadd', b, c)), 0)),

      # To get -1 for large shifts of negative values, ishr must instead
      # clamp the shift count to the maximum value.
      ((ishr, (ishr, a, '#b'), '#c'),
       (ishr, a, ('imin', ('iadd', ('iand', b, mask), ('iand', c, mask)), s - 1))),
   ])
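
# Worked example for the bounds check above (s = 32, b = c = 20): a naive
# rewrite of (a << 20) << 20 to a << 40 would execute as a << (40 & 31),
# i.e. a << 8, because only the low log2(32) = 5 bits of the shift count are
# used. The correct result of shifting by 40 is 0, which is what the bcsel
# produces when in_bounds fails.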

optimizations.extend([
   # This is common for address calculations. Reassociating may enable the
   # 'a<<c' to be CSE'd. It also helps architectures that have an ISHLADD
   # instruction or a constant offset field in load / store instructions.
   (('ishl', ('iadd', a, '#b'), '#c'), ('iadd', ('ishl', a, c), ('ishl', b, c))),

   # Comparison simplifications
   (('~inot', ('flt', a, b)), ('fge', a, b)),
   (('~inot', ('fge', a, b)), ('flt', a, b)),
   (('inot', ('feq', a, b)), ('fne', a, b)),
   (('inot', ('fne', a, b)), ('feq', a, b)),
   (('inot', ('ilt', a, b)), ('ige', a, b)),
   (('inot', ('ult', a, b)), ('uge', a, b)),
   (('inot', ('ige', a, b)), ('ilt', a, b)),
   (('inot', ('uge', a, b)), ('ult', a, b)),
   (('inot', ('ieq', a, b)), ('ine', a, b)),
   (('inot', ('ine', a, b)), ('ieq', a, b)),

   (('iand', ('feq', a, b), ('fne', a, b)), False),
   (('iand', ('flt', a, b), ('flt', b, a)), False),
   (('iand', ('ieq', a, b), ('ine', a, b)), False),
   (('iand', ('ilt', a, b), ('ilt', b, a)), False),
   (('iand', ('ult', a, b), ('ult', b, a)), False),

   # This helps some shaders because, after some optimizations, they end up
   # with patterns like (-a < -b) || (b < a). In an ideal world, this sort of
   # matching would be handled by CSE.
   (('flt', ('fneg', a), ('fneg', b)), ('flt', b, a)),
   (('fge', ('fneg', a), ('fneg', b)), ('fge', b, a)),
   (('feq', ('fneg', a), ('fneg', b)), ('feq', b, a)),
   (('fne', ('fneg', a), ('fneg', b)), ('fne', b, a)),
   (('flt', ('fneg', a), -1.0), ('flt', 1.0, a)),
   (('flt', -1.0, ('fneg', a)), ('flt', a, 1.0)),
   (('fge', ('fneg', a), -1.0), ('fge', 1.0, a)),
   (('fge', -1.0, ('fneg', a)), ('fge', a, 1.0)),
   (('fne', ('fneg', a), -1.0), ('fne', 1.0, a)),
   (('feq', -1.0, ('fneg', a)), ('feq', a, 1.0)),

   (('flt', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('flt', a, b)),
   (('flt', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('flt', b, a)),
   (('fge', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fge', a, b)),
   (('fge', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('fge', b, a)),
   (('feq', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('feq', a, b)),
   (('fne', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fne', a, b)),

   (('fge', ('fsat(is_used_once)', a), 1.0), ('fge', a, 1.0)),
   (('flt', ('fsat(is_used_once)', a), 1.0), ('flt', a, 1.0)),
   (('fge', 0.0, ('fsat(is_used_once)', a)), ('fge', 0.0, a)),
   (('flt', 0.0, ('fsat(is_used_once)', a)), ('flt', 0.0, a)),

   # 0.0 >= b2f(a)
   # b2f(a) <= 0.0
   # b2f(a) == 0.0 because b2f(a) can only be 0 or 1
   # inot(a)
   (('fge', 0.0, ('b2f', 'a@1')), ('inot', a)),

   (('fge', ('fneg', ('b2f', 'a@1')), 0.0), ('inot', a)),

   (('fne', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('ior', a, b)),
   (('fne', ('fmax', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('ior', a, b)),
   (('fne', ('bcsel', a, 1.0, ('b2f', 'b@1')), 0.0), ('ior', a, b)),
   (('fne', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), ('ior', a, b)),
   (('fne', ('fmul', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('iand', a, b)),
   (('fne', ('fmin', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('iand', a, b)),
   (('fne', ('bcsel', a, ('b2f', 'b@1'), 0.0), 0.0), ('iand', a, b)),
   (('fne', ('fadd', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), 0.0), ('ixor', a, b)),
   (('fne', ('b2f', 'a@1'), ('b2f', 'b@1')), ('ixor', a, b)),
   (('fne', ('fneg', ('b2f', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('ixor', a, b)),
   (('feq', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('ior', a, b))),
   (('feq', ('fmax', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('ior', a, b))),
   (('feq', ('bcsel', a, 1.0, ('b2f', 'b@1')), 0.0), ('inot', ('ior', a, b))),
   (('feq', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), ('inot', ('ior', a, b))),
   (('feq', ('fmul', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('iand', a, b))),
   (('feq', ('fmin', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('iand', a, b))),
   (('feq', ('bcsel', a, ('b2f', 'b@1'), 0.0), 0.0), ('inot', ('iand', a, b))),
   (('feq', ('fadd', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), 0.0), ('ieq', a, b)),
   (('feq', ('b2f', 'a@1'), ('b2f', 'b@1')), ('ieq', a, b)),
   (('feq', ('fneg', ('b2f', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('ieq', a, b)),

   # -(b2f(a) + b2f(b)) < 0
   # 0 < b2f(a) + b2f(b)
   # 0 != b2f(a) + b2f(b)       b2f must be 0 or 1, so the sum is non-negative
   # a || b
   (('flt', ('fneg', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), 0.0), ('ior', a, b)),
   (('flt', 0.0, ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), ('ior', a, b)),

   # -(b2f(a) + b2f(b)) >= 0
   # 0 >= b2f(a) + b2f(b)
   # 0 == b2f(a) + b2f(b)       b2f must be 0 or 1, so the sum is non-negative
   # !(a || b)
   (('fge', ('fneg', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), 0.0), ('inot', ('ior', a, b))),
   (('fge', 0.0, ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), ('inot', ('ior', a, b))),

   (('flt', a, ('fneg', a)), ('flt', a, 0.0)),
   (('fge', a, ('fneg', a)), ('fge', a, 0.0)),

   # Some optimizations (below) convert things like (a < b || c < b) into
   # (min(a, c) < b). However, this interferes with the previous optimizations
   # that try to remove comparisons with negated sums of b2f. This just
   # breaks that apart.
   (('flt', ('fmin', c, ('fneg', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1')))), 0.0),
    ('ior', ('flt', c, 0.0), ('ior', a, b))),

   (('~flt', ('fadd', a, b), a), ('flt', b, 0.0)),
   (('~fge', ('fadd', a, b), a), ('fge', b, 0.0)),
   (('~feq', ('fadd', a, b), a), ('feq', b, 0.0)),
   (('~fne', ('fadd', a, b), a), ('fne', b, 0.0)),
   (('~flt', ('fadd(is_used_once)', a, '#b'), '#c'), ('flt', a, ('fadd', c, ('fneg', b)))),
   (('~flt', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('flt', ('fneg', ('fadd', c, b)), a)),
   (('~fge', ('fadd(is_used_once)', a, '#b'), '#c'), ('fge', a, ('fadd', c, ('fneg', b)))),
   (('~fge', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('fge', ('fneg', ('fadd', c, b)), a)),
   (('~feq', ('fadd(is_used_once)', a, '#b'), '#c'), ('feq', a, ('fadd', c, ('fneg', b)))),
   (('~feq', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('feq', ('fneg', ('fadd', c, b)), a)),
   (('~fne', ('fadd(is_used_once)', a, '#b'), '#c'), ('fne', a, ('fadd', c, ('fneg', b)))),
   (('~fne', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('fne', ('fneg', ('fadd', c, b)), a)),

   # Cannot remove the addition from ilt or ige due to overflow.
   (('ieq', ('iadd', a, b), a), ('ieq', b, 0)),
   (('ine', ('iadd', a, b), a), ('ine', b, 0)),

   # fmin(-b2f(a), b) >= 0.0
   # -b2f(a) >= 0.0 && b >= 0.0
   # -b2f(a) == 0.0 && b >= 0.0    -b2f can only be 0 or -1, never >0
   # b2f(a) == 0.0 && b >= 0.0
   # a == False && b >= 0.0
   # !a && b >= 0.0
   #
   # The fge in the second replacement is not a typo. I leave the proof that
   # "fmin(-b2f(a), b) >= 0 <=> fmin(-b2f(a), b) == 0" as an exercise for the
   # reader.
   (('fge', ('fmin', ('fneg', ('b2f', 'a@1')), 'b@1'), 0.0), ('iand', ('inot', a), ('fge', b, 0.0))),
   (('feq', ('fmin', ('fneg', ('b2f', 'a@1')), 'b@1'), 0.0), ('iand', ('inot', a), ('fge', b, 0.0))),

   (('feq', ('b2f', 'a@1'), 0.0), ('inot', a)),
   (('~fne', ('b2f', 'a@1'), 0.0), a),
   (('ieq', ('b2i', 'a@1'), 0), ('inot', a)),
   (('ine', ('b2i', 'a@1'), 0), a),

   (('fne', ('u2f', a), 0.0), ('ine', a, 0)),
   (('feq', ('u2f', a), 0.0), ('ieq', a, 0)),
   (('fge', ('u2f', a), 0.0), True),
   (('fge', 0.0, ('u2f', a)), ('uge', 0, a)),    # ieq instead?
   (('flt', ('u2f', a), 0.0), False),
   (('flt', 0.0, ('u2f', a)), ('ult', 0, a)),    # ine instead?
   (('fne', ('i2f', a), 0.0), ('ine', a, 0)),
   (('feq', ('i2f', a), 0.0), ('ieq', a, 0)),
   (('fge', ('i2f', a), 0.0), ('ige', a, 0)),
   (('fge', 0.0, ('i2f', a)), ('ige', 0, a)),
   (('flt', ('i2f', a), 0.0), ('ilt', a, 0)),
   (('flt', 0.0, ('i2f', a)), ('ilt', 0, a)),

   # 0.0 < fabs(a)
   # fabs(a) > 0.0
   # fabs(a) != 0.0 because fabs(a) must be >= 0
   # a != 0.0
   (('~flt', 0.0, ('fabs', a)), ('fne', a, 0.0)),

   # -fabs(a) < 0.0
   # fabs(a) > 0.0
   (('~flt', ('fneg', ('fabs', a)), 0.0), ('fne', a, 0.0)),

   # 0.0 >= fabs(a)
   # 0.0 == fabs(a) because fabs(a) must be >= 0
   # 0.0 == a
   (('fge', 0.0, ('fabs', a)), ('feq', a, 0.0)),

   # -fabs(a) >= 0.0
   # 0.0 >= fabs(a)
   (('fge', ('fneg', ('fabs', a)), 0.0), ('feq', a, 0.0)),

   # (a >= 0.0) && (a <= 1.0) -> fsat(a) == a
   (('iand', ('fge', a, 0.0), ('fge', 1.0, a)), ('feq', a, ('fsat', a)), '!options->lower_fsat'),

   # (a < 0.0) || (a > 1.0)
   # !(!(a < 0.0) && !(a > 1.0))
   # !((a >= 0.0) && (a <= 1.0))
   # !(a == fsat(a))
   # a != fsat(a)
   (('ior', ('flt', a, 0.0), ('flt', 1.0, a)), ('fne', a, ('fsat', a)), '!options->lower_fsat'),

   (('fmax', ('b2f(is_used_once)', 'a@1'), ('b2f', 'b@1')), ('b2f', ('ior', a, b))),
   (('fmax', ('fneg(is_used_once)', ('b2f(is_used_once)', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('fneg', ('b2f', ('ior', a, b)))),
   (('fmin', ('b2f(is_used_once)', 'a@1'), ('b2f', 'b@1')), ('b2f', ('iand', a, b))),
   (('fmin', ('fneg(is_used_once)', ('b2f(is_used_once)', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('fneg', ('b2f', ('iand', a, b)))),

   # fmin(b2f(a), b)
   # bcsel(a, fmin(b2f(a), b), fmin(b2f(a), b))
   # bcsel(a, fmin(b2f(True), b), fmin(b2f(False), b))
   # bcsel(a, fmin(1.0, b), fmin(0.0, b))
   #
   # Since b is a constant, constant folding will eliminate the fmins. If b
   # is > 1.0, the bcsel will be replaced with a b2f.
   (('fmin', ('b2f', 'a@1'), '#b'), ('bcsel', a, ('fmin', b, 1.0), ('fmin', b, 0.0))),

   (('flt', ('fadd(is_used_once)', a, ('fneg', b)), 0.0), ('flt', a, b)),

   (('fge', ('fneg', ('fabs', a)), 0.0), ('feq', a, 0.0)),
   (('~bcsel', ('flt', b, a), b, a), ('fmin', a, b)),
   (('~bcsel', ('flt', a, b), b, a), ('fmax', a, b)),
   (('~bcsel', ('fge', a, b), b, a), ('fmin', a, b)),
   (('~bcsel', ('fge', b, a), b, a), ('fmax', a, b)),
   (('bcsel', ('i2b', a), b, c), ('bcsel', ('ine', a, 0), b, c)),
   (('bcsel', ('inot', a), b, c), ('bcsel', a, c, b)),
   (('bcsel', a, ('bcsel', a, b, c), d), ('bcsel', a, b, d)),
   (('bcsel', a, b, ('bcsel', a, c, d)), ('bcsel', a, b, d)),
   (('bcsel', a, ('bcsel', b, c, d), ('bcsel(is_used_once)', b, c, 'e')), ('bcsel', b, c, ('bcsel', a, d, 'e'))),
   (('bcsel', a, ('bcsel(is_used_once)', b, c, d), ('bcsel', b, c, 'e')), ('bcsel', b, c, ('bcsel', a, d, 'e'))),
   (('bcsel', a, ('bcsel', b, c, d), ('bcsel(is_used_once)', b, 'e', d)), ('bcsel', b, ('bcsel', a, c, 'e'), d)),
   (('bcsel', a, ('bcsel(is_used_once)', b, c, d), ('bcsel', b, 'e', d)), ('bcsel', b, ('bcsel', a, c, 'e'), d)),
   (('bcsel', a, True, b), ('ior', a, b)),
   (('bcsel', a, a, b), ('ior', a, b)),
   (('bcsel', a, b, False), ('iand', a, b)),
   (('bcsel', a, b, a), ('iand', a, b)),
   (('~fmin', a, a), a),
   (('~fmax', a, a), a),
   (('imin', a, a), a),
   (('imax', a, a), a),
   (('umin', a, a), a),
   (('umax', a, a), a),
   (('fmax', ('fmax', a, b), b), ('fmax', a, b)),
   (('umax', ('umax', a, b), b), ('umax', a, b)),
   (('imax', ('imax', a, b), b), ('imax', a, b)),
   (('fmin', ('fmin', a, b), b), ('fmin', a, b)),
   (('umin', ('umin', a, b), b), ('umin', a, b)),
   (('imin', ('imin', a, b), b), ('imin', a, b)),
   (('iand@32', a, ('inot', ('ishr', a, 31))), ('imax', a, 0)),
   (('fmax', a, ('fneg', a)), ('fabs', a)),
   (('imax', a, ('ineg', a)), ('iabs', a)),
   (('fmin', a, ('fneg', a)), ('fneg', ('fabs', a))),
   (('imin', a, ('ineg', a)), ('ineg', ('iabs', a))),
   (('fmin', a, ('fneg', ('fabs', a))), ('fneg', ('fabs', a))),
   (('imin', a, ('ineg', ('iabs', a))), ('ineg', ('iabs', a))),
   (('~fmin', a, ('fabs', a)), a),
   (('imin', a, ('iabs', a)), a),
   (('~fmax', a, ('fneg', ('fabs', a))), a),
   (('imax', a, ('ineg', ('iabs', a))), a),
   (('fmax', a, ('fabs', a)), ('fabs', a)),
   (('imax', a, ('iabs', a)), ('iabs', a)),
   (('fmax', a, ('fneg', a)), ('fabs', a)),
   (('imax', a, ('ineg', a)), ('iabs', a)),
   (('~fmax', ('fabs', a), 0.0), ('fabs', a)),
   (('~fmin', ('fmax', a, 0.0), 1.0), ('fsat', a), '!options->lower_fsat'),
   (('~fmax', ('fmin', a, 1.0), 0.0), ('fsat', a), '!options->lower_fsat'),
   (('~fmin', ('fmax', a, -1.0), 0.0), ('fneg', ('fsat', ('fneg', a))), '!options->lower_fsat'),
   (('~fmax', ('fmin', a, 0.0), -1.0), ('fneg', ('fsat', ('fneg', a))), '!options->lower_fsat'),
   (('fsat', ('fsign', a)), ('b2f', ('flt', 0.0, a))),
   (('fsat', ('b2f', a)), ('b2f', a)),
   (('fsat', a), ('fmin', ('fmax', a, 0.0), 1.0), 'options->lower_fsat'),
   (('fsat', ('fsat', a)), ('fsat', a)),
   (('fsat', ('fneg(is_used_once)', ('fadd(is_used_once)', a, b))), ('fsat', ('fadd', ('fneg', a), ('fneg', b))), '!options->lower_fsat'),
   (('fsat', ('fneg(is_used_once)', ('fmul(is_used_once)', a, b))), ('fsat', ('fmul', ('fneg', a), b)), '!options->lower_fsat'),
   (('fsat', ('fabs(is_used_once)', ('fmul(is_used_once)', a, b))), ('fsat', ('fmul', ('fabs', a), ('fabs', b))), '!options->lower_fsat'),
   (('fmin', ('fmax', ('fmin', ('fmax', a, b), c), b), c), ('fmin', ('fmax', a, b), c)),
   (('imin', ('imax', ('imin', ('imax', a, b), c), b), c), ('imin', ('imax', a, b), c)),
   (('umin', ('umax', ('umin', ('umax', a, b), c), b), c), ('umin', ('umax', a, b), c)),
   (('fmax', ('fsat', a), '#b@32(is_zero_to_one)'), ('fsat', ('fmax', a, b))),
   (('fmin', ('fsat', a), '#b@32(is_zero_to_one)'), ('fsat', ('fmin', a, b))),
   (('extract_u8', ('imin', ('imax', a, 0), 0xff), 0), ('imin', ('imax', a, 0), 0xff)),
   (('~ior', ('flt(is_used_once)', a, b), ('flt', a, c)), ('flt', a, ('fmax', b, c))),
   (('~ior', ('flt(is_used_once)', a, c), ('flt', b, c)), ('flt', ('fmin', a, b), c)),
   (('~ior', ('fge(is_used_once)', a, b), ('fge', a, c)), ('fge', a, ('fmin', b, c))),
   (('~ior', ('fge(is_used_once)', a, c), ('fge', b, c)), ('fge', ('fmax', a, b), c)),
   (('~ior', ('flt', a, '#b'), ('flt', a, '#c')), ('flt', a, ('fmax', b, c))),
   (('~ior', ('flt', '#a', c), ('flt', '#b', c)), ('flt', ('fmin', a, b), c)),
   (('~ior', ('fge', a, '#b'), ('fge', a, '#c')), ('fge', a, ('fmin', b, c))),
   (('~ior', ('fge', '#a', c), ('fge', '#b', c)), ('fge', ('fmax', a, b), c)),
   (('~iand', ('flt(is_used_once)', a, b), ('flt', a, c)), ('flt', a, ('fmin', b, c))),
   (('~iand', ('flt(is_used_once)', a, c), ('flt', b, c)), ('flt', ('fmax', a, b), c)),
   (('~iand', ('fge(is_used_once)', a, b), ('fge', a, c)), ('fge', a, ('fmax', b, c))),
   (('~iand', ('fge(is_used_once)', a, c), ('fge', b, c)), ('fge', ('fmin', a, b), c)),
   (('~iand', ('flt', a, '#b'), ('flt', a, '#c')), ('flt', a, ('fmin', b, c))),
   (('~iand', ('flt', '#a', c), ('flt', '#b', c)), ('flt', ('fmax', a, b), c)),
   (('~iand', ('fge', a, '#b'), ('fge', a, '#c')), ('fge', a, ('fmax', b, c))),
   (('~iand', ('fge', '#a', c), ('fge', '#b', c)), ('fge', ('fmin', a, b), c)),

   (('ior', ('ilt(is_used_once)', a, b), ('ilt', a, c)), ('ilt', a, ('imax', b, c))),
   (('ior', ('ilt(is_used_once)', a, c), ('ilt', b, c)), ('ilt', ('imin', a, b), c)),
   (('ior', ('ige(is_used_once)', a, b), ('ige', a, c)), ('ige', a, ('imin', b, c))),
   (('ior', ('ige(is_used_once)', a, c), ('ige', b, c)), ('ige', ('imax', a, b), c)),
   (('ior', ('ult(is_used_once)', a, b), ('ult', a, c)), ('ult', a, ('umax', b, c))),
   (('ior', ('ult(is_used_once)', a, c), ('ult', b, c)), ('ult', ('umin', a, b), c)),
   (('ior', ('uge(is_used_once)', a, b), ('uge', a, c)), ('uge', a, ('umin', b, c))),
   (('ior', ('uge(is_used_once)', a, c), ('uge', b, c)), ('uge', ('umax', a, b), c)),
   (('iand', ('ilt(is_used_once)', a, b), ('ilt', a, c)), ('ilt', a, ('imin', b, c))),
   (('iand', ('ilt(is_used_once)', a, c), ('ilt', b, c)), ('ilt', ('imax', a, b), c)),
   (('iand', ('ige(is_used_once)', a, b), ('ige', a, c)), ('ige', a, ('imax', b, c))),
   (('iand', ('ige(is_used_once)', a, c), ('ige', b, c)), ('ige', ('imin', a, b), c)),
   (('iand', ('ult(is_used_once)', a, b), ('ult', a, c)), ('ult', a, ('umin', b, c))),
   (('iand', ('ult(is_used_once)', a, c), ('ult', b, c)), ('ult', ('umax', a, b), c)),
   (('iand', ('uge(is_used_once)', a, b), ('uge', a, c)), ('uge', a, ('umax', b, c))),
   (('iand', ('uge(is_used_once)', a, c), ('uge', b, c)), ('uge', ('umin', a, b), c)),

   # These derive from the previous patterns with the application of b < 0 <=>
   # 0 < -b. The transformation should be applied if either comparison is
   # used once as this ensures that the number of comparisons will not
   # increase. The sources to the ior and iand are not symmetric, so the
   # rules have to be duplicated to get this behavior.
   (('~ior', ('flt(is_used_once)', 0.0, 'a@32'), ('flt', 'b@32', 0.0)), ('flt', 0.0, ('fmax', a, ('fneg', b)))),
   (('~ior', ('flt', 0.0, 'a@32'), ('flt(is_used_once)', 'b@32', 0.0)), ('flt', 0.0, ('fmax', a, ('fneg', b)))),
   (('~ior', ('fge(is_used_once)', 0.0, 'a@32'), ('fge', 'b@32', 0.0)), ('fge', 0.0, ('fmin', a, ('fneg', b)))),
   (('~ior', ('fge', 0.0, 'a@32'), ('fge(is_used_once)', 'b@32', 0.0)), ('fge', 0.0, ('fmin', a, ('fneg', b)))),
   (('~iand', ('flt(is_used_once)', 0.0, 'a@32'), ('flt', 'b@32', 0.0)), ('flt', 0.0, ('fmin', a, ('fneg', b)))),
   (('~iand', ('flt', 0.0, 'a@32'), ('flt(is_used_once)', 'b@32', 0.0)), ('flt', 0.0, ('fmin', a, ('fneg', b)))),
   (('~iand', ('fge(is_used_once)', 0.0, 'a@32'), ('fge', 'b@32', 0.0)), ('fge', 0.0, ('fmax', a, ('fneg', b)))),
   (('~iand', ('fge', 0.0, 'a@32'), ('fge(is_used_once)', 'b@32', 0.0)), ('fge', 0.0, ('fmax', a, ('fneg', b)))),

   # Common pattern like 'if (i == 0 || i == 1 || ...)'
   (('ior', ('ieq', a, 0), ('ieq', a, 1)), ('uge', 1, a)),
   (('ior', ('uge', 1, a), ('ieq', a, 2)), ('uge', 2, a)),
   (('ior', ('uge', 2, a), ('ieq', a, 3)), ('uge', 3, a)),

   # The (i2f32, ...) part is an open-coded fsign. When that is combined with
   # the bcsel, it's basically copysign(1.0, a). There is no copysign in NIR,
   # so emit an open-coded version of that.
   (('bcsel@32', ('feq', a, 0.0), 1.0, ('i2f32', ('iadd', ('b2i32', ('flt', 0.0, 'a@32')), ('ineg', ('b2i32', ('flt', 'a@32', 0.0)))))),
    ('ior', 0x3f800000, ('iand', a, 0x80000000))),
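   # e.g. for a = -2.0 (bits 0xc0000000): 0xc0000000 & 0x80000000 is
   # 0x80000000, and 0x3f800000 | 0x80000000 is 0xbf800000, the bit pattern
   # of -1.0 == copysign(1.0, a). (See intBitsToFloat() above for moving
   # between bit patterns and floats.)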

   (('ior', a, ('ieq', a, False)), True),
   (('ior', a, ('inot', a)), -1),

   (('ine', ('ineg', ('b2i32', 'a@1')), ('ineg', ('b2i32', 'b@1'))), ('ine', a, b)),
   (('b2i32', ('ine', 'a@1', 'b@1')), ('b2i32', ('ixor', a, b))),

   (('iand', ('ieq', 'a@32', 0), ('ieq', 'b@32', 0)), ('ieq', ('ior', 'a@32', 'b@32'), 0), '!options->lower_bitops'),

   # These patterns can result when (a < b || a < c) => (a < max(b, c))
   # transformations occur before constant propagation and loop-unrolling.
   (('~flt', a, ('fmax', b, a)), ('flt', a, b)),
   (('~flt', ('fmin', a, b), a), ('flt', b, a)),
   (('~fge', a, ('fmin', b, a)), True),
   (('~fge', ('fmax', a, b), a), True),
   (('~flt', a, ('fmin', b, a)), False),
   (('~flt', ('fmax', a, b), a), False),
   (('~fge', a, ('fmax', b, a)), ('fge', a, b)),
   (('~fge', ('fmin', a, b), a), ('fge', b, a)),

   (('ilt', a, ('imax', b, a)), ('ilt', a, b)),
   (('ilt', ('imin', a, b), a), ('ilt', b, a)),
   (('ige', a, ('imin', b, a)), True),
   (('ige', ('imax', a, b), a), True),
   (('ult', a, ('umax', b, a)), ('ult', a, b)),
   (('ult', ('umin', a, b), a), ('ult', b, a)),
   (('uge', a, ('umin', b, a)), True),
   (('uge', ('umax', a, b), a), True),
   (('ilt', a, ('imin', b, a)), False),
   (('ilt', ('imax', a, b), a), False),
   (('ige', a, ('imax', b, a)), ('ige', a, b)),
   (('ige', ('imin', a, b), a), ('ige', b, a)),
   (('ult', a, ('umin', b, a)), False),
   (('ult', ('umax', a, b), a), False),
   (('uge', a, ('umax', b, a)), ('uge', a, b)),
   (('uge', ('umin', a, b), a), ('uge', b, a)),
   (('ult', a, ('iand', b, a)), False),
   (('ult', ('ior', a, b), a), False),
   (('uge', a, ('iand', b, a)), True),
   (('uge', ('ior', a, b), a), True),

   (('ilt', '#a', ('imax', '#b', c)), ('ior', ('ilt', a, b), ('ilt', a, c))),
   (('ilt', ('imin', '#a', b), '#c'), ('ior', ('ilt', a, c), ('ilt', b, c))),
   (('ige', '#a', ('imin', '#b', c)), ('ior', ('ige', a, b), ('ige', a, c))),
   (('ige', ('imax', '#a', b), '#c'), ('ior', ('ige', a, c), ('ige', b, c))),
   (('ult', '#a', ('umax', '#b', c)), ('ior', ('ult', a, b), ('ult', a, c))),
   (('ult', ('umin', '#a', b), '#c'), ('ior', ('ult', a, c), ('ult', b, c))),
   (('uge', '#a', ('umin', '#b', c)), ('ior', ('uge', a, b), ('uge', a, c))),
   (('uge', ('umax', '#a', b), '#c'), ('ior', ('uge', a, c), ('uge', b, c))),
   (('ilt', '#a', ('imin', '#b', c)), ('iand', ('ilt', a, b), ('ilt', a, c))),
   (('ilt', ('imax', '#a', b), '#c'), ('iand', ('ilt', a, c), ('ilt', b, c))),
   (('ige', '#a', ('imax', '#b', c)), ('iand', ('ige', a, b), ('ige', a, c))),
   (('ige', ('imin', '#a', b), '#c'), ('iand', ('ige', a, c), ('ige', b, c))),
   (('ult', '#a', ('umin', '#b', c)), ('iand', ('ult', a, b), ('ult', a, c))),
   (('ult', ('umax', '#a', b), '#c'), ('iand', ('ult', a, c), ('ult', b, c))),
   (('uge', '#a', ('umax', '#b', c)), ('iand', ('uge', a, b), ('uge', a, c))),
   (('uge', ('umin', '#a', b), '#c'), ('iand', ('uge', a, c), ('uge', b, c))),

   # Thanks to sign extension, the ishr(a, b) is negative if and only if a is
   # negative.
   (('bcsel', ('ilt', a, 0), ('ineg', ('ishr', a, b)), ('ishr', a, b)),
    ('iabs', ('ishr', a, b))),
   (('iabs', ('ishr', ('iabs', a), b)), ('ishr', ('iabs', a), b)),

   (('fabs', ('slt', a, b)), ('slt', a, b)),
   (('fabs', ('sge', a, b)), ('sge', a, b)),
   (('fabs', ('seq', a, b)), ('seq', a, b)),
   (('fabs', ('sne', a, b)), ('sne', a, b)),
   (('slt', a, b), ('b2f', ('flt', a, b)), 'options->lower_scmp'),
   (('sge', a, b), ('b2f', ('fge', a, b)), 'options->lower_scmp'),
   (('seq', a, b), ('b2f', ('feq', a, b)), 'options->lower_scmp'),
   (('sne', a, b), ('b2f', ('fne', a, b)), 'options->lower_scmp'),
   (('seq', ('seq', a, b), 1.0), ('seq', a, b)),
   (('seq', ('sne', a, b), 1.0), ('sne', a, b)),
   (('seq', ('slt', a, b), 1.0), ('slt', a, b)),
   (('seq', ('sge', a, b), 1.0), ('sge', a, b)),
   (('sne', ('seq', a, b), 0.0), ('seq', a, b)),
   (('sne', ('sne', a, b), 0.0), ('sne', a, b)),
   (('sne', ('slt', a, b), 0.0), ('slt', a, b)),
   (('sne', ('sge', a, b), 0.0), ('sge', a, b)),
   (('seq', ('seq', a, b), 0.0), ('sne', a, b)),
   (('seq', ('sne', a, b), 0.0), ('seq', a, b)),
   (('seq', ('slt', a, b), 0.0), ('sge', a, b)),
   (('seq', ('sge', a, b), 0.0), ('slt', a, b)),
   (('sne', ('seq', a, b), 1.0), ('sne', a, b)),
   (('sne', ('sne', a, b), 1.0), ('seq', a, b)),
   (('sne', ('slt', a, b), 1.0), ('sge', a, b)),
   (('sne', ('sge', a, b), 1.0), ('slt', a, b)),
   (('fall_equal2', a, b), ('fmin', ('seq', 'a.x', 'b.x'), ('seq', 'a.y', 'b.y')), 'options->lower_vector_cmp'),
   (('fall_equal3', a, b), ('seq', ('fany_nequal3', a, b), 0.0), 'options->lower_vector_cmp'),
   (('fall_equal4', a, b), ('seq', ('fany_nequal4', a, b), 0.0), 'options->lower_vector_cmp'),
   (('fany_nequal2', a, b), ('fmax', ('sne', 'a.x', 'b.x'), ('sne', 'a.y', 'b.y')), 'options->lower_vector_cmp'),
   (('fany_nequal3', a, b), ('fsat', ('fdot3', ('sne', a, b), ('sne', a, b))), 'options->lower_vector_cmp'),
   (('fany_nequal4', a, b), ('fsat', ('fdot4', ('sne', a, b), ('sne', a, b))), 'options->lower_vector_cmp'),
   (('fne', ('fneg', a), a), ('fne', a, 0.0)),
   (('feq', ('fneg', a), a), ('feq', a, 0.0)),
   # Emulating booleans
   (('imul', ('b2i', 'a@1'), ('b2i', 'b@1')), ('b2i', ('iand', a, b))),
   (('fmul', ('b2f', 'a@1'), ('b2f', 'b@1')), ('b2f', ('iand', a, b))),
   (('fsat', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), ('b2f', ('ior', a, b))),
   (('iand', 'a@bool32', 1.0), ('b2f', a)),
   # True/False are ~0 and 0 in NIR. b2i of True is 1, and -1 is ~0 (True).
   (('ineg', ('b2i32', 'a@32')), a),
   (('flt', ('fneg', ('b2f', 'a@1')), 0), a), # Generated by TGSI KILL_IF.
   # Comparison with the same args. Note that these are not done for
   # the float versions because NaN always returns false on float
   # inequalities.
   (('ilt', a, a), False),
   (('ige', a, a), True),
   (('ieq', a, a), True),
   (('ine', a, a), False),
   (('ult', a, a), False),
   (('uge', a, a), True),
   # Logical and bit operations
   (('iand', a, a), a),
   (('iand', a, ~0), a),
   (('iand', a, 0), 0),
   (('ior', a, a), a),
   (('ior', a, 0), a),
   (('ior', a, True), True),
   (('ixor', a, a), 0),
   (('ixor', a, 0), a),
   (('inot', ('inot', a)), a),
   (('ior', ('iand', a, b), b), b),
   (('ior', ('ior', a, b), b), ('ior', a, b)),
   (('iand', ('ior', a, b), b), b),
   (('iand', ('iand', a, b), b), ('iand', a, b)),
   # DeMorgan's Laws
   (('iand', ('inot', a), ('inot', b)), ('inot', ('ior', a, b))),
   (('ior', ('inot', a), ('inot', b)), ('inot', ('iand', a, b))),
   # Shift optimizations
   (('ishl', 0, a), 0),
   (('ishl', a, 0), a),
   (('ishr', 0, a), 0),
   (('ishr', a, 0), a),
   (('ushr', 0, a), 0),
   (('ushr', a, 0), a),
   (('iand', 0xff, ('ushr@32', a, 24)), ('ushr', a, 24)),
   (('iand', 0xffff, ('ushr@32', a, 16)), ('ushr', a, 16)),
   (('ior', ('ishl@16', a, b), ('ushr@16', a, ('iadd', 16, ('ineg', b)))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ishl@16', a, b), ('ushr@16', a, ('isub', 16, b))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ishl@32', a, b), ('ushr@32', a, ('iadd', 32, ('ineg', b)))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ishl@32', a, b), ('ushr@32', a, ('isub', 32, b))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@16', a, b), ('ishl@16', a, ('iadd', 16, ('ineg', b)))), ('uror', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@16', a, b), ('ishl@16', a, ('isub', 16, b))), ('uror', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@32', a, b), ('ishl@32', a, ('iadd', 32, ('ineg', b)))), ('uror', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@32', a, b), ('ishl@32', a, ('isub', 32, b))), ('uror', a, b), '!options->lower_rotate'),
   (('urol@16', a, b), ('ior', ('ishl', a, b), ('ushr', a, ('isub', 16, b))), 'options->lower_rotate'),
   (('urol@32', a, b), ('ior', ('ishl', a, b), ('ushr', a, ('isub', 32, b))), 'options->lower_rotate'),
   (('uror@16', a, b), ('ior', ('ushr', a, b), ('ishl', a, ('isub', 16, b))), 'options->lower_rotate'),
   (('uror@32', a, b), ('ior', ('ushr', a, b), ('ishl', a, ('isub', 32, b))), 'options->lower_rotate'),
   # Exponential/logarithmic identities
   (('~fexp2', ('flog2', a)), a), # 2^lg2(a) = a
   (('~flog2', ('fexp2', a)), a), # lg2(2^a) = a
   (('fpow', a, b), ('fexp2', ('fmul', ('flog2', a), b)), 'options->lower_fpow'), # a^b = 2^(lg2(a)*b)
   (('~fexp2', ('fmul', ('flog2', a), b)), ('fpow', a, b), '!options->lower_fpow'), # 2^(lg2(a)*b) = a^b
   (('~fexp2', ('fadd', ('fmul', ('flog2', a), b), ('fmul', ('flog2', c), d))),
    ('~fmul', ('fpow', a, b), ('fpow', c, d)), '!options->lower_fpow'), # 2^(lg2(a)*b + lg2(c)*d) = a^b * c^d
   (('~fexp2', ('fmul', ('flog2', a), 2.0)), ('fmul', a, a)),
   (('~fexp2', ('fmul', ('flog2', a), 4.0)), ('fmul', ('fmul', a, a), ('fmul', a, a))),
   (('~fpow', a, 1.0), a),
   (('~fpow', a, 2.0), ('fmul', a, a)),
   (('~fpow', a, 4.0), ('fmul', ('fmul', a, a), ('fmul', a, a))),
   (('~fpow', 2.0, a), ('fexp2', a)),
   (('~fpow', ('fpow', a, 2.2), 0.454545), a),
   (('~fpow', ('fabs', ('fpow', a, 2.2)), 0.454545), ('fabs', a)),
   (('~fsqrt', ('fexp2', a)), ('fexp2', ('fmul', 0.5, a))),
   (('~frcp', ('fexp2', a)), ('fexp2', ('fneg', a))),
   (('~frsq', ('fexp2', a)), ('fexp2', ('fmul', -0.5, a))),
   (('~flog2', ('fsqrt', a)), ('fmul', 0.5, ('flog2', a))),
   (('~flog2', ('frcp', a)), ('fneg', ('flog2', a))),
   (('~flog2', ('frsq', a)), ('fmul', -0.5, ('flog2', a))),
   (('~flog2', ('fpow', a, b)), ('fmul', b, ('flog2', a))),
   (('~fmul', ('fexp2(is_used_once)', a), ('fexp2(is_used_once)', b)), ('fexp2', ('fadd', a, b))),
   (('bcsel', ('flt', a, 0.0), 0.0, ('fsqrt', a)), ('fsqrt', ('fmax', a, 0.0))),
   # Division and reciprocal
   (('~fdiv', 1.0, a), ('frcp', a)),
   (('fdiv', a, b), ('fmul', a, ('frcp', b)), 'options->lower_fdiv'),
   (('~frcp', ('frcp', a)), a),
   (('~frcp', ('fsqrt', a)), ('frsq', a)),
   (('fsqrt', a), ('frcp', ('frsq', a)), 'options->lower_fsqrt'),
   (('~frcp', ('frsq', a)), ('fsqrt', a), '!options->lower_fsqrt'),
   # Trig
   (('fsin', a), lowered_sincos(0.5), 'options->lower_sincos'),
   (('fcos', a), lowered_sincos(0.75), 'options->lower_sincos'),
   # Boolean simplifications
   (('i2b32(is_used_by_if)', a), ('ine32', a, 0)),
   (('i2b1(is_used_by_if)', a), ('ine', a, 0)),
   (('ieq', a, True), a),
   (('ine(is_not_used_by_if)', a, True), ('inot', a)),
   (('ine', a, False), a),
   (('ieq(is_not_used_by_if)', a, False), ('inot', 'a')),
   (('bcsel', a, True, False), a),
   (('bcsel', a, False, True), ('inot', a)),
   (('bcsel@32', a, 1.0, 0.0), ('b2f', a)),
   (('bcsel@32', a, 0.0, 1.0), ('b2f', ('inot', a))),
   (('bcsel@32', a, -1.0, -0.0), ('fneg', ('b2f', a))),
   (('bcsel@32', a, -0.0, -1.0), ('fneg', ('b2f', ('inot', a)))),
   (('bcsel', True, b, c), b),
   (('bcsel', False, b, c), c),
   (('bcsel', a, ('b2f(is_used_once)', 'b@32'), ('b2f', 'c@32')), ('b2f', ('bcsel', a, b, c))),

   (('bcsel', a, b, b), b),
   (('~fcsel', a, b, b), b),

   # D3D Boolean emulation
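   # D3D represents booleans as 32-bit integers that are either 0 or ~0
   # (i.e. -1), which is why the rules below traffic in ('ineg', ('b2i', ...)):
   # b2i yields 0 or 1, and negating gives the 0 / -1 encoding.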
   (('bcsel', a, -1, 0), ('ineg', ('b2i', 'a@1'))),
   (('bcsel', a, 0, -1), ('ineg', ('b2i', ('inot', a)))),
   (('iand', ('ineg', ('b2i', 'a@1')), ('ineg', ('b2i', 'b@1'))),
    ('ineg', ('b2i', ('iand', a, b)))),
   (('ior', ('ineg', ('b2i', 'a@1')), ('ineg', ('b2i', 'b@1'))),
    ('ineg', ('b2i', ('ior', a, b)))),
   (('ieq', ('ineg', ('b2i', 'a@1')), 0), ('inot', a)),
   (('ieq', ('ineg', ('b2i', 'a@1')), -1), a),
   (('ine', ('ineg', ('b2i', 'a@1')), 0), a),
   (('ine', ('ineg', ('b2i', 'a@1')), -1), ('inot', a)),
   (('iand', ('ineg', ('b2i', a)), 1.0), ('b2f', a)),
   (('iand', ('ineg', ('b2i', a)), 1), ('b2i', a)),

   # SM5 32-bit shifts are defined to use the 5 least significant bits
   (('ishl', 'a@32', ('iand', 31, b)), ('ishl', a, b)),
   (('ishr', 'a@32', ('iand', 31, b)), ('ishr', a, b)),
   (('ushr', 'a@32', ('iand', 31, b)), ('ushr', a, b)),

   # Conversions
   (('i2b32', ('b2i', 'a@32')), a),
   (('f2i', ('ftrunc', a)), ('f2i', a)),
   (('f2u', ('ftrunc', a)), ('f2u', a)),
   (('i2b', ('ineg', a)), ('i2b', a)),
   (('i2b', ('iabs', a)), ('i2b', a)),
   (('inot', ('f2b1', a)), ('feq', a, 0.0)),

   # The C spec says, "If the value of the integral part cannot be represented
   # by the integer type, the behavior is undefined." "Undefined" can mean
   # "the conversion doesn't happen at all."
   (('~i2f32', ('f2i32', 'a@32')), ('ftrunc', a)),

   # Ironically, mark these as imprecise because removing the conversions may
   # preserve more precision than doing the conversions (e.g.,
   # uint(float(0x81818181u)) == 0x81818200).
   (('~f2i32', ('i2f', 'a@32')), a),
   (('~f2i32', ('u2f', 'a@32')), a),
   (('~f2u32', ('i2f', 'a@32')), a),
   (('~f2u32', ('u2f', 'a@32')), a),

   (('ffloor', 'a(is_integral)'), a),
   (('fceil', 'a(is_integral)'), a),
   (('ftrunc', 'a(is_integral)'), a),
   # fract(x) = x - floor(x), so fract(NaN) = NaN
   (('~ffract', 'a(is_integral)'), 0.0),
   (('fabs', 'a(is_not_negative)'), a),
   (('iabs', 'a(is_not_negative)'), a),
   (('fsat', 'a(is_not_positive)'), 0.0),

   # Section 5.4.1 (Conversion and Scalar Constructors) of the GLSL 4.60 spec
   # says:
   #
   #    It is undefined to convert a negative floating-point value to an
   #    uint.
   #
   # Assuming that (uint)some_float behaves like (uint)(int)some_float allows
   # some optimizations in the i965 backend to proceed.
   (('ige', ('f2u', a), b), ('ige', ('f2i', a), b)),
   (('ige', b, ('f2u', a)), ('ige', b, ('f2i', a))),
   (('ilt', ('f2u', a), b), ('ilt', ('f2i', a), b)),
   (('ilt', b, ('f2u', a)), ('ilt', b, ('f2i', a))),

   (('~fmin', 'a(is_not_negative)', 1.0), ('fsat', a), '!options->lower_fsat'),

   # The result of the multiply must be in [-1, 0], so the result of the ffma
   # must be in [0, 1].
   (('flt', ('fadd', ('fmul', ('fsat', a), ('fneg', ('fsat', a))), 1.0), 0.0), False),
   (('flt', ('fadd', ('fneg', ('fmul', ('fsat', a), ('fsat', a))), 1.0), 0.0), False),
   (('fmax', ('fadd', ('fmul', ('fsat', a), ('fneg', ('fsat', a))), 1.0), 0.0), ('fadd', ('fmul', ('fsat', a), ('fneg', ('fsat', a))), 1.0)),
   (('fmax', ('fadd', ('fneg', ('fmul', ('fsat', a), ('fsat', a))), 1.0), 0.0), ('fadd', ('fneg', ('fmul', ('fsat', a), ('fsat', a))), 1.0)),

   (('fne', 'a(is_not_zero)', 0.0), True),
   (('feq', 'a(is_not_zero)', 0.0), False),

   # In this chart, + means value > 0 and - means value < 0.
   #
   # + >= + -> unknown  0 >= + -> false    - >= + -> false
   # + >= 0 -> true     0 >= 0 -> true     - >= 0 -> false
   # + >= - -> true     0 >= - -> true     - >= - -> unknown
   #
   # Using grouping conceptually similar to a Karnaugh map...
   #
   # (+ >= 0, + >= -, 0 >= 0, 0 >= -) == (is_not_negative >= is_not_positive) -> true
   # (0 >= +, - >= +) == (is_not_positive >= gt_zero) -> false
   # (- >= +, - >= 0) == (lt_zero >= is_not_negative) -> false
   #
   # The flt / ilt cases just invert the expected result.
   #
   # The results expecting true, must be marked imprecise. The results
   # expecting false are fine because NaN compared >= or < anything is false.

   (('~fge', 'a(is_not_negative)', 'b(is_not_positive)'), True),
   (('fge', 'a(is_not_positive)', 'b(is_gt_zero)'), False),
   (('fge', 'a(is_lt_zero)', 'b(is_not_negative)'), False),

   (('flt', 'a(is_not_negative)', 'b(is_not_positive)'), False),
   (('~flt', 'a(is_not_positive)', 'b(is_gt_zero)'), True),
   (('~flt', 'a(is_lt_zero)', 'b(is_not_negative)'), True),

   (('ine', 'a(is_not_zero)', 0), True),
   (('ieq', 'a(is_not_zero)', 0), False),

   (('ige', 'a(is_not_negative)', 'b(is_not_positive)'), True),
   (('ige', 'a(is_not_positive)', 'b(is_gt_zero)'), False),
   (('ige', 'a(is_lt_zero)', 'b(is_not_negative)'), False),

   (('ilt', 'a(is_not_negative)', 'b(is_not_positive)'), False),
   (('ilt', 'a(is_not_positive)', 'b(is_gt_zero)'), True),
   (('ilt', 'a(is_lt_zero)', 'b(is_not_negative)'), True),

   (('ult', 0, 'a(is_gt_zero)'), True),

   # Packing and then unpacking does nothing
   (('unpack_64_2x32_split_x', ('pack_64_2x32_split', a, b)), a),
   (('unpack_64_2x32_split_y', ('pack_64_2x32_split', a, b)), b),
   (('pack_64_2x32_split', ('unpack_64_2x32_split_x', a),
                           ('unpack_64_2x32_split_y', a)), a),

   # Comparing two halves of an unpack separately. While this optimization
   # should be correct for non-constant values, it's less obvious that it's
   # useful in that case. For constant values, the pack will fold and we're
   # guaranteed to reduce the whole tree to one instruction.
   (('iand', ('ieq', ('unpack_32_2x16_split_x', a), '#b'),
             ('ieq', ('unpack_32_2x16_split_y', a), '#c')),
    ('ieq', a, ('pack_32_2x16_split', b, c))),

   # Byte extraction
   (('ushr', 'a@16',  8), ('extract_u8', a, 1), '!options->lower_extract_byte'),
   (('ushr', 'a@32', 24), ('extract_u8', a, 3), '!options->lower_extract_byte'),
   (('ushr', 'a@64', 56), ('extract_u8', a, 7), '!options->lower_extract_byte'),
   (('ishr', 'a@16',  8), ('extract_i8', a, 1), '!options->lower_extract_byte'),
   (('ishr', 'a@32', 24), ('extract_i8', a, 3), '!options->lower_extract_byte'),
   (('ishr', 'a@64', 56), ('extract_i8', a, 7), '!options->lower_extract_byte'),
   (('iand', 0xff, a), ('extract_u8', a, 0), '!options->lower_extract_byte'),

   # Useless masking before unpacking
   (('unpack_half_2x16_split_x', ('iand', a, 0xffff)), ('unpack_half_2x16_split_x', a)),
   (('unpack_32_2x16_split_x', ('iand', a, 0xffff)), ('unpack_32_2x16_split_x', a)),
   (('unpack_64_2x32_split_x', ('iand', a, 0xffffffff)), ('unpack_64_2x32_split_x', a)),
   (('unpack_half_2x16_split_y', ('iand', a, 0xffff0000)), ('unpack_half_2x16_split_y', a)),
   (('unpack_32_2x16_split_y', ('iand', a, 0xffff0000)), ('unpack_32_2x16_split_y', a)),
   (('unpack_64_2x32_split_y', ('iand', a, 0xffffffff00000000)), ('unpack_64_2x32_split_y', a)),
])

# After the ('extract_u8', a, 0) pattern, above, triggers, there will be
# patterns like those below.
for op in ('ushr', 'ishr'):
   optimizations.extend([(('extract_u8', (op, 'a@16', 8), 0), ('extract_u8', a, 1))])
   optimizations.extend([(('extract_u8', (op, 'a@32', 8 * i), 0), ('extract_u8', a, i)) for i in range(1, 4)])
   optimizations.extend([(('extract_u8', (op, 'a@64', 8 * i), 0), ('extract_u8', a, i)) for i in range(1, 8)])
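
# For example, with op = 'ushr' and i = 3 the list comprehension above emits
# (('extract_u8', ('ushr', 'a@32', 24), 0), ('extract_u8', a, 3)): the low
# byte of a 32-bit value shifted right by 24 is just byte 3 of that value.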

optimizations.extend([(('extract_u8', ('extract_u16', a, 1), 0), ('extract_u8', a, 2))])

# After the ('extract_[iu]8', a, 3) patterns, above, trigger, there will be
# patterns like those below.
for op in ('extract_u8', 'extract_i8'):
   optimizations.extend([((op, ('ishl', 'a@16', 8), 1), (op, a, 0))])
   optimizations.extend([((op, ('ishl', 'a@32', 24 - 8 * i), 3), (op, a, i)) for i in range(2, -1, -1)])
   optimizations.extend([((op, ('ishl', 'a@64', 56 - 8 * i), 7), (op, a, i)) for i in range(6, -1, -1)])
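
# Likewise, with op = 'extract_u8' and i = 0 this emits
# (('extract_u8', ('ishl', 'a@32', 24), 3), ('extract_u8', a, 0)): the top
# byte of a value shifted left by 24 is byte 0 of the original value.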

optimizations.extend([
   # Word extraction
   (('ushr', ('ishl', 'a@32', 16), 16), ('extract_u16', a, 0), '!options->lower_extract_word'),
   (('ushr', 'a@32', 16), ('extract_u16', a, 1), '!options->lower_extract_word'),
   (('ishr', ('ishl', 'a@32', 16), 16), ('extract_i16', a, 0), '!options->lower_extract_word'),
   (('ishr', 'a@32', 16), ('extract_i16', a, 1), '!options->lower_extract_word'),
   (('iand', 0xffff, a), ('extract_u16', a, 0), '!options->lower_extract_word'),

   # Subtracts
   (('ussub_4x8', a, 0), a),
   (('ussub_4x8', a, ~0), 0),
   # Lower all Subtractions first - they can get recombined later
   (('fsub', a, b), ('fadd', a, ('fneg', b))),
   (('isub', a, b), ('iadd', a, ('ineg', b))),

   # Propagate negation up multiplication chains
   (('fmul(is_used_by_non_fsat)', ('fneg', a), b), ('fneg', ('fmul', a, b))),
   (('imul', ('ineg', a), b), ('ineg', ('imul', a, b))),

   # Propagate constants up multiplication chains
   (('~fmul(is_used_once)', ('fmul(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('fmul', ('fmul', a, c), b)),
   (('imul(is_used_once)', ('imul(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('imul', ('imul', a, c), b)),
   (('~fadd(is_used_once)', ('fadd(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('fadd', ('fadd', a, c), b)),
   (('iadd(is_used_once)', ('iadd(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('iadd', ('iadd', a, c), b)),

   # Reassociate constants in add/mul chains so they can be folded together.
   # For now, we mostly only handle cases where the constants are separated by
   # a single non-constant. We could do better eventually.
   (('~fmul', '#a', ('fmul', 'b(is_not_const)', '#c')), ('fmul', ('fmul', a, c), b)),
   (('imul', '#a', ('imul', 'b(is_not_const)', '#c')), ('imul', ('imul', a, c), b)),
   (('~fadd', '#a', ('fadd', 'b(is_not_const)', '#c')), ('fadd', ('fadd', a, c), b)),
   (('~fadd', '#a', ('fneg', ('fadd', 'b(is_not_const)', '#c'))), ('fadd', ('fadd', a, ('fneg', c)), ('fneg', b))),
   (('iadd', '#a', ('iadd', 'b(is_not_const)', '#c')), ('iadd', ('iadd', a, c), b)),

   # Drop mul-div by the same value when there's no wrapping.
   (('idiv', ('imul(no_signed_wrap)', a, b), b), a),

   # By definition...
   (('bcsel', ('ige', ('find_lsb', a), 0), ('find_lsb', a), -1), ('find_lsb', a)),
   (('bcsel', ('ige', ('ifind_msb', a), 0), ('ifind_msb', a), -1), ('ifind_msb', a)),
   (('bcsel', ('ige', ('ufind_msb', a), 0), ('ufind_msb', a), -1), ('ufind_msb', a)),

   (('bcsel', ('ine', a, 0), ('find_lsb', a), -1), ('find_lsb', a)),
   (('bcsel', ('ine', a, 0), ('ifind_msb', a), -1), ('ifind_msb', a)),
   (('bcsel', ('ine', a, 0), ('ufind_msb', a), -1), ('ufind_msb', a)),

   (('bcsel', ('ine', a, -1), ('ifind_msb', a), -1), ('ifind_msb', a)),

   # Misc. lowering
   (('fmod@16', a, b), ('fsub', a, ('fmul', b, ('ffloor', ('fdiv', a, b)))), 'options->lower_fmod'),
   (('fmod@32', a, b), ('fsub', a, ('fmul', b, ('ffloor', ('fdiv', a, b)))), 'options->lower_fmod'),
   (('frem', a, b), ('fsub', a, ('fmul', b, ('ftrunc', ('fdiv', a, b)))), 'options->lower_fmod'),
   (('uadd_carry@32', a, b), ('b2i', ('ult', ('iadd', a, b), a)), 'options->lower_uadd_carry'),
   (('usub_borrow@32', a, b), ('b2i', ('ult', a, b)), 'options->lower_usub_borrow'),
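   # The carry lowering relies on wraparound: for 32-bit unsigned addition,
   # a + b overflows exactly when the wrapped sum is less than either operand,
   # so ('ult', ('iadd', a, b), a) is 1 precisely in the carry-out case.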
993
994 (('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
995 ('bcsel', ('ult', 31, 'bits'), 'insert',
996 ('bfi', ('bfm', 'bits', 'offset'), 'insert', 'base')),
997 'options->lower_bitfield_insert'),
998 (('ihadd', a, b), ('iadd', ('iand', a, b), ('ishr', ('ixor', a, b), 1)), 'options->lower_hadd'),
999 (('uhadd', a, b), ('iadd', ('iand', a, b), ('ushr', ('ixor', a, b), 1)), 'options->lower_hadd'),
1000 (('irhadd', a, b), ('isub', ('ior', a, b), ('ishr', ('ixor', a, b), 1)), 'options->lower_hadd'),
1001 (('urhadd', a, b), ('isub', ('ior', a, b), ('ushr', ('ixor', a, b), 1)), 'options->lower_hadd'),
1002 (('uadd_sat', a, b), ('bcsel', ('ult', ('iadd', a, b), a), -1, ('iadd', a, b)), 'options->lower_add_sat'),
1003 (('usub_sat', a, b), ('bcsel', ('ult', a, b), 0, ('isub', a, b)), 'options->lower_add_sat'),
1004
1005 # Alternative lowering that doesn't rely on bfi.
1006 (('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
1007 ('bcsel', ('ult', 31, 'bits'),
1008 'insert',
1009 (('ior',
1010 ('iand', 'base', ('inot', ('ishl', ('isub', ('ishl', 1, 'bits'), 1), 'offset'))),
1011 ('iand', ('ishl', 'insert', 'offset'), ('ishl', ('isub', ('ishl', 1, 'bits'), 1), 'offset'))))),
1012 'options->lower_bitfield_insert_to_shifts'),
1013
1014 # Alternative lowering that uses bitfield_select.
1015 (('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
1016 ('bcsel', ('ult', 31, 'bits'), 'insert',
1017 ('bitfield_select', ('bfm', 'bits', 'offset'), ('ishl', 'insert', 'offset'), 'base')),
1018 'options->lower_bitfield_insert_to_bitfield_select'),
1019
1020 (('ibitfield_extract', 'value', 'offset', 'bits'),
1021 ('bcsel', ('ult', 31, 'bits'), 'value',
1022 ('ibfe', 'value', 'offset', 'bits')),
1023 'options->lower_bitfield_extract'),
1024
1025 (('ubitfield_extract', 'value', 'offset', 'bits'),
1026 ('bcsel', ('ult', 31, 'bits'), 'value',
1027 ('ubfe', 'value', 'offset', 'bits')),
1028 'options->lower_bitfield_extract'),
1029
1030 # Note that these opcodes are defined to only use the five least significant bits of 'offset' and 'bits'
1031 (('ubfe', 'value', 'offset', ('iand', 31, 'bits')), ('ubfe', 'value', 'offset', 'bits')),
1032 (('ubfe', 'value', ('iand', 31, 'offset'), 'bits'), ('ubfe', 'value', 'offset', 'bits')),
1033 (('ibfe', 'value', 'offset', ('iand', 31, 'bits')), ('ibfe', 'value', 'offset', 'bits')),
1034 (('ibfe', 'value', ('iand', 31, 'offset'), 'bits'), ('ibfe', 'value', 'offset', 'bits')),
1035 (('bfm', 'bits', ('iand', 31, 'offset')), ('bfm', 'bits', 'offset')),
1036 (('bfm', ('iand', 31, 'bits'), 'offset'), ('bfm', 'bits', 'offset')),
1037
1038 (('ibitfield_extract', 'value', 'offset', 'bits'),
1039 ('bcsel', ('ieq', 0, 'bits'),
1040 0,
1041 ('ishr',
1042 ('ishl', 'value', ('isub', ('isub', 32, 'bits'), 'offset')),
1043 ('isub', 32, 'bits'))),
1044 'options->lower_bitfield_extract_to_shifts'),
1045
1046 (('ubitfield_extract', 'value', 'offset', 'bits'),
1047 ('iand',
1048 ('ushr', 'value', 'offset'),
1049 ('bcsel', ('ieq', 'bits', 32),
1050 0xffffffff,
1051 ('isub', ('ishl', 1, 'bits'), 1))),
1052 'options->lower_bitfield_extract_to_shifts'),
1053
1054 (('ifind_msb', 'value'),
1055 ('ufind_msb', ('bcsel', ('ilt', 'value', 0), ('inot', 'value'), 'value')),
1056 'options->lower_ifind_msb'),
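   # (For negative 'value' the sign bits are all ones, so ifind_msb has to
   # locate the highest *clear* bit instead; complementing the input first
   # lets a single ufind_msb handle both signs.)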
1057
1058 (('find_lsb', 'value'),
1059 ('ufind_msb', ('iand', 'value', ('ineg', 'value'))),
1060 'options->lower_find_lsb'),
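   # (This uses the two's-complement identity value & -value, which isolates
   # the lowest set bit; its index is then just the MSB of the result.)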
1061
1062 (('extract_i8', a, 'b@32'),
1063 ('ishr', ('ishl', a, ('imul', ('isub', 3, b), 8)), 24),
1064 'options->lower_extract_byte'),
1065
1066 (('extract_u8', a, 'b@32'),
1067 ('iand', ('ushr', a, ('imul', b, 8)), 0xff),
1068 'options->lower_extract_byte'),
1069
1070 (('extract_i16', a, 'b@32'),
1071 ('ishr', ('ishl', a, ('imul', ('isub', 1, b), 16)), 16),
1072 'options->lower_extract_word'),
1073
1074 (('extract_u16', a, 'b@32'),
1075 ('iand', ('ushr', a, ('imul', b, 16)), 0xffff),
1076 'options->lower_extract_word'),
1077
1078 (('pack_unorm_2x16', 'v'),
1079 ('pack_uvec2_to_uint',
1080 ('f2u32', ('fround_even', ('fmul', ('fsat', 'v'), 65535.0)))),
1081 'options->lower_pack_unorm_2x16'),
1082
1083 (('pack_unorm_4x8', 'v'),
1084 ('pack_uvec4_to_uint',
1085 ('f2u32', ('fround_even', ('fmul', ('fsat', 'v'), 255.0)))),
1086 'options->lower_pack_unorm_4x8'),
1087
1088 (('pack_snorm_2x16', 'v'),
1089 ('pack_uvec2_to_uint',
1090 ('f2i32', ('fround_even', ('fmul', ('fmin', 1.0, ('fmax', -1.0, 'v')), 32767.0)))),
1091 'options->lower_pack_snorm_2x16'),
1092
1093 (('pack_snorm_4x8', 'v'),
1094 ('pack_uvec4_to_uint',
1095 ('f2i32', ('fround_even', ('fmul', ('fmin', 1.0, ('fmax', -1.0, 'v')), 127.0)))),
1096 'options->lower_pack_snorm_4x8'),
1097
1098 (('unpack_unorm_2x16', 'v'),
1099 ('fdiv', ('u2f32', ('vec2', ('extract_u16', 'v', 0),
1100 ('extract_u16', 'v', 1))),
1101 65535.0),
1102 'options->lower_unpack_unorm_2x16'),
1103
1104 (('unpack_unorm_4x8', 'v'),
1105 ('fdiv', ('u2f32', ('vec4', ('extract_u8', 'v', 0),
1106 ('extract_u8', 'v', 1),
1107 ('extract_u8', 'v', 2),
1108 ('extract_u8', 'v', 3))),
1109 255.0),
1110 'options->lower_unpack_unorm_4x8'),
1111
1112 (('unpack_snorm_2x16', 'v'),
1113 ('fmin', 1.0, ('fmax', -1.0, ('fdiv', ('i2f', ('vec2', ('extract_i16', 'v', 0),
1114 ('extract_i16', 'v', 1))),
1115 32767.0))),
1116 'options->lower_unpack_snorm_2x16'),
1117
1118 (('unpack_snorm_4x8', 'v'),
1119 ('fmin', 1.0, ('fmax', -1.0, ('fdiv', ('i2f', ('vec4', ('extract_i8', 'v', 0),
1120 ('extract_i8', 'v', 1),
1121 ('extract_i8', 'v', 2),
1122 ('extract_i8', 'v', 3))),
1123 127.0))),
1124 'options->lower_unpack_snorm_4x8'),
1125
1126 (('pack_half_2x16_split', 'a@32', 'b@32'),
1127 ('ior', ('ishl', ('u2u32', ('f2f16', b)), 16), ('u2u32', ('f2f16', a))),
1128 'options->lower_pack_half_2x16_split'),
1129
1130 (('unpack_half_2x16_split_x', 'a@32'),
1131 ('f2f32', ('u2u16', a)),
1132 'options->lower_unpack_half_2x16_split'),
1133
1134 (('unpack_half_2x16_split_y', 'a@32'),
1135 ('f2f32', ('u2u16', ('ushr', a, 16))),
1136 'options->lower_unpack_half_2x16_split'),
1137
1138 (('isign', a), ('imin', ('imax', a, -1), 1), 'options->lower_isign'),
1139 (('fsign', a), ('fsub', ('b2f', ('flt', 0.0, a)), ('b2f', ('flt', a, 0.0))), 'options->lower_fsign'),
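   # (The fsign replacement evaluates to 1.0 - 0.0, 0.0 - 0.0 or 0.0 - 1.0
   # for positive, zero and negative 'a' respectively.)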
1140
1141 # Address/offset calculations:
1142 # Drivers supporting imul24 should use the nir_lower_amul() pass; this
1143 # rule converts everyone else to imul:
1144 (('amul', a, b), ('imul', a, b), '!options->has_imul24'),
1145
1146 (('imad24_ir3', a, b, 0), ('imul24', a, b)),
1147 (('imad24_ir3', a, 0, c), (c)),
1148 (('imad24_ir3', a, 1, c), ('iadd', a, c)),
1149
1150 # If the first two srcs are const, crack apart the imad so constant folding
1151 # can clean up the imul:
1152 # TODO ffma should probably get a similar rule:
1153 (('imad24_ir3', '#a', '#b', c), ('iadd', ('imul', a, b), c)),
1154
1155 # These will turn 24b address/offset calc back into 32b shifts, but
1156 # it should be safe to get back some of the bits of precision that we
1157 # already decided were not necessary (a quick sanity check follows the table):
1158 (('imul24', a, '#b@32(is_pos_power_of_two)'), ('ishl', a, ('find_lsb', b)), '!options->lower_bitops'),
1159 (('imul24', a, '#b@32(is_neg_power_of_two)'), ('ineg', ('ishl', a, ('find_lsb', ('iabs', b)))), '!options->lower_bitops'),
1160 (('imul24', a, 0), (0)),
1161 ])
1162
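# A minimal, self-contained sanity check of the power-of-two trick used by
# the imul24 rules above (illustration only; the helper name is not part of
# the pass): multiplying by a power of two is the same as shifting left by
# the index of its single set bit, which is what find_lsb computes.
def _check_pow2_mul_as_shift():
   for x in (3, 123456):
      for p in (1, 2, 64, 1 << 23):
         assert x * p == x << ((p & -p).bit_length() - 1)
_check_pow2_mul_as_shift()
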
1163 # bit_size dependent lowerings
1164 for bit_size in [8, 16, 32, 64]:
1165 # convenience constants
1166 intmax = (1 << (bit_size - 1)) - 1
1167 intmin = 1 << (bit_size - 1)
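   # (as a bit pattern, 1 << (bit_size - 1) is INT_MIN for this bit size)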
1168
1169 optimizations += [
1170 (('iadd_sat@' + str(bit_size), a, b),
1171 ('bcsel', ('ige', b, 1), ('bcsel', ('ilt', ('iadd', a, b), a), intmax, ('iadd', a, b)),
1172 ('bcsel', ('ilt', a, ('iadd', a, b)), intmin, ('iadd', a, b))), 'options->lower_add_sat'),
1173 (('isub_sat@' + str(bit_size), a, b),
1174 ('bcsel', ('ilt', b, 0), ('bcsel', ('ilt', ('isub', a, b), a), intmax, ('isub', a, b)),
1175 ('bcsel', ('ilt', a, ('isub', a, b)), intmin, ('isub', a, b))), 'options->lower_add_sat'),
1176 ]
1177
1178 invert = OrderedDict([('feq', 'fne'), ('fne', 'feq'), ('fge', 'flt'), ('flt', 'fge')])
1179
1180 for left, right in itertools.combinations_with_replacement(invert.keys(), 2):
1181 optimizations.append((('inot', ('ior(is_used_once)', (left, a, b), (right, c, d))),
1182 ('iand', (invert[left], a, b), (invert[right], c, d))))
1183 optimizations.append((('inot', ('iand(is_used_once)', (left, a, b), (right, c, d))),
1184 ('ior', (invert[left], a, b), (invert[right], c, d))))
1185
1186 # Optimize x2bN(b2x(x)) -> x
1187 for size in type_sizes('bool'):
1188 aN = 'a@' + str(size)
1189 f2bN = 'f2b' + str(size)
1190 i2bN = 'i2b' + str(size)
1191 optimizations.append(((f2bN, ('b2f', aN)), a))
1192 optimizations.append(((i2bN, ('b2i', aN)), a))
1193
1194 # Optimize x2yN(b2x(x)) -> b2y(x)
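# For instance, this generates f2u(b2f(a)) -> b2i(a).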
1195 for x, y in itertools.product(['f', 'u', 'i'], ['f', 'u', 'i']):
1196 if x != 'f' and y != 'f' and x != y:
1197 continue
1198
1199 b2x = 'b2f' if x == 'f' else 'b2i'
1200 b2y = 'b2f' if y == 'f' else 'b2i'
1201 x2yN = '{}2{}'.format(x, y)
1202 optimizations.append(((x2yN, (b2x, a)), (b2y, a)))
1203
1204 # Optimize away x2xN(a@N)
1205 for t in ['int', 'uint', 'float']:
1206 for N in type_sizes(t):
1207 x2xN = '{0}2{0}{1}'.format(t[0], N)
1208 aN = 'a@{0}'.format(N)
1209 optimizations.append(((x2xN, aN), a))
1210
1211 # Optimize x2xN(y2yM(a@P)) -> y2yN(a) for integers
1212 # In particular, we can optimize away everything except upcast of downcast and
1213 # upcasts where the type differs from the other cast
1214 for N, M in itertools.product(type_sizes('uint'), type_sizes('uint')):
1215 if N < M:
1216 # The outer cast is a down-cast. It doesn't matter what the size of the
1217 # argument of the inner cast is because we'll never be in the upcast
1218 # of downcast case. Regardless of types, we'll always end up with y2yN
1219 # in the end.
1220 for x, y in itertools.product(['i', 'u'], ['i', 'u']):
1221 x2xN = '{0}2{0}{1}'.format(x, N)
1222 y2yM = '{0}2{0}{1}'.format(y, M)
1223 y2yN = '{0}2{0}{1}'.format(y, N)
1224 optimizations.append(((x2xN, (y2yM, a)), (y2yN, a)))
1225 elif N > M:
1226 # If the outer cast is an up-cast, we have to be more careful about the
1227 # size of the argument of the inner cast and with types. In this case,
1228 # the type is always the type of the up-cast, which is given by the
1229 # outer cast.
1230 for P in type_sizes('uint'):
1231 # We can't optimize away up-cast of down-cast.
1232 if M < P:
1233 continue
1234
1235 # Because we're doing an up-cast of an up-cast, the types always
1236 # have to match between the two casts.
1237 for x in ['i', 'u']:
1238 x2xN = '{0}2{0}{1}'.format(x, N)
1239 x2xM = '{0}2{0}{1}'.format(x, M)
1240 aP = 'a@{0}'.format(P)
1241 optimizations.append(((x2xN, (x2xM, aP)), (x2xN, a)))
1242 else:
1243 # The N == M case is handled by other optimizations
1244 pass
1245
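# A small illustrative check of the integer-cast folding above (not used by
# the pass; the helpers below are hypothetical): extension chains of matching
# type collapse, while mixing zero- and sign-extension does not.
def _sext(v, bits):
   # Interpret the low 'bits' bits of v as a signed two's-complement value.
   m = 1 << (bits - 1)
   return ((v & ((1 << bits) - 1)) ^ m) - m

def _check_upcast_chain_fold():
   for v in range(-128, 128):
      # i2i32(i2i16(a@8)) behaves exactly like i2i32(a@8) ...
      assert _sext(_sext(v, 16), 32) == _sext(v, 32)
   # ... but u2u32(i2i16(a)) is not a single cast: zero- vs. sign-extending
   # -1 gives 0xffff and 0xffffffff respectively.
   assert (_sext(-1, 16) & 0xffff) != (_sext(-1, 32) & 0xffffffff)
_check_upcast_chain_fold()
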
1246 # Optimize comparisons with up-casts
1247 for t in ['int', 'uint', 'float']:
1248 for N, M in itertools.product(type_sizes(t), repeat=2):
1249 if N == 1 or N >= M:
1250 continue
1251
1252 x2xM = '{0}2{0}{1}'.format(t[0], M)
1253 x2xN = '{0}2{0}{1}'.format(t[0], N)
1254 aN = 'a@' + str(N)
1255 bN = 'b@' + str(N)
1256 xeq = 'feq' if t == 'float' else 'ieq'
1257 xne = 'fne' if t == 'float' else 'ine'
1258 xge = '{0}ge'.format(t[0])
1259 xlt = '{0}lt'.format(t[0])
1260
1261 # Up-casts are lossless, so for correctly signed comparisons of
1262 # up-casted values we can do the comparison at the largest of the two
1263 # original sizes and drop one or both of the casts. (We have
1264 # optimizations to drop the no-op casts which this may generate.)
1265 for P in type_sizes(t):
1266 if P == 1 or P > N:
1267 continue
1268
1269 bP = 'b@' + str(P)
1270 optimizations += [
1271 ((xeq, (x2xM, aN), (x2xM, bP)), (xeq, a, (x2xN, b))),
1272 ((xne, (x2xM, aN), (x2xM, bP)), (xne, a, (x2xN, b))),
1273 ((xge, (x2xM, aN), (x2xM, bP)), (xge, a, (x2xN, b))),
1274 ((xlt, (x2xM, aN), (x2xM, bP)), (xlt, a, (x2xN, b))),
1275 ((xge, (x2xM, bP), (x2xM, aN)), (xge, (x2xN, b), a)),
1276 ((xlt, (x2xM, bP), (x2xM, aN)), (xlt, (x2xN, b), a)),
1277 ]
1278
1279 # The next bit doesn't work on floats because the range checks would
1280 # get way too complicated.
1281 if t in ['int', 'uint']:
1282 if t == 'int':
1283 xN_min = -(1 << (N - 1))
1284 xN_max = (1 << (N - 1)) - 1
1285 elif t == 'uint':
1286 xN_min = 0
1287 xN_max = (1 << N) - 1
1288 else:
1289 assert False
1290
1291 # If we're up-casting and comparing to a constant, we can unfold the
1292 # comparison into a comparison with the shrunk-down constant and a check
1293 # that the constant fits in the smaller bit size (illustrated below).
1294 optimizations += [
1295 ((xeq, (x2xM, aN), '#b'),
1296 ('iand', (xeq, a, (x2xN, b)), (xeq, (x2xM, (x2xN, b)), b))),
1297 ((xne, (x2xM, aN), '#b'),
1298 ('ior', (xne, a, (x2xN, b)), (xne, (x2xM, (x2xN, b)), b))),
1299 ((xlt, (x2xM, aN), '#b'),
1300 ('iand', (xlt, xN_min, b),
1301 ('ior', (xlt, xN_max, b), (xlt, a, (x2xN, b))))),
1302 ((xlt, '#a', (x2xM, bN)),
1303 ('iand', (xlt, a, xN_max),
1304 ('ior', (xlt, a, xN_min), (xlt, (x2xN, a), b)))),
1305 ((xge, (x2xM, aN), '#b'),
1306 ('iand', (xge, xN_max, b),
1307 ('ior', (xge, xN_min, b), (xge, a, (x2xN, b))))),
1308 ((xge, '#a', (x2xM, bN)),
1309 ('iand', (xge, a, xN_min),
1310 ('ior', (xge, a, xN_max), (xge, (x2xN, a), b)))),
1311 ]
1312
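# Illustration of the constant unfolding above for signed 8 -> 16 bit
# up-casts (documentation only; the helper is hypothetical): i2i16(a) == b
# holds exactly when b survives a round trip through 8 bits and its low
# byte matches a.
def _check_upcast_eq_unfold():
   def sext8(v):
      return ((v & 0xff) ^ 0x80) - 0x80
   for a in (-128, -1, 0, 127):
      for b in (-32768, -129, -128, 0, 127, 128, 32767):
         folded = a == sext8(b) and sext8(b) == b
         assert (a == b) == folded
_check_upcast_eq_unfold()
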
1313 def fexp2i(exp, bits):
1314 # We assume that exp is already in the right range.
1315 if bits == 16:
1316 return ('i2i16', ('ishl', ('iadd', exp, 15), 10))
1317 elif bits == 32:
1318 return ('ishl', ('iadd', exp, 127), 23)
1319 elif bits == 64:
1320 return ('pack_64_2x32_split', 0, ('ishl', ('iadd', exp, 1023), 20))
1321 else:
1322 assert False
1323
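# Quick check of the binary32 bit pattern built above (illustration only):
# a biased exponent in bits [23:31) with a zero mantissa is exactly 2.0**exp.
def _check_fexp2i_pattern():
   for e in (-126, -1, 0, 1, 127):
      bits = (e + 127) << 23
      assert struct.unpack('<f', struct.pack('<I', bits))[0] == 2.0 ** e
_check_fexp2i_pattern()
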
1324 def ldexp(f, exp, bits):
1325 # First, we clamp exp to a reasonable range. The maximum possible range
1326 # for a normal exponent is [-126, 127] and, throwing in denormals, you get
1327 # a maximum range of [-149, 127]. This means that we can potentially have
1328 # a swing of +-276. If you start with FLT_MAX, you actually have to do
1329 # ldexp(FLT_MAX, -278) to get it to flush all the way to zero. The GLSL
1330 # spec, on the other hand, only requires that we handle an exponent value
1331 # in the range [-126, 128]. This implementation is *mostly* correct; it
1332 # handles a range on exp of [-252, 254] which allows you to create any
1333 # value (including denorms if the hardware supports it) and to adjust the
1334 # exponent of any normal value to anything you want.
1335 if bits == 16:
1336 exp = ('imin', ('imax', exp, -28), 30)
1337 elif bits == 32:
1338 exp = ('imin', ('imax', exp, -252), 254)
1339 elif bits == 64:
1340 exp = ('imin', ('imax', exp, -2044), 2046)
1341 else:
1342 assert False
1343
1344 # Now we compute two powers of 2, one for exp/2 and one for exp-exp/2.
1345 # (We use ishr which isn't the same for -1, but the -1 case still works
1346 # since we use exp-exp/2 as the second exponent.) While the spec
1347 # technically defines ldexp as f * 2.0^exp, simply multiplying once doesn't
1348 # work with denormals and doesn't allow for the full swing in exponents
1349 # that you can get with normalized values. Instead, we create two powers
1350 # of two and multiply by them each in turn. That way the effective range
1351 # of our exponent is doubled.
1352 pow2_1 = fexp2i(('ishr', exp, 1), bits)
1353 pow2_2 = fexp2i(('isub', exp, ('ishr', exp, 1)), bits)
1354 return ('fmul', ('fmul', f, pow2_1), pow2_2)
1355
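# Why two multiplies: with exp = 2000 and f = 2.0**-1000 the single scale
# factor 2.0**2000 is not even representable in binary64, but splitting the
# exponent keeps every intermediate product finite (illustration only).
def _check_ldexp_split():
   import math
   f, e = 2.0 ** -1000, 2000
   half = e >> 1
   assert f * (2.0 ** half) * (2.0 ** (e - half)) == math.ldexp(f, e)
_check_ldexp_split()
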
1356 optimizations += [
1357 (('ldexp@16', 'x', 'exp'), ldexp('x', 'exp', 16), 'options->lower_ldexp'),
1358 (('ldexp@32', 'x', 'exp'), ldexp('x', 'exp', 32), 'options->lower_ldexp'),
1359 (('ldexp@64', 'x', 'exp'), ldexp('x', 'exp', 64), 'options->lower_ldexp'),
1360 ]
1361
1362 # Unreal Engine 4 demo applications open-code bitfieldReverse()
1363 def bitfield_reverse(u):
1364 step1 = ('ior', ('ishl', u, 16), ('ushr', u, 16))
1365 step2 = ('ior', ('ishl', ('iand', step1, 0x00ff00ff), 8), ('ushr', ('iand', step1, 0xff00ff00), 8))
1366 step3 = ('ior', ('ishl', ('iand', step2, 0x0f0f0f0f), 4), ('ushr', ('iand', step2, 0xf0f0f0f0), 4))
1367 step4 = ('ior', ('ishl', ('iand', step3, 0x33333333), 2), ('ushr', ('iand', step3, 0xcccccccc), 2))
1368 step5 = ('ior(many-comm-expr)', ('ishl', ('iand', step4, 0x55555555), 1), ('ushr', ('iand', step4, 0xaaaaaaaa), 1))
1369
1370 return step5
1371
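# Sanity check (documentation only) that the masked shift/or butterfly
# matched above really reverses all 32 bits, using plain Python ints:
def _check_bitfield_reverse():
   def ref(u):
      return sum(((u >> i) & 1) << (31 - i) for i in range(32))
   for u in (0x00000001, 0x80000000, 0xdeadbeef, 0x12345678):
      v = ((u << 16) | (u >> 16)) & 0xffffffff
      v = ((v & 0x00ff00ff) << 8) | ((v & 0xff00ff00) >> 8)
      v = ((v & 0x0f0f0f0f) << 4) | ((v & 0xf0f0f0f0) >> 4)
      v = ((v & 0x33333333) << 2) | ((v & 0xcccccccc) >> 2)
      v = ((v & 0x55555555) << 1) | ((v & 0xaaaaaaaa) >> 1)
      assert v == ref(u)
_check_bitfield_reverse()
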
1372 optimizations += [(bitfield_reverse('x@32'), ('bitfield_reverse', 'x'), '!options->lower_bitfield_reverse')]
1373
1374 # For any float comparison operation, "cmp", if you have "a == a && a cmp b"
1375 # then the "a == a" is redundant because it's equivalent to "a is not NaN"
1376 # and, if a is a NaN, the second comparison will fail anyway.
1377 for op in ['flt', 'fge', 'feq']:
1378 optimizations += [
1379 (('iand', ('feq', a, a), (op, a, b)), ('!' + op, a, b)),
1380 (('iand', ('feq', a, a), (op, b, a)), ('!' + op, b, a)),
1381 ]
1382
1383 # Add optimizations to handle the case where the result of a ternary is
1384 # compared to a constant. This way we can take things like
1385 #
1386 # (a ? 0 : 1) > 0
1387 #
1388 # and turn it into
1389 #
1390 # a ? (0 > 0) : (1 > 0)
1391 #
1392 # which constant folding will eat for lunch. The resulting ternary will
1393 # further get cleaned up by the boolean reductions above and we will be
1394 # left with just the original variable "a".
1395 for op in ['flt', 'fge', 'feq', 'fne',
1396 'ilt', 'ige', 'ieq', 'ine', 'ult', 'uge']:
1397 optimizations += [
1398 ((op, ('bcsel', 'a', '#b', '#c'), '#d'),
1399 ('bcsel', 'a', (op, 'b', 'd'), (op, 'c', 'd'))),
1400 ((op, '#d', ('bcsel', a, '#b', '#c')),
1401 ('bcsel', 'a', (op, 'd', 'b'), (op, 'd', 'c'))),
1402 ]
1403
1404
1405 # For example, this converts things like
1406 #
1407 # 1 + mix(0, a - 1, condition)
1408 #
1409 # into
1410 #
1411 # mix(1, (a-1)+1, condition)
1412 #
1413 # Other optimizations will rearrange the constants.
1414 for op in ['fadd', 'fmul', 'iadd', 'imul']:
1415 optimizations += [
1416 ((op, ('bcsel(is_used_once)', a, '#b', c), '#d'), ('bcsel', a, (op, b, d), (op, c, d)))
1417 ]
1418
1419 # For derivatives in compute shaders, GLSL_NV_compute_shader_derivatives
1420 # states:
1421 #
1422 # If neither layout qualifier is specified, derivatives in compute shaders
1423 # return zero, which is consistent with the handling of built-in texture
1424 # functions like texture() in GLSL 4.50 compute shaders.
1425 for op in ['fddx', 'fddx_fine', 'fddx_coarse',
1426 'fddy', 'fddy_fine', 'fddy_coarse']:
1427 optimizations += [
1428 ((op, 'a'), 0.0, 'info->stage == MESA_SHADER_COMPUTE && info->cs.derivative_group == DERIVATIVE_GROUP_NONE')
1429 ]
1430
1431 # Some optimizations for ir3-specific instructions.
1432 optimizations += [
1433 # 'al * bl': If either 'al' or 'bl' is zero, return zero.
1434 (('umul_low', '#a(is_lower_half_zero)', 'b'), (0)),
1435 # '(ah * bl) << 16 + c': If either 'ah' or 'bl' is zero, return 'c'.
1436 (('imadsh_mix16', '#a@32(is_lower_half_zero)', 'b@32', 'c@32'), ('c')),
1437 (('imadsh_mix16', 'a@32', '#b@32(is_upper_half_zero)', 'c@32'), ('c')),
1438 ]
1439
1440 # These kinds of sequences can occur after nir_opt_peephole_select.
1441 #
1442 # NOTE: fadd is not handled here because that gets in the way of ffma
1443 # generation in the i965 driver. Instead, fadd and ffma are handled in
1444 # late_optimizations.
1445
1446 for op in ['flrp']:
1447 optimizations += [
1448 (('bcsel', a, (op + '(is_used_once)', b, c, d), (op, b, c, e)), (op, b, c, ('bcsel', a, d, e))),
1449 (('bcsel', a, (op, b, c, d), (op + '(is_used_once)', b, c, e)), (op, b, c, ('bcsel', a, d, e))),
1450 (('bcsel', a, (op + '(is_used_once)', b, c, d), (op, b, e, d)), (op, b, ('bcsel', a, c, e), d)),
1451 (('bcsel', a, (op, b, c, d), (op + '(is_used_once)', b, e, d)), (op, b, ('bcsel', a, c, e), d)),
1452 (('bcsel', a, (op + '(is_used_once)', b, c, d), (op, e, c, d)), (op, ('bcsel', a, b, e), c, d)),
1453 (('bcsel', a, (op, b, c, d), (op + '(is_used_once)', e, c, d)), (op, ('bcsel', a, b, e), c, d)),
1454 ]
1455
1456 for op in ['fmul', 'iadd', 'imul', 'iand', 'ior', 'ixor', 'fmin', 'fmax', 'imin', 'imax', 'umin', 'umax']:
1457 optimizations += [
1458 (('bcsel', a, (op + '(is_used_once)', b, c), (op, b, 'd(is_not_const)')), (op, b, ('bcsel', a, c, d))),
1459 (('bcsel', a, (op + '(is_used_once)', b, 'c(is_not_const)'), (op, b, d)), (op, b, ('bcsel', a, c, d))),
1460 (('bcsel', a, (op, b, 'c(is_not_const)'), (op + '(is_used_once)', b, d)), (op, b, ('bcsel', a, c, d))),
1461 (('bcsel', a, (op, b, c), (op + '(is_used_once)', b, 'd(is_not_const)')), (op, b, ('bcsel', a, c, d))),
1462 ]
1463
1464 for op in ['fpow']:
1465 optimizations += [
1466 (('bcsel', a, (op + '(is_used_once)', b, c), (op, b, d)), (op, b, ('bcsel', a, c, d))),
1467 (('bcsel', a, (op, b, c), (op + '(is_used_once)', b, d)), (op, b, ('bcsel', a, c, d))),
1468 (('bcsel', a, (op + '(is_used_once)', b, c), (op, d, c)), (op, ('bcsel', a, b, d), c)),
1469 (('bcsel', a, (op, b, c), (op + '(is_used_once)', d, c)), (op, ('bcsel', a, b, d), c)),
1470 ]
1471
1472 for op in ['frcp', 'frsq', 'fsqrt', 'fexp2', 'flog2', 'fsign', 'fsin', 'fcos']:
1473 optimizations += [
1474 (('bcsel', a, (op + '(is_used_once)', b), (op, c)), (op, ('bcsel', a, b, c))),
1475 (('bcsel', a, (op, b), (op + '(is_used_once)', c)), (op, ('bcsel', a, b, c))),
1476 ]
1477
1478 # This section contains "late" optimizations that should be run before
1479 # creating ffmas and calling regular optimizations for the final time.
1480 # Optimizations should go here if they help code generation and conflict
1481 # with the regular optimizations.
1482 before_ffma_optimizations = [
1483 # Propagate constants down multiplication chains
1484 (('~fmul(is_used_once)', ('fmul(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('fmul', ('fmul', a, c), b)),
1485 (('imul(is_used_once)', ('imul(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('imul', ('imul', a, c), b)),
1486 (('~fadd(is_used_once)', ('fadd(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('fadd', ('fadd', a, c), b)),
1487 (('iadd(is_used_once)', ('iadd(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('iadd', ('iadd', a, c), b)),
1488
1489 (('~fadd', ('fmul', a, b), ('fmul', a, c)), ('fmul', a, ('fadd', b, c))),
1490 (('iadd', ('imul', a, b), ('imul', a, c)), ('imul', a, ('iadd', b, c))),
1491 (('~fadd', ('fneg', a), a), 0.0),
1492 (('iadd', ('ineg', a), a), 0),
1493 (('iadd', ('ineg', a), ('iadd', a, b)), b),
1494 (('iadd', a, ('iadd', ('ineg', a), b)), b),
1495 (('~fadd', ('fneg', a), ('fadd', a, b)), b),
1496 (('~fadd', a, ('fadd', ('fneg', a), b)), b),
1497
1498 (('~flrp@32', ('fadd(is_used_once)', a, -1.0), ('fadd(is_used_once)', a, 1.0), d), ('fadd', ('flrp', -1.0, 1.0, d), a)),
1499 (('~flrp@32', ('fadd(is_used_once)', a, 1.0), ('fadd(is_used_once)', a, -1.0), d), ('fadd', ('flrp', 1.0, -1.0, d), a)),
1500 (('~flrp@32', ('fadd(is_used_once)', a, '#b'), ('fadd(is_used_once)', a, '#c'), d), ('fadd', ('fmul', d, ('fadd', c, ('fneg', b))), ('fadd', a, b))),
1501 ]
1502
1503 # This section contains "late" optimizations that should be run after the
1504 # regular optimizations have finished. Optimizations should go here if
1505 # they help code generation but do not necessarily produce code that is
1506 # more easily optimizable.
1507 late_optimizations = [
1508 # Most of these optimizations aren't quite safe when you get infinity or
1509 # NaN involved, but the first one should be fine.
1510 (('flt', ('fadd', a, b), 0.0), ('flt', a, ('fneg', b))),
1511 (('flt', ('fneg', ('fadd', a, b)), 0.0), ('flt', ('fneg', a), b)),
1512 (('~fge', ('fadd', a, b), 0.0), ('fge', a, ('fneg', b))),
1513 (('~fge', ('fneg', ('fadd', a, b)), 0.0), ('fge', ('fneg', a), b)),
1514 (('~feq', ('fadd', a, b), 0.0), ('feq', a, ('fneg', b))),
1515 (('~fne', ('fadd', a, b), 0.0), ('fne', a, ('fneg', b))),
1516
1517 # nir_lower_to_source_mods will collapse this, but its existence during the
1518 # optimization loop can prevent other optimizations.
1519 (('fneg', ('fneg', a)), a),
1520
1521 # Subtractions get lowered during optimization, so we need to recombine them
1522 (('fadd', 'a', ('fneg', 'b')), ('fsub', 'a', 'b'), '!options->lower_sub'),
1523 (('iadd', 'a', ('ineg', 'b')), ('isub', 'a', 'b'), '!options->lower_sub'),
1524 (('fneg', a), ('fsub', 0.0, a), 'options->lower_negate'),
1525 (('ineg', a), ('isub', 0, a), 'options->lower_negate'),
1526
1527 # These are duplicated from the main optimizations table. The late
1528 # patterns that rearrange expressions like x - .5 < 0 to x < .5 can create
1529 # new patterns like these. The patterns that compare with zero are removed
1530 # because they are unlikely to be created by anything in
1531 # late_optimizations.
1532 (('flt', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('flt', a, b)),
1533 (('flt', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('flt', b, a)),
1534 (('fge', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fge', a, b)),
1535 (('fge', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('fge', b, a)),
1536 (('feq', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('feq', a, b)),
1537 (('fne', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fne', a, b)),
1538
1539 (('fge', ('fsat(is_used_once)', a), 1.0), ('fge', a, 1.0)),
1540 (('flt', ('fsat(is_used_once)', a), 1.0), ('flt', a, 1.0)),
1541
1542 (('~fge', ('fmin(is_used_once)', ('fadd(is_used_once)', a, b), ('fadd', c, d)), 0.0), ('iand', ('fge', a, ('fneg', b)), ('fge', c, ('fneg', d)))),
1543
1544 (('flt', ('fneg', a), ('fneg', b)), ('flt', b, a)),
1545 (('fge', ('fneg', a), ('fneg', b)), ('fge', b, a)),
1546 (('feq', ('fneg', a), ('fneg', b)), ('feq', b, a)),
1547 (('fne', ('fneg', a), ('fneg', b)), ('fne', b, a)),
1548 (('flt', ('fneg', a), -1.0), ('flt', 1.0, a)),
1549 (('flt', -1.0, ('fneg', a)), ('flt', a, 1.0)),
1550 (('fge', ('fneg', a), -1.0), ('fge', 1.0, a)),
1551 (('fge', -1.0, ('fneg', a)), ('fge', a, 1.0)),
1552 (('fne', ('fneg', a), -1.0), ('fne', 1.0, a)),
1553 (('feq', -1.0, ('fneg', a)), ('feq', a, 1.0)),
1554
1555 (('ior', a, a), a),
1556 (('iand', a, a), a),
1557
1558 (('~fadd', ('fneg(is_used_once)', ('fsat(is_used_once)', 'a(is_not_fmul)')), 1.0), ('fsat', ('fadd', 1.0, ('fneg', a)))),
1559
1560 (('fdot2', a, b), ('fdot_replicated2', a, b), 'options->fdot_replicates'),
1561 (('fdot3', a, b), ('fdot_replicated3', a, b), 'options->fdot_replicates'),
1562 (('fdot4', a, b), ('fdot_replicated4', a, b), 'options->fdot_replicates'),
1563 (('fdph', a, b), ('fdph_replicated', a, b), 'options->fdot_replicates'),
1564
1565 (('~flrp@32', ('fadd(is_used_once)', a, b), ('fadd(is_used_once)', a, c), d), ('fadd', ('flrp', b, c, d), a)),
1566 (('~flrp@64', ('fadd(is_used_once)', a, b), ('fadd(is_used_once)', a, c), d), ('fadd', ('flrp', b, c, d), a)),
1567
1568 (('~fadd@32', 1.0, ('fmul(is_used_once)', c , ('fadd', b, -1.0 ))), ('fadd', ('fadd', 1.0, ('fneg', c)), ('fmul', b, c)), 'options->lower_flrp32'),
1569 (('~fadd@64', 1.0, ('fmul(is_used_once)', c , ('fadd', b, -1.0 ))), ('fadd', ('fadd', 1.0, ('fneg', c)), ('fmul', b, c)), 'options->lower_flrp64'),
1570
1571 # A similar operation could apply to any ffma(#a, b, #(-a/2)), but this
1572 # particular operation is common for expanding values stored in a texture
1573 # from [0,1] to [-1,1].
1574 (('~ffma@32', a, 2.0, -1.0), ('flrp', -1.0, 1.0, a ), '!options->lower_flrp32'),
1575 (('~ffma@32', a, -2.0, -1.0), ('flrp', -1.0, 1.0, ('fneg', a)), '!options->lower_flrp32'),
1576 (('~ffma@32', a, -2.0, 1.0), ('flrp', 1.0, -1.0, a ), '!options->lower_flrp32'),
1577 (('~ffma@32', a, 2.0, 1.0), ('flrp', 1.0, -1.0, ('fneg', a)), '!options->lower_flrp32'),
1578 (('~fadd@32', ('fmul(is_used_once)', 2.0, a), -1.0), ('flrp', -1.0, 1.0, a ), '!options->lower_flrp32'),
1579 (('~fadd@32', ('fmul(is_used_once)', -2.0, a), -1.0), ('flrp', -1.0, 1.0, ('fneg', a)), '!options->lower_flrp32'),
1580 (('~fadd@32', ('fmul(is_used_once)', -2.0, a), 1.0), ('flrp', 1.0, -1.0, a ), '!options->lower_flrp32'),
1581 (('~fadd@32', ('fmul(is_used_once)', 2.0, a), 1.0), ('flrp', 1.0, -1.0, ('fneg', a)), '!options->lower_flrp32'),
1582
1583 # flrp(a, b, a)
1584 # a*(1-a) + b*a
1585 # a + -a*a + a*b (1)
1586 # a + a*(b - a)
1587 # Option 1: ffma(a, (b-a), a)
1588 #
1589 # Alternately, after (1):
1590 # a*(1+b) + -a*a
1591 # a*((1+b) + -a)
1592 #
1593 # Let b=1
1594 #
1595 # Option 2: ffma(a, 2, -(a*a))
1596 # Option 3: ffma(a, 2, (-a)*a)
1597 # Option 4: ffma(a, -a, 2*a)
1598 # Option 5: a * (2 - a)
1599 #
1600 # There are a lot of other possible combinations.
1601 (('~ffma@32', ('fadd', b, ('fneg', a)), a, a), ('flrp', a, b, a), '!options->lower_flrp32'),
1602 (('~ffma@32', a, 2.0, ('fneg', ('fmul', a, a))), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
1603 (('~ffma@32', a, 2.0, ('fmul', ('fneg', a), a)), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
1604 (('~ffma@32', a, ('fneg', a), ('fmul', 2.0, a)), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
1605 (('~fmul@32', a, ('fadd', 2.0, ('fneg', a))), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
1606
1607 # we do these late so that we don't get in the way of creating ffmas
1608 (('fmin', ('fadd(is_used_once)', '#c', a), ('fadd(is_used_once)', '#c', b)), ('fadd', c, ('fmin', a, b))),
1609 (('fmax', ('fadd(is_used_once)', '#c', a), ('fadd(is_used_once)', '#c', b)), ('fadd', c, ('fmax', a, b))),
1610
1611 (('bcsel', a, 0, ('b2f32', ('inot', 'b@bool'))), ('b2f32', ('inot', ('ior', a, b)))),
1612
1613 # Putting this in 'optimizations' interferes with the bcsel(a, op(b, c),
1614 # op(b, d)) => op(b, bcsel(a, c, d)) transformations. I do not know why.
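   # (0x7f7fffff is the bit pattern of FLT_MAX, the largest finite float, so
   # this clamps the infinite frsq of zero down to a representable value.)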
1615 (('bcsel', ('feq', ('fsqrt', 'a(is_not_negative)'), 0.0), intBitsToFloat(0x7f7fffff), ('frsq', a)),
1616 ('fmin', ('frsq', a), intBitsToFloat(0x7f7fffff))),
1617
1618 # Things that look like DPH in the source shader may get expanded to
1619 # something that looks like dot(v1.xyz, v2.xyz) + v1.w by the time it gets
1620 # to NIR. After FFMA is generated, this can look like:
1621 #
1622 # fadd(ffma(v1.z, v2.z, ffma(v1.y, v2.y, fmul(v1.x, v2.x))), v1.w)
1623 #
1624 # Reassociate the last addition into the first multiplication.
1625 #
1626 # Some shaders do not use 'invariant' in vertex and (possibly) geometry
1627 # shader stages on some outputs that are intended to be invariant. For
1628 # various reasons, this optimization may not be fully applied in all
1629 # shaders used for different rendering passes of the same geometry. This
1630 # can result in Z-fighting artifacts (at best). For now, disable this
1631 # optimization in these stages. See bugzilla #111490. In tessellation
1632 # stages applications seem to use 'precise' when necessary, so allow the
1633 # optimization in those stages.
1634 (('~fadd', ('ffma(is_used_once)', a, b, ('ffma', c, d, ('fmul', 'e(is_not_const_and_not_fsign)', 'f(is_not_const_and_not_fsign)'))), 'g(is_not_const)'),
1635 ('ffma', a, b, ('ffma', c, d, ('ffma', e, 'f', 'g'))), '(info->stage != MESA_SHADER_VERTEX && info->stage != MESA_SHADER_GEOMETRY) && !options->intel_vec4'),
1636 (('~fadd', ('ffma(is_used_once)', a, b, ('fmul', 'c(is_not_const_and_not_fsign)', 'd(is_not_const_and_not_fsign)') ), 'e(is_not_const)'),
1637 ('ffma', a, b, ('ffma', c, d, e)), '(info->stage != MESA_SHADER_VERTEX && info->stage != MESA_SHADER_GEOMETRY) && !options->intel_vec4'),
1638 ]
1639
1640 for op in ['fadd']:
1641 late_optimizations += [
1642 (('bcsel', a, (op + '(is_used_once)', b, c), (op, b, d)), (op, b, ('bcsel', a, c, d))),
1643 (('bcsel', a, (op, b, c), (op + '(is_used_once)', b, d)), (op, b, ('bcsel', a, c, d))),
1644 ]
1645
1646 for op in ['ffma']:
1647 late_optimizations += [
1648 (('bcsel', a, (op + '(is_used_once)', b, c, d), (op, b, c, e)), (op, b, c, ('bcsel', a, d, e))),
1649 (('bcsel', a, (op, b, c, d), (op + '(is_used_once)', b, c, e)), (op, b, c, ('bcsel', a, d, e))),
1650
1651 (('bcsel', a, (op + '(is_used_once)', b, c, d), (op, b, e, d)), (op, b, ('bcsel', a, c, e), d)),
1652 (('bcsel', a, (op, b, c, d), (op + '(is_used_once)', b, e, d)), (op, b, ('bcsel', a, c, e), d)),
1653 ]
1654
1655 print(nir_algebraic.AlgebraicPass("nir_opt_algebraic", optimizations).render())
1656 print(nir_algebraic.AlgebraicPass("nir_opt_algebraic_before_ffma",
1657 before_ffma_optimizations).render())
1658 print(nir_algebraic.AlgebraicPass("nir_opt_algebraic_late",
1659 late_optimizations).render())