#
# Copyright (C) 2014 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
#    Jason Ekstrand (jason@jlekstrand.net)

from __future__ import print_function

from collections import OrderedDict
import nir_algebraic
from nir_opcodes import type_sizes
import itertools
import struct
from math import pi

# Convenience variables
a = 'a'
b = 'b'
c = 'c'
d = 'd'
e = 'e'

# Written in the form (<search>, <replace>) where <search> is an expression
# and <replace> is either an expression or a value. An expression is
# defined as a tuple of the form ([~]<op>, <src0>, <src1>, <src2>, <src3>)
# where each source is either an expression or a value. A value can be
# either a numeric constant or a string representing a variable name.
#
# If the opcode in a search expression is prefixed by a '~' character, this
# indicates that the operation is inexact. Such operations will only get
# applied to SSA values that do not have the exact bit set. This should be
# used by any optimizations that are not bit-for-bit exact. It should not,
# however, be used for backend-requested lowering operations as those need to
# happen regardless of precision.
#
# Variable names are specified as "[#]name[@type][(cond)][.swiz]" where:
# "#" indicates that the given variable will only match constants,
# type indicates that the given variable will only match values from ALU
#    instructions with the given output type,
# (cond) specifies an additional condition function (see nir_search_helpers.h),
# swiz is a swizzle applied to the variable (only in the <replace> expression)
#
# For constants, you have to be careful to make sure that it is the right
# type because python is unaware of the source and destination types of the
# opcodes.
#
# All expression types can have a bit-size specified. For opcodes, this
# looks like "op@32", for variables it is "a@32" or "a@uint32" to specify a
# type and size. In the search half of the expression this indicates that it
# should only match that particular bit-size. In the replace half of the
# expression this indicates that the constructed value should have that
# bit-size.
#
# If the opcode in a replacement expression is prefixed by a '!' character,
# this indicates that the new expression will be marked exact.
#
# A special condition "many-comm-expr" can be used with expressions to note
# that the expression and its subexpressions have more commutative expressions
# than nir_replace_instr can handle. If this special condition is needed with
# another condition, the two can be separated by a comma (e.g.,
# "(many-comm-expr,is_used_once)").
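#
# To illustrate the syntax, two rules adapted from the list below (condition
# strings elided):
#
#    (('~fmul', a, 1.0), a)
#
# replaces an inexact multiply by 1.0 with its other source, while
#
#    (('imul', a, '#b@32(is_pos_power_of_two)'), ('ishl', a, ('find_lsb', b)))
#
# matches only when b is a 32-bit constant that is a positive power of two,
# and rewrites the multiply into a left shift.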

# based on https://web.archive.org/web/20180105155939/http://forum.devmaster.net/t/fast-and-accurate-sine-cosine/9648
def lowered_sincos(c):
   x = ('fsub', ('fmul', 2.0, ('ffract', ('fadd', ('fmul', 0.5 / pi, a), c))), 1.0)
   x = ('fmul', ('fsub', x, ('fmul', x, ('fabs', x))), 4.0)
   return ('ffma', ('ffma', x, ('fabs', x), ('fneg', x)), 0.225, x)
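
# The phase constant c selects the function: lowered_sincos(0.5) approximates
# fsin and lowered_sincos(0.75) approximates fcos (see the
# 'options->lower_sincos' rules below).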

def intBitsToFloat(i):
   return struct.unpack('!f', struct.pack('!I', i))[0]
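# e.g. intBitsToFloat(0x3f800000) == 1.0, reinterpreting the IEEE 754 single
# precision bit pattern as a float.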

optimizations = [

   (('imul', a, '#b@32(is_pos_power_of_two)'), ('ishl', a, ('find_lsb', b)), '!options->lower_bitops'),
   (('imul', a, '#b@32(is_neg_power_of_two)'), ('ineg', ('ishl', a, ('find_lsb', ('iabs', b)))), '!options->lower_bitops'),
   (('ishl', a, '#b@32'), ('imul', a, ('ishl', 1, b)), 'options->lower_bitops'),

   (('unpack_64_2x32_split_x', ('imul_2x32_64(is_used_once)', a, b)), ('imul', a, b)),
   (('unpack_64_2x32_split_x', ('umul_2x32_64(is_used_once)', a, b)), ('imul', a, b)),
   (('imul_2x32_64', a, b), ('pack_64_2x32_split', ('imul', a, b), ('imul_high', a, b)), 'options->lower_mul_2x32_64'),
   (('umul_2x32_64', a, b), ('pack_64_2x32_split', ('imul', a, b), ('umul_high', a, b)), 'options->lower_mul_2x32_64'),
   (('udiv', a, 1), a),
   (('idiv', a, 1), a),
   (('umod', a, 1), 0),
   (('imod', a, 1), 0),
   (('udiv', a, '#b@32(is_pos_power_of_two)'), ('ushr', a, ('find_lsb', b)), '!options->lower_bitops'),
   (('idiv', a, '#b@32(is_pos_power_of_two)'), ('imul', ('isign', a), ('ushr', ('iabs', a), ('find_lsb', b))), 'options->lower_idiv'),
   (('idiv', a, '#b@32(is_neg_power_of_two)'), ('ineg', ('imul', ('isign', a), ('ushr', ('iabs', a), ('find_lsb', ('iabs', b))))), 'options->lower_idiv'),
   (('umod', a, '#b(is_pos_power_of_two)'), ('iand', a, ('isub', b, 1))),

   (('~fneg', ('fneg', a)), a),
   (('ineg', ('ineg', a)), a),
   (('fabs', ('fabs', a)), ('fabs', a)),
   (('fabs', ('fneg', a)), ('fabs', a)),
   (('fabs', ('u2f', a)), ('u2f', a)),
   (('iabs', ('iabs', a)), ('iabs', a)),
   (('iabs', ('ineg', a)), ('iabs', a)),
   (('f2b', ('fneg', a)), ('f2b', a)),
   (('i2b', ('ineg', a)), ('i2b', a)),
   (('~fadd', a, 0.0), a),
   (('iadd', a, 0), a),
   (('usadd_4x8', a, 0), a),
   (('usadd_4x8', a, ~0), ~0),
   (('~fadd', ('fmul', a, b), ('fmul', a, c)), ('fmul', a, ('fadd', b, c))),
   (('iadd', ('imul', a, b), ('imul', a, c)), ('imul', a, ('iadd', b, c))),
   (('~fadd', ('fneg', a), a), 0.0),
   (('iadd', ('ineg', a), a), 0),
   (('iadd', ('ineg', a), ('iadd', a, b)), b),
   (('iadd', a, ('iadd', ('ineg', a), b)), b),
   (('~fadd', ('fneg', a), ('fadd', a, b)), b),
   (('~fadd', a, ('fadd', ('fneg', a), b)), b),
   (('fadd', ('fsat', a), ('fsat', ('fneg', a))), ('fsat', ('fabs', a))),
   (('~fmul', a, 0.0), 0.0),
   (('imul', a, 0), 0),
   (('umul_unorm_4x8', a, 0), 0),
   (('umul_unorm_4x8', a, ~0), a),
   (('~fmul', a, 1.0), a),
   (('imul', a, 1), a),
   (('fmul', a, -1.0), ('fneg', a)),
   (('imul', a, -1), ('ineg', a)),
   # If a < 0: fsign(a)*a*a => -1*a*a => -a*a => abs(a)*a
   # If a > 0: fsign(a)*a*a => 1*a*a => a*a => abs(a)*a
   # If a == 0: fsign(a)*a*a => 0*0*0 => abs(0)*0
   (('fmul', ('fsign', a), ('fmul', a, a)), ('fmul', ('fabs', a), a)),
   (('fmul', ('fmul', ('fsign', a), a), a), ('fmul', ('fabs', a), a)),
   (('~ffma', 0.0, a, b), b),
   (('~ffma', a, b, 0.0), ('fmul', a, b)),
   (('ffma', 1.0, a, b), ('fadd', a, b)),
   (('ffma', -1.0, a, b), ('fadd', ('fneg', a), b)),
   (('~flrp', a, b, 0.0), a),
   (('~flrp', a, b, 1.0), b),
   (('~flrp', a, a, b), a),
   (('~flrp', 0.0, a, b), ('fmul', a, b)),

   # flrp(a, a + b, c) => a + flrp(0, b, c) => a + (b * c)
   (('~flrp', a, ('fadd(is_used_once)', a, b), c), ('fadd', ('fmul', b, c), a)),
   (('~flrp@32', a, ('fadd', a, b), c), ('fadd', ('fmul', b, c), a), 'options->lower_flrp32'),
   (('~flrp@64', a, ('fadd', a, b), c), ('fadd', ('fmul', b, c), a), 'options->lower_flrp64'),

   (('~flrp@32', ('fadd', a, b), ('fadd', a, c), d), ('fadd', ('flrp', b, c, d), a), 'options->lower_flrp32'),
   (('~flrp@64', ('fadd', a, b), ('fadd', a, c), d), ('fadd', ('flrp', b, c, d), a), 'options->lower_flrp64'),

   (('~flrp@32', a, ('fmul(is_used_once)', a, b), c), ('fmul', ('flrp', 1.0, b, c), a), 'options->lower_flrp32'),
   (('~flrp@64', a, ('fmul(is_used_once)', a, b), c), ('fmul', ('flrp', 1.0, b, c), a), 'options->lower_flrp64'),

   (('~flrp', ('fmul(is_used_once)', a, b), ('fmul(is_used_once)', a, c), d), ('fmul', ('flrp', b, c, d), a)),

   (('~flrp', a, b, ('b2f', 'c@1')), ('bcsel', c, b, a), 'options->lower_flrp32'),
   (('~flrp', a, 0.0, c), ('fadd', ('fmul', ('fneg', a), c), a)),
   (('ftrunc', a), ('bcsel', ('flt', a, 0.0), ('fneg', ('ffloor', ('fabs', a))), ('ffloor', ('fabs', a))), 'options->lower_ftrunc'),
   (('ffloor', a), ('fsub', a, ('ffract', a)), 'options->lower_ffloor'),
   (('fadd', a, ('fneg', ('ffract', a))), ('ffloor', a), '!options->lower_ffloor'),
   (('ffract', a), ('fsub', a, ('ffloor', a)), 'options->lower_ffract'),
   (('fceil', a), ('fneg', ('ffloor', ('fneg', a))), 'options->lower_fceil'),
   (('~fadd', ('fmul', a, ('fadd', 1.0, ('fneg', ('b2f', 'c@1')))), ('fmul', b, ('b2f', c))), ('bcsel', c, b, a), 'options->lower_flrp32'),
   (('~fadd@32', ('fmul', a, ('fadd', 1.0, ('fneg', c ) )), ('fmul', b, c )), ('flrp', a, b, c), '!options->lower_flrp32'),
   (('~fadd@64', ('fmul', a, ('fadd', 1.0, ('fneg', c ) )), ('fmul', b, c )), ('flrp', a, b, c), '!options->lower_flrp64'),
   # These are the same as the two previous rules, but they depend on
   # 1-fsat(x) <=> fsat(1-x). See below.
   (('~fadd@32', ('fmul', a, ('fsat', ('fadd', 1.0, ('fneg', c )))), ('fmul', b, ('fsat', c))), ('flrp', a, b, ('fsat', c)), '!options->lower_flrp32'),
   (('~fadd@64', ('fmul', a, ('fsat', ('fadd', 1.0, ('fneg', c )))), ('fmul', b, ('fsat', c))), ('flrp', a, b, ('fsat', c)), '!options->lower_flrp64'),

   (('~fadd', a, ('fmul', ('b2f', 'c@1'), ('fadd', b, ('fneg', a)))), ('bcsel', c, b, a), 'options->lower_flrp32'),
   (('~fadd@32', a, ('fmul', c , ('fadd', b, ('fneg', a)))), ('flrp', a, b, c), '!options->lower_flrp32'),
   (('~fadd@64', a, ('fmul', c , ('fadd', b, ('fneg', a)))), ('flrp', a, b, c), '!options->lower_flrp64'),
   (('ffma', a, b, c), ('fadd', ('fmul', a, b), c), 'options->lower_ffma'),
   (('~fadd', ('fmul', a, b), c), ('ffma', a, b, c), 'options->fuse_ffma'),

   (('~fmul', ('fadd', ('iand', ('ineg', ('b2i32', 'a@bool')), ('fmul', b, c)), '#d'), '#e'),
    ('bcsel', a, ('fmul', ('fadd', ('fmul', b, c), d), e), ('fmul', d, e))),

   (('fdph', a, b), ('fdot4', ('vec4', 'a.x', 'a.y', 'a.z', 1.0), b), 'options->lower_fdph'),

   (('fdot4', ('vec4', a, b, c, 1.0), d), ('fdph', ('vec3', a, b, c), d), '!options->lower_fdph'),
   (('fdot4', ('vec4', a, 0.0, 0.0, 0.0), b), ('fmul', a, b)),
   (('fdot4', ('vec4', a, b, 0.0, 0.0), c), ('fdot2', ('vec2', a, b), c)),
   (('fdot4', ('vec4', a, b, c, 0.0), d), ('fdot3', ('vec3', a, b, c), d)),

   (('fdot3', ('vec3', a, 0.0, 0.0), b), ('fmul', a, b)),
   (('fdot3', ('vec3', a, b, 0.0), c), ('fdot2', ('vec2', a, b), c)),

   (('fdot2', ('vec2', a, 0.0), b), ('fmul', a, b)),
   (('fdot2', a, 1.0), ('fadd', 'a.x', 'a.y')),

   # Lower fdot to fsum when it is available
   (('fdot2', a, b), ('fsum2', ('fmul', a, b)), 'options->lower_fdot'),
   (('fdot3', a, b), ('fsum3', ('fmul', a, b)), 'options->lower_fdot'),
   (('fdot4', a, b), ('fsum4', ('fmul', a, b)), 'options->lower_fdot'),
   (('fsum2', a), ('fadd', 'a.x', 'a.y'), 'options->lower_fdot'),

   # If x >= 0 and x <= 1: fsat(1 - x) == 1 - fsat(x) trivially
   # If x < 0: 1 - fsat(x) => 1 - 0 => 1 and fsat(1 - x) => fsat(> 1) => 1
   # If x > 1: 1 - fsat(x) => 1 - 1 => 0 and fsat(1 - x) => fsat(< 0) => 0
   (('~fadd', ('fneg(is_used_once)', ('fsat(is_used_once)', 'a(is_not_fmul)')), 1.0), ('fsat', ('fadd', 1.0, ('fneg', a)))),

   # 1 - ((1 - a) * (1 - b))
   # 1 - (1 - a - b + a*b)
   # 1 - 1 + a + b - a*b
   # a + b - a*b
   # a + b*(1 - a)
   # b*(1 - a) + 1*a
   # flrp(b, 1, a)
   (('~fadd@32', 1.0, ('fneg', ('fmul', ('fadd', 1.0, ('fneg', a)), ('fadd', 1.0, ('fneg', b))))),
    ('flrp', b, 1.0, a), '!options->lower_flrp32'),

   # (a * #b + #c) << #d
   # ((a * #b) << #d) + (#c << #d)
   # (a * (#b << #d)) + (#c << #d)
   (('ishl', ('iadd', ('imul', a, '#b'), '#c'), '#d'),
    ('iadd', ('imul', a, ('ishl', b, d)), ('ishl', c, d))),

   # (a * #b) << #c
   # a * (#b << #c)
   (('ishl', ('imul', a, '#b'), '#c'), ('imul', a, ('ishl', b, c))),
]

# Care must be taken here. Shifts in NIR use only the lower log2(bitsize)
# bits of the second source. These replacements must correctly handle the
# case where (b % bitsize) + (c % bitsize) >= bitsize.
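# For example, with 32-bit sources, ishl(ishl(a, 20), 20) must yield 0, but
# naively combining the shifts would produce ishl(a, 40), which evaluates as
# ishl(a, 40 & 31) == ishl(a, 8).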
for s in [8, 16, 32, 64]:
   mask = s - 1

   ishl = "ishl@{}".format(s)
   ishr = "ishr@{}".format(s)
   ushr = "ushr@{}".format(s)

   in_bounds = ('ult', ('iadd', ('iand', b, mask), ('iand', c, mask)), s)

   optimizations.extend([
      ((ishl, (ishl, a, '#b'), '#c'), ('bcsel', in_bounds, (ishl, a, ('iadd', b, c)), 0)),
      ((ushr, (ushr, a, '#b'), '#c'), ('bcsel', in_bounds, (ushr, a, ('iadd', b, c)), 0)),

      # To get -1 for large shifts of negative values, ishr must instead
      # clamp the shift count to the maximum value.
      ((ishr, (ishr, a, '#b'), '#c'),
       (ishr, a, ('imin', ('iadd', ('iand', b, mask), ('iand', c, mask)), s - 1))),
   ])

optimizations.extend([
   # This is common for address calculations. Reassociating may enable the
   # 'a<<c' to be CSE'd. It also helps architectures that have an ISHLADD
   # instruction or a constant offset field in load / store instructions.
   (('ishl', ('iadd', a, '#b'), '#c'), ('iadd', ('ishl', a, c), ('ishl', b, c))),

   # Comparison simplifications
   (('~inot', ('flt', a, b)), ('fge', a, b)),
   (('~inot', ('fge', a, b)), ('flt', a, b)),
   (('inot', ('feq', a, b)), ('fne', a, b)),
   (('inot', ('fne', a, b)), ('feq', a, b)),
   (('inot', ('ilt', a, b)), ('ige', a, b)),
   (('inot', ('ult', a, b)), ('uge', a, b)),
   (('inot', ('ige', a, b)), ('ilt', a, b)),
   (('inot', ('uge', a, b)), ('ult', a, b)),
   (('inot', ('ieq', a, b)), ('ine', a, b)),
   (('inot', ('ine', a, b)), ('ieq', a, b)),

   (('iand', ('feq', a, b), ('fne', a, b)), False),
   (('iand', ('flt', a, b), ('flt', b, a)), False),
   (('iand', ('ieq', a, b), ('ine', a, b)), False),
   (('iand', ('ilt', a, b), ('ilt', b, a)), False),
   (('iand', ('ult', a, b), ('ult', b, a)), False),

   # This helps some shaders because, after some optimizations, they end up
   # with patterns like (-a < -b) || (b < a). In an ideal world, this sort of
   # matching would be handled by CSE.
   (('flt', ('fneg', a), ('fneg', b)), ('flt', b, a)),
   (('fge', ('fneg', a), ('fneg', b)), ('fge', b, a)),
   (('feq', ('fneg', a), ('fneg', b)), ('feq', b, a)),
   (('fne', ('fneg', a), ('fneg', b)), ('fne', b, a)),
   (('flt', ('fneg', a), -1.0), ('flt', 1.0, a)),
   (('flt', -1.0, ('fneg', a)), ('flt', a, 1.0)),
   (('fge', ('fneg', a), -1.0), ('fge', 1.0, a)),
   (('fge', -1.0, ('fneg', a)), ('fge', a, 1.0)),
   (('fne', ('fneg', a), -1.0), ('fne', 1.0, a)),
   (('feq', -1.0, ('fneg', a)), ('feq', a, 1.0)),

   (('flt', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('flt', a, b)),
   (('flt', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('flt', b, a)),
   (('fge', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fge', a, b)),
   (('fge', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('fge', b, a)),
   (('feq', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('feq', a, b)),
   (('fne', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fne', a, b)),

   (('fge', ('fsat(is_used_once)', a), 1.0), ('fge', a, 1.0)),
   (('flt', ('fsat(is_used_once)', a), 1.0), ('flt', a, 1.0)),
   (('fge', 0.0, ('fsat(is_used_once)', a)), ('fge', 0.0, a)),
   (('flt', 0.0, ('fsat(is_used_once)', a)), ('flt', 0.0, a)),

   # 0.0 >= b2f(a)
   # b2f(a) <= 0.0
   # b2f(a) == 0.0 because b2f(a) can only be 0 or 1
   # inot(a)
   (('fge', 0.0, ('b2f', 'a@1')), ('inot', a)),

   (('fge', ('fneg', ('b2f', 'a@1')), 0.0), ('inot', a)),

   (('fne', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('ior', a, b)),
   (('fne', ('fmax', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('ior', a, b)),
   (('fne', ('bcsel', a, 1.0, ('b2f', 'b@1')) , 0.0), ('ior', a, b)),
   (('fne', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), ('ior', a, b)),
   (('fne', ('fmul', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('iand', a, b)),
   (('fne', ('fmin', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('iand', a, b)),
   (('fne', ('bcsel', a, ('b2f', 'b@1'), 0.0) , 0.0), ('iand', a, b)),
   (('fne', ('fadd', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), 0.0), ('ixor', a, b)),
   (('fne', ('b2f', 'a@1') , ('b2f', 'b@1') ), ('ixor', a, b)),
   (('fne', ('fneg', ('b2f', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('ixor', a, b)),
   (('feq', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('ior', a, b))),
   (('feq', ('fmax', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('ior', a, b))),
   (('feq', ('bcsel', a, 1.0, ('b2f', 'b@1')) , 0.0), ('inot', ('ior', a, b))),
   (('feq', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), ('inot', ('ior', a, b))),
   (('feq', ('fmul', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('iand', a, b))),
   (('feq', ('fmin', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('iand', a, b))),
   (('feq', ('bcsel', a, ('b2f', 'b@1'), 0.0) , 0.0), ('inot', ('iand', a, b))),
   (('feq', ('fadd', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), 0.0), ('ieq', a, b)),
   (('feq', ('b2f', 'a@1') , ('b2f', 'b@1') ), ('ieq', a, b)),
   (('feq', ('fneg', ('b2f', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('ieq', a, b)),

   # -(b2f(a) + b2f(b)) < 0
   # 0 < b2f(a) + b2f(b)
   # 0 != b2f(a) + b2f(b)   b2f must be 0 or 1, so the sum is non-negative
   # a || b
   (('flt', ('fneg', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), 0.0), ('ior', a, b)),
   (('flt', 0.0, ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), ('ior', a, b)),

   # -(b2f(a) + b2f(b)) >= 0
   # 0 >= b2f(a) + b2f(b)
   # 0 == b2f(a) + b2f(b)   b2f must be 0 or 1, so the sum is non-negative
   # !(a || b)
   (('fge', ('fneg', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), 0.0), ('inot', ('ior', a, b))),
   (('fge', 0.0, ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), ('inot', ('ior', a, b))),

   (('flt', a, ('fneg', a)), ('flt', a, 0.0)),
   (('fge', a, ('fneg', a)), ('fge', a, 0.0)),

   # Some optimizations (below) convert things like (a < b || c < b) into
   # (min(a, c) < b). However, this interferes with the previous optimizations
   # that try to remove comparisons with negated sums of b2f. This just
   # breaks that apart.
   (('flt', ('fmin', c, ('fneg', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1')))), 0.0),
    ('ior', ('flt', c, 0.0), ('ior', a, b))),

   (('~flt', ('fadd', a, b), a), ('flt', b, 0.0)),
   (('~fge', ('fadd', a, b), a), ('fge', b, 0.0)),
   (('~feq', ('fadd', a, b), a), ('feq', b, 0.0)),
   (('~fne', ('fadd', a, b), a), ('fne', b, 0.0)),
   (('~flt', ('fadd(is_used_once)', a, '#b'), '#c'), ('flt', a, ('fadd', c, ('fneg', b)))),
   (('~flt', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('flt', ('fneg', ('fadd', c, b)), a)),
   (('~fge', ('fadd(is_used_once)', a, '#b'), '#c'), ('fge', a, ('fadd', c, ('fneg', b)))),
   (('~fge', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('fge', ('fneg', ('fadd', c, b)), a)),
   (('~feq', ('fadd(is_used_once)', a, '#b'), '#c'), ('feq', a, ('fadd', c, ('fneg', b)))),
   (('~feq', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('feq', ('fneg', ('fadd', c, b)), a)),
   (('~fne', ('fadd(is_used_once)', a, '#b'), '#c'), ('fne', a, ('fadd', c, ('fneg', b)))),
   (('~fne', ('fneg(is_used_once)', ('fadd(is_used_once)', a, '#b')), '#c'), ('fne', ('fneg', ('fadd', c, b)), a)),

   # Cannot remove the addition from ilt or ige due to overflow.
   (('ieq', ('iadd', a, b), a), ('ieq', b, 0)),
   (('ine', ('iadd', a, b), a), ('ine', b, 0)),

   # fmin(-b2f(a), b) >= 0.0
   # -b2f(a) >= 0.0 && b >= 0.0
   # -b2f(a) == 0.0 && b >= 0.0   -b2f can only be 0 or -1, never >0
   # b2f(a) == 0.0 && b >= 0.0
   # a == False && b >= 0.0
   # !a && b >= 0.0
   #
   # The fge in the second replacement is not a typo. I leave the proof that
   # "fmin(-b2f(a), b) >= 0 <=> fmin(-b2f(a), b) == 0" as an exercise for the
   # reader.
   (('fge', ('fmin', ('fneg', ('b2f', 'a@1')), 'b@1'), 0.0), ('iand', ('inot', a), ('fge', b, 0.0))),
   (('feq', ('fmin', ('fneg', ('b2f', 'a@1')), 'b@1'), 0.0), ('iand', ('inot', a), ('fge', b, 0.0))),

   (('feq', ('b2f', 'a@1'), 0.0), ('inot', a)),
   (('~fne', ('b2f', 'a@1'), 0.0), a),
   (('ieq', ('b2i', 'a@1'), 0), ('inot', a)),
   (('ine', ('b2i', 'a@1'), 0), a),

   (('fne', ('u2f', a), 0.0), ('ine', a, 0)),
   (('feq', ('u2f', a), 0.0), ('ieq', a, 0)),
   (('fge', ('u2f', a), 0.0), True),
   (('fge', 0.0, ('u2f', a)), ('uge', 0, a)), # ieq instead?
   (('flt', ('u2f', a), 0.0), False),
   (('flt', 0.0, ('u2f', a)), ('ult', 0, a)), # ine instead?
   (('fne', ('i2f', a), 0.0), ('ine', a, 0)),
   (('feq', ('i2f', a), 0.0), ('ieq', a, 0)),
   (('fge', ('i2f', a), 0.0), ('ige', a, 0)),
   (('fge', 0.0, ('i2f', a)), ('ige', 0, a)),
   (('flt', ('i2f', a), 0.0), ('ilt', a, 0)),
   (('flt', 0.0, ('i2f', a)), ('ilt', 0, a)),

   # 0.0 < fabs(a)
   # fabs(a) > 0.0
   # fabs(a) != 0.0 because fabs(a) must be >= 0
   # a != 0.0
   (('~flt', 0.0, ('fabs', a)), ('fne', a, 0.0)),

   # -fabs(a) < 0.0
   # fabs(a) > 0.0
   (('~flt', ('fneg', ('fabs', a)), 0.0), ('fne', a, 0.0)),

   # 0.0 >= fabs(a)
   # 0.0 == fabs(a) because fabs(a) must be >= 0
   # 0.0 == a
   (('fge', 0.0, ('fabs', a)), ('feq', a, 0.0)),

   # -fabs(a) >= 0.0
   # 0.0 >= fabs(a)
   (('fge', ('fneg', ('fabs', a)), 0.0), ('feq', a, 0.0)),

   # (a >= 0.0) && (a <= 1.0) -> fsat(a) == a
   (('iand', ('fge', a, 0.0), ('fge', 1.0, a)), ('feq', a, ('fsat', a)), '!options->lower_fsat'),

   # (a < 0.0) || (a > 1.0)
   # !(!(a < 0.0) && !(a > 1.0))
   # !((a >= 0.0) && (a <= 1.0))
   # !(a == fsat(a))
   # a != fsat(a)
   (('ior', ('flt', a, 0.0), ('flt', 1.0, a)), ('fne', a, ('fsat', a)), '!options->lower_fsat'),

   (('fmax', ('b2f(is_used_once)', 'a@1'), ('b2f', 'b@1')), ('b2f', ('ior', a, b))),
   (('fmax', ('fneg(is_used_once)', ('b2f(is_used_once)', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('fneg', ('b2f', ('ior', a, b)))),
   (('fmin', ('b2f(is_used_once)', 'a@1'), ('b2f', 'b@1')), ('b2f', ('iand', a, b))),
   (('fmin', ('fneg(is_used_once)', ('b2f(is_used_once)', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('fneg', ('b2f', ('iand', a, b)))),

   # fmin(b2f(a), b)
   # bcsel(a, fmin(b2f(a), b), fmin(b2f(a), b))
   # bcsel(a, fmin(b2f(True), b), fmin(b2f(False), b))
   # bcsel(a, fmin(1.0, b), fmin(0.0, b))
   #
   # Since b is a constant, constant folding will eliminate the fmin and the
   # fmax. If b is > 1.0, the bcsel will be replaced with a b2f.
   (('fmin', ('b2f', 'a@1'), '#b'), ('bcsel', a, ('fmin', b, 1.0), ('fmin', b, 0.0))),

   (('flt', ('fadd(is_used_once)', a, ('fneg', b)), 0.0), ('flt', a, b)),

   (('fge', ('fneg', ('fabs', a)), 0.0), ('feq', a, 0.0)),
   (('~bcsel', ('flt', b, a), b, a), ('fmin', a, b)),
   (('~bcsel', ('flt', a, b), b, a), ('fmax', a, b)),
   (('~bcsel', ('fge', a, b), b, a), ('fmin', a, b)),
   (('~bcsel', ('fge', b, a), b, a), ('fmax', a, b)),
   (('bcsel', ('i2b', a), b, c), ('bcsel', ('ine', a, 0), b, c)),
   (('bcsel', ('inot', a), b, c), ('bcsel', a, c, b)),
   (('bcsel', a, ('bcsel', a, b, c), d), ('bcsel', a, b, d)),
   (('bcsel', a, b, ('bcsel', a, c, d)), ('bcsel', a, b, d)),
   (('bcsel', a, ('bcsel', b, c, d), ('bcsel(is_used_once)', b, c, 'e')), ('bcsel', b, c, ('bcsel', a, d, 'e'))),
   (('bcsel', a, ('bcsel(is_used_once)', b, c, d), ('bcsel', b, c, 'e')), ('bcsel', b, c, ('bcsel', a, d, 'e'))),
   (('bcsel', a, ('bcsel', b, c, d), ('bcsel(is_used_once)', b, 'e', d)), ('bcsel', b, ('bcsel', a, c, 'e'), d)),
   (('bcsel', a, ('bcsel(is_used_once)', b, c, d), ('bcsel', b, 'e', d)), ('bcsel', b, ('bcsel', a, c, 'e'), d)),
   (('bcsel', a, True, b), ('ior', a, b)),
   (('bcsel', a, a, b), ('ior', a, b)),
   (('bcsel', a, b, False), ('iand', a, b)),
   (('bcsel', a, b, a), ('iand', a, b)),
   (('~fmin', a, a), a),
   (('~fmax', a, a), a),
   (('imin', a, a), a),
   (('imax', a, a), a),
   (('umin', a, a), a),
   (('umax', a, a), a),
   (('fmax', ('fmax', a, b), b), ('fmax', a, b)),
   (('umax', ('umax', a, b), b), ('umax', a, b)),
   (('imax', ('imax', a, b), b), ('imax', a, b)),
   (('fmin', ('fmin', a, b), b), ('fmin', a, b)),
   (('umin', ('umin', a, b), b), ('umin', a, b)),
   (('imin', ('imin', a, b), b), ('imin', a, b)),
   (('fmax', a, ('fneg', a)), ('fabs', a)),
   (('imax', a, ('ineg', a)), ('iabs', a)),
   (('fmin', a, ('fneg', a)), ('fneg', ('fabs', a))),
   (('imin', a, ('ineg', a)), ('ineg', ('iabs', a))),
   (('fmin', a, ('fneg', ('fabs', a))), ('fneg', ('fabs', a))),
   (('imin', a, ('ineg', ('iabs', a))), ('ineg', ('iabs', a))),
   (('~fmin', a, ('fabs', a)), a),
   (('imin', a, ('iabs', a)), a),
   (('~fmax', a, ('fneg', ('fabs', a))), a),
   (('imax', a, ('ineg', ('iabs', a))), a),
   (('fmax', a, ('fabs', a)), ('fabs', a)),
   (('imax', a, ('iabs', a)), ('iabs', a)),
   (('fmax', a, ('fneg', a)), ('fabs', a)),
   (('imax', a, ('ineg', a)), ('iabs', a)),
   (('~fmax', ('fabs', a), 0.0), ('fabs', a)),
   (('~fmin', ('fmax', a, 0.0), 1.0), ('fsat', a), '!options->lower_fsat'),
   (('~fmax', ('fmin', a, 1.0), 0.0), ('fsat', a), '!options->lower_fsat'),
   (('~fmin', ('fmax', a, -1.0), 0.0), ('fneg', ('fsat', ('fneg', a))), '!options->lower_fsat'),
   (('~fmax', ('fmin', a, 0.0), -1.0), ('fneg', ('fsat', ('fneg', a))), '!options->lower_fsat'),
   (('fsat', ('fsign', a)), ('b2f', ('flt', 0.0, a))),
   (('fsat', ('b2f', a)), ('b2f', a)),
   (('fsat', a), ('fmin', ('fmax', a, 0.0), 1.0), 'options->lower_fsat'),
   (('fsat', ('fsat', a)), ('fsat', a)),
   (('fsat', ('fneg(is_used_once)', ('fadd(is_used_once)', a, b))), ('fsat', ('fadd', ('fneg', a), ('fneg', b))), '!options->lower_fsat'),
   (('fsat', ('fneg(is_used_once)', ('fmul(is_used_once)', a, b))), ('fsat', ('fmul', ('fneg', a), b)), '!options->lower_fsat'),
   (('fsat', ('fabs(is_used_once)', ('fmul(is_used_once)', a, b))), ('fsat', ('fmul', ('fabs', a), ('fabs', b))), '!options->lower_fsat'),
   (('fmin', ('fmax', ('fmin', ('fmax', a, b), c), b), c), ('fmin', ('fmax', a, b), c)),
   (('imin', ('imax', ('imin', ('imax', a, b), c), b), c), ('imin', ('imax', a, b), c)),
   (('umin', ('umax', ('umin', ('umax', a, b), c), b), c), ('umin', ('umax', a, b), c)),
   (('fmax', ('fsat', a), '#b@32(is_zero_to_one)'), ('fsat', ('fmax', a, b))),
   (('fmin', ('fsat', a), '#b@32(is_zero_to_one)'), ('fsat', ('fmin', a, b))),
   (('extract_u8', ('imin', ('imax', a, 0), 0xff), 0), ('imin', ('imax', a, 0), 0xff)),
   (('~ior', ('flt(is_used_once)', a, b), ('flt', a, c)), ('flt', a, ('fmax', b, c))),
   (('~ior', ('flt(is_used_once)', a, c), ('flt', b, c)), ('flt', ('fmin', a, b), c)),
   (('~ior', ('fge(is_used_once)', a, b), ('fge', a, c)), ('fge', a, ('fmin', b, c))),
   (('~ior', ('fge(is_used_once)', a, c), ('fge', b, c)), ('fge', ('fmax', a, b), c)),
   (('~ior', ('flt', a, '#b'), ('flt', a, '#c')), ('flt', a, ('fmax', b, c))),
   (('~ior', ('flt', '#a', c), ('flt', '#b', c)), ('flt', ('fmin', a, b), c)),
   (('~ior', ('fge', a, '#b'), ('fge', a, '#c')), ('fge', a, ('fmin', b, c))),
   (('~ior', ('fge', '#a', c), ('fge', '#b', c)), ('fge', ('fmax', a, b), c)),
   (('~iand', ('flt(is_used_once)', a, b), ('flt', a, c)), ('flt', a, ('fmin', b, c))),
   (('~iand', ('flt(is_used_once)', a, c), ('flt', b, c)), ('flt', ('fmax', a, b), c)),
   (('~iand', ('fge(is_used_once)', a, b), ('fge', a, c)), ('fge', a, ('fmax', b, c))),
   (('~iand', ('fge(is_used_once)', a, c), ('fge', b, c)), ('fge', ('fmin', a, b), c)),
   (('~iand', ('flt', a, '#b'), ('flt', a, '#c')), ('flt', a, ('fmin', b, c))),
   (('~iand', ('flt', '#a', c), ('flt', '#b', c)), ('flt', ('fmax', a, b), c)),
   (('~iand', ('fge', a, '#b'), ('fge', a, '#c')), ('fge', a, ('fmax', b, c))),
   (('~iand', ('fge', '#a', c), ('fge', '#b', c)), ('fge', ('fmin', a, b), c)),

   (('ior', ('ilt(is_used_once)', a, b), ('ilt', a, c)), ('ilt', a, ('imax', b, c))),
   (('ior', ('ilt(is_used_once)', a, c), ('ilt', b, c)), ('ilt', ('imin', a, b), c)),
   (('ior', ('ige(is_used_once)', a, b), ('ige', a, c)), ('ige', a, ('imin', b, c))),
   (('ior', ('ige(is_used_once)', a, c), ('ige', b, c)), ('ige', ('imax', a, b), c)),
   (('ior', ('ult(is_used_once)', a, b), ('ult', a, c)), ('ult', a, ('umax', b, c))),
   (('ior', ('ult(is_used_once)', a, c), ('ult', b, c)), ('ult', ('umin', a, b), c)),
   (('ior', ('uge(is_used_once)', a, b), ('uge', a, c)), ('uge', a, ('umin', b, c))),
   (('ior', ('uge(is_used_once)', a, c), ('uge', b, c)), ('uge', ('umax', a, b), c)),
   (('iand', ('ilt(is_used_once)', a, b), ('ilt', a, c)), ('ilt', a, ('imin', b, c))),
   (('iand', ('ilt(is_used_once)', a, c), ('ilt', b, c)), ('ilt', ('imax', a, b), c)),
   (('iand', ('ige(is_used_once)', a, b), ('ige', a, c)), ('ige', a, ('imax', b, c))),
   (('iand', ('ige(is_used_once)', a, c), ('ige', b, c)), ('ige', ('imin', a, b), c)),
   (('iand', ('ult(is_used_once)', a, b), ('ult', a, c)), ('ult', a, ('umin', b, c))),
   (('iand', ('ult(is_used_once)', a, c), ('ult', b, c)), ('ult', ('umax', a, b), c)),
   (('iand', ('uge(is_used_once)', a, b), ('uge', a, c)), ('uge', a, ('umax', b, c))),
   (('iand', ('uge(is_used_once)', a, c), ('uge', b, c)), ('uge', ('umin', a, b), c)),

   # These derive from the previous patterns with the application of b < 0 <=>
   # 0 < -b. The transformation should be applied if either comparison is
   # used once as this ensures that the number of comparisons will not
   # increase. The sources to the ior and iand are not symmetric, so the
   # rules have to be duplicated to get this behavior.
   (('~ior', ('flt(is_used_once)', 0.0, 'a@32'), ('flt', 'b@32', 0.0)), ('flt', 0.0, ('fmax', a, ('fneg', b)))),
   (('~ior', ('flt', 0.0, 'a@32'), ('flt(is_used_once)', 'b@32', 0.0)), ('flt', 0.0, ('fmax', a, ('fneg', b)))),
   (('~ior', ('fge(is_used_once)', 0.0, 'a@32'), ('fge', 'b@32', 0.0)), ('fge', 0.0, ('fmin', a, ('fneg', b)))),
   (('~ior', ('fge', 0.0, 'a@32'), ('fge(is_used_once)', 'b@32', 0.0)), ('fge', 0.0, ('fmin', a, ('fneg', b)))),
   (('~iand', ('flt(is_used_once)', 0.0, 'a@32'), ('flt', 'b@32', 0.0)), ('flt', 0.0, ('fmin', a, ('fneg', b)))),
   (('~iand', ('flt', 0.0, 'a@32'), ('flt(is_used_once)', 'b@32', 0.0)), ('flt', 0.0, ('fmin', a, ('fneg', b)))),
   (('~iand', ('fge(is_used_once)', 0.0, 'a@32'), ('fge', 'b@32', 0.0)), ('fge', 0.0, ('fmax', a, ('fneg', b)))),
   (('~iand', ('fge', 0.0, 'a@32'), ('fge(is_used_once)', 'b@32', 0.0)), ('fge', 0.0, ('fmax', a, ('fneg', b)))),

   # Common pattern like 'if (i == 0 || i == 1 || ...)'
   (('ior', ('ieq', a, 0), ('ieq', a, 1)), ('uge', 1, a)),
   (('ior', ('uge', 1, a), ('ieq', a, 2)), ('uge', 2, a)),
   (('ior', ('uge', 2, a), ('ieq', a, 3)), ('uge', 3, a)),

   # The (i2f32, ...) part is an open-coded fsign. When that is combined with
   # the bcsel, it's basically copysign(1.0, a). There is no copysign in NIR,
   # so emit an open-coded version of that.
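   # 0x3f800000 is the bit pattern of 1.0f, so the replacement below is 1.0
   # with the sign bit of a or'd in.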
   (('bcsel@32', ('feq', a, 0.0), 1.0, ('i2f32', ('iadd', ('b2i32', ('flt', 0.0, 'a@32')), ('ineg', ('b2i32', ('flt', 'a@32', 0.0)))))),
    ('ior', 0x3f800000, ('iand', a, 0x80000000))),

   (('ior', a, ('ieq', a, False)), True),
   (('ior', a, ('inot', a)), -1),

   (('ine', ('ineg', ('b2i32', 'a@1')), ('ineg', ('b2i32', 'b@1'))), ('ine', a, b)),
   (('b2i32', ('ine', 'a@1', 'b@1')), ('b2i32', ('ixor', a, b))),

   (('iand', ('ieq', 'a@32', 0), ('ieq', 'b@32', 0)), ('ieq', ('ior', 'a@32', 'b@32'), 0), '!options->lower_bitops'),

   # These patterns can result when (a < b || a < c) => (a < min(b, c))
   # transformations occur before constant propagation and loop-unrolling.
   (('~flt', a, ('fmax', b, a)), ('flt', a, b)),
   (('~flt', ('fmin', a, b), a), ('flt', b, a)),
   (('~fge', a, ('fmin', b, a)), True),
   (('~fge', ('fmax', a, b), a), True),
   (('~flt', a, ('fmin', b, a)), False),
   (('~flt', ('fmax', a, b), a), False),
   (('~fge', a, ('fmax', b, a)), ('fge', a, b)),
   (('~fge', ('fmin', a, b), a), ('fge', b, a)),

   (('ilt', a, ('imax', b, a)), ('ilt', a, b)),
   (('ilt', ('imin', a, b), a), ('ilt', b, a)),
   (('ige', a, ('imin', b, a)), True),
   (('ige', ('imax', a, b), a), True),
   (('ult', a, ('umax', b, a)), ('ult', a, b)),
   (('ult', ('umin', a, b), a), ('ult', b, a)),
   (('uge', a, ('umin', b, a)), True),
   (('uge', ('umax', a, b), a), True),
   (('ilt', a, ('imin', b, a)), False),
   (('ilt', ('imax', a, b), a), False),
   (('ige', a, ('imax', b, a)), ('ige', a, b)),
   (('ige', ('imin', a, b), a), ('ige', b, a)),
   (('ult', a, ('umin', b, a)), False),
   (('ult', ('umax', a, b), a), False),
   (('uge', a, ('umax', b, a)), ('uge', a, b)),
   (('uge', ('umin', a, b), a), ('uge', b, a)),
   (('ult', a, ('iand', b, a)), False),
   (('ult', ('ior', a, b), a), False),
   (('uge', a, ('iand', b, a)), True),
   (('uge', ('ior', a, b), a), True),

   (('ilt', '#a', ('imax', '#b', c)), ('ior', ('ilt', a, b), ('ilt', a, c))),
   (('ilt', ('imin', '#a', b), '#c'), ('ior', ('ilt', a, c), ('ilt', b, c))),
   (('ige', '#a', ('imin', '#b', c)), ('ior', ('ige', a, b), ('ige', a, c))),
   (('ige', ('imax', '#a', b), '#c'), ('ior', ('ige', a, c), ('ige', b, c))),
   (('ult', '#a', ('umax', '#b', c)), ('ior', ('ult', a, b), ('ult', a, c))),
   (('ult', ('umin', '#a', b), '#c'), ('ior', ('ult', a, c), ('ult', b, c))),
   (('uge', '#a', ('umin', '#b', c)), ('ior', ('uge', a, b), ('uge', a, c))),
   (('uge', ('umax', '#a', b), '#c'), ('ior', ('uge', a, c), ('uge', b, c))),
   (('ilt', '#a', ('imin', '#b', c)), ('iand', ('ilt', a, b), ('ilt', a, c))),
   (('ilt', ('imax', '#a', b), '#c'), ('iand', ('ilt', a, c), ('ilt', b, c))),
   (('ige', '#a', ('imax', '#b', c)), ('iand', ('ige', a, b), ('ige', a, c))),
   (('ige', ('imin', '#a', b), '#c'), ('iand', ('ige', a, c), ('ige', b, c))),
   (('ult', '#a', ('umin', '#b', c)), ('iand', ('ult', a, b), ('ult', a, c))),
   (('ult', ('umax', '#a', b), '#c'), ('iand', ('ult', a, c), ('ult', b, c))),
   (('uge', '#a', ('umax', '#b', c)), ('iand', ('uge', a, b), ('uge', a, c))),
   (('uge', ('umin', '#a', b), '#c'), ('iand', ('uge', a, c), ('uge', b, c))),

   # Thanks to sign extension, the ishr(a, b) is negative if and only if a is
   # negative.
   (('bcsel', ('ilt', a, 0), ('ineg', ('ishr', a, b)), ('ishr', a, b)),
    ('iabs', ('ishr', a, b))),
   (('iabs', ('ishr', ('iabs', a), b)), ('ishr', ('iabs', a), b)),

   (('fabs', ('slt', a, b)), ('slt', a, b)),
   (('fabs', ('sge', a, b)), ('sge', a, b)),
   (('fabs', ('seq', a, b)), ('seq', a, b)),
   (('fabs', ('sne', a, b)), ('sne', a, b)),
   (('slt', a, b), ('b2f', ('flt', a, b)), 'options->lower_scmp'),
   (('sge', a, b), ('b2f', ('fge', a, b)), 'options->lower_scmp'),
   (('seq', a, b), ('b2f', ('feq', a, b)), 'options->lower_scmp'),
   (('sne', a, b), ('b2f', ('fne', a, b)), 'options->lower_scmp'),
   (('seq', ('seq', a, b), 1.0), ('seq', a, b)),
   (('seq', ('sne', a, b), 1.0), ('sne', a, b)),
   (('seq', ('slt', a, b), 1.0), ('slt', a, b)),
   (('seq', ('sge', a, b), 1.0), ('sge', a, b)),
   (('sne', ('seq', a, b), 0.0), ('seq', a, b)),
   (('sne', ('sne', a, b), 0.0), ('sne', a, b)),
   (('sne', ('slt', a, b), 0.0), ('slt', a, b)),
   (('sne', ('sge', a, b), 0.0), ('sge', a, b)),
   (('seq', ('seq', a, b), 0.0), ('sne', a, b)),
   (('seq', ('sne', a, b), 0.0), ('seq', a, b)),
   (('seq', ('slt', a, b), 0.0), ('sge', a, b)),
   (('seq', ('sge', a, b), 0.0), ('slt', a, b)),
   (('sne', ('seq', a, b), 1.0), ('sne', a, b)),
   (('sne', ('sne', a, b), 1.0), ('seq', a, b)),
   (('sne', ('slt', a, b), 1.0), ('sge', a, b)),
   (('sne', ('sge', a, b), 1.0), ('slt', a, b)),
   (('fall_equal2', a, b), ('fmin', ('seq', 'a.x', 'b.x'), ('seq', 'a.y', 'b.y')), 'options->lower_vector_cmp'),
   (('fall_equal3', a, b), ('seq', ('fany_nequal3', a, b), 0.0), 'options->lower_vector_cmp'),
   (('fall_equal4', a, b), ('seq', ('fany_nequal4', a, b), 0.0), 'options->lower_vector_cmp'),
   (('fany_nequal2', a, b), ('fmax', ('sne', 'a.x', 'b.x'), ('sne', 'a.y', 'b.y')), 'options->lower_vector_cmp'),
   (('fany_nequal3', a, b), ('fsat', ('fdot3', ('sne', a, b), ('sne', a, b))), 'options->lower_vector_cmp'),
   (('fany_nequal4', a, b), ('fsat', ('fdot4', ('sne', a, b), ('sne', a, b))), 'options->lower_vector_cmp'),
   (('fne', ('fneg', a), a), ('fne', a, 0.0)),
   (('feq', ('fneg', a), a), ('feq', a, 0.0)),
   # Emulating booleans
   (('imul', ('b2i', 'a@1'), ('b2i', 'b@1')), ('b2i', ('iand', a, b))),
   (('fmul', ('b2f', 'a@1'), ('b2f', 'b@1')), ('b2f', ('iand', a, b))),
   (('fsat', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), ('b2f', ('ior', a, b))),
   (('iand', 'a@bool32', 1.0), ('b2f', a)),
   # True/False are ~0 and 0 in NIR. b2i of True is 1, and -1 is ~0 (True).
   (('ineg', ('b2i32', 'a@32')), a),
   (('flt', ('fneg', ('b2f', 'a@1')), 0), a), # Generated by TGSI KILL_IF.
   # Comparison with the same args. Note that these are not done for
   # the float versions because NaN always returns false on float
   # inequalities.
   (('ilt', a, a), False),
   (('ige', a, a), True),
   (('ieq', a, a), True),
   (('ine', a, a), False),
   (('ult', a, a), False),
   (('uge', a, a), True),
   # Logical and bit operations
   (('iand', a, a), a),
   (('iand', a, ~0), a),
   (('iand', a, 0), 0),
   (('ior', a, a), a),
   (('ior', a, 0), a),
   (('ior', a, True), True),
   (('ixor', a, a), 0),
   (('ixor', a, 0), a),
   (('inot', ('inot', a)), a),
   (('ior', ('iand', a, b), b), b),
   (('ior', ('ior', a, b), b), ('ior', a, b)),
   (('iand', ('ior', a, b), b), b),
   (('iand', ('iand', a, b), b), ('iand', a, b)),
   # DeMorgan's Laws
   (('iand', ('inot', a), ('inot', b)), ('inot', ('ior', a, b))),
   (('ior', ('inot', a), ('inot', b)), ('inot', ('iand', a, b))),
   # Shift optimizations
   (('ishl', 0, a), 0),
   (('ishl', a, 0), a),
   (('ishr', 0, a), 0),
   (('ishr', a, 0), a),
   (('ushr', 0, a), 0),
   (('ushr', a, 0), a),
   (('iand', 0xff, ('ushr@32', a, 24)), ('ushr', a, 24)),
   (('iand', 0xffff, ('ushr@32', a, 16)), ('ushr', a, 16)),
   (('ior', ('ishl@16', a, b), ('ushr@16', a, ('iadd', 16, ('ineg', b)))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ishl@16', a, b), ('ushr@16', a, ('isub', 16, b))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ishl@32', a, b), ('ushr@32', a, ('iadd', 32, ('ineg', b)))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ishl@32', a, b), ('ushr@32', a, ('isub', 32, b))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@16', a, b), ('ishl@16', a, ('iadd', 16, ('ineg', b)))), ('uror', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@16', a, b), ('ishl@16', a, ('isub', 16, b))), ('uror', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@32', a, b), ('ishl@32', a, ('iadd', 32, ('ineg', b)))), ('uror', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@32', a, b), ('ishl@32', a, ('isub', 32, b))), ('uror', a, b), '!options->lower_rotate'),
   (('urol@16', a, b), ('ior', ('ishl', a, b), ('ushr', a, ('isub', 16, b))), 'options->lower_rotate'),
   (('urol@32', a, b), ('ior', ('ishl', a, b), ('ushr', a, ('isub', 32, b))), 'options->lower_rotate'),
   (('uror@16', a, b), ('ior', ('ushr', a, b), ('ishl', a, ('isub', 16, b))), 'options->lower_rotate'),
   (('uror@32', a, b), ('ior', ('ushr', a, b), ('ishl', a, ('isub', 32, b))), 'options->lower_rotate'),
   # Exponential/logarithmic identities
   (('~fexp2', ('flog2', a)), a), # 2^lg2(a) = a
   (('~flog2', ('fexp2', a)), a), # lg2(2^a) = a
   (('fpow', a, b), ('fexp2', ('fmul', ('flog2', a), b)), 'options->lower_fpow'), # a^b = 2^(lg2(a)*b)
   (('~fexp2', ('fmul', ('flog2', a), b)), ('fpow', a, b), '!options->lower_fpow'), # 2^(lg2(a)*b) = a^b
   (('~fexp2', ('fadd', ('fmul', ('flog2', a), b), ('fmul', ('flog2', c), d))),
    ('~fmul', ('fpow', a, b), ('fpow', c, d)), '!options->lower_fpow'), # 2^(lg2(a) * b + lg2(c) * d) = a^b * c^d
   (('~fexp2', ('fmul', ('flog2', a), 2.0)), ('fmul', a, a)),
   (('~fexp2', ('fmul', ('flog2', a), 4.0)), ('fmul', ('fmul', a, a), ('fmul', a, a))),
   (('~fpow', a, 1.0), a),
   (('~fpow', a, 2.0), ('fmul', a, a)),
   (('~fpow', a, 4.0), ('fmul', ('fmul', a, a), ('fmul', a, a))),
   (('~fpow', 2.0, a), ('fexp2', a)),
   (('~fpow', ('fpow', a, 2.2), 0.454545), a),
   (('~fpow', ('fabs', ('fpow', a, 2.2)), 0.454545), ('fabs', a)),
   (('~fsqrt', ('fexp2', a)), ('fexp2', ('fmul', 0.5, a))),
   (('~frcp', ('fexp2', a)), ('fexp2', ('fneg', a))),
   (('~frsq', ('fexp2', a)), ('fexp2', ('fmul', -0.5, a))),
   (('~flog2', ('fsqrt', a)), ('fmul', 0.5, ('flog2', a))),
   (('~flog2', ('frcp', a)), ('fneg', ('flog2', a))),
   (('~flog2', ('frsq', a)), ('fmul', -0.5, ('flog2', a))),
   (('~flog2', ('fpow', a, b)), ('fmul', b, ('flog2', a))),
   (('~fmul', ('fexp2(is_used_once)', a), ('fexp2(is_used_once)', b)), ('fexp2', ('fadd', a, b))),
   (('bcsel', ('flt', a, 0.0), 0.0, ('fsqrt', a)), ('fsqrt', ('fmax', a, 0.0))),
   # Division and reciprocal
   (('~fdiv', 1.0, a), ('frcp', a)),
   (('fdiv', a, b), ('fmul', a, ('frcp', b)), 'options->lower_fdiv'),
   (('~frcp', ('frcp', a)), a),
   (('~frcp', ('fsqrt', a)), ('frsq', a)),
   (('fsqrt', a), ('frcp', ('frsq', a)), 'options->lower_fsqrt'),
   (('~frcp', ('frsq', a)), ('fsqrt', a), '!options->lower_fsqrt'),
   # Trig
   (('fsin', a), lowered_sincos(0.5), 'options->lower_sincos'),
   (('fcos', a), lowered_sincos(0.75), 'options->lower_sincos'),
   # Boolean simplifications
   (('i2b32(is_used_by_if)', a), ('ine32', a, 0)),
   (('i2b1(is_used_by_if)', a), ('ine', a, 0)),
   (('ieq', a, True), a),
   (('ine(is_not_used_by_if)', a, True), ('inot', a)),
   (('ine', a, False), a),
   (('ieq(is_not_used_by_if)', a, False), ('inot', 'a')),
   (('bcsel', a, True, False), a),
   (('bcsel', a, False, True), ('inot', a)),
   (('bcsel@32', a, 1.0, 0.0), ('b2f', a)),
   (('bcsel@32', a, 0.0, 1.0), ('b2f', ('inot', a))),
   (('bcsel@32', a, -1.0, -0.0), ('fneg', ('b2f', a))),
   (('bcsel@32', a, -0.0, -1.0), ('fneg', ('b2f', ('inot', a)))),
   (('bcsel', True, b, c), b),
   (('bcsel', False, b, c), c),
   (('bcsel', a, ('b2f(is_used_once)', 'b@32'), ('b2f', 'c@32')), ('b2f', ('bcsel', a, b, c))),

   (('bcsel', a, b, b), b),
   (('~fcsel', a, b, b), b),

   # D3D Boolean emulation
   (('bcsel', a, -1, 0), ('ineg', ('b2i', 'a@1'))),
   (('bcsel', a, 0, -1), ('ineg', ('b2i', ('inot', a)))),
   (('iand', ('ineg', ('b2i', 'a@1')), ('ineg', ('b2i', 'b@1'))),
    ('ineg', ('b2i', ('iand', a, b)))),
   (('ior', ('ineg', ('b2i','a@1')), ('ineg', ('b2i', 'b@1'))),
    ('ineg', ('b2i', ('ior', a, b)))),
   (('ieq', ('ineg', ('b2i', 'a@1')), 0), ('inot', a)),
   (('ieq', ('ineg', ('b2i', 'a@1')), -1), a),
   (('ine', ('ineg', ('b2i', 'a@1')), 0), a),
   (('ine', ('ineg', ('b2i', 'a@1')), -1), ('inot', a)),
   (('iand', ('ineg', ('b2i', a)), 1.0), ('b2f', a)),
   (('iand', ('ineg', ('b2i', a)), 1), ('b2i', a)),

   # SM5 32-bit shifts are defined to use the 5 least significant bits
   (('ishl', 'a@32', ('iand', 31, b)), ('ishl', a, b)),
   (('ishr', 'a@32', ('iand', 31, b)), ('ishr', a, b)),
   (('ushr', 'a@32', ('iand', 31, b)), ('ushr', a, b)),

   # Conversions
   (('i2b32', ('b2i', 'a@32')), a),
   (('f2i', ('ftrunc', a)), ('f2i', a)),
   (('f2u', ('ftrunc', a)), ('f2u', a)),
   (('i2b', ('ineg', a)), ('i2b', a)),
   (('i2b', ('iabs', a)), ('i2b', a)),
   (('inot', ('f2b1', a)), ('feq', a, 0.0)),

   # Ironically, mark these as imprecise because removing the conversions may
   # preserve more precision than doing the conversions (e.g.,
   # uint(float(0x81818181u)) == 0x81818200).
   (('~f2i32', ('i2f', 'a@32')), a),
   (('~f2i32', ('u2f', 'a@32')), a),
   (('~f2u32', ('i2f', 'a@32')), a),
   (('~f2u32', ('u2f', 'a@32')), a),

   (('ffloor', 'a(is_integral)'), a),
   (('fceil', 'a(is_integral)'), a),
   (('ftrunc', 'a(is_integral)'), a),
   # fract(x) = x - floor(x), so fract(NaN) = NaN
   (('~ffract', 'a(is_integral)'), 0.0),
   (('fabs', 'a(is_not_negative)'), a),
   (('iabs', 'a(is_not_negative)'), a),
   (('fsat', 'a(is_not_positive)'), 0.0),

   # Section 5.4.1 (Conversion and Scalar Constructors) of the GLSL 4.60 spec
   # says:
   #
   #    It is undefined to convert a negative floating-point value to an
   #    uint.
   #
   # Assuming that (uint)some_float behaves like (uint)(int)some_float allows
   # some optimizations in the i965 backend to proceed.
   (('ige', ('f2u', a), b), ('ige', ('f2i', a), b)),
   (('ige', b, ('f2u', a)), ('ige', b, ('f2i', a))),
   (('ilt', ('f2u', a), b), ('ilt', ('f2i', a), b)),
   (('ilt', b, ('f2u', a)), ('ilt', b, ('f2i', a))),

   (('~fmin', 'a(is_not_negative)', 1.0), ('fsat', a), '!options->lower_fsat'),

   # The result of the multiply must be in [-1, 0], so the result of the ffma
   # must be in [0, 1].
   (('flt', ('fadd', ('fmul', ('fsat', a), ('fneg', ('fsat', a))), 1.0), 0.0), False),
   (('flt', ('fadd', ('fneg', ('fmul', ('fsat', a), ('fsat', a))), 1.0), 0.0), False),
   (('fmax', ('fadd', ('fmul', ('fsat', a), ('fneg', ('fsat', a))), 1.0), 0.0), ('fadd', ('fmul', ('fsat', a), ('fneg', ('fsat', a))), 1.0)),
   (('fmax', ('fadd', ('fneg', ('fmul', ('fsat', a), ('fsat', a))), 1.0), 0.0), ('fadd', ('fneg', ('fmul', ('fsat', a), ('fsat', a))), 1.0)),

   (('fne', 'a(is_not_zero)', 0.0), True),
   (('feq', 'a(is_not_zero)', 0.0), False),

   # In this chart, + means value > 0 and - means value < 0.
   #
   # + >= + -> unknown   0 >= + -> false    - >= + -> false
   # + >= 0 -> true      0 >= 0 -> true     - >= 0 -> false
   # + >= - -> true      0 >= - -> true     - >= - -> unknown
   #
   # Using grouping conceptually similar to a Karnaugh map...
   #
   # (+ >= 0, + >= -, 0 >= 0, 0 >= -) == (is_not_negative >= is_not_positive) -> true
   # (0 >= +, - >= +) == (is_not_positive >= gt_zero) -> false
   # (- >= +, - >= 0) == (lt_zero >= is_not_negative) -> false
   #
   # The flt / ilt cases just invert the expected result.
   #
   # The results expecting true, must be marked imprecise. The results
   # expecting false are fine because NaN compared >= or < anything is false.

   (('~fge', 'a(is_not_negative)', 'b(is_not_positive)'), True),
   (('fge', 'a(is_not_positive)', 'b(is_gt_zero)'), False),
   (('fge', 'a(is_lt_zero)', 'b(is_not_negative)'), False),

   (('flt', 'a(is_not_negative)', 'b(is_not_positive)'), False),
   (('~flt', 'a(is_not_positive)', 'b(is_gt_zero)'), True),
   (('~flt', 'a(is_lt_zero)', 'b(is_not_negative)'), True),

   (('ine', 'a(is_not_zero)', 0), True),
   (('ieq', 'a(is_not_zero)', 0), False),

   (('ige', 'a(is_not_negative)', 'b(is_not_positive)'), True),
   (('ige', 'a(is_not_positive)', 'b(is_gt_zero)'), False),
   (('ige', 'a(is_lt_zero)', 'b(is_not_negative)'), False),

   (('ilt', 'a(is_not_negative)', 'b(is_not_positive)'), False),
   (('ilt', 'a(is_not_positive)', 'b(is_gt_zero)'), True),
   (('ilt', 'a(is_lt_zero)', 'b(is_not_negative)'), True),

   (('ult', 0, 'a(is_gt_zero)'), True),

   # Packing and then unpacking does nothing
   (('unpack_64_2x32_split_x', ('pack_64_2x32_split', a, b)), a),
   (('unpack_64_2x32_split_y', ('pack_64_2x32_split', a, b)), b),
   (('pack_64_2x32_split', ('unpack_64_2x32_split_x', a),
                           ('unpack_64_2x32_split_y', a)), a),

   # Comparing two halves of an unpack separately. While this optimization
   # should be correct for non-constant values, it's less obvious that it's
   # useful in that case. For constant values, the pack will fold and we're
   # guaranteed to reduce the whole tree to one instruction.
   (('iand', ('ieq', ('unpack_32_2x16_split_x', a), '#b'),
             ('ieq', ('unpack_32_2x16_split_y', a), '#c')),
    ('ieq', a, ('pack_32_2x16_split', b, c))),

   # Byte extraction
   (('ushr', 'a@16', 8), ('extract_u8', a, 1), '!options->lower_extract_byte'),
   (('ushr', 'a@32', 24), ('extract_u8', a, 3), '!options->lower_extract_byte'),
   (('ushr', 'a@64', 56), ('extract_u8', a, 7), '!options->lower_extract_byte'),
   (('ishr', 'a@16', 8), ('extract_i8', a, 1), '!options->lower_extract_byte'),
   (('ishr', 'a@32', 24), ('extract_i8', a, 3), '!options->lower_extract_byte'),
   (('ishr', 'a@64', 56), ('extract_i8', a, 7), '!options->lower_extract_byte'),
   (('iand', 0xff, a), ('extract_u8', a, 0), '!options->lower_extract_byte'),

   # Useless masking before unpacking
   (('unpack_half_2x16_split_x', ('iand', a, 0xffff)), ('unpack_half_2x16_split_x', a)),
   (('unpack_32_2x16_split_x', ('iand', a, 0xffff)), ('unpack_32_2x16_split_x', a)),
   (('unpack_64_2x32_split_x', ('iand', a, 0xffffffff)), ('unpack_64_2x32_split_x', a)),
   (('unpack_half_2x16_split_y', ('iand', a, 0xffff0000)), ('unpack_half_2x16_split_y', a)),
   (('unpack_32_2x16_split_y', ('iand', a, 0xffff0000)), ('unpack_32_2x16_split_y', a)),
   (('unpack_64_2x32_split_y', ('iand', a, 0xffffffff00000000)), ('unpack_64_2x32_split_y', a)),
])

# After the ('extract_u8', a, 0) pattern, above, triggers, there will be
# patterns like those below.
for op in ('ushr', 'ishr'):
   optimizations.extend([(('extract_u8', (op, 'a@16', 8), 0), ('extract_u8', a, 1))])
   optimizations.extend([(('extract_u8', (op, 'a@32', 8 * i), 0), ('extract_u8', a, i)) for i in range(1, 4)])
   optimizations.extend([(('extract_u8', (op, 'a@64', 8 * i), 0), ('extract_u8', a, i)) for i in range(1, 8)])

optimizations.extend([(('extract_u8', ('extract_u16', a, 1), 0), ('extract_u8', a, 2))])

# After the ('extract_[iu]8', a, 3) patterns, above, trigger, there will be
# patterns like those below.
for op in ('extract_u8', 'extract_i8'):
   optimizations.extend([((op, ('ishl', 'a@16', 8), 1), (op, a, 0))])
   optimizations.extend([((op, ('ishl', 'a@32', 24 - 8 * i), 3), (op, a, i)) for i in range(2, -1, -1)])
   optimizations.extend([((op, ('ishl', 'a@64', 56 - 8 * i), 7), (op, a, i)) for i in range(6, -1, -1)])

optimizations.extend([
   # Word extraction
   (('ushr', ('ishl', 'a@32', 16), 16), ('extract_u16', a, 0), '!options->lower_extract_word'),
   (('ushr', 'a@32', 16), ('extract_u16', a, 1), '!options->lower_extract_word'),
   (('ishr', ('ishl', 'a@32', 16), 16), ('extract_i16', a, 0), '!options->lower_extract_word'),
   (('ishr', 'a@32', 16), ('extract_i16', a, 1), '!options->lower_extract_word'),
   (('iand', 0xffff, a), ('extract_u16', a, 0), '!options->lower_extract_word'),

   # Subtracts
   (('ussub_4x8', a, 0), a),
   (('ussub_4x8', a, ~0), 0),
   # Lower all Subtractions first - they can get recombined later
   (('fsub', a, b), ('fadd', a, ('fneg', b))),
   (('isub', a, b), ('iadd', a, ('ineg', b))),

   # Propagate negation up multiplication chains
   (('fmul(is_used_by_non_fsat)', ('fneg', a), b), ('fneg', ('fmul', a, b))),
   (('imul', ('ineg', a), b), ('ineg', ('imul', a, b))),

   # Propagate constants up multiplication chains
   (('~fmul(is_used_once)', ('fmul(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('fmul', ('fmul', a, c), b)),
   (('imul(is_used_once)', ('imul(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('imul', ('imul', a, c), b)),
   (('~fadd(is_used_once)', ('fadd(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('fadd', ('fadd', a, c), b)),
   (('iadd(is_used_once)', ('iadd(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('iadd', ('iadd', a, c), b)),

   # Reassociate constants in add/mul chains so they can be folded together.
   # For now, we mostly only handle cases where the constants are separated by
   # a single non-constant. We could do better eventually.
   (('~fmul', '#a', ('fmul', 'b(is_not_const)', '#c')), ('fmul', ('fmul', a, c), b)),
   (('imul', '#a', ('imul', 'b(is_not_const)', '#c')), ('imul', ('imul', a, c), b)),
   (('~fadd', '#a', ('fadd', 'b(is_not_const)', '#c')), ('fadd', ('fadd', a, c), b)),
   (('~fadd', '#a', ('fneg', ('fadd', 'b(is_not_const)', '#c'))), ('fadd', ('fadd', a, ('fneg', c)), ('fneg', b))),
   (('iadd', '#a', ('iadd', 'b(is_not_const)', '#c')), ('iadd', ('iadd', a, c), b)),

   # Drop mul-div by the same value when there's no wrapping.
   (('idiv', ('imul(no_signed_wrap)', a, b), b), a),

   # By definition...
   (('bcsel', ('ige', ('find_lsb', a), 0), ('find_lsb', a), -1), ('find_lsb', a)),
   (('bcsel', ('ige', ('ifind_msb', a), 0), ('ifind_msb', a), -1), ('ifind_msb', a)),
   (('bcsel', ('ige', ('ufind_msb', a), 0), ('ufind_msb', a), -1), ('ufind_msb', a)),

   (('bcsel', ('ine', a, 0), ('find_lsb', a), -1), ('find_lsb', a)),
   (('bcsel', ('ine', a, 0), ('ifind_msb', a), -1), ('ifind_msb', a)),
   (('bcsel', ('ine', a, 0), ('ufind_msb', a), -1), ('ufind_msb', a)),

   (('bcsel', ('ine', a, -1), ('ifind_msb', a), -1), ('ifind_msb', a)),

   # Misc. lowering
   (('fmod@16', a, b), ('fsub', a, ('fmul', b, ('ffloor', ('fdiv', a, b)))), 'options->lower_fmod'),
   (('fmod@32', a, b), ('fsub', a, ('fmul', b, ('ffloor', ('fdiv', a, b)))), 'options->lower_fmod'),
   (('frem', a, b), ('fsub', a, ('fmul', b, ('ftrunc', ('fdiv', a, b)))), 'options->lower_fmod'),
   (('uadd_carry@32', a, b), ('b2i', ('ult', ('iadd', a, b), a)), 'options->lower_uadd_carry'),
   (('usub_borrow@32', a, b), ('b2i', ('ult', a, b)), 'options->lower_usub_borrow'),

   (('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
    ('bcsel', ('ult', 31, 'bits'), 'insert',
     ('bfi', ('bfm', 'bits', 'offset'), 'insert', 'base')),
    'options->lower_bitfield_insert'),
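   # (In the rule above, bfm builds the insert mask; the shift-based lowering
   # further below open-codes the same mask as ((1 << bits) - 1) << offset.)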
   (('ihadd', a, b), ('iadd', ('iand', a, b), ('ishr', ('ixor', a, b), 1)), 'options->lower_hadd'),
   (('uhadd', a, b), ('iadd', ('iand', a, b), ('ushr', ('ixor', a, b), 1)), 'options->lower_hadd'),
   (('irhadd', a, b), ('isub', ('ior', a, b), ('ishr', ('ixor', a, b), 1)), 'options->lower_hadd'),
   (('urhadd', a, b), ('isub', ('ior', a, b), ('ushr', ('ixor', a, b), 1)), 'options->lower_hadd'),
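   # The hadd lowerings above rely on a + b == 2*(a & b) + (a ^ b)
   # (equivalently, a + b == (a & b) + (a | b)), which computes the average
   # without the intermediate a + b overflowing.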
996 (('uadd_sat', a, b), ('bcsel', ('ult', ('iadd', a, b), a), -1, ('iadd', a, b)), 'options->lower_add_sat'),
997 (('usub_sat', a, b), ('bcsel', ('ult', a, b), 0, ('isub', a, b)), 'options->lower_add_sat'),
998
999 # Alternative lowering that doesn't rely on bfi.
1000 (('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
1001 ('bcsel', ('ult', 31, 'bits'),
1002 'insert',
1003 (('ior',
1004 ('iand', 'base', ('inot', ('ishl', ('isub', ('ishl', 1, 'bits'), 1), 'offset'))),
1005 ('iand', ('ishl', 'insert', 'offset'), ('ishl', ('isub', ('ishl', 1, 'bits'), 1), 'offset'))))),
1006 'options->lower_bitfield_insert_to_shifts'),
1007
1008 # Alternative lowering that uses bitfield_select.
1009 (('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
1010 ('bcsel', ('ult', 31, 'bits'), 'insert',
1011 ('bitfield_select', ('bfm', 'bits', 'offset'), ('ishl', 'insert', 'offset'), 'base')),
1012 'options->lower_bitfield_insert_to_bitfield_select'),
1013
1014 (('ibitfield_extract', 'value', 'offset', 'bits'),
1015 ('bcsel', ('ult', 31, 'bits'), 'value',
1016 ('ibfe', 'value', 'offset', 'bits')),
1017 'options->lower_bitfield_extract'),
1018
1019 (('ubitfield_extract', 'value', 'offset', 'bits'),
1020 ('bcsel', ('ult', 31, 'bits'), 'value',
1021 ('ubfe', 'value', 'offset', 'bits')),
1022 'options->lower_bitfield_extract'),
1023
1024 # Note that these opcodes are defined to only use the five least significant bits of 'offset' and 'bits'
1025 (('ubfe', 'value', 'offset', ('iand', 31, 'bits')), ('ubfe', 'value', 'offset', 'bits')),
1026 (('ubfe', 'value', ('iand', 31, 'offset'), 'bits'), ('ubfe', 'value', 'offset', 'bits')),
1027 (('ibfe', 'value', 'offset', ('iand', 31, 'bits')), ('ibfe', 'value', 'offset', 'bits')),
1028 (('ibfe', 'value', ('iand', 31, 'offset'), 'bits'), ('ibfe', 'value', 'offset', 'bits')),
1029 (('bfm', 'bits', ('iand', 31, 'offset')), ('bfm', 'bits', 'offset')),
1030 (('bfm', ('iand', 31, 'bits'), 'offset'), ('bfm', 'bits', 'offset')),

   (('ibitfield_extract', 'value', 'offset', 'bits'),
    ('bcsel', ('ieq', 0, 'bits'),
     0,
     ('ishr',
       ('ishl', 'value', ('isub', ('isub', 32, 'bits'), 'offset')),
       ('isub', 32, 'bits'))),
    'options->lower_bitfield_extract_to_shifts'),

   (('ubitfield_extract', 'value', 'offset', 'bits'),
    ('iand',
     ('ushr', 'value', 'offset'),
     ('bcsel', ('ieq', 'bits', 32),
      0xffffffff,
      ('isub', ('ishl', 1, 'bits'), 1))),
    'options->lower_bitfield_extract_to_shifts'),

   (('ifind_msb', 'value'),
    ('ufind_msb', ('bcsel', ('ilt', 'value', 0), ('inot', 'value'), 'value')),
    'options->lower_ifind_msb'),

   (('find_lsb', 'value'),
    ('ufind_msb', ('iand', 'value', ('ineg', 'value'))),
    'options->lower_find_lsb'),
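   # The find_lsb lowering uses the two's-complement trick that
   # value & -value isolates the lowest set bit, whose index ufind_msb then
   # reports.  Worked example (editorial note): for value = 40 (0b101000),
   # value & -value = 8 (0b1000) and ufind_msb(8) = 3 = find_lsb(40).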

   (('extract_i8', a, 'b@32'),
    ('ishr', ('ishl', a, ('imul', ('isub', 3, b), 8)), 24),
    'options->lower_extract_byte'),

   (('extract_u8', a, 'b@32'),
    ('iand', ('ushr', a, ('imul', b, 8)), 0xff),
    'options->lower_extract_byte'),

   (('extract_i16', a, 'b@32'),
    ('ishr', ('ishl', a, ('imul', ('isub', 1, b), 16)), 16),
    'options->lower_extract_word'),

   (('extract_u16', a, 'b@32'),
    ('iand', ('ushr', a, ('imul', b, 16)), 0xffff),
    'options->lower_extract_word'),
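   # The signed variants shift the selected field up to the top of the
   # 32-bit value and then arithmetic-shift it back down so the field's sign
   # bit is replicated; the unsigned variants just shift and mask.  Worked
   # example (editorial note): extract_i16(0x80000000, 1) is
   # (0x80000000 << 0) >> 16 with an arithmetic shift, i.e. 0xffff8000, the
   # sign-extension of -32768.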

   (('pack_unorm_2x16', 'v'),
    ('pack_uvec2_to_uint',
        ('f2u32', ('fround_even', ('fmul', ('fsat', 'v'), 65535.0)))),
    'options->lower_pack_unorm_2x16'),

   (('pack_unorm_4x8', 'v'),
    ('pack_uvec4_to_uint',
        ('f2u32', ('fround_even', ('fmul', ('fsat', 'v'), 255.0)))),
    'options->lower_pack_unorm_4x8'),

   (('pack_snorm_2x16', 'v'),
    ('pack_uvec2_to_uint',
        ('f2i32', ('fround_even', ('fmul', ('fmin', 1.0, ('fmax', -1.0, 'v')), 32767.0)))),
    'options->lower_pack_snorm_2x16'),

   (('pack_snorm_4x8', 'v'),
    ('pack_uvec4_to_uint',
        ('f2i32', ('fround_even', ('fmul', ('fmin', 1.0, ('fmax', -1.0, 'v')), 127.0)))),
    'options->lower_pack_snorm_4x8'),

   (('unpack_unorm_2x16', 'v'),
    ('fdiv', ('u2f32', ('vec2', ('extract_u16', 'v', 0),
                                ('extract_u16', 'v', 1))),
             65535.0),
    'options->lower_unpack_unorm_2x16'),

   (('unpack_unorm_4x8', 'v'),
    ('fdiv', ('u2f32', ('vec4', ('extract_u8', 'v', 0),
                                ('extract_u8', 'v', 1),
                                ('extract_u8', 'v', 2),
                                ('extract_u8', 'v', 3))),
             255.0),
    'options->lower_unpack_unorm_4x8'),

   (('unpack_snorm_2x16', 'v'),
    ('fmin', 1.0, ('fmax', -1.0, ('fdiv', ('i2f', ('vec2', ('extract_i16', 'v', 0),
                                                           ('extract_i16', 'v', 1))),
                                          32767.0))),
    'options->lower_unpack_snorm_2x16'),

   (('unpack_snorm_4x8', 'v'),
    ('fmin', 1.0, ('fmax', -1.0, ('fdiv', ('i2f', ('vec4', ('extract_i8', 'v', 0),
                                                           ('extract_i8', 'v', 1),
                                                           ('extract_i8', 'v', 2),
                                                           ('extract_i8', 'v', 3))),
                                          127.0))),
    'options->lower_unpack_snorm_4x8'),

   (('pack_half_2x16_split', 'a@32', 'b@32'),
    ('ior', ('ishl', ('u2u32', ('f2f16', b)), 16), ('u2u32', ('f2f16', a))),
    'options->lower_pack_half_2x16_split'),

   (('unpack_half_2x16_split_x', 'a@32'),
    ('f2f32', ('u2u16', a)),
    'options->lower_unpack_half_2x16_split'),

   (('unpack_half_2x16_split_y', 'a@32'),
    ('f2f32', ('u2u16', ('ushr', a, 16))),
    'options->lower_unpack_half_2x16_split'),

   (('isign', a), ('imin', ('imax', a, -1), 1), 'options->lower_isign'),
   (('fsign', a), ('fsub', ('b2f', ('flt', 0.0, a)), ('b2f', ('flt', a, 0.0))), 'options->lower_fsign'),

   # Address/offset calculations:
   # Drivers supporting imul24 should use the nir_lower_amul() pass; this
   # rule converts everyone else to imul:
   (('amul', a, b), ('imul', a, b), '!options->has_imul24'),

   (('imad24_ir3', a, b, 0), ('imul24', a, b)),
   (('imad24_ir3', a, 0, c), (c)),
   (('imad24_ir3', a, 1, c), ('iadd', a, c)),

   # If the first two srcs are const, crack apart the imad so constant
   # folding can clean up the imul:
   # TODO: ffma should probably get a similar rule.
   (('imad24_ir3', '#a', '#b', c), ('iadd', ('imul', a, b), c)),

   # These will turn 24b address/offset calculations back into 32b shifts,
   # but it should be safe to get back some of the bits of precision that we
   # already decided were not necessary:
   (('imul24', a, '#b@32(is_pos_power_of_two)'), ('ishl', a, ('find_lsb', b)), '!options->lower_bitops'),
   (('imul24', a, '#b@32(is_neg_power_of_two)'), ('ineg', ('ishl', a, ('find_lsb', ('iabs', b)))), '!options->lower_bitops'),
   (('imul24', a, 0), (0)),
])

# bit_size dependent lowerings
for bit_size in [8, 16, 32, 64]:
   # convenience constants
   intmax = (1 << (bit_size - 1)) - 1
   intmin = 1 << (bit_size - 1)
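   # Note (editorial): intmin is written as an unsigned value; truncated to
   # bit_size bits it is the bit pattern of the two's-complement minimum,
   # e.g. 0x80 at 8 bits, which reads back as -128 alongside intmax = 0x7f.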

   optimizations += [
      (('iadd_sat@' + str(bit_size), a, b),
       ('bcsel', ('ige', b, 1), ('bcsel', ('ilt', ('iadd', a, b), a), intmax, ('iadd', a, b)),
                                ('bcsel', ('ilt', a, ('iadd', a, b)), intmin, ('iadd', a, b))), 'options->lower_add_sat'),
      (('isub_sat@' + str(bit_size), a, b),
       ('bcsel', ('ilt', b, 0), ('bcsel', ('ilt', ('isub', a, b), a), intmax, ('isub', a, b)),
                                ('bcsel', ('ilt', a, ('isub', a, b)), intmin, ('isub', a, b))), 'options->lower_add_sat'),
   ]

invert = OrderedDict([('feq', 'fne'), ('fne', 'feq'), ('fge', 'flt'), ('flt', 'fge')])

for left, right in itertools.combinations_with_replacement(invert.keys(), 2):
   optimizations.append((('inot', ('ior(is_used_once)', (left, a, b), (right, c, d))),
                         ('iand', (invert[left], a, b), (invert[right], c, d))))
   optimizations.append((('inot', ('iand(is_used_once)', (left, a, b), (right, c, d))),
                         ('ior', (invert[left], a, b), (invert[right], c, d))))

# Optimize x2bN(b2x(x)) -> x
for size in type_sizes('bool'):
   aN = 'a@' + str(size)
   f2bN = 'f2b' + str(size)
   i2bN = 'i2b' + str(size)
   optimizations.append(((f2bN, ('b2f', aN)), a))
   optimizations.append(((i2bN, ('b2i', aN)), a))

# Optimize x2yN(b2x(x)) -> b2y(x)
for x, y in itertools.product(['f', 'u', 'i'], ['f', 'u', 'i']):
   if x != 'f' and y != 'f' and x != y:
      continue

   b2x = 'b2f' if x == 'f' else 'b2i'
   b2y = 'b2f' if y == 'f' else 'b2i'
   x2yN = '{}2{}'.format(x, y)
   optimizations.append(((x2yN, (b2x, a)), (b2y, a)))

# Optimize away x2xN(a@N)
for t in ['int', 'uint', 'float']:
   for N in type_sizes(t):
      x2xN = '{0}2{0}{1}'.format(t[0], N)
      aN = 'a@{0}'.format(N)
      optimizations.append(((x2xN, aN), a))

# Optimize x2xN(y2yM(a@P)) -> y2yN(a) for integers
# In particular, we can optimize away everything except upcast of downcast and
# upcasts where the type differs from the other cast
for N, M in itertools.product(type_sizes('uint'), type_sizes('uint')):
   if N < M:
      # The outer cast is a down-cast.  It doesn't matter what the size of
      # the argument of the inner cast is because we'll never be in the
      # up-cast of down-cast case.  Regardless of types, we'll always end up
      # with y2yN in the end.
      for x, y in itertools.product(['i', 'u'], ['i', 'u']):
         x2xN = '{0}2{0}{1}'.format(x, N)
         y2yM = '{0}2{0}{1}'.format(y, M)
         y2yN = '{0}2{0}{1}'.format(y, N)
         optimizations.append(((x2xN, (y2yM, a)), (y2yN, a)))
   elif N > M:
      # If the outer cast is an up-cast, we have to be more careful about
      # the size of the argument of the inner cast and with types.  In this
      # case, the type is always the type of the up-cast, which is given by
      # the outer cast.
      for P in type_sizes('uint'):
         # We can't optimize away up-cast of down-cast.
         if M < P:
            continue

         # Because we're doing an up-cast of an up-cast, the types always
         # have to match between the two casts.
         for x in ['i', 'u']:
            x2xN = '{0}2{0}{1}'.format(x, N)
            x2xM = '{0}2{0}{1}'.format(x, M)
            aP = 'a@{0}'.format(P)
            optimizations.append(((x2xN, (x2xM, aP)), (x2xN, a)))
   else:
      # The N == M case is handled by the no-op cast optimization above.
      pass
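# For instance (editorial note), u2u16(i2i32(a@8)) folds to i2i16(a): the
# down-cast keeps only the low 16 bits of the sign-extension, which is
# exactly i2i16 of the 8-bit source.  Similarly, u2u64(u2u32(a@16)) folds to
# u2u64(a).  An up-cast of a down-cast such as u2u32(u2u16(a@32)) is left
# alone because the inner truncation is observable.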

# Optimize comparisons with up-casts
for t in ['int', 'uint', 'float']:
   for N, M in itertools.product(type_sizes(t), repeat=2):
      if N == 1 or N >= M:
         continue

      x2xM = '{0}2{0}{1}'.format(t[0], M)
      x2xN = '{0}2{0}{1}'.format(t[0], N)
      aN = 'a@' + str(N)
      bN = 'b@' + str(N)
      xeq = 'feq' if t == 'float' else 'ieq'
      xne = 'fne' if t == 'float' else 'ine'
      xge = '{0}ge'.format(t[0])
      xlt = '{0}lt'.format(t[0])

      # Up-casts are lossless so for correctly signed comparisons of
      # up-casted values we can do the comparison at the largest of the two
      # original sizes and drop one or both of the casts.  (We have
      # optimizations to drop the no-op casts which this may generate.)
      for P in type_sizes(t):
         if P == 1 or P > N:
            continue

         bP = 'b@' + str(P)
         optimizations += [
            ((xeq, (x2xM, aN), (x2xM, bP)), (xeq, a, (x2xN, b))),
            ((xne, (x2xM, aN), (x2xM, bP)), (xne, a, (x2xN, b))),
            ((xge, (x2xM, aN), (x2xM, bP)), (xge, a, (x2xN, b))),
            ((xlt, (x2xM, aN), (x2xM, bP)), (xlt, a, (x2xN, b))),
            ((xge, (x2xM, bP), (x2xM, aN)), (xge, (x2xN, b), a)),
            ((xlt, (x2xM, bP), (x2xM, aN)), (xlt, (x2xN, b), a)),
         ]

      # The next bit doesn't work on floats because the range checks would
      # get way too complicated.
      if t in ['int', 'uint']:
         if t == 'int':
            xN_min = -(1 << (N - 1))
            xN_max = (1 << (N - 1)) - 1
         elif t == 'uint':
            xN_min = 0
            xN_max = (1 << N) - 1
         else:
            assert False

         # If we're up-casting and comparing to a constant, we can unfold
         # the comparison into a comparison with the shrunk down constant
         # and a check that the constant fits in the smaller bit size.
         optimizations += [
            ((xeq, (x2xM, aN), '#b'),
             ('iand', (xeq, a, (x2xN, b)), (xeq, (x2xM, (x2xN, b)), b))),
            ((xne, (x2xM, aN), '#b'),
             ('ior', (xne, a, (x2xN, b)), (xne, (x2xM, (x2xN, b)), b))),
            ((xlt, (x2xM, aN), '#b'),
             ('iand', (xlt, xN_min, b),
                      ('ior', (xlt, xN_max, b), (xlt, a, (x2xN, b))))),
            ((xlt, '#a', (x2xM, bN)),
             ('iand', (xlt, a, xN_max),
                      ('ior', (xlt, a, xN_min), (xlt, (x2xN, a), b)))),
            ((xge, (x2xM, aN), '#b'),
             ('iand', (xge, xN_max, b),
                      ('ior', (xge, xN_min, b), (xge, a, (x2xN, b))))),
            ((xge, '#a', (x2xM, bN)),
             ('iand', (xge, a, xN_min),
                      ('ior', (xge, a, xN_max), (xge, (x2xN, a), b)))),
         ]
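         # Sketch of the unfold (editorial note): at N = 8, M = 32,
         # ieq(i2i32(a@8), 300) becomes iand(ieq(a, i2i8(300)),
         # ieq(i2i32(i2i8(300)), 300)).  Since i2i8(300) = 44 and
         # i2i32(44) != 300, constant folding collapses the whole expression
         # to false; for in-range constants only the narrow compare survives.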

def fexp2i(exp, bits):
   # We assume that exp is already in the right range.
   if bits == 16:
      return ('i2i16', ('ishl', ('iadd', exp, 15), 10))
   elif bits == 32:
      return ('ishl', ('iadd', exp, 127), 23)
   elif bits == 64:
      return ('pack_64_2x32_split', 0, ('ishl', ('iadd', exp, 1023), 20))
   else:
      assert False
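
# A quick bit-layout reminder for fexp2i (editorial note): the biased
# exponent field starts at bit 10 (bias 15), bit 23 (bias 127), and bit 52
# (bias 1023) for 16-, 32-, and 64-bit floats; the 64-bit case shifts by 20
# because it only builds the high 32-bit half.  For example, fexp2i(0, 32)
# constructs (0 + 127) << 23 == 0x3f800000, the bit pattern of 1.0f.
assert (0 + 127) << 23 == 0x3f800000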

def ldexp(f, exp, bits):
   # First, we clamp exp to a reasonable range.  The maximum possible range
   # for a normal exponent is [-126, 127] and, throwing in denormals, you get
   # a maximum range of [-149, 127].  This means that we can potentially have
   # a swing of +-276.  If you start with FLT_MAX, you actually have to do
   # ldexp(FLT_MAX, -278) to get it to flush all the way to zero.  The GLSL
   # spec, on the other hand, only requires that we handle an exponent value
   # in the range [-126, 128].  This implementation is *mostly* correct; it
   # handles a range on exp of [-252, 254] which allows you to create any
   # value (including denorms if the hardware supports it) and to adjust the
   # exponent of any normal value to anything you want.
   if bits == 16:
      exp = ('imin', ('imax', exp, -28), 30)
   elif bits == 32:
      exp = ('imin', ('imax', exp, -252), 254)
   elif bits == 64:
      exp = ('imin', ('imax', exp, -2044), 2046)
   else:
      assert False

   # Now we compute two powers of 2, one for exp/2 and one for exp - exp/2.
   # (We use ishr, which rounds differently from division for negative odd
   # exponents, but those cases still work because we use exp - exp/2 as the
   # second exponent.)  While the spec technically defines ldexp as
   # f * 2.0^exp, simply multiplying once doesn't work with denormals and
   # doesn't allow for the full swing in exponents that you can get with
   # normalized values.  Instead, we create two powers of two and multiply
   # by them each in turn.  That way the effective range of our exponent is
   # doubled.
   pow2_1 = fexp2i(('ishr', exp, 1), bits)
   pow2_2 = fexp2i(('isub', exp, ('ishr', exp, 1)), bits)
   return ('fmul', ('fmul', f, pow2_1), pow2_2)
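
# Worked example of the split (editorial note): ldexp(f, 254, 32) becomes
# f * 2^127 * 2^127, where each factor has a representable exponent even
# though 2^254 itself does not; ishr(254, 1) = 127 and 254 - 127 = 127.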

optimizations += [
   (('ldexp@16', 'x', 'exp'), ldexp('x', 'exp', 16), 'options->lower_ldexp'),
   (('ldexp@32', 'x', 'exp'), ldexp('x', 'exp', 32), 'options->lower_ldexp'),
   (('ldexp@64', 'x', 'exp'), ldexp('x', 'exp', 64), 'options->lower_ldexp'),
]

# Unreal Engine 4 demo applications open-code bitfieldReverse()
def bitfield_reverse(u):
   step1 = ('ior', ('ishl', u, 16), ('ushr', u, 16))
   step2 = ('ior', ('ishl', ('iand', step1, 0x00ff00ff), 8), ('ushr', ('iand', step1, 0xff00ff00), 8))
   step3 = ('ior', ('ishl', ('iand', step2, 0x0f0f0f0f), 4), ('ushr', ('iand', step2, 0xf0f0f0f0), 4))
   step4 = ('ior', ('ishl', ('iand', step3, 0x33333333), 2), ('ushr', ('iand', step3, 0xcccccccc), 2))
   step5 = ('ior(many-comm-expr)', ('ishl', ('iand', step4, 0x55555555), 1), ('ushr', ('iand', step4, 0xaaaaaaaa), 1))

   return step5
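
# The steps above are the classic butterfly reversal: swap the 16-bit
# halves, then the bytes within each half, then nibbles, bit pairs, and
# finally adjacent bits.  Tracing u = 0x00000001 (editorial note):
# 0x00010000 -> 0x01000000 -> 0x10000000 -> 0x40000000 -> 0x80000000,
# i.e. bit 0 ends up in bit 31 as expected.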

optimizations += [(bitfield_reverse('x@32'), ('bitfield_reverse', 'x'), '!options->lower_bitfield_reverse')]

# For any float comparison operation, "cmp", if you have "a == a && a cmp b"
# then the "a == a" is redundant because it's equivalent to "a is not NaN"
# and, if a is a NaN, the second comparison will fail anyway.
for op in ['flt', 'fge', 'feq']:
   optimizations += [
      (('iand', ('feq', a, a), (op, a, b)), ('!' + op, a, b)),
      (('iand', ('feq', a, a), (op, b, a)), ('!' + op, b, a)),
   ]

# Add optimizations to handle the case where the result of a ternary is
# compared to a constant.  This way we can take things like
#
#    (a ? 0 : 1) > 0
#
# and turn it into
#
#    a ? (0 > 0) : (1 > 0)
#
# which constant folding will eat for lunch.  The resulting ternary will
# further get cleaned up by the boolean reductions above and we will be
# left with just the original variable "a".
for op in ['flt', 'fge', 'feq', 'fne',
           'ilt', 'ige', 'ieq', 'ine', 'ult', 'uge']:
   optimizations += [
      ((op, ('bcsel', 'a', '#b', '#c'), '#d'),
       ('bcsel', 'a', (op, 'b', 'd'), (op, 'c', 'd'))),
      ((op, '#d', ('bcsel', a, '#b', '#c')),
       ('bcsel', 'a', (op, 'd', 'b'), (op, 'd', 'c'))),
   ]


# For example, this converts things like
#
#    1 + mix(0, a - 1, condition)
#
# into
#
#    mix(1, (a - 1) + 1, condition)
#
# Other optimizations will rearrange the constants.
for op in ['fadd', 'fmul', 'iadd', 'imul']:
   optimizations += [
      ((op, ('bcsel(is_used_once)', a, '#b', c), '#d'), ('bcsel', a, (op, b, d), (op, c, d)))
   ]

# For derivatives in compute shaders, GLSL_NV_compute_shader_derivatives
# states:
#
#    If neither layout qualifier is specified, derivatives in compute shaders
#    return zero, which is consistent with the handling of built-in texture
#    functions like texture() in GLSL 4.50 compute shaders.
for op in ['fddx', 'fddx_fine', 'fddx_coarse',
           'fddy', 'fddy_fine', 'fddy_coarse']:
   optimizations += [
      ((op, 'a'), 0.0, 'info->stage == MESA_SHADER_COMPUTE && info->cs.derivative_group == DERIVATIVE_GROUP_NONE')
   ]

# Some optimizations for ir3-specific instructions.
optimizations += [
   # 'al * bl': If either 'al' or 'bl' is zero, return zero.
   (('umul_low', '#a(is_lower_half_zero)', 'b'), (0)),
   # '(ah * bl) << 16 + c': If either 'ah' or 'bl' is zero, return 'c'.
   (('imadsh_mix16', '#a@32(is_lower_half_zero)', 'b@32', 'c@32'), ('c')),
   (('imadsh_mix16', 'a@32', '#b@32(is_upper_half_zero)', 'c@32'), ('c')),
]

# These kinds of sequences can occur after nir_opt_peephole_select.
#
# NOTE: fadd is not handled here because that gets in the way of ffma
# generation in the i965 driver.  Instead, fadd and ffma are handled in
# late_optimizations.

for op in ['flrp']:
   optimizations += [
      (('bcsel', a, (op + '(is_used_once)', b, c, d), (op, b, c, e)), (op, b, c, ('bcsel', a, d, e))),
      (('bcsel', a, (op, b, c, d), (op + '(is_used_once)', b, c, e)), (op, b, c, ('bcsel', a, d, e))),
      (('bcsel', a, (op + '(is_used_once)', b, c, d), (op, b, e, d)), (op, b, ('bcsel', a, c, e), d)),
      (('bcsel', a, (op, b, c, d), (op + '(is_used_once)', b, e, d)), (op, b, ('bcsel', a, c, e), d)),
      (('bcsel', a, (op + '(is_used_once)', b, c, d), (op, e, c, d)), (op, ('bcsel', a, b, e), c, d)),
      (('bcsel', a, (op, b, c, d), (op + '(is_used_once)', e, c, d)), (op, ('bcsel', a, b, e), c, d)),
   ]

for op in ['fmul', 'iadd', 'imul', 'iand', 'ior', 'ixor', 'fmin', 'fmax', 'imin', 'imax', 'umin', 'umax']:
   optimizations += [
      (('bcsel', a, (op + '(is_used_once)', b, c), (op, b, 'd(is_not_const)')), (op, b, ('bcsel', a, c, d))),
      (('bcsel', a, (op + '(is_used_once)', b, 'c(is_not_const)'), (op, b, d)), (op, b, ('bcsel', a, c, d))),
      (('bcsel', a, (op, b, 'c(is_not_const)'), (op + '(is_used_once)', b, d)), (op, b, ('bcsel', a, c, d))),
      (('bcsel', a, (op, b, c), (op + '(is_used_once)', b, 'd(is_not_const)')), (op, b, ('bcsel', a, c, d))),
   ]

for op in ['fpow']:
   optimizations += [
      (('bcsel', a, (op + '(is_used_once)', b, c), (op, b, d)), (op, b, ('bcsel', a, c, d))),
      (('bcsel', a, (op, b, c), (op + '(is_used_once)', b, d)), (op, b, ('bcsel', a, c, d))),
      (('bcsel', a, (op + '(is_used_once)', b, c), (op, d, c)), (op, ('bcsel', a, b, d), c)),
      (('bcsel', a, (op, b, c), (op + '(is_used_once)', d, c)), (op, ('bcsel', a, b, d), c)),
   ]

for op in ['frcp', 'frsq', 'fsqrt', 'fexp2', 'flog2', 'fsign', 'fsin', 'fcos']:
   optimizations += [
      (('bcsel', a, (op + '(is_used_once)', b), (op, c)), (op, ('bcsel', a, b, c))),
      (('bcsel', a, (op, b), (op + '(is_used_once)', c)), (op, ('bcsel', a, b, c))),
   ]

# This section contains "late" optimizations that should be run before
# creating ffmas and calling regular optimizations for the final time.
# Optimizations should go here if they help code generation and conflict
# with the regular optimizations.
before_ffma_optimizations = [
   # Propagate constants down multiplication chains
   (('~fmul(is_used_once)', ('fmul(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('fmul', ('fmul', a, c), b)),
   (('imul(is_used_once)', ('imul(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('imul', ('imul', a, c), b)),
   (('~fadd(is_used_once)', ('fadd(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('fadd', ('fadd', a, c), b)),
   (('iadd(is_used_once)', ('iadd(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('iadd', ('iadd', a, c), b)),

   (('~fadd', ('fmul', a, b), ('fmul', a, c)), ('fmul', a, ('fadd', b, c))),
   (('iadd', ('imul', a, b), ('imul', a, c)), ('imul', a, ('iadd', b, c))),
   (('~fadd', ('fneg', a), a), 0.0),
   (('iadd', ('ineg', a), a), 0),
   (('iadd', ('ineg', a), ('iadd', a, b)), b),
   (('iadd', a, ('iadd', ('ineg', a), b)), b),
   (('~fadd', ('fneg', a), ('fadd', a, b)), b),
   (('~fadd', a, ('fadd', ('fneg', a), b)), b),

   (('~flrp@32', ('fadd(is_used_once)', a, -1.0), ('fadd(is_used_once)', a, 1.0), d), ('fadd', ('flrp', -1.0, 1.0, d), a)),
   (('~flrp@32', ('fadd(is_used_once)', a, 1.0), ('fadd(is_used_once)', a, -1.0), d), ('fadd', ('flrp', 1.0, -1.0, d), a)),
   (('~flrp@32', ('fadd(is_used_once)', a, '#b'), ('fadd(is_used_once)', a, '#c'), d), ('fadd', ('fmul', d, ('fadd', c, ('fneg', b))), ('fadd', a, b))),
]

# This section contains "late" optimizations that should be run after the
# regular optimizations have finished.  Optimizations should go here if
# they help code generation but do not necessarily produce code that is
# more easily optimizable.
late_optimizations = [
   # Most of these optimizations aren't quite safe when you get infinity or
   # NaN involved, but the first one should be fine.
   (('flt', ('fadd', a, b), 0.0), ('flt', a, ('fneg', b))),
   (('flt', ('fneg', ('fadd', a, b)), 0.0), ('flt', ('fneg', a), b)),
   (('~fge', ('fadd', a, b), 0.0), ('fge', a, ('fneg', b))),
   (('~fge', ('fneg', ('fadd', a, b)), 0.0), ('fge', ('fneg', a), b)),
   (('~feq', ('fadd', a, b), 0.0), ('feq', a, ('fneg', b))),
   (('~fne', ('fadd', a, b), 0.0), ('fne', a, ('fneg', b))),

   # nir_lower_to_source_mods will collapse this, but its existence during
   # the optimization loop can prevent other optimizations.
   (('fneg', ('fneg', a)), a),

   # Subtractions get lowered during optimization, so we need to recombine them
   (('fadd', 'a', ('fneg', 'b')), ('fsub', 'a', 'b'), '!options->lower_sub'),
   (('iadd', 'a', ('ineg', 'b')), ('isub', 'a', 'b'), '!options->lower_sub'),
   (('fneg', a), ('fsub', 0.0, a), 'options->lower_negate'),
   (('ineg', a), ('isub', 0, a), 'options->lower_negate'),

   # These are duplicated from the main optimizations table.  The late
   # patterns that rearrange expressions like x - .5 < 0 to x < .5 can create
   # new patterns like these.  The patterns that compare with zero are removed
   # because they are unlikely to be created by anything in
   # late_optimizations.
   (('flt', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('flt', a, b)),
   (('flt', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('flt', b, a)),
   (('fge', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fge', a, b)),
   (('fge', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('fge', b, a)),
   (('feq', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('feq', a, b)),
   (('fne', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fne', a, b)),

   (('fge', ('fsat(is_used_once)', a), 1.0), ('fge', a, 1.0)),
   (('flt', ('fsat(is_used_once)', a), 1.0), ('flt', a, 1.0)),

   (('~fge', ('fmin(is_used_once)', ('fadd(is_used_once)', a, b), ('fadd', c, d)), 0.0), ('iand', ('fge', a, ('fneg', b)), ('fge', c, ('fneg', d)))),

   (('flt', ('fneg', a), ('fneg', b)), ('flt', b, a)),
   (('fge', ('fneg', a), ('fneg', b)), ('fge', b, a)),
   (('feq', ('fneg', a), ('fneg', b)), ('feq', b, a)),
   (('fne', ('fneg', a), ('fneg', b)), ('fne', b, a)),
   (('flt', ('fneg', a), -1.0), ('flt', 1.0, a)),
   (('flt', -1.0, ('fneg', a)), ('flt', a, 1.0)),
   (('fge', ('fneg', a), -1.0), ('fge', 1.0, a)),
   (('fge', -1.0, ('fneg', a)), ('fge', a, 1.0)),
   (('fne', ('fneg', a), -1.0), ('fne', 1.0, a)),
   (('feq', -1.0, ('fneg', a)), ('feq', a, 1.0)),

   (('ior', a, a), a),
   (('iand', a, a), a),

   (('~fadd', ('fneg(is_used_once)', ('fsat(is_used_once)', 'a(is_not_fmul)')), 1.0), ('fsat', ('fadd', 1.0, ('fneg', a)))),

   (('fdot2', a, b), ('fdot_replicated2', a, b), 'options->fdot_replicates'),
   (('fdot3', a, b), ('fdot_replicated3', a, b), 'options->fdot_replicates'),
   (('fdot4', a, b), ('fdot_replicated4', a, b), 'options->fdot_replicates'),
   (('fdph', a, b), ('fdph_replicated', a, b), 'options->fdot_replicates'),

   (('~flrp@32', ('fadd(is_used_once)', a, b), ('fadd(is_used_once)', a, c), d), ('fadd', ('flrp', b, c, d), a)),
   (('~flrp@64', ('fadd(is_used_once)', a, b), ('fadd(is_used_once)', a, c), d), ('fadd', ('flrp', b, c, d), a)),

   (('~fadd@32', 1.0, ('fmul(is_used_once)', c, ('fadd', b, -1.0))), ('fadd', ('fadd', 1.0, ('fneg', c)), ('fmul', b, c)), 'options->lower_flrp32'),
   (('~fadd@64', 1.0, ('fmul(is_used_once)', c, ('fadd', b, -1.0))), ('fadd', ('fadd', 1.0, ('fneg', c)), ('fmul', b, c)), 'options->lower_flrp64'),

   # A similar operation could apply to any ffma(#a, b, #(-a/2)), but this
   # particular operation is common for expanding values stored in a texture
   # from [0,1] to [-1,1].
   (('~ffma@32', a, 2.0, -1.0), ('flrp', -1.0, 1.0, a), '!options->lower_flrp32'),
   (('~ffma@32', a, -2.0, -1.0), ('flrp', -1.0, 1.0, ('fneg', a)), '!options->lower_flrp32'),
   (('~ffma@32', a, -2.0, 1.0), ('flrp', 1.0, -1.0, a), '!options->lower_flrp32'),
   (('~ffma@32', a, 2.0, 1.0), ('flrp', 1.0, -1.0, ('fneg', a)), '!options->lower_flrp32'),
   (('~fadd@32', ('fmul(is_used_once)', 2.0, a), -1.0), ('flrp', -1.0, 1.0, a), '!options->lower_flrp32'),
   (('~fadd@32', ('fmul(is_used_once)', -2.0, a), -1.0), ('flrp', -1.0, 1.0, ('fneg', a)), '!options->lower_flrp32'),
   (('~fadd@32', ('fmul(is_used_once)', -2.0, a), 1.0), ('flrp', 1.0, -1.0, a), '!options->lower_flrp32'),
   (('~fadd@32', ('fmul(is_used_once)', 2.0, a), 1.0), ('flrp', 1.0, -1.0, ('fneg', a)), '!options->lower_flrp32'),

   # flrp(a, b, a)
   # a*(1-a) + b*a
   # a + -a*a + a*b    (1)
   # a + a*(b - a)
   # Option 1: ffma(a, (b-a), a)
   #
   # Alternately, after (1):
   # a*(1+b) + -a*a
   # a*((1+b) + -a)
   #
   # Let b=1
   #
   # Option 2: ffma(a, 2, -(a*a))
   # Option 3: ffma(a, 2, (-a)*a)
   # Option 4: ffma(a, -a, (2*a))
   # Option 5: a * (2 - a)
   #
   # There are a lot of other possible combinations.
   (('~ffma@32', ('fadd', b, ('fneg', a)), a, a), ('flrp', a, b, a), '!options->lower_flrp32'),
   (('~ffma@32', a, 2.0, ('fneg', ('fmul', a, a))), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
   (('~ffma@32', a, 2.0, ('fmul', ('fneg', a), a)), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
   (('~ffma@32', a, ('fneg', a), ('fmul', 2.0, a)), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
   (('~fmul@32', a, ('fadd', 2.0, ('fneg', a))), ('flrp', a, 1.0, a), '!options->lower_flrp32'),

   # we do these late so that we don't get in the way of creating ffmas
   (('fmin', ('fadd(is_used_once)', '#c', a), ('fadd(is_used_once)', '#c', b)), ('fadd', c, ('fmin', a, b))),
   (('fmax', ('fadd(is_used_once)', '#c', a), ('fadd(is_used_once)', '#c', b)), ('fadd', c, ('fmax', a, b))),

   (('bcsel', a, 0, ('b2f32', ('inot', 'b@bool'))), ('b2f32', ('inot', ('ior', a, b)))),

   # Putting this in 'optimizations' interferes with the bcsel(a, op(b, c),
   # op(b, d)) => op(b, bcsel(a, c, d)) transformations.  I do not know why.
   (('bcsel', ('feq', ('fsqrt', 'a(is_not_negative)'), 0.0), intBitsToFloat(0x7f7fffff), ('frsq', a)),
    ('fmin', ('frsq', a), intBitsToFloat(0x7f7fffff))),
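
   # 0x7f7fffff is the bit pattern of FLT_MAX, so the replacement clamps
   # frsq's infinite result at sqrt(0) to the largest finite float
   # (editorial note).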

   # Things that look like DPH in the source shader may get expanded to
   # something that looks like dot(v1.xyz, v2.xyz) + v1.w by the time it gets
   # to NIR.  After FFMA is generated, this can look like:
   #
   #    fadd(ffma(v1.z, v2.z, ffma(v1.y, v2.y, fmul(v1.x, v2.x))), v1.w)
   #
   # Reassociate the last addition into the first multiplication.
   #
   # Some shaders do not use 'invariant' in vertex and (possibly) geometry
   # shader stages on some outputs that are intended to be invariant.  For
   # various reasons, this optimization may not be fully applied in all
   # shaders used for different rendering passes of the same geometry.  This
   # can result in Z-fighting artifacts (at best).  For now, disable this
   # optimization in these stages.  See bugzilla #111490.  In tessellation
   # stages applications seem to use 'precise' when necessary, so allow the
   # optimization in those stages.
   (('~fadd', ('ffma(is_used_once)', a, b, ('ffma', c, d, ('fmul', 'e(is_not_const_and_not_fsign)', 'f(is_not_const_and_not_fsign)'))), 'g(is_not_const)'),
    ('ffma', a, b, ('ffma', c, d, ('ffma', e, 'f', 'g'))), '(info->stage != MESA_SHADER_VERTEX && info->stage != MESA_SHADER_GEOMETRY) && !options->intel_vec4'),
   (('~fadd', ('ffma(is_used_once)', a, b, ('fmul', 'c(is_not_const_and_not_fsign)', 'd(is_not_const_and_not_fsign)')), 'e(is_not_const)'),
    ('ffma', a, b, ('ffma', c, d, e)), '(info->stage != MESA_SHADER_VERTEX && info->stage != MESA_SHADER_GEOMETRY) && !options->intel_vec4'),
]

for op in ['fadd']:
   late_optimizations += [
      (('bcsel', a, (op + '(is_used_once)', b, c), (op, b, d)), (op, b, ('bcsel', a, c, d))),
      (('bcsel', a, (op, b, c), (op + '(is_used_once)', b, d)), (op, b, ('bcsel', a, c, d))),
   ]

for op in ['ffma']:
   late_optimizations += [
      (('bcsel', a, (op + '(is_used_once)', b, c, d), (op, b, c, e)), (op, b, c, ('bcsel', a, d, e))),
      (('bcsel', a, (op, b, c, d), (op + '(is_used_once)', b, c, e)), (op, b, c, ('bcsel', a, d, e))),

      (('bcsel', a, (op + '(is_used_once)', b, c, d), (op, b, e, d)), (op, b, ('bcsel', a, c, e), d)),
      (('bcsel', a, (op, b, c, d), (op + '(is_used_once)', b, e, d)), (op, b, ('bcsel', a, c, e), d)),
   ]

print(nir_algebraic.AlgebraicPass("nir_opt_algebraic", optimizations).render())
print(nir_algebraic.AlgebraicPass("nir_opt_algebraic_before_ffma",
                                  before_ffma_optimizations).render())
print(nir_algebraic.AlgebraicPass("nir_opt_algebraic_late",
                                  late_optimizations).render())