nir/algebraic: add option to lower fdph
[mesa.git] / src/compiler/nir/nir_opt_algebraic.py
#
# Copyright (C) 2014 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
#    Jason Ekstrand (jason@jlekstrand.net)

from __future__ import print_function

from collections import OrderedDict
import nir_algebraic
from nir_opcodes import type_sizes
import itertools
from math import pi

# Convenience variables
a = 'a'
b = 'b'
c = 'c'
d = 'd'
e = 'e'

# Written in the form (<search>, <replace>) where <search> is an expression
# and <replace> is either an expression or a value. An expression is
# defined as a tuple of the form ([~]<op>, <src0>, <src1>, <src2>, <src3>)
# where each source is either an expression or a value. A value can be
# either a numeric constant or a string representing a variable name.
#
# If the opcode in a search expression is prefixed by a '~' character, this
# indicates that the operation is inexact. Such operations will only get
# applied to SSA values that do not have the exact bit set. This should be
# used by any optimizations that are not bit-for-bit exact. It should not,
# however, be used for backend-requested lowering operations as those need to
# happen regardless of precision.
#
# Variable names are specified as "[#]name[@type][(cond)][.swiz]" where:
# "#" indicates that the given variable will only match constants,
# type indicates that the given variable will only match values from ALU
#    instructions with the given output type,
# (cond) specifies an additional condition function (see nir_search_helpers.h),
# swiz is a swizzle applied to the variable (only in the <replace> expression)
#
# For constants, you have to be careful to make sure that it is the right
# type because python is unaware of the source and destination types of the
# opcodes.
#
# All expression types can have a bit-size specified. For opcodes, this
# looks like "op@32", for variables it is "a@32" or "a@uint32" to specify a
# type and size. In the search half of the expression this indicates that it
# should only match that particular bit-size. In the replace half of the
# expression this indicates that the constructed value should have that
# bit-size.
#
# A special condition "many-comm-expr" can be used with expressions to note
# that the expression and its subexpressions have more commutative expressions
# than nir_replace_instr can handle. If this special condition is needed with
# another condition, the two can be separated by a comma (e.g.,
# "(many-comm-expr,is_used_once)").

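# For example, the rule
#
#    (('fabs', ('fneg', a)), ('fabs', a))
#
# from the list below matches a fabs of a fneg of any value and replaces it
# with a fabs of the value itself, since negation never changes a magnitude.
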
# based on https://web.archive.org/web/20180105155939/http://forum.devmaster.net/t/fast-and-accurate-sine-cosine/9648
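# The idea from that post: map the angle onto one period, t = 2*fract(a/(2*pi)
# + c) - 1 in [-1, 1], approximate sine with the parabola 4*t*(1 - |t|), then
# blend the parabola's result y with y*|y| as y + 0.225*(y*|y| - y) to reduce
# the peak error. The phase constant c is 0.5 for sine and 0.75 for cosine
# (a quarter period, i.e. pi/2, further along), as used by the fsin and fcos
# rules below.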
def lowered_sincos(c):
   x = ('fsub', ('fmul', 2.0, ('ffract', ('fadd', ('fmul', 0.5 / pi, a), c))), 1.0)
   x = ('fmul', ('fsub', x, ('fmul', x, ('fabs', x))), 4.0)
   return ('ffma', ('ffma', x, ('fabs', x), ('fneg', x)), 0.225, x)

optimizations = [

   (('imul', a, '#b@32(is_pos_power_of_two)'), ('ishl', a, ('find_lsb', b)), '!options->lower_bitshift'),
   (('imul', a, '#b@32(is_neg_power_of_two)'), ('ineg', ('ishl', a, ('find_lsb', ('iabs', b)))), '!options->lower_bitshift'),
   (('ishl', a, '#b@32'), ('imul', a, ('ishl', 1, b)), 'options->lower_bitshift'),

   (('unpack_64_2x32_split_x', ('imul_2x32_64(is_used_once)', a, b)), ('imul', a, b)),
   (('unpack_64_2x32_split_x', ('umul_2x32_64(is_used_once)', a, b)), ('imul', a, b)),
   (('imul_2x32_64', a, b), ('pack_64_2x32_split', ('imul', a, b), ('imul_high', a, b)), 'options->lower_mul_2x32_64'),
   (('umul_2x32_64', a, b), ('pack_64_2x32_split', ('imul', a, b), ('umul_high', a, b)), 'options->lower_mul_2x32_64'),
   (('udiv', a, 1), a),
   (('idiv', a, 1), a),
   (('umod', a, 1), 0),
   (('imod', a, 1), 0),
   (('udiv', a, '#b@32(is_pos_power_of_two)'), ('ushr', a, ('find_lsb', b)), '!options->lower_bitshift'),
   (('idiv', a, '#b@32(is_pos_power_of_two)'), ('imul', ('isign', a), ('ushr', ('iabs', a), ('find_lsb', b))), 'options->lower_idiv'),
   (('idiv', a, '#b@32(is_neg_power_of_two)'), ('ineg', ('imul', ('isign', a), ('ushr', ('iabs', a), ('find_lsb', ('iabs', b))))), 'options->lower_idiv'),
   (('umod', a, '#b(is_pos_power_of_two)'), ('iand', a, ('isub', b, 1))),

   (('fneg', ('fneg', a)), a),
   (('ineg', ('ineg', a)), a),
   (('fabs', ('fabs', a)), ('fabs', a)),
   (('fabs', ('fneg', a)), ('fabs', a)),
   (('fabs', ('u2f', a)), ('u2f', a)),
   (('iabs', ('iabs', a)), ('iabs', a)),
   (('iabs', ('ineg', a)), ('iabs', a)),
   (('f2b', ('fneg', a)), ('f2b', a)),
   (('i2b', ('ineg', a)), ('i2b', a)),
   (('~fadd', a, 0.0), a),
   (('iadd', a, 0), a),
   (('usadd_4x8', a, 0), a),
   (('usadd_4x8', a, ~0), ~0),
   (('~fadd', ('fmul', a, b), ('fmul', a, c)), ('fmul', a, ('fadd', b, c))),
   (('iadd', ('imul', a, b), ('imul', a, c)), ('imul', a, ('iadd', b, c))),
   (('~fadd', ('fneg', a), a), 0.0),
   (('iadd', ('ineg', a), a), 0),
   (('iadd', ('ineg', a), ('iadd', a, b)), b),
   (('iadd', a, ('iadd', ('ineg', a), b)), b),
   (('~fadd', ('fneg', a), ('fadd', a, b)), b),
   (('~fadd', a, ('fadd', ('fneg', a), b)), b),
   (('fadd', ('fsat', a), ('fsat', ('fneg', a))), ('fsat', ('fabs', a))),
   (('~fmul', a, 0.0), 0.0),
   (('imul', a, 0), 0),
   (('umul_unorm_4x8', a, 0), 0),
   (('umul_unorm_4x8', a, ~0), a),
   (('fmul', a, 1.0), a),
   (('imul', a, 1), a),
   (('fmul', a, -1.0), ('fneg', a)),
   (('imul', a, -1), ('ineg', a)),
   # If a < 0: fsign(a)*a*a => -1*a*a => -a*a => abs(a)*a
   # If a > 0: fsign(a)*a*a => 1*a*a => a*a => abs(a)*a
   # If a == 0: fsign(a)*a*a => 0*0*0 => abs(0)*0
   (('fmul', ('fsign', a), ('fmul', a, a)), ('fmul', ('fabs', a), a)),
   (('fmul', ('fmul', ('fsign', a), a), a), ('fmul', ('fabs', a), a)),
   (('~ffma', 0.0, a, b), b),
   (('~ffma', a, b, 0.0), ('fmul', a, b)),
   (('ffma', 1.0, a, b), ('fadd', a, b)),
   (('ffma', -1.0, a, b), ('fadd', ('fneg', a), b)),
   (('~flrp', a, b, 0.0), a),
   (('~flrp', a, b, 1.0), b),
   (('~flrp', a, a, b), a),
   (('~flrp', 0.0, a, b), ('fmul', a, b)),

   # flrp(a, a + b, c) => a + flrp(0, b, c) => a + (b * c)
   (('~flrp', a, ('fadd(is_used_once)', a, b), c), ('fadd', ('fmul', b, c), a)),
   (('~flrp@32', a, ('fadd', a, b), c), ('fadd', ('fmul', b, c), a), 'options->lower_flrp32'),
   (('~flrp@64', a, ('fadd', a, b), c), ('fadd', ('fmul', b, c), a), 'options->lower_flrp64'),

   (('~flrp@32', ('fadd', a, b), ('fadd', a, c), d), ('fadd', ('flrp', b, c, d), a), 'options->lower_flrp32'),
   (('~flrp@64', ('fadd', a, b), ('fadd', a, c), d), ('fadd', ('flrp', b, c, d), a), 'options->lower_flrp64'),

   (('~flrp@32', a, ('fmul(is_used_once)', a, b), c), ('fmul', ('flrp', 1.0, b, c), a), 'options->lower_flrp32'),
   (('~flrp@64', a, ('fmul(is_used_once)', a, b), c), ('fmul', ('flrp', 1.0, b, c), a), 'options->lower_flrp64'),

   (('~flrp', ('fmul(is_used_once)', a, b), ('fmul(is_used_once)', a, c), d), ('fmul', ('flrp', b, c, d), a)),

   (('~flrp', a, b, ('b2f', 'c@1')), ('bcsel', c, b, a), 'options->lower_flrp32'),
   (('~flrp', a, 0.0, c), ('fadd', ('fmul', ('fneg', a), c), a)),
   (('ftrunc', a), ('bcsel', ('flt', a, 0.0), ('fneg', ('ffloor', ('fabs', a))), ('ffloor', ('fabs', a))), 'options->lower_ftrunc'),
   (('ffloor', a), ('fsub', a, ('ffract', a)), 'options->lower_ffloor'),
   (('fadd', a, ('fneg', ('ffract', a))), ('ffloor', a), '!options->lower_ffloor'),
   (('ffract', a), ('fsub', a, ('ffloor', a)), 'options->lower_ffract'),
   (('fceil', a), ('fneg', ('ffloor', ('fneg', a))), 'options->lower_fceil'),
   (('~fadd', ('fmul', a, ('fadd', 1.0, ('fneg', ('b2f', 'c@1')))), ('fmul', b, ('b2f', c))), ('bcsel', c, b, a), 'options->lower_flrp32'),
   (('~fadd@32', ('fmul', a, ('fadd', 1.0, ('fneg', c))), ('fmul', b, c)), ('flrp', a, b, c), '!options->lower_flrp32'),
   (('~fadd@64', ('fmul', a, ('fadd', 1.0, ('fneg', c))), ('fmul', b, c)), ('flrp', a, b, c), '!options->lower_flrp64'),
   # These are the same as the previous three rules, but they depend on
   # 1-fsat(x) <=> fsat(1-x). See below.
   (('~fadd@32', ('fmul', a, ('fsat', ('fadd', 1.0, ('fneg', c)))), ('fmul', b, ('fsat', c))), ('flrp', a, b, ('fsat', c)), '!options->lower_flrp32'),
   (('~fadd@64', ('fmul', a, ('fsat', ('fadd', 1.0, ('fneg', c)))), ('fmul', b, ('fsat', c))), ('flrp', a, b, ('fsat', c)), '!options->lower_flrp64'),

   (('~fadd', a, ('fmul', ('b2f', 'c@1'), ('fadd', b, ('fneg', a)))), ('bcsel', c, b, a), 'options->lower_flrp32'),
   (('~fadd@32', a, ('fmul', c, ('fadd', b, ('fneg', a)))), ('flrp', a, b, c), '!options->lower_flrp32'),
   (('~fadd@64', a, ('fmul', c, ('fadd', b, ('fneg', a)))), ('flrp', a, b, c), '!options->lower_flrp64'),
   (('ffma', a, b, c), ('fadd', ('fmul', a, b), c), 'options->lower_ffma'),
   (('~fadd', ('fmul', a, b), c), ('ffma', a, b, c), 'options->fuse_ffma'),

   (('~fmul', ('fadd', ('iand', ('ineg', ('b2i32', 'a@bool')), ('fmul', b, c)), '#d'), '#e'),
    ('bcsel', a, ('fmul', ('fadd', ('fmul', b, c), d), e), ('fmul', d, e))),

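   # fdph is the homogeneous dot product: dot(vec4(a.xyz, 1.0), b). The first
   # rule open-codes it for backends that lack a native fdph; the second rule
   # fuses the open-coded form back into an fdph for backends that have one.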
   (('fdph', a, b), ('fdot4', ('vec4', 'a.x', 'a.y', 'a.z', 1.0), b), 'options->lower_fdph'),

   (('fdot4', ('vec4', a, b, c, 1.0), d), ('fdph', ('vec3', a, b, c), d), '!options->lower_fdph'),
   (('fdot4', ('vec4', a, 0.0, 0.0, 0.0), b), ('fmul', a, b)),
   (('fdot4', ('vec4', a, b, 0.0, 0.0), c), ('fdot2', ('vec2', a, b), c)),
   (('fdot4', ('vec4', a, b, c, 0.0), d), ('fdot3', ('vec3', a, b, c), d)),

   (('fdot3', ('vec3', a, 0.0, 0.0), b), ('fmul', a, b)),
   (('fdot3', ('vec3', a, b, 0.0), c), ('fdot2', ('vec2', a, b), c)),

   # If x >= 0 and x <= 1: fsat(1 - x) == 1 - fsat(x) trivially
   # If x < 0: 1 - fsat(x) => 1 - 0 => 1 and fsat(1 - x) => fsat(> 1) => 1
   # If x > 1: 1 - fsat(x) => 1 - 1 => 0 and fsat(1 - x) => fsat(< 0) => 0
   (('~fadd', ('fneg(is_used_once)', ('fsat(is_used_once)', 'a(is_not_fmul)')), 1.0), ('fsat', ('fadd', 1.0, ('fneg', a)))),

   # 1 - ((1 - a) * (1 - b))
   # 1 - (1 - a - b + a*b)
   # 1 - 1 + a + b - a*b
   # a + b - a*b
   # a + b*(1 - a)
   # b*(1 - a) + 1*a
   # flrp(b, 1, a)
   (('~fadd@32', 1.0, ('fneg', ('fmul', ('fadd', 1.0, ('fneg', a)), ('fadd', 1.0, ('fneg', b))))),
    ('flrp', b, 1.0, a), '!options->lower_flrp32'),

   # (a * #b + #c) << #d
   # ((a * #b) << #d) + (#c << #d)
   # (a * (#b << #d)) + (#c << #d)
   (('ishl', ('iadd', ('imul', a, '#b'), '#c'), '#d'),
    ('iadd', ('imul', a, ('ishl', b, d)), ('ishl', c, d))),

   # (a * #b) << #c
   # a * (#b << #c)
   (('ishl', ('imul', a, '#b'), '#c'), ('imul', a, ('ishl', b, c))),

   # Comparison simplifications
   (('~inot', ('flt', a, b)), ('fge', a, b)),
   (('~inot', ('fge', a, b)), ('flt', a, b)),
   (('inot', ('feq', a, b)), ('fne', a, b)),
   (('inot', ('fne', a, b)), ('feq', a, b)),
   (('inot', ('ilt', a, b)), ('ige', a, b)),
   (('inot', ('ult', a, b)), ('uge', a, b)),
   (('inot', ('ige', a, b)), ('ilt', a, b)),
   (('inot', ('uge', a, b)), ('ult', a, b)),
   (('inot', ('ieq', a, b)), ('ine', a, b)),
   (('inot', ('ine', a, b)), ('ieq', a, b)),

   (('iand', ('feq', a, b), ('fne', a, b)), False),
   (('iand', ('flt', a, b), ('flt', b, a)), False),
   (('iand', ('ieq', a, b), ('ine', a, b)), False),
   (('iand', ('ilt', a, b), ('ilt', b, a)), False),
   (('iand', ('ult', a, b), ('ult', b, a)), False),

   # This helps some shaders because, after some optimizations, they end up
   # with patterns like (-a < -b) || (b < a). In an ideal world, this sort of
   # matching would be handled by CSE.
   (('flt', ('fneg', a), ('fneg', b)), ('flt', b, a)),
   (('fge', ('fneg', a), ('fneg', b)), ('fge', b, a)),
   (('feq', ('fneg', a), ('fneg', b)), ('feq', b, a)),
   (('fne', ('fneg', a), ('fneg', b)), ('fne', b, a)),
   (('flt', ('fneg', a), -1.0), ('flt', 1.0, a)),
   (('flt', -1.0, ('fneg', a)), ('flt', a, 1.0)),
   (('fge', ('fneg', a), -1.0), ('fge', 1.0, a)),
   (('fge', -1.0, ('fneg', a)), ('fge', a, 1.0)),
   (('fne', ('fneg', a), -1.0), ('fne', 1.0, a)),
   (('feq', -1.0, ('fneg', a)), ('feq', a, 1.0)),

   (('flt', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('flt', a, b)),
   (('flt', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('flt', b, a)),
   (('fge', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fge', a, b)),
   (('fge', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('fge', b, a)),
   (('feq', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('feq', a, b)),
   (('fne', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fne', a, b)),

   (('fge', ('fsat(is_used_once)', a), 1.0), ('fge', a, 1.0)),
   (('flt', ('fsat(is_used_once)', a), 1.0), ('flt', a, 1.0)),
   (('fge', 0.0, ('fsat(is_used_once)', a)), ('fge', 0.0, a)),
   (('flt', 0.0, ('fsat(is_used_once)', a)), ('flt', 0.0, a)),

   # 0.0 >= b2f(a)
   # b2f(a) <= 0.0
   # b2f(a) == 0.0 because b2f(a) can only be 0 or 1
   # inot(a)
   (('fge', 0.0, ('b2f', 'a@1')), ('inot', a)),

   (('fge', ('fneg', ('b2f', 'a@1')), 0.0), ('inot', a)),

   (('fne', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('ior', a, b)),
   (('fne', ('fmax', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('ior', a, b)),
   (('fne', ('bcsel', a, 1.0, ('b2f', 'b@1')), 0.0), ('ior', a, b)),
   (('fne', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), ('ior', a, b)),
   (('fne', ('fmul', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('iand', a, b)),
   (('fne', ('fmin', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('iand', a, b)),
   (('fne', ('bcsel', a, ('b2f', 'b@1'), 0.0), 0.0), ('iand', a, b)),
   (('fne', ('fadd', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), 0.0), ('ixor', a, b)),
   (('fne', ('b2f', 'a@1'), ('b2f', 'b@1')), ('ixor', a, b)),
   (('fne', ('fneg', ('b2f', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('ixor', a, b)),
   (('feq', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('ior', a, b))),
   (('feq', ('fmax', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('ior', a, b))),
   (('feq', ('bcsel', a, 1.0, ('b2f', 'b@1')), 0.0), ('inot', ('ior', a, b))),
   (('feq', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), ('inot', ('ior', a, b))),
   (('feq', ('fmul', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('iand', a, b))),
   (('feq', ('fmin', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('iand', a, b))),
   (('feq', ('bcsel', a, ('b2f', 'b@1'), 0.0), 0.0), ('inot', ('iand', a, b))),
   (('feq', ('fadd', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), 0.0), ('ieq', a, b)),
   (('feq', ('b2f', 'a@1'), ('b2f', 'b@1')), ('ieq', a, b)),
   (('feq', ('fneg', ('b2f', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('ieq', a, b)),

   # -(b2f(a) + b2f(b)) < 0
   # 0 < b2f(a) + b2f(b)
   # 0 != b2f(a) + b2f(b)      b2f must be 0 or 1, so the sum is non-negative
   # a || b
   (('flt', ('fneg', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), 0.0), ('ior', a, b)),
   (('flt', 0.0, ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), ('ior', a, b)),

   # -(b2f(a) + b2f(b)) >= 0
   # 0 >= b2f(a) + b2f(b)
   # 0 == b2f(a) + b2f(b)      b2f must be 0 or 1, so the sum is non-negative
   # !(a || b)
   (('fge', ('fneg', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), 0.0), ('inot', ('ior', a, b))),
   (('fge', 0.0, ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), ('inot', ('ior', a, b))),

   (('flt', a, ('fneg', a)), ('flt', a, 0.0)),
   (('fge', a, ('fneg', a)), ('fge', a, 0.0)),

   # Some optimizations (below) convert things like (a < b || c < b) into
   # (min(a, c) < b). However, this interferes with the previous optimizations
   # that try to remove comparisons with negated sums of b2f. This just
   # breaks that apart.
   (('flt', ('fmin', c, ('fneg', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1')))), 0.0),
    ('ior', ('flt', c, 0.0), ('ior', a, b))),

   (('~flt', ('fadd', a, b), a), ('flt', b, 0.0)),
   (('~fge', ('fadd', a, b), a), ('fge', b, 0.0)),
   (('~feq', ('fadd', a, b), a), ('feq', b, 0.0)),
   (('~fne', ('fadd', a, b), a), ('fne', b, 0.0)),

   # Cannot remove the addition from ilt or ige due to overflow.
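   # With 8-bit values, for example, iadd(100, 100) wraps to -56, making
   # ilt(iadd(a, b), a) true even though ilt(b, 0) is false. ieq and ine are
   # safe because x -> x + b is a bijection on wrapping integers: the sum
   # equals a exactly when b == 0.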
   (('ieq', ('iadd', a, b), a), ('ieq', b, 0)),
   (('ine', ('iadd', a, b), a), ('ine', b, 0)),

   # fmin(-b2f(a), b) >= 0.0
   # -b2f(a) >= 0.0 && b >= 0.0
   # -b2f(a) == 0.0 && b >= 0.0    -b2f can only be 0 or -1, never >0
   # b2f(a) == 0.0 && b >= 0.0
   # a == False && b >= 0.0
   # !a && b >= 0.0
   #
   # The fge in the second replacement is not a typo. I leave the proof that
   # "fmin(-b2f(a), b) >= 0 <=> fmin(-b2f(a), b) == 0" as an exercise for the
   # reader.
   (('fge', ('fmin', ('fneg', ('b2f', 'a@1')), 'b@1'), 0.0), ('iand', ('inot', a), ('fge', b, 0.0))),
   (('feq', ('fmin', ('fneg', ('b2f', 'a@1')), 'b@1'), 0.0), ('iand', ('inot', a), ('fge', b, 0.0))),

   (('feq', ('b2f', 'a@1'), 0.0), ('inot', a)),
   (('fne', ('b2f', 'a@1'), 0.0), a),
   (('ieq', ('b2i', 'a@1'), 0), ('inot', a)),
   (('ine', ('b2i', 'a@1'), 0), a),

   (('fne', ('u2f', a), 0.0), ('ine', a, 0)),
   (('feq', ('u2f', a), 0.0), ('ieq', a, 0)),
   (('fge', ('u2f', a), 0.0), True),
   (('fge', 0.0, ('u2f', a)), ('uge', 0, a)),    # ieq instead?
   (('flt', ('u2f', a), 0.0), False),
   (('flt', 0.0, ('u2f', a)), ('ult', 0, a)),    # ine instead?
   (('fne', ('i2f', a), 0.0), ('ine', a, 0)),
   (('feq', ('i2f', a), 0.0), ('ieq', a, 0)),
   (('fge', ('i2f', a), 0.0), ('ige', a, 0)),
   (('fge', 0.0, ('i2f', a)), ('ige', 0, a)),
   (('flt', ('i2f', a), 0.0), ('ilt', a, 0)),
   (('flt', 0.0, ('i2f', a)), ('ilt', 0, a)),

   # 0.0 < fabs(a)
   # fabs(a) > 0.0
   # fabs(a) != 0.0 because fabs(a) must be >= 0
   # a != 0.0
   (('~flt', 0.0, ('fabs', a)), ('fne', a, 0.0)),

   # -fabs(a) < 0.0
   # fabs(a) > 0.0
   (('~flt', ('fneg', ('fabs', a)), 0.0), ('fne', a, 0.0)),

   # 0.0 >= fabs(a)
   # 0.0 == fabs(a) because fabs(a) must be >= 0
   # 0.0 == a
   (('fge', 0.0, ('fabs', a)), ('feq', a, 0.0)),

   # -fabs(a) >= 0.0
   # 0.0 >= fabs(a)
   (('fge', ('fneg', ('fabs', a)), 0.0), ('feq', a, 0.0)),

   (('fmax', ('b2f(is_used_once)', 'a@1'), ('b2f', 'b@1')), ('b2f', ('ior', a, b))),
   (('fmax', ('fneg(is_used_once)', ('b2f(is_used_once)', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('fneg', ('b2f', ('ior', a, b)))),
   (('fmin', ('b2f(is_used_once)', 'a@1'), ('b2f', 'b@1')), ('b2f', ('iand', a, b))),
   (('fmin', ('fneg(is_used_once)', ('b2f(is_used_once)', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('fneg', ('b2f', ('iand', a, b)))),

   # fmin(b2f(a), b)
   # bcsel(a, fmin(b2f(a), b), fmin(b2f(a), b))
   # bcsel(a, fmin(b2f(True), b), fmin(b2f(False), b))
   # bcsel(a, fmin(1.0, b), fmin(0.0, b))
   #
   # Since b is a constant, constant folding will eliminate the fmin and the
   # fmax. If b is > 1.0, the bcsel will be replaced with a b2f.
   (('fmin', ('b2f', 'a@1'), '#b'), ('bcsel', a, ('fmin', b, 1.0), ('fmin', b, 0.0))),

   (('flt', ('fadd(is_used_once)', a, ('fneg', b)), 0.0), ('flt', a, b)),

   (('fge', ('fneg', ('fabs', a)), 0.0), ('feq', a, 0.0)),
   (('~bcsel', ('flt', b, a), b, a), ('fmin', a, b)),
   (('~bcsel', ('flt', a, b), b, a), ('fmax', a, b)),
   (('~bcsel', ('fge', a, b), b, a), ('fmin', a, b)),
   (('~bcsel', ('fge', b, a), b, a), ('fmax', a, b)),
   (('bcsel', ('i2b', a), b, c), ('bcsel', ('ine', a, 0), b, c)),
   (('bcsel', ('inot', a), b, c), ('bcsel', a, c, b)),
   (('bcsel', a, ('bcsel', a, b, c), d), ('bcsel', a, b, d)),
   (('bcsel', a, b, ('bcsel', a, c, d)), ('bcsel', a, b, d)),
   (('bcsel', a, ('bcsel', b, c, d), ('bcsel(is_used_once)', b, c, 'e')), ('bcsel', b, c, ('bcsel', a, d, 'e'))),
   (('bcsel', a, ('bcsel(is_used_once)', b, c, d), ('bcsel', b, c, 'e')), ('bcsel', b, c, ('bcsel', a, d, 'e'))),
   (('bcsel', a, ('bcsel', b, c, d), ('bcsel(is_used_once)', b, 'e', d)), ('bcsel', b, ('bcsel', a, c, 'e'), d)),
   (('bcsel', a, ('bcsel(is_used_once)', b, c, d), ('bcsel', b, 'e', d)), ('bcsel', b, ('bcsel', a, c, 'e'), d)),
   (('bcsel', a, True, b), ('ior', a, b)),
   (('bcsel', a, a, b), ('ior', a, b)),
   (('bcsel', a, b, False), ('iand', a, b)),
   (('bcsel', a, b, a), ('iand', a, b)),
   (('fmin', a, a), a),
   (('fmax', a, a), a),
   (('imin', a, a), a),
   (('imax', a, a), a),
   (('umin', a, a), a),
   (('umax', a, a), a),
   (('fmax', ('fmax', a, b), b), ('fmax', a, b)),
   (('umax', ('umax', a, b), b), ('umax', a, b)),
   (('imax', ('imax', a, b), b), ('imax', a, b)),
   (('fmin', ('fmin', a, b), b), ('fmin', a, b)),
   (('umin', ('umin', a, b), b), ('umin', a, b)),
   (('imin', ('imin', a, b), b), ('imin', a, b)),
   (('fmax', a, ('fneg', a)), ('fabs', a)),
   (('imax', a, ('ineg', a)), ('iabs', a)),
   (('fmin', a, ('fneg', a)), ('fneg', ('fabs', a))),
   (('imin', a, ('ineg', a)), ('ineg', ('iabs', a))),
   (('fmin', a, ('fneg', ('fabs', a))), ('fneg', ('fabs', a))),
   (('imin', a, ('ineg', ('iabs', a))), ('ineg', ('iabs', a))),
   (('fmin', a, ('fabs', a)), a),
   (('imin', a, ('iabs', a)), a),
   (('fmax', a, ('fneg', ('fabs', a))), a),
   (('imax', a, ('ineg', ('iabs', a))), a),
   (('fmax', a, ('fabs', a)), ('fabs', a)),
   (('imax', a, ('iabs', a)), ('iabs', a)),
   (('fmax', a, ('fneg', a)), ('fabs', a)),
   (('imax', a, ('ineg', a)), ('iabs', a)),
   (('~fmax', ('fabs', a), 0.0), ('fabs', a)),
   (('~fmin', ('fmax', a, 0.0), 1.0), ('fsat', a), '!options->lower_fsat'),
   (('~fmax', ('fmin', a, 1.0), 0.0), ('fsat', a), '!options->lower_fsat'),
   (('~fmin', ('fmax', a, -1.0), 0.0), ('fneg', ('fsat', ('fneg', a))), '!options->lower_negate && !options->lower_fsat'),
   (('~fmax', ('fmin', a, 0.0), -1.0), ('fneg', ('fsat', ('fneg', a))), '!options->lower_negate && !options->lower_fsat'),
   (('fsat', ('fsign', a)), ('b2f', ('flt', 0.0, a))),
   (('fsat', ('b2f', a)), ('b2f', a)),
   (('fsat', a), ('fmin', ('fmax', a, 0.0), 1.0), 'options->lower_fsat'),
   (('fsat', ('fsat', a)), ('fsat', a)),
   (('fsat', ('fneg(is_used_once)', ('fadd(is_used_once)', a, b))), ('fsat', ('fadd', ('fneg', a), ('fneg', b))), '!options->lower_negate && !options->lower_fsat'),
   (('fsat', ('fneg(is_used_once)', ('fmul(is_used_once)', a, b))), ('fsat', ('fmul', ('fneg', a), b)), '!options->lower_negate && !options->lower_fsat'),
   (('fsat', ('fabs(is_used_once)', ('fmul(is_used_once)', a, b))), ('fsat', ('fmul', ('fabs', a), ('fabs', b))), '!options->lower_fsat'),
   (('fmin', ('fmax', ('fmin', ('fmax', a, b), c), b), c), ('fmin', ('fmax', a, b), c)),
   (('imin', ('imax', ('imin', ('imax', a, b), c), b), c), ('imin', ('imax', a, b), c)),
   (('umin', ('umax', ('umin', ('umax', a, b), c), b), c), ('umin', ('umax', a, b), c)),
   (('fmax', ('fsat', a), '#b@32(is_zero_to_one)'), ('fsat', ('fmax', a, b))),
   (('fmin', ('fsat', a), '#b@32(is_zero_to_one)'), ('fsat', ('fmin', a, b))),
   (('extract_u8', ('imin', ('imax', a, 0), 0xff), 0), ('imin', ('imax', a, 0), 0xff)),
   (('~ior', ('flt(is_used_once)', a, b), ('flt', a, c)), ('flt', a, ('fmax', b, c))),
   (('~ior', ('flt(is_used_once)', a, c), ('flt', b, c)), ('flt', ('fmin', a, b), c)),
   (('~ior', ('fge(is_used_once)', a, b), ('fge', a, c)), ('fge', a, ('fmin', b, c))),
   (('~ior', ('fge(is_used_once)', a, c), ('fge', b, c)), ('fge', ('fmax', a, b), c)),
   (('~ior', ('flt', a, '#b'), ('flt', a, '#c')), ('flt', a, ('fmax', b, c))),
   (('~ior', ('flt', '#a', c), ('flt', '#b', c)), ('flt', ('fmin', a, b), c)),
   (('~ior', ('fge', a, '#b'), ('fge', a, '#c')), ('fge', a, ('fmin', b, c))),
   (('~ior', ('fge', '#a', c), ('fge', '#b', c)), ('fge', ('fmax', a, b), c)),
   (('~iand', ('flt(is_used_once)', a, b), ('flt', a, c)), ('flt', a, ('fmin', b, c))),
   (('~iand', ('flt(is_used_once)', a, c), ('flt', b, c)), ('flt', ('fmax', a, b), c)),
   (('~iand', ('fge(is_used_once)', a, b), ('fge', a, c)), ('fge', a, ('fmax', b, c))),
   (('~iand', ('fge(is_used_once)', a, c), ('fge', b, c)), ('fge', ('fmin', a, b), c)),
   (('~iand', ('flt', a, '#b'), ('flt', a, '#c')), ('flt', a, ('fmin', b, c))),
   (('~iand', ('flt', '#a', c), ('flt', '#b', c)), ('flt', ('fmax', a, b), c)),
   (('~iand', ('fge', a, '#b'), ('fge', a, '#c')), ('fge', a, ('fmax', b, c))),
   (('~iand', ('fge', '#a', c), ('fge', '#b', c)), ('fge', ('fmin', a, b), c)),

   (('ior', ('ilt(is_used_once)', a, b), ('ilt', a, c)), ('ilt', a, ('imax', b, c))),
   (('ior', ('ilt(is_used_once)', a, c), ('ilt', b, c)), ('ilt', ('imin', a, b), c)),
   (('ior', ('ige(is_used_once)', a, b), ('ige', a, c)), ('ige', a, ('imin', b, c))),
   (('ior', ('ige(is_used_once)', a, c), ('ige', b, c)), ('ige', ('imax', a, b), c)),
   (('ior', ('ult(is_used_once)', a, b), ('ult', a, c)), ('ult', a, ('umax', b, c))),
   (('ior', ('ult(is_used_once)', a, c), ('ult', b, c)), ('ult', ('umin', a, b), c)),
   (('ior', ('uge(is_used_once)', a, b), ('uge', a, c)), ('uge', a, ('umin', b, c))),
   (('ior', ('uge(is_used_once)', a, c), ('uge', b, c)), ('uge', ('umax', a, b), c)),
   (('iand', ('ilt(is_used_once)', a, b), ('ilt', a, c)), ('ilt', a, ('imin', b, c))),
   (('iand', ('ilt(is_used_once)', a, c), ('ilt', b, c)), ('ilt', ('imax', a, b), c)),
   (('iand', ('ige(is_used_once)', a, b), ('ige', a, c)), ('ige', a, ('imax', b, c))),
   (('iand', ('ige(is_used_once)', a, c), ('ige', b, c)), ('ige', ('imin', a, b), c)),
   (('iand', ('ult(is_used_once)', a, b), ('ult', a, c)), ('ult', a, ('umin', b, c))),
   (('iand', ('ult(is_used_once)', a, c), ('ult', b, c)), ('ult', ('umax', a, b), c)),
   (('iand', ('uge(is_used_once)', a, b), ('uge', a, c)), ('uge', a, ('umax', b, c))),
   (('iand', ('uge(is_used_once)', a, c), ('uge', b, c)), ('uge', ('umin', a, b), c)),

   # Common pattern like 'if (i == 0 || i == 1 || ...)'
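   # uge(1, a) is a <= 1 as an unsigned compare, i.e. a in {0, 1}; negative
   # values reinterpret as huge unsigned numbers, so they are still excluded.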
   (('ior', ('ieq', a, 0), ('ieq', a, 1)), ('uge', 1, a)),
   (('ior', ('uge', 1, a), ('ieq', a, 2)), ('uge', 2, a)),
   (('ior', ('uge', 2, a), ('ieq', a, 3)), ('uge', 3, a)),

   # The (i2f32, ...) part is an open-coded fsign. When that is combined with
   # the bcsel, it's basically copysign(1.0, a). There is no copysign in NIR,
   # so emit an open-coded version of that.
   (('bcsel@32', ('feq', a, 0.0), 1.0, ('i2f32', ('iadd', ('b2i32', ('flt', 0.0, 'a@32')), ('ineg', ('b2i32', ('flt', 'a@32', 0.0)))))),
    ('ior', 0x3f800000, ('iand', a, 0x80000000))),

   (('ior', a, ('ieq', a, False)), True),
   (('ior', a, ('inot', a)), -1),

   (('ine', ('ineg', ('b2i32', 'a@1')), ('ineg', ('b2i32', 'b@1'))), ('ine', a, b)),
   (('b2i32', ('ine', 'a@1', 'b@1')), ('b2i32', ('ixor', a, b))),

   (('iand', ('ieq', 'a@32', 0), ('ieq', 'b@32', 0)), ('ieq', ('ior', 'a@32', 'b@32'), 0)),

   # These patterns can result when (a < b || a < c) => (a < min(b, c))
   # transformations occur before constant propagation and loop-unrolling.
   (('~flt', a, ('fmax', b, a)), ('flt', a, b)),
   (('~flt', ('fmin', a, b), a), ('flt', b, a)),
   (('~fge', a, ('fmin', b, a)), True),
   (('~fge', ('fmax', a, b), a), True),
   (('~flt', a, ('fmin', b, a)), False),
   (('~flt', ('fmax', a, b), a), False),
   (('~fge', a, ('fmax', b, a)), ('fge', a, b)),
   (('~fge', ('fmin', a, b), a), ('fge', b, a)),

   (('ilt', a, ('imax', b, a)), ('ilt', a, b)),
   (('ilt', ('imin', a, b), a), ('ilt', b, a)),
   (('ige', a, ('imin', b, a)), True),
   (('ige', ('imax', a, b), a), True),
   (('ult', a, ('umax', b, a)), ('ult', a, b)),
   (('ult', ('umin', a, b), a), ('ult', b, a)),
   (('uge', a, ('umin', b, a)), True),
   (('uge', ('umax', a, b), a), True),
   (('ilt', a, ('imin', b, a)), False),
   (('ilt', ('imax', a, b), a), False),
   (('ige', a, ('imax', b, a)), ('ige', a, b)),
   (('ige', ('imin', a, b), a), ('ige', b, a)),
   (('ult', a, ('umin', b, a)), False),
   (('ult', ('umax', a, b), a), False),
   (('uge', a, ('umax', b, a)), ('uge', a, b)),
   (('uge', ('umin', a, b), a), ('uge', b, a)),
   (('ult', a, ('iand', b, a)), False),
   (('ult', ('ior', a, b), a), False),
   (('uge', a, ('iand', b, a)), True),
   (('uge', ('ior', a, b), a), True),

   (('ilt', '#a', ('imax', '#b', c)), ('ior', ('ilt', a, b), ('ilt', a, c))),
   (('ilt', ('imin', '#a', b), '#c'), ('ior', ('ilt', a, c), ('ilt', b, c))),
   (('ige', '#a', ('imin', '#b', c)), ('ior', ('ige', a, b), ('ige', a, c))),
   (('ige', ('imax', '#a', b), '#c'), ('ior', ('ige', a, c), ('ige', b, c))),
   (('ult', '#a', ('umax', '#b', c)), ('ior', ('ult', a, b), ('ult', a, c))),
   (('ult', ('umin', '#a', b), '#c'), ('ior', ('ult', a, c), ('ult', b, c))),
   (('uge', '#a', ('umin', '#b', c)), ('ior', ('uge', a, b), ('uge', a, c))),
   (('uge', ('umax', '#a', b), '#c'), ('ior', ('uge', a, c), ('uge', b, c))),
   (('ilt', '#a', ('imin', '#b', c)), ('iand', ('ilt', a, b), ('ilt', a, c))),
   (('ilt', ('imax', '#a', b), '#c'), ('iand', ('ilt', a, c), ('ilt', b, c))),
   (('ige', '#a', ('imax', '#b', c)), ('iand', ('ige', a, b), ('ige', a, c))),
   (('ige', ('imin', '#a', b), '#c'), ('iand', ('ige', a, c), ('ige', b, c))),
   (('ult', '#a', ('umin', '#b', c)), ('iand', ('ult', a, b), ('ult', a, c))),
   (('ult', ('umax', '#a', b), '#c'), ('iand', ('ult', a, c), ('ult', b, c))),
   (('uge', '#a', ('umax', '#b', c)), ('iand', ('uge', a, b), ('uge', a, c))),
   (('uge', ('umin', '#a', b), '#c'), ('iand', ('uge', a, c), ('uge', b, c))),

   # Thanks to sign extension, the ishr(a, b) is negative if and only if a is
   # negative.
   (('bcsel', ('ilt', a, 0), ('ineg', ('ishr', a, b)), ('ishr', a, b)),
    ('iabs', ('ishr', a, b))),
   (('iabs', ('ishr', ('iabs', a), b)), ('ishr', ('iabs', a), b)),

   (('fabs', ('slt', a, b)), ('slt', a, b)),
   (('fabs', ('sge', a, b)), ('sge', a, b)),
   (('fabs', ('seq', a, b)), ('seq', a, b)),
   (('fabs', ('sne', a, b)), ('sne', a, b)),
   (('slt', a, b), ('b2f', ('flt', a, b)), 'options->lower_scmp'),
   (('sge', a, b), ('b2f', ('fge', a, b)), 'options->lower_scmp'),
   (('seq', a, b), ('b2f', ('feq', a, b)), 'options->lower_scmp'),
   (('sne', a, b), ('b2f', ('fne', a, b)), 'options->lower_scmp'),
   (('fne', ('fneg', a), a), ('fne', a, 0.0)),
   (('feq', ('fneg', a), a), ('feq', a, 0.0)),
   # Emulating booleans
   (('imul', ('b2i', 'a@1'), ('b2i', 'b@1')), ('b2i', ('iand', a, b))),
   (('fmul', ('b2f', 'a@1'), ('b2f', 'b@1')), ('b2f', ('iand', a, b))),
   (('fsat', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), ('b2f', ('ior', a, b))),
   (('iand', 'a@bool32', 1.0), ('b2f', a)),
   # True/False are ~0 and 0 in NIR. b2i of True is 1, and -1 is ~0 (True).
   (('ineg', ('b2i32', 'a@32')), a),
   (('flt', ('fneg', ('b2f', 'a@1')), 0), a), # Generated by TGSI KILL_IF.
   (('flt', ('fsub', 0.0, ('b2f', 'a@1')), 0), a), # Generated by TGSI KILL_IF.
   # Comparison with the same args. Note that these are not done for
   # the float versions because NaN always returns false on float
   # inequalities.
   (('ilt', a, a), False),
   (('ige', a, a), True),
   (('ieq', a, a), True),
   (('ine', a, a), False),
   (('ult', a, a), False),
   (('uge', a, a), True),
   # Logical and bit operations
   (('iand', a, a), a),
   (('iand', a, ~0), a),
   (('iand', a, 0), 0),
   (('ior', a, a), a),
   (('ior', a, 0), a),
   (('ior', a, True), True),
   (('ixor', a, a), 0),
   (('ixor', a, 0), a),
   (('inot', ('inot', a)), a),
   (('ior', ('iand', a, b), b), b),
   (('ior', ('ior', a, b), b), ('ior', a, b)),
   (('iand', ('ior', a, b), b), b),
   (('iand', ('iand', a, b), b), ('iand', a, b)),
   # DeMorgan's Laws
   (('iand', ('inot', a), ('inot', b)), ('inot', ('ior', a, b))),
   (('ior', ('inot', a), ('inot', b)), ('inot', ('iand', a, b))),
   # Shift optimizations
   (('ishl', 0, a), 0),
   (('ishl', a, 0), a),
   (('ishr', 0, a), 0),
   (('ishr', a, 0), a),
   (('ushr', 0, a), 0),
   (('ushr', a, 0), a),
   (('iand', 0xff, ('ushr@32', a, 24)), ('ushr', a, 24)),
   (('iand', 0xffff, ('ushr@32', a, 16)), ('ushr', a, 16)),
   (('ior', ('ishl@16', a, b), ('ushr@16', a, ('iadd', 16, ('ineg', b)))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ishl@16', a, b), ('ushr@16', a, ('isub', 16, b))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ishl@32', a, b), ('ushr@32', a, ('iadd', 32, ('ineg', b)))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ishl@32', a, b), ('ushr@32', a, ('isub', 32, b))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@16', a, b), ('ishl@16', a, ('iadd', 16, ('ineg', b)))), ('uror', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@16', a, b), ('ishl@16', a, ('isub', 16, b))), ('uror', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@32', a, b), ('ishl@32', a, ('iadd', 32, ('ineg', b)))), ('uror', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@32', a, b), ('ishl@32', a, ('isub', 32, b))), ('uror', a, b), '!options->lower_rotate'),
   (('urol@16', a, b), ('ior', ('ishl', a, b), ('ushr', a, ('isub', 16, b))), 'options->lower_rotate'),
   (('urol@32', a, b), ('ior', ('ishl', a, b), ('ushr', a, ('isub', 32, b))), 'options->lower_rotate'),
   (('uror@16', a, b), ('ior', ('ushr', a, b), ('ishl', a, ('isub', 16, b))), 'options->lower_rotate'),
   (('uror@32', a, b), ('ior', ('ushr', a, b), ('ishl', a, ('isub', 32, b))), 'options->lower_rotate'),
   # Exponential/logarithmic identities
   (('~fexp2', ('flog2', a)), a), # 2^lg2(a) = a
   (('~flog2', ('fexp2', a)), a), # lg2(2^a) = a
   (('fpow', a, b), ('fexp2', ('fmul', ('flog2', a), b)), 'options->lower_fpow'), # a^b = 2^(lg2(a)*b)
   (('~fexp2', ('fmul', ('flog2', a), b)), ('fpow', a, b), '!options->lower_fpow'), # 2^(lg2(a)*b) = a^b
   (('~fexp2', ('fadd', ('fmul', ('flog2', a), b), ('fmul', ('flog2', c), d))),
    ('~fmul', ('fpow', a, b), ('fpow', c, d)), '!options->lower_fpow'), # 2^(lg2(a)*b + lg2(c)*d) = a^b * c^d
   (('~fexp2', ('fmul', ('flog2', a), 2.0)), ('fmul', a, a)),
   (('~fexp2', ('fmul', ('flog2', a), 4.0)), ('fmul', ('fmul', a, a), ('fmul', a, a))),
   (('~fpow', a, 1.0), a),
   (('~fpow', a, 2.0), ('fmul', a, a)),
   (('~fpow', a, 4.0), ('fmul', ('fmul', a, a), ('fmul', a, a))),
   (('~fpow', 2.0, a), ('fexp2', a)),
   (('~fpow', ('fpow', a, 2.2), 0.454545), a),
   (('~fpow', ('fabs', ('fpow', a, 2.2)), 0.454545), ('fabs', a)),
   (('~fsqrt', ('fexp2', a)), ('fexp2', ('fmul', 0.5, a))),
   (('~frcp', ('fexp2', a)), ('fexp2', ('fneg', a))),
   (('~frsq', ('fexp2', a)), ('fexp2', ('fmul', -0.5, a))),
   (('~flog2', ('fsqrt', a)), ('fmul', 0.5, ('flog2', a))),
   (('~flog2', ('frcp', a)), ('fneg', ('flog2', a))),
   (('~flog2', ('frsq', a)), ('fmul', -0.5, ('flog2', a))),
   (('~flog2', ('fpow', a, b)), ('fmul', b, ('flog2', a))),
   (('~fmul', ('fexp2(is_used_once)', a), ('fexp2(is_used_once)', b)), ('fexp2', ('fadd', a, b))),
   (('bcsel', ('flt', a, 0.0), 0.0, ('fsqrt', a)), ('fsqrt', ('fmax', a, 0.0))),
   # Division and reciprocal
   (('~fdiv', 1.0, a), ('frcp', a)),
   (('fdiv', a, b), ('fmul', a, ('frcp', b)), 'options->lower_fdiv'),
   (('~frcp', ('frcp', a)), a),
   (('~frcp', ('fsqrt', a)), ('frsq', a)),
   (('fsqrt', a), ('frcp', ('frsq', a)), 'options->lower_fsqrt'),
   (('~frcp', ('frsq', a)), ('fsqrt', a), '!options->lower_fsqrt'),
   # Trig
   (('fsin', a), lowered_sincos(0.5), 'options->lower_sincos'),
   (('fcos', a), lowered_sincos(0.75), 'options->lower_sincos'),
   # Boolean simplifications
   (('i2b32(is_used_by_if)', a), ('ine32', a, 0)),
   (('i2b1(is_used_by_if)', a), ('ine', a, 0)),
   (('ieq', a, True), a),
   (('ine(is_not_used_by_if)', a, True), ('inot', a)),
   (('ine', a, False), a),
   (('ieq(is_not_used_by_if)', a, False), ('inot', 'a')),
   (('bcsel', a, True, False), a),
   (('bcsel', a, False, True), ('inot', a)),
   (('bcsel@32', a, 1.0, 0.0), ('b2f', a)),
   (('bcsel@32', a, 0.0, 1.0), ('b2f', ('inot', a))),
   (('bcsel@32', a, -1.0, -0.0), ('fneg', ('b2f', a))),
   (('bcsel@32', a, -0.0, -1.0), ('fneg', ('b2f', ('inot', a)))),
   (('bcsel', True, b, c), b),
   (('bcsel', False, b, c), c),
   (('bcsel', a, ('b2f(is_used_once)', 'b@32'), ('b2f', 'c@32')), ('b2f', ('bcsel', a, b, c))),

   (('bcsel', a, b, b), b),
   (('fcsel', a, b, b), b),

   # D3D Boolean emulation
   (('bcsel', a, -1, 0), ('ineg', ('b2i', 'a@1'))),
   (('bcsel', a, 0, -1), ('ineg', ('b2i', ('inot', a)))),
   (('iand', ('ineg', ('b2i', 'a@1')), ('ineg', ('b2i', 'b@1'))),
    ('ineg', ('b2i', ('iand', a, b)))),
   (('ior', ('ineg', ('b2i', 'a@1')), ('ineg', ('b2i', 'b@1'))),
    ('ineg', ('b2i', ('ior', a, b)))),
   (('ieq', ('ineg', ('b2i', 'a@1')), 0), ('inot', a)),
   (('ieq', ('ineg', ('b2i', 'a@1')), -1), a),
   (('ine', ('ineg', ('b2i', 'a@1')), 0), a),
   (('ine', ('ineg', ('b2i', 'a@1')), -1), ('inot', a)),
   (('iand', ('ineg', ('b2i', a)), 1.0), ('b2f', a)),

   # SM5 32-bit shifts are defined to use the 5 least significant bits
   (('ishl', 'a@32', ('iand', 31, b)), ('ishl', a, b)),
   (('ishr', 'a@32', ('iand', 31, b)), ('ishr', a, b)),
   (('ushr', 'a@32', ('iand', 31, b)), ('ushr', a, b)),

   # Conversions
   (('i2b32', ('b2i', 'a@32')), a),
   (('f2i', ('ftrunc', a)), ('f2i', a)),
   (('f2u', ('ftrunc', a)), ('f2u', a)),
   (('i2b', ('ineg', a)), ('i2b', a)),
   (('i2b', ('iabs', a)), ('i2b', a)),
   (('fabs', ('b2f', a)), ('b2f', a)),
   (('iabs', ('b2i', a)), ('b2i', a)),
   (('inot', ('f2b1', a)), ('feq', a, 0.0)),

   # Ironically, mark these as imprecise because removing the conversions may
   # preserve more precision than doing the conversions (e.g.,
   # uint(float(0x81818181u)) == 0x81818200).
   (('~f2i32', ('i2f', 'a@32')), a),
   (('~f2i32', ('u2f', 'a@32')), a),
   (('~f2u32', ('i2f', 'a@32')), a),
   (('~f2u32', ('u2f', 'a@32')), a),

   # Section 5.4.1 (Conversion and Scalar Constructors) of the GLSL 4.60 spec
   # says:
   #
   #    It is undefined to convert a negative floating-point value to an
   #    uint.
   #
   # Assuming that (uint)some_float behaves like (uint)(int)some_float allows
   # some optimizations in the i965 backend to proceed.
   (('ige', ('f2u', a), b), ('ige', ('f2i', a), b)),
   (('ige', b, ('f2u', a)), ('ige', b, ('f2i', a))),
   (('ilt', ('f2u', a), b), ('ilt', ('f2i', a), b)),
   (('ilt', b, ('f2u', a)), ('ilt', b, ('f2i', a))),

   (('~fmin', ('fabs', a), 1.0), ('fsat', ('fabs', a)), '!options->lower_fsat'),

   # The result of the multiply must be in [-1, 0], so the result of the ffma
   # must be in [0, 1].
   (('flt', ('fadd', ('fmul', ('fsat', a), ('fneg', ('fsat', a))), 1.0), 0.0), False),
   (('flt', ('fadd', ('fneg', ('fmul', ('fsat', a), ('fsat', a))), 1.0), 0.0), False),
   (('fmax', ('fadd', ('fmul', ('fsat', a), ('fneg', ('fsat', a))), 1.0), 0.0), ('fadd', ('fmul', ('fsat', a), ('fneg', ('fsat', a))), 1.0)),
   (('fmax', ('fadd', ('fneg', ('fmul', ('fsat', a), ('fsat', a))), 1.0), 0.0), ('fadd', ('fneg', ('fmul', ('fsat', a), ('fsat', a))), 1.0)),

   # Packing and then unpacking does nothing
   (('unpack_64_2x32_split_x', ('pack_64_2x32_split', a, b)), a),
   (('unpack_64_2x32_split_y', ('pack_64_2x32_split', a, b)), b),
   (('pack_64_2x32_split', ('unpack_64_2x32_split_x', a),
                           ('unpack_64_2x32_split_y', a)), a),

   # Comparing two halves of an unpack separately. While this optimization
   # should be correct for non-constant values, it's less obvious that it's
   # useful in that case. For constant values, the pack will fold and we're
   # guaranteed to reduce the whole tree to one instruction.
   (('iand', ('ieq', ('unpack_32_2x16_split_x', a), '#b'),
             ('ieq', ('unpack_32_2x16_split_y', a), '#c')),
    ('ieq', a, ('pack_32_2x16_split', b, c))),

   # Byte extraction
   (('ushr', 'a@16', 8), ('extract_u8', a, 1), '!options->lower_extract_byte'),
   (('ushr', 'a@32', 24), ('extract_u8', a, 3), '!options->lower_extract_byte'),
   (('ushr', 'a@64', 56), ('extract_u8', a, 7), '!options->lower_extract_byte'),
   (('ishr', 'a@16', 8), ('extract_i8', a, 1), '!options->lower_extract_byte'),
   (('ishr', 'a@32', 24), ('extract_i8', a, 3), '!options->lower_extract_byte'),
   (('ishr', 'a@64', 56), ('extract_i8', a, 7), '!options->lower_extract_byte'),
   (('iand', 0xff, a), ('extract_u8', a, 0), '!options->lower_extract_byte')
]

# After the ('extract_u8', a, 0) pattern, above, triggers, there will be
# patterns like those below.
for op in ('ushr', 'ishr'):
   optimizations.extend([(('extract_u8', (op, 'a@16', 8), 0), ('extract_u8', a, 1))])
   optimizations.extend([(('extract_u8', (op, 'a@32', 8 * i), 0), ('extract_u8', a, i)) for i in range(1, 4)])
   optimizations.extend([(('extract_u8', (op, 'a@64', 8 * i), 0), ('extract_u8', a, i)) for i in range(1, 8)])

optimizations.extend([(('extract_u8', ('extract_u16', a, 1), 0), ('extract_u8', a, 2))])

# After the ('extract_[iu]8', a, 3) patterns, above, trigger, there will be
# patterns like those below.
for op in ('extract_u8', 'extract_i8'):
   optimizations.extend([((op, ('ishl', 'a@16', 8), 1), (op, a, 0))])
   optimizations.extend([((op, ('ishl', 'a@32', 24 - 8 * i), 3), (op, a, i)) for i in range(2, -1, -1)])
   optimizations.extend([((op, ('ishl', 'a@64', 56 - 8 * i), 7), (op, a, i)) for i in range(6, -1, -1)])

optimizations.extend([
   # Word extraction
   (('ushr', ('ishl', 'a@32', 16), 16), ('extract_u16', a, 0), '!options->lower_extract_word'),
   (('ushr', 'a@32', 16), ('extract_u16', a, 1), '!options->lower_extract_word'),
   (('ishr', ('ishl', 'a@32', 16), 16), ('extract_i16', a, 0), '!options->lower_extract_word'),
   (('ishr', 'a@32', 16), ('extract_i16', a, 1), '!options->lower_extract_word'),
   (('iand', 0xffff, a), ('extract_u16', a, 0), '!options->lower_extract_word'),

   # Subtracts
   (('~fsub', a, ('fsub', 0.0, b)), ('fadd', a, b)),
   (('isub', a, ('isub', 0, b)), ('iadd', a, b)),
   (('ussub_4x8', a, 0), a),
   (('ussub_4x8', a, ~0), 0),
   (('fsub', a, b), ('fadd', a, ('fneg', b)), 'options->lower_sub'),
   (('isub', a, b), ('iadd', a, ('ineg', b)), 'options->lower_sub'),
   (('fneg', a), ('fsub', 0.0, a), 'options->lower_negate'),
   (('ineg', a), ('isub', 0, a), 'options->lower_negate'),
   (('~fadd', a, ('fsub', 0.0, b)), ('fsub', a, b)),
   (('iadd', a, ('isub', 0, b)), ('isub', a, b)),
   (('fabs', ('fsub', 0.0, a)), ('fabs', a)),
   (('iabs', ('isub', 0, a)), ('iabs', a)),

   # Propagate negation up multiplication chains
   (('fmul(is_used_by_non_fsat)', ('fneg', a), b), ('fneg', ('fmul', a, b))),
   (('imul', ('ineg', a), b), ('ineg', ('imul', a, b))),

   # Propagate constants up multiplication chains
   (('~fmul(is_used_once)', ('fmul(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('fmul', ('fmul', a, c), b)),
   (('imul(is_used_once)', ('imul(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('imul', ('imul', a, c), b)),
   (('~fadd(is_used_once)', ('fadd(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('fadd', ('fadd', a, c), b)),
   (('iadd(is_used_once)', ('iadd(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('iadd', ('iadd', a, c), b)),

   # Reassociate constants in add/mul chains so they can be folded together.
   # For now, we mostly only handle cases where the constants are separated by
   # a single non-constant. We could do better eventually.
   (('~fmul', '#a', ('fmul', 'b(is_not_const)', '#c')), ('fmul', ('fmul', a, c), b)),
   (('imul', '#a', ('imul', 'b(is_not_const)', '#c')), ('imul', ('imul', a, c), b)),
   (('~fadd', '#a', ('fadd', 'b(is_not_const)', '#c')), ('fadd', ('fadd', a, c), b)),
   (('~fadd', '#a', ('fneg', ('fadd', 'b(is_not_const)', '#c'))), ('fadd', ('fadd', a, ('fneg', c)), ('fneg', b))),
   (('iadd', '#a', ('iadd', 'b(is_not_const)', '#c')), ('iadd', ('iadd', a, c), b)),

   # Drop mul-div by the same value when there's no wrapping.
   (('idiv', ('imul(no_signed_wrap)', a, b), b), a),

   # By definition...
   (('bcsel', ('ige', ('find_lsb', a), 0), ('find_lsb', a), -1), ('find_lsb', a)),
   (('bcsel', ('ige', ('ifind_msb', a), 0), ('ifind_msb', a), -1), ('ifind_msb', a)),
   (('bcsel', ('ige', ('ufind_msb', a), 0), ('ufind_msb', a), -1), ('ufind_msb', a)),

   (('bcsel', ('ine', a, 0), ('find_lsb', a), -1), ('find_lsb', a)),
   (('bcsel', ('ine', a, 0), ('ifind_msb', a), -1), ('ifind_msb', a)),
   (('bcsel', ('ine', a, 0), ('ufind_msb', a), -1), ('ufind_msb', a)),

   (('bcsel', ('ine', a, -1), ('ifind_msb', a), -1), ('ifind_msb', a)),

   # Misc. lowering
   (('fmod@16', a, b), ('fsub', a, ('fmul', b, ('ffloor', ('fdiv', a, b)))), 'options->lower_fmod'),
   (('fmod@32', a, b), ('fsub', a, ('fmul', b, ('ffloor', ('fdiv', a, b)))), 'options->lower_fmod'),
   (('frem', a, b), ('fsub', a, ('fmul', b, ('ftrunc', ('fdiv', a, b)))), 'options->lower_fmod'),
   (('uadd_carry@32', a, b), ('b2i', ('ult', ('iadd', a, b), a)), 'options->lower_uadd_carry'),
   (('usub_borrow@32', a, b), ('b2i', ('ult', a, b)), 'options->lower_usub_borrow'),

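   # bfm(bits, offset) builds a mask of 'bits' ones starting at bit 'offset',
   # and bfi shifts 'insert' into place under that mask and merges it with
   # 'base'. The bits > 31 bcsel handles the full-width case, where the
   # insert simply replaces 'base'; bfm alone is undefined for bits == 32 on
   # some hardware.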
   (('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
    ('bcsel', ('ult', 31, 'bits'), 'insert',
              ('bfi', ('bfm', 'bits', 'offset'), 'insert', 'base')),
    'options->lower_bitfield_insert'),
   (('ihadd', a, b), ('iadd', ('iand', a, b), ('ishr', ('ixor', a, b), 1)), 'options->lower_hadd'),
   (('uhadd', a, b), ('iadd', ('iand', a, b), ('ushr', ('ixor', a, b), 1)), 'options->lower_hadd'),
   (('irhadd', a, b), ('isub', ('ior', a, b), ('ishr', ('ixor', a, b), 1)), 'options->lower_hadd'),
   (('urhadd', a, b), ('isub', ('ior', a, b), ('ushr', ('ixor', a, b), 1)), 'options->lower_hadd'),
   (('uadd_sat', a, b), ('bcsel', ('ult', ('iadd', a, b), a), -1, ('iadd', a, b)), 'options->lower_add_sat'),
   (('usub_sat', a, b), ('bcsel', ('ult', a, b), 0, ('isub', a, b)), 'options->lower_add_sat'),

   # Alternative lowering that doesn't rely on bfi.
   (('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
    ('bcsel', ('ult', 31, 'bits'),
     'insert',
     (('ior',
       ('iand', 'base', ('inot', ('ishl', ('isub', ('ishl', 1, 'bits'), 1), 'offset'))),
       ('iand', ('ishl', 'insert', 'offset'), ('ishl', ('isub', ('ishl', 1, 'bits'), 1), 'offset'))))),
    'options->lower_bitfield_insert_to_shifts'),

   # Alternative lowering that uses bitfield_select.
   (('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
    ('bcsel', ('ult', 31, 'bits'), 'insert',
              ('bitfield_select', ('bfm', 'bits', 'offset'), ('ishl', 'insert', 'offset'), 'base')),
    'options->lower_bitfield_insert_to_bitfield_select'),

   (('ibitfield_extract', 'value', 'offset', 'bits'),
    ('bcsel', ('ult', 31, 'bits'), 'value',
              ('ibfe', 'value', 'offset', 'bits')),
    'options->lower_bitfield_extract'),

   (('ubitfield_extract', 'value', 'offset', 'bits'),
    ('bcsel', ('ult', 31, 'bits'), 'value',
              ('ubfe', 'value', 'offset', 'bits')),
    'options->lower_bitfield_extract'),

   # Note that these opcodes are defined to only use the five least significant bits of 'offset' and 'bits'
   (('ubfe', 'value', 'offset', ('iand', 31, 'bits')), ('ubfe', 'value', 'offset', 'bits')),
   (('ubfe', 'value', ('iand', 31, 'offset'), 'bits'), ('ubfe', 'value', 'offset', 'bits')),
   (('ibfe', 'value', 'offset', ('iand', 31, 'bits')), ('ibfe', 'value', 'offset', 'bits')),
   (('ibfe', 'value', ('iand', 31, 'offset'), 'bits'), ('ibfe', 'value', 'offset', 'bits')),
   (('bfm', 'bits', ('iand', 31, 'offset')), ('bfm', 'bits', 'offset')),
   (('bfm', ('iand', 31, 'bits'), 'offset'), ('bfm', 'bits', 'offset')),

   (('ibitfield_extract', 'value', 'offset', 'bits'),
    ('bcsel', ('ieq', 0, 'bits'),
     0,
     ('ishr',
      ('ishl', 'value', ('isub', ('isub', 32, 'bits'), 'offset')),
      ('isub', 32, 'bits'))),
    'options->lower_bitfield_extract_to_shifts'),

   (('ubitfield_extract', 'value', 'offset', 'bits'),
    ('iand',
     ('ushr', 'value', 'offset'),
     ('bcsel', ('ieq', 'bits', 32),
      0xffffffff,
      ('isub', ('ishl', 1, 'bits'), 1))),
    'options->lower_bitfield_extract_to_shifts'),

   (('ifind_msb', 'value'),
    ('ufind_msb', ('bcsel', ('ilt', 'value', 0), ('inot', 'value'), 'value')),
    'options->lower_ifind_msb'),

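   # iand(value, ineg(value)) isolates the lowest set bit, so the index of
   # the most significant (and only) bit of that result is exactly the index
   # find_lsb would return; both opcodes also agree on returning -1 for zero.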
   (('find_lsb', 'value'),
    ('ufind_msb', ('iand', 'value', ('ineg', 'value'))),
    'options->lower_find_lsb'),

   (('extract_i8', a, 'b@32'),
    ('ishr', ('ishl', a, ('imul', ('isub', 3, b), 8)), 24),
    'options->lower_extract_byte'),

   (('extract_u8', a, 'b@32'),
    ('iand', ('ushr', a, ('imul', b, 8)), 0xff),
    'options->lower_extract_byte'),

   (('extract_i16', a, 'b@32'),
    ('ishr', ('ishl', a, ('imul', ('isub', 1, b), 16)), 16),
    'options->lower_extract_word'),

   (('extract_u16', a, 'b@32'),
    ('iand', ('ushr', a, ('imul', b, 16)), 0xffff),
    'options->lower_extract_word'),

   (('pack_unorm_2x16', 'v'),
    ('pack_uvec2_to_uint',
     ('f2u32', ('fround_even', ('fmul', ('fsat', 'v'), 65535.0)))),
    'options->lower_pack_unorm_2x16'),

   (('pack_unorm_4x8', 'v'),
    ('pack_uvec4_to_uint',
     ('f2u32', ('fround_even', ('fmul', ('fsat', 'v'), 255.0)))),
    'options->lower_pack_unorm_4x8'),

   (('pack_snorm_2x16', 'v'),
    ('pack_uvec2_to_uint',
     ('f2i32', ('fround_even', ('fmul', ('fmin', 1.0, ('fmax', -1.0, 'v')), 32767.0)))),
    'options->lower_pack_snorm_2x16'),

   (('pack_snorm_4x8', 'v'),
    ('pack_uvec4_to_uint',
     ('f2i32', ('fround_even', ('fmul', ('fmin', 1.0, ('fmax', -1.0, 'v')), 127.0)))),
    'options->lower_pack_snorm_4x8'),

   (('unpack_unorm_2x16', 'v'),
    ('fdiv', ('u2f32', ('vec2', ('extract_u16', 'v', 0),
                                ('extract_u16', 'v', 1))),
     65535.0),
    'options->lower_unpack_unorm_2x16'),

   (('unpack_unorm_4x8', 'v'),
    ('fdiv', ('u2f32', ('vec4', ('extract_u8', 'v', 0),
                                ('extract_u8', 'v', 1),
                                ('extract_u8', 'v', 2),
                                ('extract_u8', 'v', 3))),
     255.0),
    'options->lower_unpack_unorm_4x8'),

   (('unpack_snorm_2x16', 'v'),
    ('fmin', 1.0, ('fmax', -1.0, ('fdiv', ('i2f', ('vec2', ('extract_i16', 'v', 0),
                                                           ('extract_i16', 'v', 1))),
                                  32767.0))),
    'options->lower_unpack_snorm_2x16'),

   (('unpack_snorm_4x8', 'v'),
    ('fmin', 1.0, ('fmax', -1.0, ('fdiv', ('i2f', ('vec4', ('extract_i8', 'v', 0),
                                                           ('extract_i8', 'v', 1),
                                                           ('extract_i8', 'v', 2),
                                                           ('extract_i8', 'v', 3))),
                                  127.0))),
    'options->lower_unpack_snorm_4x8'),

   (('isign', a), ('imin', ('imax', a, -1), 1), 'options->lower_isign'),
   (('fsign', a), ('fsub', ('b2f', ('flt', 0.0, a)), ('b2f', ('flt', a, 0.0))), 'options->lower_fsign'),
])

# bit_size dependent lowerings
for bit_size in [8, 16, 32, 64]:
   # convenience constants
   intmax = (1 << (bit_size - 1)) - 1
   intmin = 1 << (bit_size - 1)
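   # Reinterpreted as signed, this is the INT_MIN bit pattern: for
   # bit_size == 8, 1 << 7 == 0x80, which is -128 as a signed byte.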

   optimizations += [
      (('iadd_sat@' + str(bit_size), a, b),
       ('bcsel', ('ige', b, 1), ('bcsel', ('ilt', ('iadd', a, b), a), intmax, ('iadd', a, b)),
                 ('bcsel', ('ilt', a, ('iadd', a, b)), intmin, ('iadd', a, b))), 'options->lower_add_sat'),
      (('isub_sat@' + str(bit_size), a, b),
       ('bcsel', ('ilt', b, 0), ('bcsel', ('ilt', ('isub', a, b), a), intmax, ('isub', a, b)),
                 ('bcsel', ('ilt', a, ('isub', a, b)), intmin, ('isub', a, b))), 'options->lower_add_sat'),
   ]

invert = OrderedDict([('feq', 'fne'), ('fne', 'feq'), ('fge', 'flt'), ('flt', 'fge')])

for left, right in itertools.combinations_with_replacement(invert.keys(), 2):
   optimizations.append((('inot', ('ior(is_used_once)', (left, a, b), (right, c, d))),
                         ('iand', (invert[left], a, b), (invert[right], c, d))))
   optimizations.append((('inot', ('iand(is_used_once)', (left, a, b), (right, c, d))),
                         ('ior', (invert[left], a, b), (invert[right], c, d))))

# Optimize x2bN(b2x(x)) -> x
for size in type_sizes('bool'):
   aN = 'a@' + str(size)
   f2bN = 'f2b' + str(size)
   i2bN = 'i2b' + str(size)
   optimizations.append(((f2bN, ('b2f', aN)), a))
   optimizations.append(((i2bN, ('b2i', aN)), a))

# Optimize x2yN(b2x(x)) -> b2y
for x, y in itertools.product(['f', 'u', 'i'], ['f', 'u', 'i']):
   if x != 'f' and y != 'f' and x != y:
      continue

   b2x = 'b2f' if x == 'f' else 'b2i'
   b2y = 'b2f' if y == 'f' else 'b2i'
   x2yN = '{}2{}'.format(x, y)
   optimizations.append(((x2yN, (b2x, a)), (b2y, a)))

# Optimize away x2xN(a@N)
for t in ['int', 'uint', 'float']:
   for N in type_sizes(t):
      x2xN = '{0}2{0}{1}'.format(t[0], N)
      aN = 'a@{0}'.format(N)
      optimizations.append(((x2xN, aN), a))

# Optimize x2xN(y2yM(a@P)) -> y2yN(a) for integers
# In particular, we can optimize away everything except upcast of downcast and
# upcasts where the type differs from the other cast
for N, M in itertools.product(type_sizes('uint'), type_sizes('uint')):
   if N < M:
      # The outer cast is a down-cast. It doesn't matter what the size of the
      # argument of the inner cast is because we'll never be in the upcast
      # of downcast case. Regardless of types, we'll always end up with y2yN
      # in the end.
      for x, y in itertools.product(['i', 'u'], ['i', 'u']):
         x2xN = '{0}2{0}{1}'.format(x, N)
         y2yM = '{0}2{0}{1}'.format(y, M)
         y2yN = '{0}2{0}{1}'.format(y, N)
         optimizations.append(((x2xN, (y2yM, a)), (y2yN, a)))
   elif N > M:
      # If the outer cast is an up-cast, we have to be more careful about the
      # size of the argument of the inner cast and with types. In this case,
      # the type is always the type of the up-cast which is given by the
      # outer cast.
      for P in type_sizes('uint'):
         # We can't optimize away up-cast of down-cast.
         if M < P:
            continue

1048 # Because we're doing down-cast of down-cast, the types always have
1049 # to match between the two casts
1050 for x in ['i', 'u']:
1051 x2xN = '{0}2{0}{1}'.format(x, N)
1052 x2xM = '{0}2{0}{1}'.format(x, M)
1053 aP = 'a@{0}'.format(P)
1054 optimizations.append(((x2xN, (x2xM, aP)), (x2xN, a)))
1055 else:
1056 # The N == M case is handled by other optimizations
1057 pass
1058
1059 # Optimize comparisons with up-casts
1060 for t in ['int', 'uint', 'float']:
1061 for N, M in itertools.product(type_sizes(t), repeat=2):
1062 if N == 1 or N >= M:
1063 continue
1064
1065 x2xM = '{0}2{0}{1}'.format(t[0], M)
1066 x2xN = '{0}2{0}{1}'.format(t[0], N)
1067 aN = 'a@' + str(N)
1068 bN = 'b@' + str(N)
1069 xeq = 'feq' if t == 'float' else 'ieq'
1070 xne = 'fne' if t == 'float' else 'ine'
1071 xge = '{0}ge'.format(t[0])
1072 xlt = '{0}lt'.format(t[0])
1073
1074 # Up-casts are lossless so for correctly signed comparisons of
1075 # up-casted values we can do the comparison at the largest of the two
1076 # original sizes and drop one or both of the casts. (We have
1077 # optimizations to drop the no-op casts which this may generate.)
1078 for P in type_sizes(t):
1079 if P == 1 or P > N:
1080 continue
1081
1082 bP = 'b@' + str(P)
1083 optimizations += [
1084 ((xeq, (x2xM, aN), (x2xM, bP)), (xeq, a, (x2xN, b))),
1085 ((xne, (x2xM, aN), (x2xM, bP)), (xne, a, (x2xN, b))),
1086 ((xge, (x2xM, aN), (x2xM, bP)), (xge, a, (x2xN, b))),
1087 ((xlt, (x2xM, aN), (x2xM, bP)), (xlt, a, (x2xN, b))),
1088 ((xge, (x2xM, bP), (x2xM, aN)), (xge, (x2xN, b), a)),
1089 ((xlt, (x2xM, bP), (x2xM, aN)), (xlt, (x2xN, b), a)),
1090 ]
1091
1092 # The next bit doesn't work on floats because the range checks would
1093 # get way too complicated.
1094 if t in ['int', 'uint']:
1095 if t == 'int':
1096 xN_min = -(1 << (N - 1))
1097 xN_max = (1 << (N - 1)) - 1
1098 elif t == 'uint':
1099 xN_min = 0
1100 xN_max = (1 << N) - 1
1101 else:
1102 assert False
1103
1104 # If we're up-casting and comparing to a constant, we can unfold
1105 # the comparison into a comparison with the shrunk-down constant
1106 # and a check that the constant fits in the smaller bit size.
1107 optimizations += [
1108 ((xeq, (x2xM, aN), '#b'),
1109 ('iand', (xeq, a, (x2xN, b)), (xeq, (x2xM, (x2xN, b)), b))),
1110 ((xne, (x2xM, aN), '#b'),
1111 ('ior', (xne, a, (x2xN, b)), (xne, (x2xM, (x2xN, b)), b))),
1112 ((xlt, (x2xM, aN), '#b'),
1113 ('iand', (xlt, xN_min, b),
1114 ('ior', (xlt, xN_max, b), (xlt, a, (x2xN, b))))),
1115 ((xlt, '#a', (x2xM, bN)),
1116 ('iand', (xlt, a, xN_max),
1117 ('ior', (xlt, a, xN_min), (xlt, (x2xN, a), b)))),
1118 ((xge, (x2xM, aN), '#b'),
1119 ('iand', (xge, xN_max, b),
1120 ('ior', (xge, xN_min, b), (xge, a, (x2xN, b))))),
1121 ((xge, '#a', (x2xM, bN)),
1122 ('iand', (xge, a, xN_min),
1123 ('ior', (xge, a, xN_max), (xge, (x2xN, a), b)))),
1124 ]
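# As a worked example, for t='uint', N=8, M=32 the xeq rule above becomes
#
#    (('ieq', ('u2u32', 'a@8'), '#b'),
#     ('iand', ('ieq', a, ('u2u8', b)), ('ieq', ('u2u32', ('u2u8', b)), b)))
#
# i.e. "a equals the low 8 bits of b, and b round-trips through 8 bits
# unchanged".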
1125
1126 def fexp2i(exp, bits):
1127 # We assume that exp is already in the right range.
1128 if bits == 16:
1129 return ('i2i16', ('ishl', ('iadd', exp, 15), 10))
1130 elif bits == 32:
1131 return ('ishl', ('iadd', exp, 127), 23)
1132 elif bits == 64:
1133 return ('pack_64_2x32_split', 0, ('ishl', ('iadd', exp, 1023), 20))
1134 else:
1135 assert False
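# For example, fexp2i(4, 32) builds ('ishl', ('iadd', 4, 127), 23), i.e. the
# bit pattern 0x41800000, which is 16.0f (2^4) when reinterpreted as a float.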
1136
1137 def ldexp(f, exp, bits):
1138 # First, we clamp exp to a reasonable range. The maximum possible range
1139 # for a normal exponent is [-126, 127] and, throwing in denormals, you get
1140 # a maximum range of [-149, 127]. This means that we can potentially have
1141 # a swing of +-276. If you start with FLT_MAX, you actually have to do
1142 # ldexp(FLT_MAX, -278) to get it to flush all the way to zero. The GLSL
1143 # spec, on the other hand, only requires that we handle an exponent value
1144 # in the range [-126, 128]. This implementation is *mostly* correct; at
1145 # 32 bits it handles exp in [-252, 254] (with analogous 16- and 64-bit
1146 # clamps), which allows you to create any value (including denorms if the
1147 # hardware supports it) and to adjust the exponent of any normal value freely.
1148 if bits == 16:
1149 exp = ('imin', ('imax', exp, -28), 30)
1150 elif bits == 32:
1151 exp = ('imin', ('imax', exp, -252), 254)
1152 elif bits == 64:
1153 exp = ('imin', ('imax', exp, -2044), 2046)
1154 else:
1155 assert False
1156
1157 # Now we compute two powers of 2, one for exp/2 and one for exp-exp/2.
1158 # (We use ishr, which rounds toward negative infinity instead of zero, but
1159 # any split works since the second exponent is exp-exp/2.)  While the spec
1160 # technically defines ldexp as f * 2.0^exp, simply multiplying once doesn't
1161 # work with denormals and doesn't allow for the full swing in exponents
1162 # that you can get with normalized values. Instead, we create two powers
1163 # of two and multiply by them each in turn. That way the effective range
1164 # of our exponent is doubled.
1165 pow2_1 = fexp2i(('ishr', exp, 1), bits)
1166 pow2_2 = fexp2i(('isub', exp, ('ishr', exp, 1)), bits)
1167 return ('fmul', ('fmul', f, pow2_1), pow2_2)
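# For example, with exp = -5, ('ishr', exp, 1) is -3 (ishr rounds toward
# negative infinity), so the two factors are 2^-3 and 2^(-5 - -3) = 2^-2,
# and 2^-3 * 2^-2 = 2^-5 as required.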
1168
1169 optimizations += [
1170 (('ldexp@16', 'x', 'exp'), ldexp('x', 'exp', 16), 'options->lower_ldexp'),
1171 (('ldexp@32', 'x', 'exp'), ldexp('x', 'exp', 32), 'options->lower_ldexp'),
1172 (('ldexp@64', 'x', 'exp'), ldexp('x', 'exp', 64), 'options->lower_ldexp'),
1173 ]
1174
1175 # Unreal Engine 4 demo applications open-code bitfieldReverse()
1176 def bitfield_reverse(u):
1177 step1 = ('ior', ('ishl', u, 16), ('ushr', u, 16))
1178 step2 = ('ior', ('ishl', ('iand', step1, 0x00ff00ff), 8), ('ushr', ('iand', step1, 0xff00ff00), 8))
1179 step3 = ('ior', ('ishl', ('iand', step2, 0x0f0f0f0f), 4), ('ushr', ('iand', step2, 0xf0f0f0f0), 4))
1180 step4 = ('ior', ('ishl', ('iand', step3, 0x33333333), 2), ('ushr', ('iand', step3, 0xcccccccc), 2))
1181 step5 = ('ior(many-comm-expr)', ('ishl', ('iand', step4, 0x55555555), 1), ('ushr', ('iand', step4, 0xaaaaaaaa), 1))
1182
1183 return step5
1184
1185 optimizations += [(bitfield_reverse('x@32'), ('bitfield_reverse', 'x'))]
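# The same ladder in plain Python, as an illustrative self-check of the
# open-coded sequence above. The helper below is our own sketch, not part of
# the generated pass; it just mirrors the five steps of bitfield_reverse().
def _brev32(u):
    u = ((u << 16) | (u >> 16)) & 0xffffffff                # swap 16-bit halves
    u = ((u & 0x00ff00ff) << 8) | ((u & 0xff00ff00) >> 8)   # swap bytes
    u = ((u & 0x0f0f0f0f) << 4) | ((u & 0xf0f0f0f0) >> 4)   # swap nibbles
    u = ((u & 0x33333333) << 2) | ((u & 0xcccccccc) >> 2)   # swap bit pairs
    u = ((u & 0x55555555) << 1) | ((u & 0xaaaaaaaa) >> 1)   # swap single bits
    return u
assert _brev32(0x00000001) == 0x80000000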
1186
1187 # For any float comparison operation, "cmp", if you have "a == a && a cmp b"
1188 # then the "a == a" is redundant because it's equivalent to "a is not NaN"
1189 # and, if a is a NaN, then the second comparison will fail anyway.
1190 for op in ['flt', 'fge', 'feq']:
1191 optimizations += [
1192 (('iand', ('feq', a, a), (op, a, b)), (op, a, b)),
1193 (('iand', ('feq', a, a), (op, b, a)), (op, b, a)),
1194 ]
1195
1196 # Add optimizations to handle the case where the result of a ternary is
1197 # compared to a constant. This way we can take things like
1198 #
1199 # (a ? 0 : 1) > 0
1200 #
1201 # and turn it into
1202 #
1203 # a ? (0 > 0) : (1 > 0)
1204 #
1205 # which constant folding will eat for lunch. The resulting ternary will
1206 # further get cleaned up by the boolean reductions above and we will be
1207 # left with just the original variable "a".
1208 for op in ['flt', 'fge', 'feq', 'fne',
1209 'ilt', 'ige', 'ieq', 'ine', 'ult', 'uge']:
1210 optimizations += [
1211 ((op, ('bcsel', 'a', '#b', '#c'), '#d'),
1212 ('bcsel', 'a', (op, 'b', 'd'), (op, 'c', 'd'))),
1213 ((op, '#d', ('bcsel', a, '#b', '#c')),
1214 ('bcsel', 'a', (op, 'd', 'b'), (op, 'd', 'c'))),
1215 ]
1216
1217
1218 # Similarly, this converts things like
1219 #
1220 # 1 + mix(0, a - 1, condition)
1221 #
1222 # into
1223 #
1224 # mix(1, (a-1)+1, condition)
1225 #
1226 # Other optimizations will rearrange the constants.
1227 for op in ['fadd', 'fmul', 'iadd', 'imul']:
1228 optimizations += [
1229 ((op, ('bcsel(is_used_once)', a, '#b', c), '#d'), ('bcsel', a, (op, b, d), (op, c, d)))
1230 ]
1231
1232 # For derivatives in compute shaders, GLSL_NV_compute_shader_derivatives
1233 # states:
1234 #
1235 # If neither layout qualifier is specified, derivatives in compute shaders
1236 # return zero, which is consistent with the handling of built-in texture
1237 # functions like texture() in GLSL 4.50 compute shaders.
1238 for op in ['fddx', 'fddx_fine', 'fddx_coarse',
1239 'fddy', 'fddy_fine', 'fddy_coarse']:
1240 optimizations += [
1241 ((op, 'a'), 0.0, 'info->stage == MESA_SHADER_COMPUTE && info->cs.derivative_group == DERIVATIVE_GROUP_NONE')
1242 ]
1243
1244 # Some optimizations for ir3-specific instructions.
1245 optimizations += [
1246 # 'al * bl': If either 'al' or 'bl' is zero, return zero.
1247 (('umul_low', '#a(is_lower_half_zero)', 'b'), 0),
1248 # '(ah * bl) << 16 + c': If either 'ah' or 'bl' is zero, return 'c'.
1249 (('imadsh_mix16', '#a@32(is_upper_half_zero)', 'b@32', 'c@32'), 'c'),
1250 (('imadsh_mix16', 'a@32', '#b@32(is_lower_half_zero)', 'c@32'), 'c'),
1251 ]
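# As a worked example, if a == 0x0000ffff (its upper half, ah, is zero), then
# (ah * bl) << 16 + c == (0 * bl) << 16 + c == c, so imadsh_mix16(a, b, c)
# folds to c.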
1252
1253 # This section contains "late" optimizations that should be run before
1254 # creating ffmas and calling regular optimizations for the final time.
1255 # Optimizations should go here if they help code generation and conflict
1256 # with the regular optimizations.
1257 before_ffma_optimizations = [
1258 # Propagate constants down multiplication chains
1259 (('~fmul(is_used_once)', ('fmul(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('fmul', ('fmul', a, c), b)),
1260 (('imul(is_used_once)', ('imul(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('imul', ('imul', a, c), b)),
1261 (('~fadd(is_used_once)', ('fadd(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('fadd', ('fadd', a, c), b)),
1262 (('iadd(is_used_once)', ('iadd(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('iadd', ('iadd', a, c), b)),
1263
1264 (('~fadd', ('fmul', a, b), ('fmul', a, c)), ('fmul', a, ('fadd', b, c))),
1265 (('iadd', ('imul', a, b), ('imul', a, c)), ('imul', a, ('iadd', b, c))),
1266 (('~fadd', ('fneg', a), a), 0.0),
1267 (('iadd', ('ineg', a), a), 0),
1268 (('iadd', ('ineg', a), ('iadd', a, b)), b),
1269 (('iadd', a, ('iadd', ('ineg', a), b)), b),
1270 (('~fadd', ('fneg', a), ('fadd', a, b)), b),
1271 (('~fadd', a, ('fadd', ('fneg', a), b)), b),
1272
1273 (('~flrp@32', ('fadd(is_used_once)', a, -1.0), ('fadd(is_used_once)', a, 1.0), d), ('fadd', ('flrp', -1.0, 1.0, d), a)),
1274 (('~flrp@32', ('fadd(is_used_once)', a, 1.0), ('fadd(is_used_once)', a, -1.0), d), ('fadd', ('flrp', 1.0, -1.0, d), a)),
1275 (('~flrp@32', ('fadd(is_used_once)', a, '#b'), ('fadd(is_used_once)', a, '#c'), d), ('fadd', ('fmul', d, ('fadd', c, ('fneg', b))), ('fadd', a, b))),
1276 ]
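# Algebra check for the last rule above: flrp(a+b, a+c, d)
#   = (a+b)*(1-d) + (a+c)*d
#   = (a+b) + d*((a+c) - (a+b))
#   = d*(c-b) + (a+b),
# which is exactly the replacement expression.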
1277
1278 # This section contains "late" optimizations that should be run after the
1279 # regular optimizations have finished. Optimizations should go here if
1280 # they help code generation but do not necessarily produce code that is
1281 # more easily optimizable.
1282 late_optimizations = [
1283 # Most of these optimizations aren't quite safe when you get infinity or
1284 # NaN involved, but the first one should be fine.
1285 (('flt', ('fadd', a, b), 0.0), ('flt', a, ('fneg', b))),
1286 (('flt', ('fneg', ('fadd', a, b)), 0.0), ('flt', ('fneg', a), b)),
1287 (('~fge', ('fadd', a, b), 0.0), ('fge', a, ('fneg', b))),
1288 (('~fge', ('fneg', ('fadd', a, b)), 0.0), ('fge', ('fneg', a), b)),
1289 (('~feq', ('fadd', a, b), 0.0), ('feq', a, ('fneg', b))),
1290 (('~fne', ('fadd', a, b), 0.0), ('fne', a, ('fneg', b))),
1291
1292 # nir_lower_to_source_mods will collapse this, but its existence during the
1293 # optimization loop can prevent other optimizations.
1294 (('fneg', ('fneg', a)), a),
1295
1296 # These are duplicated from the main optimizations table. The late
1297 # patterns that rearrange expressions like x - .5 < 0 to x < .5 can create
1298 # new patterns like these. The patterns that compare with zero are removed
1299 # because they are unlikely to be created by anything in
1300 # late_optimizations.
1301 (('flt', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('flt', a, b)),
1302 (('flt', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('flt', b, a)),
1303 (('fge', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fge', a, b)),
1304 (('fge', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('fge', b, a)),
1305 (('feq', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('feq', a, b)),
1306 (('fne', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fne', a, b)),
1307
1308 (('fge', ('fsat(is_used_once)', a), 1.0), ('fge', a, 1.0)),
1309 (('flt', ('fsat(is_used_once)', a), 1.0), ('flt', a, 1.0)),
1310
1311 (('~fge', ('fmin(is_used_once)', ('fadd(is_used_once)', a, b), ('fadd', c, d)), 0.0), ('iand', ('fge', a, ('fneg', b)), ('fge', c, ('fneg', d)))),
1312
1313 (('flt', ('fneg', a), ('fneg', b)), ('flt', b, a)),
1314 (('fge', ('fneg', a), ('fneg', b)), ('fge', b, a)),
1315 (('feq', ('fneg', a), ('fneg', b)), ('feq', b, a)),
1316 (('fne', ('fneg', a), ('fneg', b)), ('fne', b, a)),
1317 (('flt', ('fneg', a), -1.0), ('flt', 1.0, a)),
1318 (('flt', -1.0, ('fneg', a)), ('flt', a, 1.0)),
1319 (('fge', ('fneg', a), -1.0), ('fge', 1.0, a)),
1320 (('fge', -1.0, ('fneg', a)), ('fge', a, 1.0)),
1321 (('fne', ('fneg', a), -1.0), ('fne', 1.0, a)),
1322 (('feq', -1.0, ('fneg', a)), ('feq', a, 1.0)),
1323
1324 (('ior', a, a), a),
1325 (('iand', a, a), a),
1326
1327 (('~fadd', ('fneg(is_used_once)', ('fsat(is_used_once)', 'a(is_not_fmul)')), 1.0), ('fsat', ('fadd', 1.0, ('fneg', a)))),
1328
1329 (('fdot2', a, b), ('fdot_replicated2', a, b), 'options->fdot_replicates'),
1330 (('fdot3', a, b), ('fdot_replicated3', a, b), 'options->fdot_replicates'),
1331 (('fdot4', a, b), ('fdot_replicated4', a, b), 'options->fdot_replicates'),
1332 (('fdph', a, b), ('fdph_replicated', a, b), 'options->fdot_replicates'),
1333
1334 (('~flrp@32', ('fadd(is_used_once)', a, b), ('fadd(is_used_once)', a, c), d), ('fadd', ('flrp', b, c, d), a)),
1335 (('~flrp@64', ('fadd(is_used_once)', a, b), ('fadd(is_used_once)', a, c), d), ('fadd', ('flrp', b, c, d), a)),
1336
1337    (('~fadd@32', 1.0, ('fmul(is_used_once)', c, ('fadd', b, -1.0))), ('fadd', ('fadd', 1.0, ('fneg', c)), ('fmul', b, c)), 'options->lower_flrp32'),
1338    (('~fadd@64', 1.0, ('fmul(is_used_once)', c, ('fadd', b, -1.0))), ('fadd', ('fadd', 1.0, ('fneg', c)), ('fmul', b, c)), 'options->lower_flrp64'),
1339
1340 # A similar operation could apply to any ffma(#a, b, #(-a/2)), but this
1341 # particular operation is common for expanding values stored in a texture
1342 # from [0,1] to [-1,1].
1343 (('~ffma@32', a, 2.0, -1.0), ('flrp', -1.0, 1.0, a ), '!options->lower_flrp32'),
1344 (('~ffma@32', a, -2.0, -1.0), ('flrp', -1.0, 1.0, ('fneg', a)), '!options->lower_flrp32'),
1345 (('~ffma@32', a, -2.0, 1.0), ('flrp', 1.0, -1.0, a ), '!options->lower_flrp32'),
1346 (('~ffma@32', a, 2.0, 1.0), ('flrp', 1.0, -1.0, ('fneg', a)), '!options->lower_flrp32'),
1347 (('~fadd@32', ('fmul(is_used_once)', 2.0, a), -1.0), ('flrp', -1.0, 1.0, a ), '!options->lower_flrp32'),
1348 (('~fadd@32', ('fmul(is_used_once)', -2.0, a), -1.0), ('flrp', -1.0, 1.0, ('fneg', a)), '!options->lower_flrp32'),
1349 (('~fadd@32', ('fmul(is_used_once)', -2.0, a), 1.0), ('flrp', 1.0, -1.0, a ), '!options->lower_flrp32'),
1350 (('~fadd@32', ('fmul(is_used_once)', 2.0, a), 1.0), ('flrp', 1.0, -1.0, ('fneg', a)), '!options->lower_flrp32'),
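# Check: flrp(-1, 1, a) = -1 + a*(1 - (-1)) = 2*a - 1, which maps 0 -> -1 and
# 1 -> 1, matching the ffma/fadd forms above.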
1351
1352 # flrp(a, b, a)
1353 # a*(1-a) + b*a
1354 # a + -a*a + a*b (1)
1355 # a + a*(b - a)
1356 # Option 1: ffma(a, (b-a), a)
1357 #
1358 # Alternately, after (1):
1359 # a*(1+b) + -a*a
1360 # a*((1+b) + -a)
1361 #
1362 # Let b=1
1363 #
1364 # Option 2: ffma(a, 2, -(a*a))
1365 # Option 3: ffma(a, 2, (-a)*a)
1366 # Option 4: ffma(a, -a, 2*a)
1367 # Option 5: a * (2 - a)
1368 #
1369 # There are a lot of other possible combinations.
1370 (('~ffma@32', ('fadd', b, ('fneg', a)), a, a), ('flrp', a, b, a), '!options->lower_flrp32'),
1371 (('~ffma@32', a, 2.0, ('fneg', ('fmul', a, a))), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
1372 (('~ffma@32', a, 2.0, ('fmul', ('fneg', a), a)), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
1373 (('~ffma@32', a, ('fneg', a), ('fmul', 2.0, a)), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
1374 (('~fmul@32', a, ('fadd', 2.0, ('fneg', a))), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
1375
1376 # we do these late so that we don't get in the way of creating ffmas
1377 (('fmin', ('fadd(is_used_once)', '#c', a), ('fadd(is_used_once)', '#c', b)), ('fadd', c, ('fmin', a, b))),
1378 (('fmax', ('fadd(is_used_once)', '#c', a), ('fadd(is_used_once)', '#c', b)), ('fadd', c, ('fmax', a, b))),
1379
1380 (('bcsel', a, 0, ('b2f32', ('inot', 'b@bool'))), ('b2f32', ('inot', ('ior', a, b)))),
1381
1382 # Things that look like DPH in the source shader may get expanded to
1383 # something that looks like dot(v1.xyz, v2.xyz) + v1.w by the time it gets
1384 # to NIR. After FFMA is generated, this can look like:
1385 #
1386 # fadd(ffma(v1.z, v2.z, ffma(v1.y, v2.y, fmul(v1.x, v2.x))), v1.w)
1387 #
1388 # Reassociate the last addition into the first multiplication.
1389 (('~fadd', ('ffma(is_used_once)', a, b, ('ffma', c, d, ('fmul', 'e(is_not_const_and_not_fsign)', 'f(is_not_const_and_not_fsign)'))), 'g(is_not_const)'),
1390 ('ffma', a, b, ('ffma', c, d, ('ffma', e, 'f', 'g'))), '!options->intel_vec4'),
1391 (('~fadd', ('ffma(is_used_once)', a, b, ('fmul', 'e(is_not_const_and_not_fsign)', 'f(is_not_const_and_not_fsign)') ), 'g(is_not_const)'),
1392 ('ffma', a, b, ('ffma', e, 'f', 'g') ), '!options->intel_vec4'),
1393 ]
1394
1395 print(nir_algebraic.AlgebraicPass("nir_opt_algebraic", optimizations).render())
1396 print(nir_algebraic.AlgebraicPass("nir_opt_algebraic_before_ffma",
1397 before_ffma_optimizations).render())
1398 print(nir_algebraic.AlgebraicPass("nir_opt_algebraic_late",
1399 late_optimizations).render())