nir/algebraic: optimize contradictory iand operands
[mesa.git] / src/compiler/nir/nir_opt_algebraic.py
#
# Copyright (C) 2014 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
#    Jason Ekstrand (jason@jlekstrand.net)

from __future__ import print_function

from collections import OrderedDict
import nir_algebraic
from nir_opcodes import type_sizes
import itertools

# Convenience variables
a = 'a'
b = 'b'
c = 'c'
d = 'd'
e = 'e'

# Written in the form (<search>, <replace>) where <search> is an expression
# and <replace> is either an expression or a value.  An expression is
# defined as a tuple of the form ([~]<op>, <src0>, <src1>, <src2>, <src3>)
# where each source is either an expression or a value.  A value can be
# either a numeric constant or a string representing a variable name.
#
# If the opcode in a search expression is prefixed by a '~' character, this
# indicates that the operation is inexact.  Such operations will only get
# applied to SSA values that do not have the exact bit set.  This should be
# used by any optimizations that are not bit-for-bit exact.  It should not,
# however, be used for backend-requested lowering operations as those need to
# happen regardless of precision.
#
# Variable names are specified as "[#]name[@type][(cond)]" where "#" indicates
# that the given variable will only match constants, the type indicates that
# the given variable will only match values from ALU instructions with the
# given output type, and (cond) specifies an additional condition function
# (see nir_search_helpers.h).
#
# For constants, you have to be careful to make sure that it is the right
# type because python is unaware of the source and destination types of the
# opcodes.
#
# All expression types can have a bit-size specified.  For opcodes, this
# looks like "op@32", for variables it is "a@32" or "a@uint32" to specify a
# type and size.  In the search half of the expression this indicates that it
# should only match that particular bit-size.  In the replace half of the
# expression this indicates that the constructed value should have that
# bit-size.
#
# A special condition "many-comm-expr" can be used with expressions to note
# that the expression and its subexpressions have more commutative expressions
# than nir_replace_instr can handle.  If this special condition is needed with
# another condition, the two can be separated by a comma (e.g.,
# "(many-comm-expr,is_used_once)").

optimizations = [

   (('imul', a, '#b@32(is_pos_power_of_two)'), ('ishl', a, ('find_lsb', b)), '!options->lower_bitshift'),
   (('imul', a, '#b@32(is_neg_power_of_two)'), ('ineg', ('ishl', a, ('find_lsb', ('iabs', b)))), '!options->lower_bitshift'),
   (('ishl', a, '#b@32'), ('imul', a, ('ishl', 1, b)), 'options->lower_bitshift'),

   (('unpack_64_2x32_split_x', ('imul_2x32_64(is_used_once)', a, b)), ('imul', a, b)),
   (('unpack_64_2x32_split_x', ('umul_2x32_64(is_used_once)', a, b)), ('imul', a, b)),
   (('imul_2x32_64', a, b), ('pack_64_2x32_split', ('imul', a, b), ('imul_high', a, b)), 'options->lower_mul_2x32_64'),
   (('umul_2x32_64', a, b), ('pack_64_2x32_split', ('imul', a, b), ('umul_high', a, b)), 'options->lower_mul_2x32_64'),
   (('udiv', a, 1), a),
   (('idiv', a, 1), a),
   (('umod', a, 1), 0),
   (('imod', a, 1), 0),
   (('udiv', a, '#b@32(is_pos_power_of_two)'), ('ushr', a, ('find_lsb', b)), '!options->lower_bitshift'),
   (('idiv', a, '#b@32(is_pos_power_of_two)'), ('imul', ('isign', a), ('ushr', ('iabs', a), ('find_lsb', b))), 'options->lower_idiv'),
   (('idiv', a, '#b@32(is_neg_power_of_two)'), ('ineg', ('imul', ('isign', a), ('ushr', ('iabs', a), ('find_lsb', ('iabs', b))))), 'options->lower_idiv'),
   (('umod', a, '#b(is_pos_power_of_two)'), ('iand', a, ('isub', b, 1))),

   (('fneg', ('fneg', a)), a),
   (('ineg', ('ineg', a)), a),
   (('fabs', ('fabs', a)), ('fabs', a)),
   (('fabs', ('fneg', a)), ('fabs', a)),
   (('fabs', ('u2f', a)), ('u2f', a)),
   (('iabs', ('iabs', a)), ('iabs', a)),
   (('iabs', ('ineg', a)), ('iabs', a)),
   (('f2b', ('fneg', a)), ('f2b', a)),
   (('i2b', ('ineg', a)), ('i2b', a)),
   (('~fadd', a, 0.0), a),
   (('iadd', a, 0), a),
   (('usadd_4x8', a, 0), a),
   (('usadd_4x8', a, ~0), ~0),
   (('~fadd', ('fmul', a, b), ('fmul', a, c)), ('fmul', a, ('fadd', b, c))),
   (('iadd', ('imul', a, b), ('imul', a, c)), ('imul', a, ('iadd', b, c))),
   (('~fadd', ('fneg', a), a), 0.0),
   (('iadd', ('ineg', a), a), 0),
   (('iadd', ('ineg', a), ('iadd', a, b)), b),
   (('iadd', a, ('iadd', ('ineg', a), b)), b),
   (('~fadd', ('fneg', a), ('fadd', a, b)), b),
   (('~fadd', a, ('fadd', ('fneg', a), b)), b),
   (('fadd', ('fsat', a), ('fsat', ('fneg', a))), ('fsat', ('fabs', a))),
   (('~fmul', a, 0.0), 0.0),
   (('imul', a, 0), 0),
   (('umul_unorm_4x8', a, 0), 0),
   (('umul_unorm_4x8', a, ~0), a),
   (('fmul', a, 1.0), a),
   (('imul', a, 1), a),
   (('fmul', a, -1.0), ('fneg', a)),
   (('imul', a, -1), ('ineg', a)),
   # If a < 0: fsign(a)*a*a => -1*a*a => -a*a => abs(a)*a
   # If a > 0: fsign(a)*a*a => 1*a*a => a*a => abs(a)*a
   # If a == 0: fsign(a)*a*a => 0*0*0 => abs(0)*0
   (('fmul', ('fsign', a), ('fmul', a, a)), ('fmul', ('fabs', a), a)),
   (('fmul', ('fmul', ('fsign', a), a), a), ('fmul', ('fabs', a), a)),
   (('~ffma', 0.0, a, b), b),
   (('~ffma', a, b, 0.0), ('fmul', a, b)),
   (('ffma', 1.0, a, b), ('fadd', a, b)),
   (('ffma', -1.0, a, b), ('fadd', ('fneg', a), b)),
   (('~flrp', a, b, 0.0), a),
   (('~flrp', a, b, 1.0), b),
   (('~flrp', a, a, b), a),
   (('~flrp', 0.0, a, b), ('fmul', a, b)),

   # flrp(a, a + b, c) => a + flrp(0, b, c) => a + (b * c)
   (('~flrp', a, ('fadd(is_used_once)', a, b), c), ('fadd', ('fmul', b, c), a)),
   (('~flrp@32', a, ('fadd', a, b), c), ('fadd', ('fmul', b, c), a), 'options->lower_flrp32'),
   (('~flrp@64', a, ('fadd', a, b), c), ('fadd', ('fmul', b, c), a), 'options->lower_flrp64'),

   (('~flrp@32', ('fadd', a, b), ('fadd', a, c), d), ('fadd', ('flrp', b, c, d), a), 'options->lower_flrp32'),
   (('~flrp@64', ('fadd', a, b), ('fadd', a, c), d), ('fadd', ('flrp', b, c, d), a), 'options->lower_flrp64'),

   (('~flrp@32', a, ('fmul(is_used_once)', a, b), c), ('fmul', ('flrp', 1.0, b, c), a), 'options->lower_flrp32'),
   (('~flrp@64', a, ('fmul(is_used_once)', a, b), c), ('fmul', ('flrp', 1.0, b, c), a), 'options->lower_flrp64'),

   (('~flrp', ('fmul(is_used_once)', a, b), ('fmul(is_used_once)', a, c), d), ('fmul', ('flrp', b, c, d), a)),

   (('~flrp', a, b, ('b2f', 'c@1')), ('bcsel', c, b, a), 'options->lower_flrp32'),
   (('~flrp', a, 0.0, c), ('fadd', ('fmul', ('fneg', a), c), a)),
   (('ftrunc', a), ('bcsel', ('flt', a, 0.0), ('fneg', ('ffloor', ('fabs', a))), ('ffloor', ('fabs', a))), 'options->lower_ftrunc'),
   (('ffloor', a), ('fsub', a, ('ffract', a)), 'options->lower_ffloor'),
   (('fadd', a, ('fneg', ('ffract', a))), ('ffloor', a), '!options->lower_ffloor'),
   (('ffract', a), ('fsub', a, ('ffloor', a)), 'options->lower_ffract'),
   (('fceil', a), ('fneg', ('ffloor', ('fneg', a))), 'options->lower_fceil'),
   (('~fadd', ('fmul', a, ('fadd', 1.0, ('fneg', ('b2f', 'c@1')))), ('fmul', b, ('b2f', c))), ('bcsel', c, b, a), 'options->lower_flrp32'),
   (('~fadd@32', ('fmul', a, ('fadd', 1.0, ('fneg', c))), ('fmul', b, c)), ('flrp', a, b, c), '!options->lower_flrp32'),
   (('~fadd@64', ('fmul', a, ('fadd', 1.0, ('fneg', c))), ('fmul', b, c)), ('flrp', a, b, c), '!options->lower_flrp64'),
   # These are the same as the previous three rules, but they depend on
   # 1-fsat(x) <=> fsat(1-x).  See below.
   (('~fadd@32', ('fmul', a, ('fsat', ('fadd', 1.0, ('fneg', c)))), ('fmul', b, ('fsat', c))), ('flrp', a, b, ('fsat', c)), '!options->lower_flrp32'),
   (('~fadd@64', ('fmul', a, ('fsat', ('fadd', 1.0, ('fneg', c)))), ('fmul', b, ('fsat', c))), ('flrp', a, b, ('fsat', c)), '!options->lower_flrp64'),

   (('~fadd', a, ('fmul', ('b2f', 'c@1'), ('fadd', b, ('fneg', a)))), ('bcsel', c, b, a), 'options->lower_flrp32'),
   (('~fadd@32', a, ('fmul', c, ('fadd', b, ('fneg', a)))), ('flrp', a, b, c), '!options->lower_flrp32'),
   (('~fadd@64', a, ('fmul', c, ('fadd', b, ('fneg', a)))), ('flrp', a, b, c), '!options->lower_flrp64'),
   (('ffma', a, b, c), ('fadd', ('fmul', a, b), c), 'options->lower_ffma'),
   (('~fadd', ('fmul', a, b), c), ('ffma', a, b, c), 'options->fuse_ffma'),

   (('~fmul', ('fadd', ('iand', ('ineg', ('b2i32', 'a@bool')), ('fmul', b, c)), '#d'), '#e'),
    ('bcsel', a, ('fmul', ('fadd', ('fmul', b, c), d), e), ('fmul', d, e))),

   (('fdot4', ('vec4', a, b, c, 1.0), d), ('fdph', ('vec3', a, b, c), d)),
   (('fdot4', ('vec4', a, 0.0, 0.0, 0.0), b), ('fmul', a, b)),
   (('fdot4', ('vec4', a, b, 0.0, 0.0), c), ('fdot2', ('vec2', a, b), c)),
   (('fdot4', ('vec4', a, b, c, 0.0), d), ('fdot3', ('vec3', a, b, c), d)),

   (('fdot3', ('vec3', a, 0.0, 0.0), b), ('fmul', a, b)),
   (('fdot3', ('vec3', a, b, 0.0), c), ('fdot2', ('vec2', a, b), c)),

   # If x >= 0 and x <= 1: fsat(1 - x) == 1 - fsat(x) trivially
   # If x < 0: 1 - fsat(x) => 1 - 0 => 1 and fsat(1 - x) => fsat(> 1) => 1
   # If x > 1: 1 - fsat(x) => 1 - 1 => 0 and fsat(1 - x) => fsat(< 0) => 0
   (('~fadd', ('fneg(is_used_once)', ('fsat(is_used_once)', 'a(is_not_fmul)')), 1.0), ('fsat', ('fadd', 1.0, ('fneg', a)))),

   # 1 - ((1 - a) * (1 - b))
   # 1 - (1 - a - b + a*b)
   # 1 - 1 + a + b - a*b
   # a + b - a*b
   # a + b*(1 - a)
   # b*(1 - a) + 1*a
   # flrp(b, 1, a)
   (('~fadd@32', 1.0, ('fneg', ('fmul', ('fadd', 1.0, ('fneg', a)), ('fadd', 1.0, ('fneg', b))))),
    ('flrp', b, 1.0, a), '!options->lower_flrp32'),

   # (a * #b + #c) << #d
   # ((a * #b) << #d) + (#c << #d)
   # (a * (#b << #d)) + (#c << #d)
   (('ishl', ('iadd', ('imul', a, '#b'), '#c'), '#d'),
    ('iadd', ('imul', a, ('ishl', b, d)), ('ishl', c, d))),

   # (a * #b) << #c
   # a * (#b << #c)
   (('ishl', ('imul', a, '#b'), '#c'), ('imul', a, ('ishl', b, c))),

   # Comparison simplifications
   (('~inot', ('flt', a, b)), ('fge', a, b)),
   (('~inot', ('fge', a, b)), ('flt', a, b)),
   (('~inot', ('feq', a, b)), ('fne', a, b)),
   (('~inot', ('fne', a, b)), ('feq', a, b)),
   (('inot', ('ilt', a, b)), ('ige', a, b)),
   (('inot', ('ult', a, b)), ('uge', a, b)),
   (('inot', ('ige', a, b)), ('ilt', a, b)),
   (('inot', ('uge', a, b)), ('ult', a, b)),
   (('inot', ('ieq', a, b)), ('ine', a, b)),
   (('inot', ('ine', a, b)), ('ieq', a, b)),

   (('iand', ('feq', a, b), ('fne', a, b)), False),
   (('iand', ('flt', a, b), ('flt', b, a)), False),
   (('iand', ('ieq', a, b), ('ine', a, b)), False),
   (('iand', ('ilt', a, b), ('ilt', b, a)), False),
   (('iand', ('ult', a, b), ('ult', b, a)), False),
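
   # The operand pairs above are contradictory: a value cannot compare both
   # equal and not-equal, and a < b and b < a cannot hold at once, so each
   # 'iand' folds to False.  This also holds when an operand is NaN: both
   # 'flt's are false, and 'feq' is false while 'fne' is true.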

   # This helps some shaders because, after some optimizations, they end up
   # with patterns like (-a < -b) || (b < a).  In an ideal world, this sort of
   # matching would be handled by CSE.
   (('flt', ('fneg', a), ('fneg', b)), ('flt', b, a)),
   (('fge', ('fneg', a), ('fneg', b)), ('fge', b, a)),
   (('feq', ('fneg', a), ('fneg', b)), ('feq', b, a)),
   (('fne', ('fneg', a), ('fneg', b)), ('fne', b, a)),
   (('flt', ('fneg', a), -1.0), ('flt', 1.0, a)),
   (('flt', -1.0, ('fneg', a)), ('flt', a, 1.0)),
   (('fge', ('fneg', a), -1.0), ('fge', 1.0, a)),
   (('fge', -1.0, ('fneg', a)), ('fge', a, 1.0)),
   (('fne', ('fneg', a), -1.0), ('fne', 1.0, a)),
   (('feq', -1.0, ('fneg', a)), ('feq', a, 1.0)),

   (('flt', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('flt', a, b)),
   (('flt', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('flt', b, a)),
   (('fge', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fge', a, b)),
   (('fge', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('fge', b, a)),
   (('feq', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('feq', a, b)),
   (('fne', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fne', a, b)),

   (('fge', ('fsat(is_used_once)', a), 1.0), ('fge', a, 1.0)),
   (('flt', ('fsat(is_used_once)', a), 1.0), ('flt', a, 1.0)),
   (('fge', 0.0, ('fsat(is_used_once)', a)), ('fge', 0.0, a)),
   (('flt', 0.0, ('fsat(is_used_once)', a)), ('flt', 0.0, a)),

   # 0.0 >= b2f(a)
   # b2f(a) <= 0.0
   # b2f(a) == 0.0 because b2f(a) can only be 0 or 1
   # inot(a)
   (('fge', 0.0, ('b2f', 'a@1')), ('inot', a)),

   (('fge', ('fneg', ('b2f', 'a@1')), 0.0), ('inot', a)),

   (('fne', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('ior', a, b)),
   (('fne', ('fmax', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('ior', a, b)),
   (('fne', ('bcsel', a, 1.0, ('b2f', 'b@1')), 0.0), ('ior', a, b)),
   (('fne', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), ('ior', a, b)),
   (('fne', ('fmul', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('iand', a, b)),
   (('fne', ('fmin', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('iand', a, b)),
   (('fne', ('bcsel', a, ('b2f', 'b@1'), 0.0), 0.0), ('iand', a, b)),
   (('fne', ('fadd', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), 0.0), ('ixor', a, b)),
   (('fne', ('b2f', 'a@1'), ('b2f', 'b@1')), ('ixor', a, b)),
   (('fne', ('fneg', ('b2f', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('ixor', a, b)),
   (('feq', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('ior', a, b))),
   (('feq', ('fmax', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('ior', a, b))),
   (('feq', ('bcsel', a, 1.0, ('b2f', 'b@1')), 0.0), ('inot', ('ior', a, b))),
   (('feq', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), ('inot', ('ior', a, b))),
   (('feq', ('fmul', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('iand', a, b))),
   (('feq', ('fmin', ('b2f', 'a@1'), ('b2f', 'b@1')), 0.0), ('inot', ('iand', a, b))),
   (('feq', ('bcsel', a, ('b2f', 'b@1'), 0.0), 0.0), ('inot', ('iand', a, b))),
   (('feq', ('fadd', ('b2f', 'a@1'), ('fneg', ('b2f', 'b@1'))), 0.0), ('ieq', a, b)),
   (('feq', ('b2f', 'a@1'), ('b2f', 'b@1')), ('ieq', a, b)),
   (('feq', ('fneg', ('b2f', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('ieq', a, b)),

   # -(b2f(a) + b2f(b)) < 0
   # 0 < b2f(a) + b2f(b)
   # 0 != b2f(a) + b2f(b)       b2f must be 0 or 1, so the sum is non-negative
   # a || b
   (('flt', ('fneg', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), 0.0), ('ior', a, b)),
   (('flt', 0.0, ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), ('ior', a, b)),

   # -(b2f(a) + b2f(b)) >= 0
   # 0 >= b2f(a) + b2f(b)
   # 0 == b2f(a) + b2f(b)       b2f must be 0 or 1, so the sum is non-negative
   # !(a || b)
   (('fge', ('fneg', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), 0.0), ('inot', ('ior', a, b))),
   (('fge', 0.0, ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), ('inot', ('ior', a, b))),

   (('flt', a, ('fneg', a)), ('flt', a, 0.0)),
   (('fge', a, ('fneg', a)), ('fge', a, 0.0)),

   # Some optimizations (below) convert things like (a < b || c < b) into
   # (min(a, c) < b).  However, this interferes with the previous optimizations
   # that try to remove comparisons with negated sums of b2f.  This just
   # breaks that apart.
   (('flt', ('fmin', c, ('fneg', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1')))), 0.0),
    ('ior', ('flt', c, 0.0), ('ior', a, b))),

   (('~flt', ('fadd', a, b), a), ('flt', b, 0.0)),
   (('~fge', ('fadd', a, b), a), ('fge', b, 0.0)),
   (('~feq', ('fadd', a, b), a), ('feq', b, 0.0)),
   (('~fne', ('fadd', a, b), a), ('fne', b, 0.0)),

   # Cannot remove the addition from ilt or ige due to overflow.
   (('ieq', ('iadd', a, b), a), ('ieq', b, 0)),
   (('ine', ('iadd', a, b), a), ('ine', b, 0)),

   # fmin(-b2f(a), b) >= 0.0
   # -b2f(a) >= 0.0 && b >= 0.0
   # -b2f(a) == 0.0 && b >= 0.0    -b2f can only be 0 or -1, never >0
   # b2f(a) == 0.0 && b >= 0.0
   # a == False && b >= 0.0
   # !a && b >= 0.0
   #
   # The fge in the second replacement is not a typo.  I leave the proof that
   # "fmin(-b2f(a), b) >= 0 <=> fmin(-b2f(a), b) == 0" as an exercise for the
   # reader.
   (('fge', ('fmin', ('fneg', ('b2f', 'a@1')), 'b@1'), 0.0), ('iand', ('inot', a), ('fge', b, 0.0))),
   (('feq', ('fmin', ('fneg', ('b2f', 'a@1')), 'b@1'), 0.0), ('iand', ('inot', a), ('fge', b, 0.0))),

   (('feq', ('b2f', 'a@1'), 0.0), ('inot', a)),
   (('fne', ('b2f', 'a@1'), 0.0), a),
   (('ieq', ('b2i', 'a@1'), 0), ('inot', a)),
   (('ine', ('b2i', 'a@1'), 0), a),

   (('fne', ('u2f', a), 0.0), ('ine', a, 0)),
   (('feq', ('u2f', a), 0.0), ('ieq', a, 0)),
   (('fge', ('u2f', a), 0.0), True),
   (('fge', 0.0, ('u2f', a)), ('uge', 0, a)),    # ieq instead?
   (('flt', ('u2f', a), 0.0), False),
   (('flt', 0.0, ('u2f', a)), ('ult', 0, a)),    # ine instead?
   (('fne', ('i2f', a), 0.0), ('ine', a, 0)),
   (('feq', ('i2f', a), 0.0), ('ieq', a, 0)),
   (('fge', ('i2f', a), 0.0), ('ige', a, 0)),
   (('fge', 0.0, ('i2f', a)), ('ige', 0, a)),
   (('flt', ('i2f', a), 0.0), ('ilt', a, 0)),
   (('flt', 0.0, ('i2f', a)), ('ilt', 0, a)),

   # 0.0 < fabs(a)
   # fabs(a) > 0.0
   # fabs(a) != 0.0 because fabs(a) must be >= 0
   # a != 0.0
   (('~flt', 0.0, ('fabs', a)), ('fne', a, 0.0)),

   # -fabs(a) < 0.0
   # fabs(a) > 0.0
   (('~flt', ('fneg', ('fabs', a)), 0.0), ('fne', a, 0.0)),

   # 0.0 >= fabs(a)
   # 0.0 == fabs(a) because fabs(a) must be >= 0
   # 0.0 == a
   (('fge', 0.0, ('fabs', a)), ('feq', a, 0.0)),

   # -fabs(a) >= 0.0
   # 0.0 >= fabs(a)
   (('fge', ('fneg', ('fabs', a)), 0.0), ('feq', a, 0.0)),

   (('fmax', ('b2f(is_used_once)', 'a@1'), ('b2f', 'b@1')), ('b2f', ('ior', a, b))),
   (('fmax', ('fneg(is_used_once)', ('b2f(is_used_once)', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('fneg', ('b2f', ('ior', a, b)))),
   (('fmin', ('b2f(is_used_once)', 'a@1'), ('b2f', 'b@1')), ('b2f', ('iand', a, b))),
   (('fmin', ('fneg(is_used_once)', ('b2f(is_used_once)', 'a@1')), ('fneg', ('b2f', 'b@1'))), ('fneg', ('b2f', ('iand', a, b)))),

   # fmin(b2f(a), b)
   # bcsel(a, fmin(b2f(a), b), fmin(b2f(a), b))
   # bcsel(a, fmin(b2f(True), b), fmin(b2f(False), b))
   # bcsel(a, fmin(1.0, b), fmin(0.0, b))
   #
   # Since b is a constant, constant folding will eliminate the fmin and the
   # fmax.  If b is > 1.0, the bcsel will be replaced with a b2f.
   (('fmin', ('b2f', 'a@1'), '#b'), ('bcsel', a, ('fmin', b, 1.0), ('fmin', b, 0.0))),

   (('flt', ('fadd(is_used_once)', a, ('fneg', b)), 0.0), ('flt', a, b)),

   (('fge', ('fneg', ('fabs', a)), 0.0), ('feq', a, 0.0)),
   (('~bcsel', ('flt', b, a), b, a), ('fmin', a, b)),
   (('~bcsel', ('flt', a, b), b, a), ('fmax', a, b)),
   (('~bcsel', ('fge', a, b), b, a), ('fmin', a, b)),
   (('~bcsel', ('fge', b, a), b, a), ('fmax', a, b)),
   (('bcsel', ('i2b', a), b, c), ('bcsel', ('ine', a, 0), b, c)),
   (('bcsel', ('inot', a), b, c), ('bcsel', a, c, b)),
   (('bcsel', a, ('bcsel', a, b, c), d), ('bcsel', a, b, d)),
   (('bcsel', a, b, ('bcsel', a, c, d)), ('bcsel', a, b, d)),
   (('bcsel', a, ('bcsel', b, c, d), ('bcsel(is_used_once)', b, c, 'e')), ('bcsel', b, c, ('bcsel', a, d, 'e'))),
   (('bcsel', a, ('bcsel(is_used_once)', b, c, d), ('bcsel', b, c, 'e')), ('bcsel', b, c, ('bcsel', a, d, 'e'))),
   (('bcsel', a, ('bcsel', b, c, d), ('bcsel(is_used_once)', b, 'e', d)), ('bcsel', b, ('bcsel', a, c, 'e'), d)),
   (('bcsel', a, ('bcsel(is_used_once)', b, c, d), ('bcsel', b, 'e', d)), ('bcsel', b, ('bcsel', a, c, 'e'), d)),
   (('bcsel', a, True, b), ('ior', a, b)),
   (('bcsel', a, a, b), ('ior', a, b)),
   (('bcsel', a, b, False), ('iand', a, b)),
   (('bcsel', a, b, a), ('iand', a, b)),
   (('fmin', a, a), a),
   (('fmax', a, a), a),
   (('imin', a, a), a),
   (('imax', a, a), a),
   (('umin', a, a), a),
   (('umax', a, a), a),
   (('fmax', ('fmax', a, b), b), ('fmax', a, b)),
   (('umax', ('umax', a, b), b), ('umax', a, b)),
   (('imax', ('imax', a, b), b), ('imax', a, b)),
   (('fmin', ('fmin', a, b), b), ('fmin', a, b)),
   (('umin', ('umin', a, b), b), ('umin', a, b)),
   (('imin', ('imin', a, b), b), ('imin', a, b)),
   (('fmax', a, ('fneg', a)), ('fabs', a)),
   (('imax', a, ('ineg', a)), ('iabs', a)),
   (('fmin', a, ('fneg', a)), ('fneg', ('fabs', a))),
   (('imin', a, ('ineg', a)), ('ineg', ('iabs', a))),
   (('fmin', a, ('fneg', ('fabs', a))), ('fneg', ('fabs', a))),
   (('imin', a, ('ineg', ('iabs', a))), ('ineg', ('iabs', a))),
   (('fmin', a, ('fabs', a)), a),
   (('imin', a, ('iabs', a)), a),
   (('fmax', a, ('fneg', ('fabs', a))), a),
   (('imax', a, ('ineg', ('iabs', a))), a),
   (('fmax', a, ('fabs', a)), ('fabs', a)),
   (('imax', a, ('iabs', a)), ('iabs', a)),
   (('fmax', a, ('fneg', a)), ('fabs', a)),
   (('imax', a, ('ineg', a)), ('iabs', a)),
   (('~fmax', ('fabs', a), 0.0), ('fabs', a)),
   (('~fmin', ('fmax', a, 0.0), 1.0), ('fsat', a), '!options->lower_fsat'),
   (('~fmax', ('fmin', a, 1.0), 0.0), ('fsat', a), '!options->lower_fsat'),
   (('~fmin', ('fmax', a, -1.0), 0.0), ('fneg', ('fsat', ('fneg', a))), '!options->lower_negate && !options->lower_fsat'),
   (('~fmax', ('fmin', a, 0.0), -1.0), ('fneg', ('fsat', ('fneg', a))), '!options->lower_negate && !options->lower_fsat'),
   (('fsat', ('fsign', a)), ('b2f', ('flt', 0.0, a))),
   (('fsat', ('b2f', a)), ('b2f', a)),
   (('fsat', a), ('fmin', ('fmax', a, 0.0), 1.0), 'options->lower_fsat'),
   (('fsat', ('fsat', a)), ('fsat', a)),
   (('fsat', ('fneg(is_used_once)', ('fadd(is_used_once)', a, b))), ('fsat', ('fadd', ('fneg', a), ('fneg', b))), '!options->lower_negate && !options->lower_fsat'),
   (('fsat', ('fneg(is_used_once)', ('fmul(is_used_once)', a, b))), ('fsat', ('fmul', ('fneg', a), b)), '!options->lower_negate && !options->lower_fsat'),
   (('fsat', ('fabs(is_used_once)', ('fmul(is_used_once)', a, b))), ('fsat', ('fmul', ('fabs', a), ('fabs', b))), '!options->lower_fsat'),
   (('fmin', ('fmax', ('fmin', ('fmax', a, b), c), b), c), ('fmin', ('fmax', a, b), c)),
   (('imin', ('imax', ('imin', ('imax', a, b), c), b), c), ('imin', ('imax', a, b), c)),
   (('umin', ('umax', ('umin', ('umax', a, b), c), b), c), ('umin', ('umax', a, b), c)),
   (('fmax', ('fsat', a), '#b@32(is_zero_to_one)'), ('fsat', ('fmax', a, b))),
   (('fmin', ('fsat', a), '#b@32(is_zero_to_one)'), ('fsat', ('fmin', a, b))),
   (('extract_u8', ('imin', ('imax', a, 0), 0xff), 0), ('imin', ('imax', a, 0), 0xff)),
   (('~ior', ('flt(is_used_once)', a, b), ('flt', a, c)), ('flt', a, ('fmax', b, c))),
   (('~ior', ('flt(is_used_once)', a, c), ('flt', b, c)), ('flt', ('fmin', a, b), c)),
   (('~ior', ('fge(is_used_once)', a, b), ('fge', a, c)), ('fge', a, ('fmin', b, c))),
   (('~ior', ('fge(is_used_once)', a, c), ('fge', b, c)), ('fge', ('fmax', a, b), c)),
   (('~ior', ('flt', a, '#b'), ('flt', a, '#c')), ('flt', a, ('fmax', b, c))),
   (('~ior', ('flt', '#a', c), ('flt', '#b', c)), ('flt', ('fmin', a, b), c)),
   (('~ior', ('fge', a, '#b'), ('fge', a, '#c')), ('fge', a, ('fmin', b, c))),
   (('~ior', ('fge', '#a', c), ('fge', '#b', c)), ('fge', ('fmax', a, b), c)),
   (('~iand', ('flt(is_used_once)', a, b), ('flt', a, c)), ('flt', a, ('fmin', b, c))),
   (('~iand', ('flt(is_used_once)', a, c), ('flt', b, c)), ('flt', ('fmax', a, b), c)),
   (('~iand', ('fge(is_used_once)', a, b), ('fge', a, c)), ('fge', a, ('fmax', b, c))),
   (('~iand', ('fge(is_used_once)', a, c), ('fge', b, c)), ('fge', ('fmin', a, b), c)),
   (('~iand', ('flt', a, '#b'), ('flt', a, '#c')), ('flt', a, ('fmin', b, c))),
   (('~iand', ('flt', '#a', c), ('flt', '#b', c)), ('flt', ('fmax', a, b), c)),
   (('~iand', ('fge', a, '#b'), ('fge', a, '#c')), ('fge', a, ('fmax', b, c))),
   (('~iand', ('fge', '#a', c), ('fge', '#b', c)), ('fge', ('fmin', a, b), c)),

   (('ior', ('ilt(is_used_once)', a, b), ('ilt', a, c)), ('ilt', a, ('imax', b, c))),
   (('ior', ('ilt(is_used_once)', a, c), ('ilt', b, c)), ('ilt', ('imin', a, b), c)),
   (('ior', ('ige(is_used_once)', a, b), ('ige', a, c)), ('ige', a, ('imin', b, c))),
   (('ior', ('ige(is_used_once)', a, c), ('ige', b, c)), ('ige', ('imax', a, b), c)),
   (('ior', ('ult(is_used_once)', a, b), ('ult', a, c)), ('ult', a, ('umax', b, c))),
   (('ior', ('ult(is_used_once)', a, c), ('ult', b, c)), ('ult', ('umin', a, b), c)),
   (('ior', ('uge(is_used_once)', a, b), ('uge', a, c)), ('uge', a, ('umin', b, c))),
   (('ior', ('uge(is_used_once)', a, c), ('uge', b, c)), ('uge', ('umax', a, b), c)),
   (('iand', ('ilt(is_used_once)', a, b), ('ilt', a, c)), ('ilt', a, ('imin', b, c))),
   (('iand', ('ilt(is_used_once)', a, c), ('ilt', b, c)), ('ilt', ('imax', a, b), c)),
   (('iand', ('ige(is_used_once)', a, b), ('ige', a, c)), ('ige', a, ('imax', b, c))),
   (('iand', ('ige(is_used_once)', a, c), ('ige', b, c)), ('ige', ('imin', a, b), c)),
   (('iand', ('ult(is_used_once)', a, b), ('ult', a, c)), ('ult', a, ('umin', b, c))),
   (('iand', ('ult(is_used_once)', a, c), ('ult', b, c)), ('ult', ('umax', a, b), c)),
   (('iand', ('uge(is_used_once)', a, b), ('uge', a, c)), ('uge', a, ('umax', b, c))),
   (('iand', ('uge(is_used_once)', a, c), ('uge', b, c)), ('uge', ('umin', a, b), c)),

   # Common pattern like 'if (i == 0 || i == 1 || ...)'
   (('ior', ('ieq', a, 0), ('ieq', a, 1)), ('uge', 1, a)),
   (('ior', ('uge', 1, a), ('ieq', a, 2)), ('uge', 2, a)),
   (('ior', ('uge', 2, a), ('ieq', a, 3)), ('uge', 3, a)),

   # The (i2f32, ...) part is an open-coded fsign.  When that is combined with
   # the bcsel, it's basically copysign(1.0, a).  There is no copysign in NIR,
   # so emit an open-coded version of that.
   (('bcsel@32', ('feq', a, 0.0), 1.0, ('i2f32', ('iadd', ('b2i32', ('flt', 0.0, 'a@32')), ('ineg', ('b2i32', ('flt', 'a@32', 0.0)))))),
    ('ior', 0x3f800000, ('iand', a, 0x80000000))),
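
   # For example, with a = -2.0 (bits 0xc0000000) the replacement computes
   # 0x3f800000 | (0xc0000000 & 0x80000000) = 0xbf800000, which is the bit
   # pattern of -1.0: the sign of 'a' merged onto the bits of 1.0.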

   (('ior', a, ('ieq', a, False)), True),
   (('ior', a, ('inot', a)), -1),

   (('ine', ('ineg', ('b2i32', 'a@1')), ('ineg', ('b2i32', 'b@1'))), ('ine', a, b)),
   (('b2i32', ('ine', 'a@1', 'b@1')), ('b2i32', ('ixor', a, b))),

   (('iand', ('ieq', 'a@32', 0), ('ieq', 'b@32', 0)), ('ieq', ('ior', 'a@32', 'b@32'), 0)),

   # These patterns can result when (a < b || a < c) => (a < min(b, c))
   # transformations occur before constant propagation and loop-unrolling.
   (('~flt', a, ('fmax', b, a)), ('flt', a, b)),
   (('~flt', ('fmin', a, b), a), ('flt', b, a)),
   (('~fge', a, ('fmin', b, a)), True),
   (('~fge', ('fmax', a, b), a), True),
   (('~flt', a, ('fmin', b, a)), False),
   (('~flt', ('fmax', a, b), a), False),
   (('~fge', a, ('fmax', b, a)), ('fge', a, b)),
   (('~fge', ('fmin', a, b), a), ('fge', b, a)),

   (('ilt', a, ('imax', b, a)), ('ilt', a, b)),
   (('ilt', ('imin', a, b), a), ('ilt', b, a)),
   (('ige', a, ('imin', b, a)), True),
   (('ige', ('imax', a, b), a), True),
   (('ult', a, ('umax', b, a)), ('ult', a, b)),
   (('ult', ('umin', a, b), a), ('ult', b, a)),
   (('uge', a, ('umin', b, a)), True),
   (('uge', ('umax', a, b), a), True),
   (('ilt', a, ('imin', b, a)), False),
   (('ilt', ('imax', a, b), a), False),
   (('ige', a, ('imax', b, a)), ('ige', a, b)),
   (('ige', ('imin', a, b), a), ('ige', b, a)),
   (('ult', a, ('umin', b, a)), False),
   (('ult', ('umax', a, b), a), False),
   (('uge', a, ('umax', b, a)), ('uge', a, b)),
   (('uge', ('umin', a, b), a), ('uge', b, a)),
   (('ult', a, ('iand', b, a)), False),
   (('ult', ('ior', a, b), a), False),
   (('uge', a, ('iand', b, a)), True),
   (('uge', ('ior', a, b), a), True),

   (('ilt', '#a', ('imax', '#b', c)), ('ior', ('ilt', a, b), ('ilt', a, c))),
   (('ilt', ('imin', '#a', b), '#c'), ('ior', ('ilt', a, c), ('ilt', b, c))),
   (('ige', '#a', ('imin', '#b', c)), ('ior', ('ige', a, b), ('ige', a, c))),
   (('ige', ('imax', '#a', b), '#c'), ('ior', ('ige', a, c), ('ige', b, c))),
   (('ult', '#a', ('umax', '#b', c)), ('ior', ('ult', a, b), ('ult', a, c))),
   (('ult', ('umin', '#a', b), '#c'), ('ior', ('ult', a, c), ('ult', b, c))),
   (('uge', '#a', ('umin', '#b', c)), ('ior', ('uge', a, b), ('uge', a, c))),
   (('uge', ('umax', '#a', b), '#c'), ('ior', ('uge', a, c), ('uge', b, c))),
   (('ilt', '#a', ('imin', '#b', c)), ('iand', ('ilt', a, b), ('ilt', a, c))),
   (('ilt', ('imax', '#a', b), '#c'), ('iand', ('ilt', a, c), ('ilt', b, c))),
   (('ige', '#a', ('imax', '#b', c)), ('iand', ('ige', a, b), ('ige', a, c))),
   (('ige', ('imin', '#a', b), '#c'), ('iand', ('ige', a, c), ('ige', b, c))),
   (('ult', '#a', ('umin', '#b', c)), ('iand', ('ult', a, b), ('ult', a, c))),
   (('ult', ('umax', '#a', b), '#c'), ('iand', ('ult', a, c), ('ult', b, c))),
   (('uge', '#a', ('umax', '#b', c)), ('iand', ('uge', a, b), ('uge', a, c))),
   (('uge', ('umin', '#a', b), '#c'), ('iand', ('uge', a, c), ('uge', b, c))),

   # Thanks to sign extension, the ishr(a, b) is negative if and only if a is
   # negative.
   (('bcsel', ('ilt', a, 0), ('ineg', ('ishr', a, b)), ('ishr', a, b)),
    ('iabs', ('ishr', a, b))),
   (('iabs', ('ishr', ('iabs', a), b)), ('ishr', ('iabs', a), b)),

   (('fabs', ('slt', a, b)), ('slt', a, b)),
   (('fabs', ('sge', a, b)), ('sge', a, b)),
   (('fabs', ('seq', a, b)), ('seq', a, b)),
   (('fabs', ('sne', a, b)), ('sne', a, b)),
   (('slt', a, b), ('b2f', ('flt', a, b)), 'options->lower_scmp'),
   (('sge', a, b), ('b2f', ('fge', a, b)), 'options->lower_scmp'),
   (('seq', a, b), ('b2f', ('feq', a, b)), 'options->lower_scmp'),
   (('sne', a, b), ('b2f', ('fne', a, b)), 'options->lower_scmp'),
   (('fne', ('fneg', a), a), ('fne', a, 0.0)),
   (('feq', ('fneg', a), a), ('feq', a, 0.0)),
   # Emulating booleans
   (('imul', ('b2i', 'a@1'), ('b2i', 'b@1')), ('b2i', ('iand', a, b))),
   (('fmul', ('b2f', 'a@1'), ('b2f', 'b@1')), ('b2f', ('iand', a, b))),
   (('fsat', ('fadd', ('b2f', 'a@1'), ('b2f', 'b@1'))), ('b2f', ('ior', a, b))),
   (('iand', 'a@bool32', 1.0), ('b2f', a)),
   # True/False are ~0 and 0 in NIR.  b2i of True is 1, and -1 is ~0 (True).
   (('ineg', ('b2i32', 'a@32')), a),
   (('flt', ('fneg', ('b2f', 'a@1')), 0), a),       # Generated by TGSI KILL_IF.
   (('flt', ('fsub', 0.0, ('b2f', 'a@1')), 0), a),  # Generated by TGSI KILL_IF.
   # Comparison with the same args.  Note that these are not done for
   # the float versions because NaN always returns false on float
   # inequalities.
   (('ilt', a, a), False),
   (('ige', a, a), True),
   (('ieq', a, a), True),
   (('ine', a, a), False),
   (('ult', a, a), False),
   (('uge', a, a), True),
   # Logical and bit operations
   (('iand', a, a), a),
   (('iand', a, ~0), a),
   (('iand', a, 0), 0),
   (('ior', a, a), a),
   (('ior', a, 0), a),
   (('ior', a, True), True),
   (('ixor', a, a), 0),
   (('ixor', a, 0), a),
   (('inot', ('inot', a)), a),
   (('ior', ('iand', a, b), b), b),
   (('ior', ('ior', a, b), b), ('ior', a, b)),
   (('iand', ('ior', a, b), b), b),
   (('iand', ('iand', a, b), b), ('iand', a, b)),
   # DeMorgan's Laws
   (('iand', ('inot', a), ('inot', b)), ('inot', ('ior', a, b))),
   (('ior', ('inot', a), ('inot', b)), ('inot', ('iand', a, b))),
   # Shift optimizations
   (('ishl', 0, a), 0),
   (('ishl', a, 0), a),
   (('ishr', 0, a), 0),
   (('ishr', a, 0), a),
   (('ushr', 0, a), 0),
   (('ushr', a, 0), a),
   (('iand', 0xff, ('ushr@32', a, 24)), ('ushr', a, 24)),
   (('iand', 0xffff, ('ushr@32', a, 16)), ('ushr', a, 16)),
   (('ior', ('ishl@16', a, b), ('ushr@16', a, ('iadd', 16, ('ineg', b)))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ishl@16', a, b), ('ushr@16', a, ('isub', 16, b))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ishl@32', a, b), ('ushr@32', a, ('iadd', 32, ('ineg', b)))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ishl@32', a, b), ('ushr@32', a, ('isub', 32, b))), ('urol', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@16', a, b), ('ishl@16', a, ('iadd', 16, ('ineg', b)))), ('uror', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@16', a, b), ('ishl@16', a, ('isub', 16, b))), ('uror', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@32', a, b), ('ishl@32', a, ('iadd', 32, ('ineg', b)))), ('uror', a, b), '!options->lower_rotate'),
   (('ior', ('ushr@32', a, b), ('ishl@32', a, ('isub', 32, b))), ('uror', a, b), '!options->lower_rotate'),
   (('urol@16', a, b), ('ior', ('ishl', a, b), ('ushr', a, ('isub', 16, b))), 'options->lower_rotate'),
   (('urol@32', a, b), ('ior', ('ishl', a, b), ('ushr', a, ('isub', 32, b))), 'options->lower_rotate'),
   (('uror@16', a, b), ('ior', ('ushr', a, b), ('ishl', a, ('isub', 16, b))), 'options->lower_rotate'),
   (('uror@32', a, b), ('ior', ('ushr', a, b), ('ishl', a, ('isub', 32, b))), 'options->lower_rotate'),
   # Exponential/logarithmic identities
   (('~fexp2', ('flog2', a)), a),  # 2^lg2(a) = a
   (('~flog2', ('fexp2', a)), a),  # lg2(2^a) = a
   (('fpow', a, b), ('fexp2', ('fmul', ('flog2', a), b)), 'options->lower_fpow'),  # a^b = 2^(lg2(a)*b)
   (('~fexp2', ('fmul', ('flog2', a), b)), ('fpow', a, b), '!options->lower_fpow'),  # 2^(lg2(a)*b) = a^b
   (('~fexp2', ('fadd', ('fmul', ('flog2', a), b), ('fmul', ('flog2', c), d))),
    ('~fmul', ('fpow', a, b), ('fpow', c, d)), '!options->lower_fpow'),  # 2^(lg2(a)*b + lg2(c)*d) = a^b * c^d
   (('~fexp2', ('fmul', ('flog2', a), 2.0)), ('fmul', a, a)),
   (('~fexp2', ('fmul', ('flog2', a), 4.0)), ('fmul', ('fmul', a, a), ('fmul', a, a))),
   (('~fpow', a, 1.0), a),
   (('~fpow', a, 2.0), ('fmul', a, a)),
   (('~fpow', a, 4.0), ('fmul', ('fmul', a, a), ('fmul', a, a))),
   (('~fpow', 2.0, a), ('fexp2', a)),
   (('~fpow', ('fpow', a, 2.2), 0.454545), a),
   (('~fpow', ('fabs', ('fpow', a, 2.2)), 0.454545), ('fabs', a)),
   (('~fsqrt', ('fexp2', a)), ('fexp2', ('fmul', 0.5, a))),
   (('~frcp', ('fexp2', a)), ('fexp2', ('fneg', a))),
   (('~frsq', ('fexp2', a)), ('fexp2', ('fmul', -0.5, a))),
   (('~flog2', ('fsqrt', a)), ('fmul', 0.5, ('flog2', a))),
   (('~flog2', ('frcp', a)), ('fneg', ('flog2', a))),
   (('~flog2', ('frsq', a)), ('fmul', -0.5, ('flog2', a))),
   (('~flog2', ('fpow', a, b)), ('fmul', b, ('flog2', a))),
   (('~fmul', ('fexp2(is_used_once)', a), ('fexp2(is_used_once)', b)), ('fexp2', ('fadd', a, b))),
   (('bcsel', ('flt', a, 0.0), 0.0, ('fsqrt', a)), ('fsqrt', ('fmax', a, 0.0))),
   # Division and reciprocal
   (('~fdiv', 1.0, a), ('frcp', a)),
   (('fdiv', a, b), ('fmul', a, ('frcp', b)), 'options->lower_fdiv'),
   (('~frcp', ('frcp', a)), a),
   (('~frcp', ('fsqrt', a)), ('frsq', a)),
   (('fsqrt', a), ('frcp', ('frsq', a)), 'options->lower_fsqrt'),
   (('~frcp', ('frsq', a)), ('fsqrt', a), '!options->lower_fsqrt'),
   # Boolean simplifications
   (('i2b32(is_used_by_if)', a), ('ine32', a, 0)),
   (('i2b1(is_used_by_if)', a), ('ine', a, 0)),
   (('ieq', a, True), a),
   (('ine(is_not_used_by_if)', a, True), ('inot', a)),
   (('ine', a, False), a),
   (('ieq(is_not_used_by_if)', a, False), ('inot', 'a')),
   (('bcsel', a, True, False), a),
   (('bcsel', a, False, True), ('inot', a)),
   (('bcsel@32', a, 1.0, 0.0), ('b2f', a)),
   (('bcsel@32', a, 0.0, 1.0), ('b2f', ('inot', a))),
   (('bcsel@32', a, -1.0, -0.0), ('fneg', ('b2f', a))),
   (('bcsel@32', a, -0.0, -1.0), ('fneg', ('b2f', ('inot', a)))),
   (('bcsel', True, b, c), b),
   (('bcsel', False, b, c), c),
   (('bcsel', a, ('b2f(is_used_once)', 'b@32'), ('b2f', 'c@32')), ('b2f', ('bcsel', a, b, c))),

   (('bcsel', a, b, b), b),
   (('fcsel', a, b, b), b),

   # D3D Boolean emulation
   (('bcsel', a, -1, 0), ('ineg', ('b2i', 'a@1'))),
   (('bcsel', a, 0, -1), ('ineg', ('b2i', ('inot', a)))),
   (('iand', ('ineg', ('b2i', 'a@1')), ('ineg', ('b2i', 'b@1'))),
    ('ineg', ('b2i', ('iand', a, b)))),
   (('ior', ('ineg', ('b2i', 'a@1')), ('ineg', ('b2i', 'b@1'))),
    ('ineg', ('b2i', ('ior', a, b)))),
   (('ieq', ('ineg', ('b2i', 'a@1')), 0), ('inot', a)),
   (('ieq', ('ineg', ('b2i', 'a@1')), -1), a),
   (('ine', ('ineg', ('b2i', 'a@1')), 0), a),
   (('ine', ('ineg', ('b2i', 'a@1')), -1), ('inot', a)),
   (('iand', ('ineg', ('b2i', a)), 1.0), ('b2f', a)),

   # SM5 32-bit shifts are defined to use the 5 least significant bits
   (('ishl', 'a@32', ('iand', 31, b)), ('ishl', a, b)),
   (('ishr', 'a@32', ('iand', 31, b)), ('ishr', a, b)),
   (('ushr', 'a@32', ('iand', 31, b)), ('ushr', a, b)),

   # Conversions
   (('i2b32', ('b2i', 'a@32')), a),
   (('f2i', ('ftrunc', a)), ('f2i', a)),
   (('f2u', ('ftrunc', a)), ('f2u', a)),
   (('i2b', ('ineg', a)), ('i2b', a)),
   (('i2b', ('iabs', a)), ('i2b', a)),
   (('fabs', ('b2f', a)), ('b2f', a)),
   (('iabs', ('b2i', a)), ('b2i', a)),
   (('inot', ('f2b1', a)), ('feq', a, 0.0)),

   # Ironically, mark these as imprecise because removing the conversions may
   # preserve more precision than doing the conversions (e.g.,
   # uint(float(0x81818181u)) == 0x81818200).
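   # (Near 2^31 the spacing between representable float32 values is 256, so
   # 0x81818181 rounds to the nearest multiple of 256, which is 0x81818200.)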
   (('~f2i32', ('i2f', 'a@32')), a),
   (('~f2i32', ('u2f', 'a@32')), a),
   (('~f2u32', ('i2f', 'a@32')), a),
   (('~f2u32', ('u2f', 'a@32')), a),

   # Section 5.4.1 (Conversion and Scalar Constructors) of the GLSL 4.60 spec
   # says:
   #
   #    It is undefined to convert a negative floating-point value to an
   #    uint.
   #
   # Assuming that (uint)some_float behaves like (uint)(int)some_float allows
   # some optimizations in the i965 backend to proceed.
   (('ige', ('f2u', a), b), ('ige', ('f2i', a), b)),
   (('ige', b, ('f2u', a)), ('ige', b, ('f2i', a))),
   (('ilt', ('f2u', a), b), ('ilt', ('f2i', a), b)),
   (('ilt', b, ('f2u', a)), ('ilt', b, ('f2i', a))),

   (('~fmin', ('fabs', a), 1.0), ('fsat', ('fabs', a)), '!options->lower_fsat'),

   # The result of the multiply must be in [-1, 0], so the result of the ffma
   # must be in [0, 1].
   (('flt', ('fadd', ('fmul', ('fsat', a), ('fneg', ('fsat', a))), 1.0), 0.0), False),
   (('flt', ('fadd', ('fneg', ('fmul', ('fsat', a), ('fsat', a))), 1.0), 0.0), False),
   (('fmax', ('fadd', ('fmul', ('fsat', a), ('fneg', ('fsat', a))), 1.0), 0.0), ('fadd', ('fmul', ('fsat', a), ('fneg', ('fsat', a))), 1.0)),
   (('fmax', ('fadd', ('fneg', ('fmul', ('fsat', a), ('fsat', a))), 1.0), 0.0), ('fadd', ('fneg', ('fmul', ('fsat', a), ('fsat', a))), 1.0)),

   # Packing and then unpacking does nothing
   (('unpack_64_2x32_split_x', ('pack_64_2x32_split', a, b)), a),
   (('unpack_64_2x32_split_y', ('pack_64_2x32_split', a, b)), b),
   (('pack_64_2x32_split', ('unpack_64_2x32_split_x', a),
     ('unpack_64_2x32_split_y', a)), a),

   # Comparing two halves of an unpack separately.  While this optimization
   # should be correct for non-constant values, it's less obvious that it's
   # useful in that case.  For constant values, the pack will fold and we're
   # guaranteed to reduce the whole tree to one instruction.
   (('iand', ('ieq', ('unpack_32_2x16_split_x', a), '#b'),
             ('ieq', ('unpack_32_2x16_split_y', a), '#c')),
    ('ieq', a, ('pack_32_2x16_split', b, c))),

   # Byte extraction
   (('ushr', 'a@16',  8), ('extract_u8', a, 1), '!options->lower_extract_byte'),
   (('ushr', 'a@32', 24), ('extract_u8', a, 3), '!options->lower_extract_byte'),
   (('ushr', 'a@64', 56), ('extract_u8', a, 7), '!options->lower_extract_byte'),
   (('ishr', 'a@16',  8), ('extract_i8', a, 1), '!options->lower_extract_byte'),
   (('ishr', 'a@32', 24), ('extract_i8', a, 3), '!options->lower_extract_byte'),
   (('ishr', 'a@64', 56), ('extract_i8', a, 7), '!options->lower_extract_byte'),
   (('iand', 0xff, a), ('extract_u8', a, 0), '!options->lower_extract_byte')
]

# After the ('extract_u8', a, 0) pattern, above, triggers, there will be
# patterns like those below.
for op in ('ushr', 'ishr'):
   optimizations.extend([(('extract_u8', (op, 'a@16', 8), 0), ('extract_u8', a, 1))])
   optimizations.extend([(('extract_u8', (op, 'a@32', 8 * i), 0), ('extract_u8', a, i)) for i in range(1, 4)])
   optimizations.extend([(('extract_u8', (op, 'a@64', 8 * i), 0), ('extract_u8', a, i)) for i in range(1, 8)])

optimizations.extend([(('extract_u8', ('extract_u16', a, 1), 0), ('extract_u8', a, 2))])

# After the ('extract_[iu]8', a, 3) patterns, above, trigger, there will be
# patterns like those below.
for op in ('extract_u8', 'extract_i8'):
   optimizations.extend([((op, ('ishl', 'a@16', 8), 1), (op, a, 0))])
   optimizations.extend([((op, ('ishl', 'a@32', 24 - 8 * i), 3), (op, a, i)) for i in range(2, -1, -1)])
   optimizations.extend([((op, ('ishl', 'a@64', 56 - 8 * i), 7), (op, a, i)) for i in range(6, -1, -1)])

optimizations.extend([
   # Word extraction
   (('ushr', ('ishl', 'a@32', 16), 16), ('extract_u16', a, 0), '!options->lower_extract_word'),
   (('ushr', 'a@32', 16), ('extract_u16', a, 1), '!options->lower_extract_word'),
   (('ishr', ('ishl', 'a@32', 16), 16), ('extract_i16', a, 0), '!options->lower_extract_word'),
   (('ishr', 'a@32', 16), ('extract_i16', a, 1), '!options->lower_extract_word'),
   (('iand', 0xffff, a), ('extract_u16', a, 0), '!options->lower_extract_word'),

   # Subtracts
   (('~fsub', a, ('fsub', 0.0, b)), ('fadd', a, b)),
   (('isub', a, ('isub', 0, b)), ('iadd', a, b)),
   (('ussub_4x8', a, 0), a),
   (('ussub_4x8', a, ~0), 0),
   (('fsub', a, b), ('fadd', a, ('fneg', b)), 'options->lower_sub'),
   (('isub', a, b), ('iadd', a, ('ineg', b)), 'options->lower_sub'),
   (('fneg', a), ('fsub', 0.0, a), 'options->lower_negate'),
   (('ineg', a), ('isub', 0, a), 'options->lower_negate'),
   (('~fadd', a, ('fsub', 0.0, b)), ('fsub', a, b)),
   (('iadd', a, ('isub', 0, b)), ('isub', a, b)),
   (('fabs', ('fsub', 0.0, a)), ('fabs', a)),
   (('iabs', ('isub', 0, a)), ('iabs', a)),

   # Propagate negation up multiplication chains
   (('fmul(is_used_by_non_fsat)', ('fneg', a), b), ('fneg', ('fmul', a, b))),
   (('imul', ('ineg', a), b), ('ineg', ('imul', a, b))),

   # Propagate constants up multiplication chains
   (('~fmul(is_used_once)', ('fmul(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('fmul', ('fmul', a, c), b)),
   (('imul(is_used_once)', ('imul(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('imul', ('imul', a, c), b)),
   (('~fadd(is_used_once)', ('fadd(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('fadd', ('fadd', a, c), b)),
   (('iadd(is_used_once)', ('iadd(is_used_once)', 'a(is_not_const)', 'b(is_not_const)'), '#c'), ('iadd', ('iadd', a, c), b)),

   # Reassociate constants in add/mul chains so they can be folded together.
   # For now, we mostly only handle cases where the constants are separated by
   # a single non-constant.  We could do better eventually.
   (('~fmul', '#a', ('fmul', 'b(is_not_const)', '#c')), ('fmul', ('fmul', a, c), b)),
   (('imul', '#a', ('imul', 'b(is_not_const)', '#c')), ('imul', ('imul', a, c), b)),
   (('~fadd', '#a', ('fadd', 'b(is_not_const)', '#c')), ('fadd', ('fadd', a, c), b)),
   (('~fadd', '#a', ('fneg', ('fadd', 'b(is_not_const)', '#c'))), ('fadd', ('fadd', a, ('fneg', c)), ('fneg', b))),
   (('iadd', '#a', ('iadd', 'b(is_not_const)', '#c')), ('iadd', ('iadd', a, c), b)),

   # Drop mul-div by the same value when there's no wrapping.
   (('idiv', ('imul(no_signed_wrap)', a, b), b), a),

   # By definition...
   (('bcsel', ('ige', ('find_lsb', a), 0), ('find_lsb', a), -1), ('find_lsb', a)),
   (('bcsel', ('ige', ('ifind_msb', a), 0), ('ifind_msb', a), -1), ('ifind_msb', a)),
   (('bcsel', ('ige', ('ufind_msb', a), 0), ('ufind_msb', a), -1), ('ufind_msb', a)),

   (('bcsel', ('ine', a, 0), ('find_lsb', a), -1), ('find_lsb', a)),
   (('bcsel', ('ine', a, 0), ('ifind_msb', a), -1), ('ifind_msb', a)),
   (('bcsel', ('ine', a, 0), ('ufind_msb', a), -1), ('ufind_msb', a)),

   (('bcsel', ('ine', a, -1), ('ifind_msb', a), -1), ('ifind_msb', a)),

   # Misc. lowering
   (('fmod@16', a, b), ('fsub', a, ('fmul', b, ('ffloor', ('fdiv', a, b)))), 'options->lower_fmod'),
   (('fmod@32', a, b), ('fsub', a, ('fmul', b, ('ffloor', ('fdiv', a, b)))), 'options->lower_fmod'),
   (('frem', a, b), ('fsub', a, ('fmul', b, ('ftrunc', ('fdiv', a, b)))), 'options->lower_fmod'),
   (('uadd_carry@32', a, b), ('b2i', ('ult', ('iadd', a, b), a)), 'options->lower_uadd_carry'),
   (('usub_borrow@32', a, b), ('b2i', ('ult', a, b)), 'options->lower_usub_borrow'),

   (('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
    ('bcsel', ('ult', 31, 'bits'), 'insert',
              ('bfi', ('bfm', 'bits', 'offset'), 'insert', 'base')),
    'options->lower_bitfield_insert'),
   (('ihadd', a, b), ('iadd', ('iand', a, b), ('ishr', ('ixor', a, b), 1)), 'options->lower_hadd'),
   (('uhadd', a, b), ('iadd', ('iand', a, b), ('ushr', ('ixor', a, b), 1)), 'options->lower_hadd'),
   (('irhadd', a, b), ('isub', ('ior', a, b), ('ishr', ('ixor', a, b), 1)), 'options->lower_hadd'),
   (('urhadd', a, b), ('isub', ('ior', a, b), ('ushr', ('ixor', a, b), 1)), 'options->lower_hadd'),
   (('uadd_sat', a, b), ('bcsel', ('ult', ('iadd', a, b), a), -1, ('iadd', a, b)), 'options->lower_add_sat'),
   (('usub_sat', a, b), ('bcsel', ('ult', a, b), 0, ('isub', a, b)), 'options->lower_add_sat'),

   # Alternative lowering that doesn't rely on bfi.
   (('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
    ('bcsel', ('ult', 31, 'bits'),
     'insert',
     (('ior',
       ('iand', 'base', ('inot', ('ishl', ('isub', ('ishl', 1, 'bits'), 1), 'offset'))),
       ('iand', ('ishl', 'insert', 'offset'), ('ishl', ('isub', ('ishl', 1, 'bits'), 1), 'offset'))))),
    'options->lower_bitfield_insert_to_shifts'),

   # Alternative lowering that uses bitfield_select.
   (('bitfield_insert', 'base', 'insert', 'offset', 'bits'),
    ('bcsel', ('ult', 31, 'bits'), 'insert',
              ('bitfield_select', ('bfm', 'bits', 'offset'), ('ishl', 'insert', 'offset'), 'base')),
    'options->lower_bitfield_insert_to_bitfield_select'),

   (('ibitfield_extract', 'value', 'offset', 'bits'),
    ('bcsel', ('ult', 31, 'bits'), 'value',
              ('ibfe', 'value', 'offset', 'bits')),
    'options->lower_bitfield_extract'),

   (('ubitfield_extract', 'value', 'offset', 'bits'),
    ('bcsel', ('ult', 31, 'bits'), 'value',
              ('ubfe', 'value', 'offset', 'bits')),
    'options->lower_bitfield_extract'),

   # Note that these opcodes are defined to only use the five least
   # significant bits of 'offset' and 'bits'
   (('ubfe', 'value', 'offset', ('iand', 31, 'bits')), ('ubfe', 'value', 'offset', 'bits')),
   (('ubfe', 'value', ('iand', 31, 'offset'), 'bits'), ('ubfe', 'value', 'offset', 'bits')),
   (('ibfe', 'value', 'offset', ('iand', 31, 'bits')), ('ibfe', 'value', 'offset', 'bits')),
   (('ibfe', 'value', ('iand', 31, 'offset'), 'bits'), ('ibfe', 'value', 'offset', 'bits')),
   (('bfm', 'bits', ('iand', 31, 'offset')), ('bfm', 'bits', 'offset')),
   (('bfm', ('iand', 31, 'bits'), 'offset'), ('bfm', 'bits', 'offset')),

   (('ibitfield_extract', 'value', 'offset', 'bits'),
    ('bcsel', ('ieq', 0, 'bits'),
     0,
     ('ishr',
      ('ishl', 'value', ('isub', ('isub', 32, 'bits'), 'offset')),
      ('isub', 32, 'bits'))),
    'options->lower_bitfield_extract_to_shifts'),

   (('ubitfield_extract', 'value', 'offset', 'bits'),
    ('iand',
     ('ushr', 'value', 'offset'),
     ('bcsel', ('ieq', 'bits', 32),
      0xffffffff,
      ('isub', ('ishl', 1, 'bits'), 1))),
    'options->lower_bitfield_extract_to_shifts'),

   (('ifind_msb', 'value'),
    ('ufind_msb', ('bcsel', ('ilt', 'value', 0), ('inot', 'value'), 'value')),
    'options->lower_ifind_msb'),

   (('find_lsb', 'value'),
    ('ufind_msb', ('iand', 'value', ('ineg', 'value'))),
    'options->lower_find_lsb'),

   (('extract_i8', a, 'b@32'),
    ('ishr', ('ishl', a, ('imul', ('isub', 3, b), 8)), 24),
    'options->lower_extract_byte'),

   (('extract_u8', a, 'b@32'),
    ('iand', ('ushr', a, ('imul', b, 8)), 0xff),
    'options->lower_extract_byte'),

   (('extract_i16', a, 'b@32'),
    ('ishr', ('ishl', a, ('imul', ('isub', 1, b), 16)), 16),
    'options->lower_extract_word'),

   (('extract_u16', a, 'b@32'),
    ('iand', ('ushr', a, ('imul', b, 16)), 0xffff),
    'options->lower_extract_word'),

   (('pack_unorm_2x16', 'v'),
    ('pack_uvec2_to_uint',
     ('f2u32', ('fround_even', ('fmul', ('fsat', 'v'), 65535.0)))),
    'options->lower_pack_unorm_2x16'),

   (('pack_unorm_4x8', 'v'),
    ('pack_uvec4_to_uint',
     ('f2u32', ('fround_even', ('fmul', ('fsat', 'v'), 255.0)))),
    'options->lower_pack_unorm_4x8'),

   (('pack_snorm_2x16', 'v'),
    ('pack_uvec2_to_uint',
     ('f2i32', ('fround_even', ('fmul', ('fmin', 1.0, ('fmax', -1.0, 'v')), 32767.0)))),
    'options->lower_pack_snorm_2x16'),

   (('pack_snorm_4x8', 'v'),
    ('pack_uvec4_to_uint',
     ('f2i32', ('fround_even', ('fmul', ('fmin', 1.0, ('fmax', -1.0, 'v')), 127.0)))),
    'options->lower_pack_snorm_4x8'),

   (('unpack_unorm_2x16', 'v'),
    ('fdiv', ('u2f32', ('vec2', ('extract_u16', 'v', 0),
                                ('extract_u16', 'v', 1))),
             65535.0),
    'options->lower_unpack_unorm_2x16'),

   (('unpack_unorm_4x8', 'v'),
    ('fdiv', ('u2f32', ('vec4', ('extract_u8', 'v', 0),
                                ('extract_u8', 'v', 1),
                                ('extract_u8', 'v', 2),
                                ('extract_u8', 'v', 3))),
             255.0),
    'options->lower_unpack_unorm_4x8'),

   (('unpack_snorm_2x16', 'v'),
    ('fmin', 1.0, ('fmax', -1.0, ('fdiv', ('i2f', ('vec2', ('extract_i16', 'v', 0),
                                                           ('extract_i16', 'v', 1))),
                                          32767.0))),
    'options->lower_unpack_snorm_2x16'),

   (('unpack_snorm_4x8', 'v'),
    ('fmin', 1.0, ('fmax', -1.0, ('fdiv', ('i2f', ('vec4', ('extract_i8', 'v', 0),
                                                           ('extract_i8', 'v', 1),
                                                           ('extract_i8', 'v', 2),
                                                           ('extract_i8', 'v', 3))),
                                          127.0))),
    'options->lower_unpack_snorm_4x8'),

   (('isign', a), ('imin', ('imax', a, -1), 1), 'options->lower_isign'),
   (('fsign', a), ('fsub', ('b2f', ('flt', 0.0, a)), ('b2f', ('flt', a, 0.0))), 'options->lower_fsign'),
])
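
# An illustrative self-check of the 'uadd_sat' lowering in the list above
# (a sketch, not part of the generated pass): with 32-bit wrap-around
# addition, ('ult', ('iadd', a, b), a) is true exactly when the add carried,
# so the bcsel returns ~0 on overflow and the plain sum otherwise.
def _uadd_sat32(x, y):
   s = (x + y) & 0xffffffff
   return 0xffffffff if s < x else s
assert _uadd_sat32(0xfffffff0, 0x20) == 0xffffffff
assert _uadd_sat32(1, 2) == 3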

# bit_size dependent lowerings
for bit_size in [8, 16, 32, 64]:
   # convenience constants
   intmax = (1 << (bit_size - 1)) - 1
   intmin = 1 << (bit_size - 1)

   optimizations += [
      (('iadd_sat@' + str(bit_size), a, b),
       ('bcsel', ('ige', b, 1), ('bcsel', ('ilt', ('iadd', a, b), a), intmax, ('iadd', a, b)),
                                ('bcsel', ('ilt', a, ('iadd', a, b)), intmin, ('iadd', a, b))), 'options->lower_add_sat'),
      (('isub_sat@' + str(bit_size), a, b),
       ('bcsel', ('ilt', b, 0), ('bcsel', ('ilt', ('isub', a, b), a), intmax, ('isub', a, b)),
                                ('bcsel', ('ilt', a, ('isub', a, b)), intmin, ('isub', a, b))), 'options->lower_add_sat'),
   ]

invert = OrderedDict([('feq', 'fne'), ('fne', 'feq'), ('fge', 'flt'), ('flt', 'fge')])

for left, right in itertools.combinations_with_replacement(invert.keys(), 2):
   optimizations.append((('inot', ('ior(is_used_once)', (left, a, b), (right, c, d))),
                         ('iand', (invert[left], a, b), (invert[right], c, d))))
   optimizations.append((('inot', ('iand(is_used_once)', (left, a, b), (right, c, d))),
                         ('ior', (invert[left], a, b), (invert[right], c, d))))
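
# For example, with left == right == 'flt' this loop generates:
#
#    (('inot', ('ior(is_used_once)', ('flt', a, b), ('flt', c, d))),
#     ('iand', ('fge', a, b), ('fge', c, d)))
#
# i.e. De Morgan's law with each comparison replaced by its inverse.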

# Optimize x2bN(b2x(x)) -> x
for size in type_sizes('bool'):
   aN = 'a@' + str(size)
   f2bN = 'f2b' + str(size)
   i2bN = 'i2b' + str(size)
   optimizations.append(((f2bN, ('b2f', aN)), a))
   optimizations.append(((i2bN, ('b2i', aN)), a))

# Optimize x2yN(b2x(x)) -> b2y
for x, y in itertools.product(['f', 'u', 'i'], ['f', 'u', 'i']):
   if x != 'f' and y != 'f' and x != y:
      continue

   b2x = 'b2f' if x == 'f' else 'b2i'
   b2y = 'b2f' if y == 'f' else 'b2i'
   x2yN = '{}2{}'.format(x, y)
   optimizations.append(((x2yN, (b2x, a)), (b2y, a)))

# Optimize away x2xN(a@N)
for t in ['int', 'uint', 'float']:
   for N in type_sizes(t):
      x2xN = '{0}2{0}{1}'.format(t[0], N)
      aN = 'a@{0}'.format(N)
      optimizations.append(((x2xN, aN), a))

# Optimize x2xN(y2yM(a@P)) -> y2yN(a) for integers
# In particular, we can optimize away everything except upcast of downcast and
# upcasts where the type differs from the other cast
for N, M in itertools.product(type_sizes('uint'), type_sizes('uint')):
   if N < M:
      # The outer cast is a down-cast.  It doesn't matter what the size of
      # the argument of the inner cast is because we'll never be in the
      # up-cast of down-cast case.  Regardless of types, we'll always end up
      # with y2yN in the end.
      for x, y in itertools.product(['i', 'u'], ['i', 'u']):
         x2xN = '{0}2{0}{1}'.format(x, N)
         y2yM = '{0}2{0}{1}'.format(y, M)
         y2yN = '{0}2{0}{1}'.format(y, N)
         optimizations.append(((x2xN, (y2yM, a)), (y2yN, a)))
   elif N > M:
      # If the outer cast is an up-cast, we have to be more careful about the
      # size of the argument of the inner cast and with types.  In this case,
      # the type is always the type of the up-cast, which is given by the
      # outer cast.
      for P in type_sizes('uint'):
         # We can't optimize away up-cast of down-cast.
         if M < P:
            continue

         # Because we're doing up-cast of up-cast, the types always have to
         # match between the two casts
         for x in ['i', 'u']:
            x2xN = '{0}2{0}{1}'.format(x, N)
            x2xM = '{0}2{0}{1}'.format(x, M)
            aP = 'a@{0}'.format(P)
            optimizations.append(((x2xN, (x2xM, aP)), (x2xN, a)))
   else:
      # The N == M case is handled by other optimizations
      pass
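
# As a concrete instance, the down-cast arm (N < M) above generates, among
# others:
#
#    (('i2i16', ('u2u32', a)), ('u2u16', a))
#
# A zero-extend to 32 bits followed by a truncate to 16 keeps only the low
# 16 bits, which is exactly what the single 'u2u16' produces.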
1045
1046 # Optimize comparisons with up-casts
1047 for t in ['int', 'uint', 'float']:
1048 for N, M in itertools.product(type_sizes(t), repeat=2):
1049 if N == 1 or N >= M:
1050 continue
1051
1052 x2xM = '{0}2{0}{1}'.format(t[0], M)
1053 x2xN = '{0}2{0}{1}'.format(t[0], N)
1054 aN = 'a@' + str(N)
1055 bN = 'b@' + str(N)
1056 xeq = 'feq' if t == 'float' else 'ieq'
1057 xne = 'fne' if t == 'float' else 'ine'
1058 xge = '{0}ge'.format(t[0])
1059 xlt = '{0}lt'.format(t[0])
1060
1061 # Up-casts are lossless so for correctly signed comparisons of
1062 # up-casted values we can do the comparison at the largest of the two
1063 # original sizes and drop one or both of the casts. (We have
1064 # optimizations to drop the no-op casts which this may generate.)
1065 for P in type_sizes(t):
1066 if P == 1 or P > N:
1067 continue
1068
1069 bP = 'b@' + str(P)
1070 optimizations += [
1071 ((xeq, (x2xM, aN), (x2xM, bP)), (xeq, a, (x2xN, b))),
1072 ((xne, (x2xM, aN), (x2xM, bP)), (xne, a, (x2xN, b))),
1073 ((xge, (x2xM, aN), (x2xM, bP)), (xge, a, (x2xN, b))),
1074 ((xlt, (x2xM, aN), (x2xM, bP)), (xlt, a, (x2xN, b))),
1075 ((xge, (x2xM, bP), (x2xM, aN)), (xge, (x2xN, b), a)),
1076 ((xlt, (x2xM, bP), (x2xM, aN)), (xlt, (x2xN, b), a)),
1077 ]

      # The next bit doesn't work on floats because the range checks would
      # get way too complicated.
      if t in ['int', 'uint']:
         if t == 'int':
            xN_min = -(1 << (N - 1))
            xN_max = (1 << (N - 1)) - 1
         elif t == 'uint':
            xN_min = 0
            xN_max = (1 << N) - 1
         else:
            assert False

         # If we're up-casting and comparing to a constant, we can unfold
         # the comparison into a comparison with the shrunk-down constant
         # and a check that the constant fits in the smaller bit size.
         optimizations += [
            ((xeq, (x2xM, aN), '#b'),
             ('iand', (xeq, a, (x2xN, b)), (xeq, (x2xM, (x2xN, b)), b))),
            ((xne, (x2xM, aN), '#b'),
             ('ior', (xne, a, (x2xN, b)), (xne, (x2xM, (x2xN, b)), b))),
            ((xlt, (x2xM, aN), '#b'),
             ('iand', (xlt, xN_min, b),
              ('ior', (xlt, xN_max, b), (xlt, a, (x2xN, b))))),
            ((xlt, '#a', (x2xM, bN)),
             ('iand', (xlt, a, xN_max),
              ('ior', (xlt, a, xN_min), (xlt, (x2xN, a), b)))),
            ((xge, (x2xM, aN), '#b'),
             ('iand', (xge, xN_max, b),
              ('ior', (xge, xN_min, b), (xge, a, (x2xN, b))))),
            ((xge, '#a', (x2xM, bN)),
             ('iand', (xge, a, xN_min),
              ('ior', (xge, a, xN_max), (xge, (x2xN, a), b)))),
         ]
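         # For illustration, with t='uint', N=16, M=32 the xlt entry above
         # rewrites ult(u2u32(a), b) as
         #
         #    0 < b && (0xffff < b || a < u2u16(b))
         #
         # which is false for every a when b == 0 and true for every a when
         # b is larger than the 16-bit range.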

def fexp2i(exp, bits):
   # We assume that exp is already in the right range.
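   # Each case below builds the IEEE bit pattern of 2.0^exp directly: bias
   # the exponent and shift it into place above the 10/23/52 mantissa bits.
   # (The 64-bit pattern only needs the high dword, where the exponent field
   # sits 20 bits up.) E.g. for bits == 32 and exp == 0 this produces
   # (0 + 127) << 23 == 0x3f800000 == 1.0f.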
   if bits == 16:
      return ('i2i16', ('ishl', ('iadd', exp, 15), 10))
   elif bits == 32:
      return ('ishl', ('iadd', exp, 127), 23)
   elif bits == 64:
      return ('pack_64_2x32_split', 0, ('ishl', ('iadd', exp, 1023), 20))
   else:
      assert False

def ldexp(f, exp, bits):
   # First, we clamp exp to a reasonable range. The maximum possible range
   # for a normal exponent is [-126, 127] and, throwing in denormals, you get
   # a maximum range of [-149, 127]. This means that we can potentially have
   # a swing of +-276. If you start with FLT_MAX, you actually have to do
   # ldexp(FLT_MAX, -278) to get it to flush all the way to zero. The GLSL
   # spec, on the other hand, only requires that we handle an exponent value
   # in the range [-126, 128]. This implementation is *mostly* correct; it
   # handles a range on exp of [-252, 254] which allows you to create any
   # value (including denorms if the hardware supports it) and to adjust the
   # exponent of any normal value to anything you want.
   if bits == 16:
      exp = ('imin', ('imax', exp, -28), 30)
   elif bits == 32:
      exp = ('imin', ('imax', exp, -252), 254)
   elif bits == 64:
      exp = ('imin', ('imax', exp, -2044), 2046)
   else:
      assert False

   # Now we compute two powers of 2, one for exp/2 and one for exp-exp/2.
   # (We use ishr, which rounds differently from integer division for
   # negative odd exponents, but that's fine because we use exp-exp/2 as the
   # second exponent.) While the spec technically defines ldexp as
   # f * 2.0^exp, simply multiplying once doesn't work with denormals and
   # doesn't allow for the full swing in exponents that you can get with
   # normalized values. Instead, we create two powers of two and multiply by
   # them each in turn. That way the effective range of our exponent is
   # doubled.
   pow2_1 = fexp2i(('ishr', exp, 1), bits)
   pow2_2 = fexp2i(('isub', exp, ('ishr', exp, 1)), bits)
   return ('fmul', ('fmul', f, pow2_1), pow2_2)

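# For illustration, lowering ldexp(f, -1) at 32 bits gives
#
#    exp/2     = ishr(-1, 1) = -1  ->  pow2_1 = 2.0^-1
#    exp-exp/2 = -1 - (-1)   =  0  ->  pow2_2 = 2.0^0
#
# so the result is f * 0.5 * 1.0, as required.
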
optimizations += [
   (('ldexp@16', 'x', 'exp'), ldexp('x', 'exp', 16), 'options->lower_ldexp'),
   (('ldexp@32', 'x', 'exp'), ldexp('x', 'exp', 32), 'options->lower_ldexp'),
   (('ldexp@64', 'x', 'exp'), ldexp('x', 'exp', 64), 'options->lower_ldexp'),
]

# Unreal Engine 4 demo applications open-code bitfieldReverse()
def bitfield_reverse(u):
   step1 = ('ior', ('ishl', u, 16), ('ushr', u, 16))
   step2 = ('ior', ('ishl', ('iand', step1, 0x00ff00ff), 8), ('ushr', ('iand', step1, 0xff00ff00), 8))
   step3 = ('ior', ('ishl', ('iand', step2, 0x0f0f0f0f), 4), ('ushr', ('iand', step2, 0xf0f0f0f0), 4))
   step4 = ('ior', ('ishl', ('iand', step3, 0x33333333), 2), ('ushr', ('iand', step3, 0xcccccccc), 2))
   step5 = ('ior(many-comm-expr)', ('ishl', ('iand', step4, 0x55555555), 1), ('ushr', ('iand', step4, 0xaaaaaaaa), 1))

   return step5

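# For illustration, feeding 0x12345678 through the steps above first swaps
# the 16-bit halves (0x56781234) and, after the byte, nibble, pair, and
# single-bit swaps, yields the bit-reversed value 0x1e6a2c48.
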
optimizations += [(bitfield_reverse('x@32'), ('bitfield_reverse', 'x'))]

# For any float comparison operation "cmp", if you have "a == a && a cmp b"
# then the "a == a" is redundant: it is equivalent to "a is not NaN", and if
# a is NaN then the second comparison will fail anyway.
for op in ['flt', 'fge', 'feq']:
   optimizations += [
      (('iand', ('feq', a, a), (op, a, b)), (op, a, b)),
      (('iand', ('feq', a, a), (op, b, a)), (op, b, a)),
   ]

# Add optimizations to handle the case where the result of a ternary is
# compared to a constant. This way we can take things like
#
#    (a ? 0 : 1) > 0
#
# and turn it into
#
#    a ? (0 > 0) : (1 > 0)
#
# which constant folding will eat for lunch. The resulting ternary will
# further get cleaned up by the boolean reductions above and we will be
# left with just the original variable "a".
for op in ['flt', 'fge', 'feq', 'fne',
           'ilt', 'ige', 'ieq', 'ine', 'ult', 'uge']:
   optimizations += [
      ((op, ('bcsel', 'a', '#b', '#c'), '#d'),
       ('bcsel', 'a', (op, 'b', 'd'), (op, 'c', 'd'))),
      ((op, '#d', ('bcsel', a, '#b', '#c')),
       ('bcsel', 'a', (op, 'd', 'b'), (op, 'd', 'c'))),
   ]
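# For illustration, with op == 'ilt' the second pattern rewrites
#
#    ilt(0, bcsel(a, 1, 0))              i.e. (a ? 1 : 0) > 0
#
# into
#
#    bcsel(a, ilt(0, 1), ilt(0, 0))
#
# which constant folding and the boolean reductions collapse to just "a".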


# For example, this converts things like
#
#    1 + mix(0, a - 1, condition)
#
# into
#
#    mix(1, (a-1)+1, condition)
#
# Other optimizations will rearrange the constants.
for op in ['fadd', 'fmul', 'iadd', 'imul']:
   optimizations += [
      ((op, ('bcsel(is_used_once)', a, '#b', c), '#d'), ('bcsel', a, (op, b, d), (op, c, d)))
   ]

# For derivatives in compute shaders, GLSL_NV_compute_shader_derivatives
# states:
#
#    If neither layout qualifier is specified, derivatives in compute shaders
#    return zero, which is consistent with the handling of built-in texture
#    functions like texture() in GLSL 4.50 compute shaders.
for op in ['fddx', 'fddx_fine', 'fddx_coarse',
           'fddy', 'fddy_fine', 'fddy_coarse']:
   optimizations += [
      ((op, 'a'), 0.0, 'info->stage == MESA_SHADER_COMPUTE && info->cs.derivative_group == DERIVATIVE_GROUP_NONE')
   ]

# Some optimizations for ir3-specific instructions.
optimizations += [
   # 'al * bl': If either 'al' or 'bl' is zero, return zero.
   (('umul_low', '#a(is_lower_half_zero)', 'b'), 0),
   # '(ah * bl) << 16 + c': If either 'ah' or 'bl' is zero, return 'c'.
   (('imadsh_mix16', '#a@32(is_upper_half_zero)', 'b@32', 'c@32'), 'c'),
   (('imadsh_mix16', 'a@32', '#b@32(is_lower_half_zero)', 'c@32'), 'c'),
]

# This section contains "late" optimizations that should be run before
# creating ffmas and calling regular optimizations for the final time.
# Optimizations should go here if they help code generation and conflict
# with the regular optimizations.
before_ffma_optimizations = [
   # Propagate constants down multiplication and addition chains
   (('~fmul(is_used_once)', ('fmul(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('fmul', ('fmul', a, c), b)),
   (('imul(is_used_once)', ('imul(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('imul', ('imul', a, c), b)),
   (('~fadd(is_used_once)', ('fadd(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('fadd', ('fadd', a, c), b)),
   (('iadd(is_used_once)', ('iadd(is_used_once)', 'a(is_not_const)', '#b'), 'c(is_not_const)'), ('iadd', ('iadd', a, c), b)),
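   # For illustration, the fmul rule above rewrites (x * 2.0) * y, where x
   # and y are non-constant, as (x * y) * 2.0 so the constant ends up
   # outermost and can combine with constants further up the chain.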

   (('~fadd', ('fmul', a, b), ('fmul', a, c)), ('fmul', a, ('fadd', b, c))),
   (('iadd', ('imul', a, b), ('imul', a, c)), ('imul', a, ('iadd', b, c))),
   (('~fadd', ('fneg', a), a), 0.0),
   (('iadd', ('ineg', a), a), 0),
   (('iadd', ('ineg', a), ('iadd', a, b)), b),
   (('iadd', a, ('iadd', ('ineg', a), b)), b),
   (('~fadd', ('fneg', a), ('fadd', a, b)), b),
   (('~fadd', a, ('fadd', ('fneg', a), b)), b),

   (('~flrp@32', ('fadd(is_used_once)', a, -1.0), ('fadd(is_used_once)', a, 1.0), d), ('fadd', ('flrp', -1.0, 1.0, d), a)),
   (('~flrp@32', ('fadd(is_used_once)', a, 1.0), ('fadd(is_used_once)', a, -1.0), d), ('fadd', ('flrp', 1.0, -1.0, d), a)),
   (('~flrp@32', ('fadd(is_used_once)', a, '#b'), ('fadd(is_used_once)', a, '#c'), d), ('fadd', ('fmul', d, ('fadd', c, ('fneg', b))), ('fadd', a, b))),
]

# This section contains "late" optimizations that should be run after the
# regular optimizations have finished. Optimizations should go here if
# they help code generation but do not necessarily produce code that is
# more easily optimizable.
late_optimizations = [
   # Most of these optimizations aren't quite safe when you get infinity or
   # NaN involved, but the first one should be fine.
   (('flt', ('fadd', a, b), 0.0), ('flt', a, ('fneg', b))),
   (('flt', ('fneg', ('fadd', a, b)), 0.0), ('flt', ('fneg', a), b)),
   (('~fge', ('fadd', a, b), 0.0), ('fge', a, ('fneg', b))),
   (('~fge', ('fneg', ('fadd', a, b)), 0.0), ('fge', ('fneg', a), b)),
   (('~feq', ('fadd', a, b), 0.0), ('feq', a, ('fneg', b))),
   (('~fne', ('fadd', a, b), 0.0), ('fne', a, ('fneg', b))),

   # nir_lower_to_source_mods will collapse this, but its existence during the
   # optimization loop can prevent other optimizations.
   (('fneg', ('fneg', a)), a),

   # These are duplicated from the main optimizations table. The late
   # patterns that rearrange expressions like x - .5 < 0 to x < .5 can create
   # new patterns like these. The patterns that compare with zero are removed
   # because they are unlikely to be created by anything in
   # late_optimizations.
   (('flt', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('flt', a, b)),
   (('flt', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('flt', b, a)),
   (('fge', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fge', a, b)),
   (('fge', '#b(is_gt_0_and_lt_1)', ('fsat(is_used_once)', a)), ('fge', b, a)),
   (('feq', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('feq', a, b)),
   (('fne', ('fsat(is_used_once)', a), '#b(is_gt_0_and_lt_1)'), ('fne', a, b)),

   (('fge', ('fsat(is_used_once)', a), 1.0), ('fge', a, 1.0)),
   (('flt', ('fsat(is_used_once)', a), 1.0), ('flt', a, 1.0)),

   (('~fge', ('fmin(is_used_once)', ('fadd(is_used_once)', a, b), ('fadd', c, d)), 0.0), ('iand', ('fge', a, ('fneg', b)), ('fge', c, ('fneg', d)))),

   (('flt', ('fneg', a), ('fneg', b)), ('flt', b, a)),
   (('fge', ('fneg', a), ('fneg', b)), ('fge', b, a)),
   (('feq', ('fneg', a), ('fneg', b)), ('feq', b, a)),
   (('fne', ('fneg', a), ('fneg', b)), ('fne', b, a)),
   (('flt', ('fneg', a), -1.0), ('flt', 1.0, a)),
   (('flt', -1.0, ('fneg', a)), ('flt', a, 1.0)),
   (('fge', ('fneg', a), -1.0), ('fge', 1.0, a)),
   (('fge', -1.0, ('fneg', a)), ('fge', a, 1.0)),
   (('fne', ('fneg', a), -1.0), ('fne', 1.0, a)),
   (('feq', -1.0, ('fneg', a)), ('feq', a, 1.0)),

   (('ior', a, a), a),
   (('iand', a, a), a),

   (('~fadd', ('fneg(is_used_once)', ('fsat(is_used_once)', 'a(is_not_fmul)')), 1.0), ('fsat', ('fadd', 1.0, ('fneg', a)))),

   (('fdot2', a, b), ('fdot_replicated2', a, b), 'options->fdot_replicates'),
   (('fdot3', a, b), ('fdot_replicated3', a, b), 'options->fdot_replicates'),
   (('fdot4', a, b), ('fdot_replicated4', a, b), 'options->fdot_replicates'),
   (('fdph', a, b), ('fdph_replicated', a, b), 'options->fdot_replicates'),

   (('~flrp@32', ('fadd(is_used_once)', a, b), ('fadd(is_used_once)', a, c), d), ('fadd', ('flrp', b, c, d), a)),
   (('~flrp@64', ('fadd(is_used_once)', a, b), ('fadd(is_used_once)', a, c), d), ('fadd', ('flrp', b, c, d), a)),

   (('~fadd@32', 1.0, ('fmul(is_used_once)', c, ('fadd', b, -1.0))), ('fadd', ('fadd', 1.0, ('fneg', c)), ('fmul', b, c)), 'options->lower_flrp32'),
   (('~fadd@64', 1.0, ('fmul(is_used_once)', c, ('fadd', b, -1.0))), ('fadd', ('fadd', 1.0, ('fneg', c)), ('fmul', b, c)), 'options->lower_flrp64'),

   # A similar operation could apply to any ffma(#a, b, #(-a/2)), but this
   # particular operation is common for expanding values stored in a texture
   # from [0,1] to [-1,1].
   (('~ffma@32', a,  2.0, -1.0), ('flrp', -1.0, 1.0, a), '!options->lower_flrp32'),
   (('~ffma@32', a, -2.0, -1.0), ('flrp', -1.0, 1.0, ('fneg', a)), '!options->lower_flrp32'),
   (('~ffma@32', a, -2.0,  1.0), ('flrp', 1.0, -1.0, a), '!options->lower_flrp32'),
   (('~ffma@32', a,  2.0,  1.0), ('flrp', 1.0, -1.0, ('fneg', a)), '!options->lower_flrp32'),
   (('~fadd@32', ('fmul(is_used_once)',  2.0, a), -1.0), ('flrp', -1.0, 1.0, a), '!options->lower_flrp32'),
   (('~fadd@32', ('fmul(is_used_once)', -2.0, a), -1.0), ('flrp', -1.0, 1.0, ('fneg', a)), '!options->lower_flrp32'),
   (('~fadd@32', ('fmul(is_used_once)', -2.0, a),  1.0), ('flrp', 1.0, -1.0, a), '!options->lower_flrp32'),
   (('~fadd@32', ('fmul(is_used_once)',  2.0, a),  1.0), ('flrp', 1.0, -1.0, ('fneg', a)), '!options->lower_flrp32'),
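   # Sanity check for the first ffma rule above: flrp(-1.0, 1.0, a) is
   # -1.0 * (1 - a) + 1.0 * a == 2.0 * a - 1.0, which is exactly
   # ffma(a, 2.0, -1.0).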

   # flrp(a, b, a)
   # a*(1-a) + b*a
   # a + -a*a + a*b (1)
   # a + a*(b - a)
   # Option 1: ffma(a, (b-a), a)
   #
   # Alternately, after (1):
   # a*(1+b) + -a*a
   # a*((1+b) + -a)
   #
   # Let b=1
   #
   # Option 2: ffma(a, 2, -(a*a))
   # Option 3: ffma(a, 2, (-a)*a)
   # Option 4: ffma(a, -a, (2*a))
   # Option 5: a * (2 - a)
   #
   # There are a lot of other possible combinations.
   (('~ffma@32', ('fadd', b, ('fneg', a)), a, a), ('flrp', a, b, a), '!options->lower_flrp32'),
   (('~ffma@32', a, 2.0, ('fneg', ('fmul', a, a))), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
   (('~ffma@32', a, 2.0, ('fmul', ('fneg', a), a)), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
   (('~ffma@32', a, ('fneg', a), ('fmul', 2.0, a)), ('flrp', a, 1.0, a), '!options->lower_flrp32'),
   (('~fmul@32', a, ('fadd', 2.0, ('fneg', a))), ('flrp', a, 1.0, a), '!options->lower_flrp32'),

   # We do these late so that we don't get in the way of creating ffmas.
   (('fmin', ('fadd(is_used_once)', '#c', a), ('fadd(is_used_once)', '#c', b)), ('fadd', c, ('fmin', a, b))),
   (('fmax', ('fadd(is_used_once)', '#c', a), ('fadd(is_used_once)', '#c', b)), ('fadd', c, ('fmax', a, b))),

   (('bcsel', a, 0, ('b2f32', ('inot', 'b@bool'))), ('b2f32', ('inot', ('ior', a, b)))),

   # Things that look like DPH in the source shader may get expanded to
   # something that looks like dot(v1.xyz, v2.xyz) + v1.w by the time it gets
   # to NIR. After FFMA is generated, this can look like:
   #
   #    fadd(ffma(v1.z, v2.z, ffma(v1.y, v2.y, fmul(v1.x, v2.x))), v1.w)
   #
   # Reassociate the last addition into the first multiplication.
   (('~fadd', ('ffma(is_used_once)', a, b, ('ffma', c, d, ('fmul', 'e(is_not_const_and_not_fsign)', 'f(is_not_const_and_not_fsign)'))), 'g(is_not_const)'),
    ('ffma', a, b, ('ffma', c, d, ('ffma', e, 'f', 'g'))), '!options->intel_vec4'),
   (('~fadd', ('ffma(is_used_once)', a, b, ('fmul', 'e(is_not_const_and_not_fsign)', 'f(is_not_const_and_not_fsign)')), 'g(is_not_const)'),
    ('ffma', a, b, ('ffma', e, 'f', 'g')), '!options->intel_vec4'),
]

print(nir_algebraic.AlgebraicPass("nir_opt_algebraic", optimizations).render())
print(nir_algebraic.AlgebraicPass("nir_opt_algebraic_before_ffma",
                                  before_ffma_optimizations).render())
print(nir_algebraic.AlgebraicPass("nir_opt_algebraic_late",
                                  late_optimizations).render())