1 /* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
2 This file is consumed by genmatch which produces gimple-match.c
3 and generic-match.c from it.
4
5 Copyright (C) 2014-2018 Free Software Foundation, Inc.
6 Contributed by Richard Biener <rguenther@suse.de>
7 and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
15
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
24
25
26 /* Generic tree predicates we inherit. */
27 (define_predicates
28 integer_onep integer_zerop integer_all_onesp integer_minus_onep
29 integer_each_onep integer_truep integer_nonzerop
30 real_zerop real_onep real_minus_onep
31 zerop
32 CONSTANT_CLASS_P
33 tree_expr_nonnegative_p
34 tree_expr_nonzero_p
35 integer_valued_real_p
36 integer_pow2p
37 HONOR_NANS)
38
39 /* Operator lists. */
40 (define_operator_list tcc_comparison
41 lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
42 (define_operator_list inverted_tcc_comparison
43 ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
44 (define_operator_list inverted_tcc_comparison_with_nans
45 unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
46 (define_operator_list swapped_tcc_comparison
47 gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
48 (define_operator_list simple_comparison lt le eq ne ge gt)
49 (define_operator_list swapped_simple_comparison gt ge eq ne le lt)
50
51 #include "cfn-operators.pd"
52
53 /* Define operator lists for math rounding functions {,i,l,ll}FN,
54 where the versions prefixed with "i" return an int, those prefixed with
55 "l" return a long and those prefixed with "ll" return a long long.
56
57 Also define operator lists:
58
59 X<FN>F for all float functions, in the order i, l, ll
60 X<FN> for all double functions, in the same order
61 X<FN>L for all long double functions, in the same order. */
62 #define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
63 (define_operator_list X##FN##F BUILT_IN_I##FN##F \
64 BUILT_IN_L##FN##F \
65 BUILT_IN_LL##FN##F) \
66 (define_operator_list X##FN BUILT_IN_I##FN \
67 BUILT_IN_L##FN \
68 BUILT_IN_LL##FN) \
69 (define_operator_list X##FN##L BUILT_IN_I##FN##L \
70 BUILT_IN_L##FN##L \
71 BUILT_IN_LL##FN##L)
72
73 DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
74 DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
75 DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
76 DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
77
78 /* Binary operations and their associated IFN_COND_* function. */
79 (define_operator_list UNCOND_BINARY
80 plus minus
81 mult trunc_div trunc_mod rdiv
82 min max
83 bit_and bit_ior bit_xor)
84 (define_operator_list COND_BINARY
85 IFN_COND_ADD IFN_COND_SUB
86 IFN_COND_MUL IFN_COND_DIV IFN_COND_MOD IFN_COND_RDIV
87 IFN_COND_MIN IFN_COND_MAX
88 IFN_COND_AND IFN_COND_IOR IFN_COND_XOR)
89
90 /* Same for ternary operations. */
91 (define_operator_list UNCOND_TERNARY
92 IFN_FMA IFN_FMS IFN_FNMA IFN_FNMS)
93 (define_operator_list COND_TERNARY
94 IFN_COND_FMA IFN_COND_FMS IFN_COND_FNMA IFN_COND_FNMS)
95
96 /* As opposed to convert?, this still creates a single pattern, so
97 it is not a suitable replacement for convert? in all cases. */
98 (match (nop_convert @0)
99 (convert @0)
100 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))))
101 (match (nop_convert @0)
102 (view_convert @0)
103 (if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0))
104 && known_eq (TYPE_VECTOR_SUBPARTS (type),
105 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0)))
106 && tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
107 /* This one has to be last, or it shadows the others. */
108 (match (nop_convert @0)
109 @0)
110
111 /* Transform the likes of (char) ABS_EXPR <(int) x> into (char) ABSU_EXPR <x>.
112 ABSU_EXPR returns the unsigned absolute value of its operand, and the
113 operand of the ABSU_EXPR will have the corresponding signed type. */
114 (simplify (abs (convert @0))
115 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
116 && !TYPE_UNSIGNED (TREE_TYPE (@0))
117 && element_precision (type) > element_precision (TREE_TYPE (@0)))
118 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
119 (convert (absu:utype @0)))))
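/* Editorial illustration (not part of the original sources): for a signed
   char c, the C expression (char) abs ((int) c) first widens, takes the
   absolute value in int, and then truncates; the rewrite above instead
   computes ABSU_EXPR <c> directly in unsigned char and converts once,
   which is well defined even for c == -128 (ABSU (-128) == 128u).  */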
120
121
122 /* Simplifications of operations with one constant operand and
123 simplifications to constants or single values. */
124
125 (for op (plus pointer_plus minus bit_ior bit_xor)
126 (simplify
127 (op @0 integer_zerop)
128 (non_lvalue @0)))
129
130 /* 0 +p index -> (type)index */
131 (simplify
132 (pointer_plus integer_zerop @1)
133 (non_lvalue (convert @1)))
134
135 /* ptr - 0 -> (type)ptr */
136 (simplify
137 (pointer_diff @0 integer_zerop)
138 (convert @0))
139
140 /* See if ARG1 is zero and X + ARG1 reduces to X.
141 Likewise if the operands are reversed. */
142 (simplify
143 (plus:c @0 real_zerop@1)
144 (if (fold_real_zero_addition_p (type, @1, 0))
145 (non_lvalue @0)))
146
147 /* See if ARG1 is zero and X - ARG1 reduces to X. */
148 (simplify
149 (minus @0 real_zerop@1)
150 (if (fold_real_zero_addition_p (type, @1, 1))
151 (non_lvalue @0)))
152
153 /* Simplify x - x.
154 This is unsafe for certain floats even in non-IEEE formats.
155 In IEEE, it is unsafe because it gives the wrong result for NaNs.
156 Also note that operand_equal_p is always false if an operand
157 is volatile. */
158 (simplify
159 (minus @0 @0)
160 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
161 { build_zero_cst (type); }))
162 (simplify
163 (pointer_diff @@0 @0)
164 { build_zero_cst (type); })
165
166 (simplify
167 (mult @0 integer_zerop@1)
168 @1)
169
170 /* Maybe fold x * 0 to 0. The expressions aren't the same
171 when x is NaN, since x * 0 is also NaN. Nor are they the
172 same in modes with signed zeros, since multiplying a
173 negative value by 0 gives -0, not +0. */
174 (simplify
175 (mult @0 real_zerop@1)
176 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
177 @1))
178
179 /* In IEEE floating point, x*1 is not equivalent to x for snans.
180 Likewise for complex arithmetic with signed zeros. */
181 (simplify
182 (mult @0 real_onep)
183 (if (!HONOR_SNANS (type)
184 && (!HONOR_SIGNED_ZEROS (type)
185 || !COMPLEX_FLOAT_TYPE_P (type)))
186 (non_lvalue @0)))
187
188 /* Transform x * -1.0 into -x. */
189 (simplify
190 (mult @0 real_minus_onep)
191 (if (!HONOR_SNANS (type)
192 && (!HONOR_SIGNED_ZEROS (type)
193 || !COMPLEX_FLOAT_TYPE_P (type)))
194 (negate @0)))
195
196 (for cmp (gt ge lt le)
197 outp (convert convert negate negate)
198 outn (negate negate convert convert)
199 /* Transform (X > 0.0 ? 1.0 : -1.0) into copysign(1, X). */
200 /* Transform (X >= 0.0 ? 1.0 : -1.0) into copysign(1, X). */
201 /* Transform (X < 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
202 /* Transform (X <= 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
203 (simplify
204 (cond (cmp @0 real_zerop) real_onep@1 real_minus_onep)
205 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
206 && types_match (type, TREE_TYPE (@0)))
207 (switch
208 (if (types_match (type, float_type_node))
209 (BUILT_IN_COPYSIGNF @1 (outp @0)))
210 (if (types_match (type, double_type_node))
211 (BUILT_IN_COPYSIGN @1 (outp @0)))
212 (if (types_match (type, long_double_type_node))
213 (BUILT_IN_COPYSIGNL @1 (outp @0))))))
214 /* Transform (X > 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
215 /* Transform (X >= 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
216 /* Transform (X < 0.0 ? -1.0 : 1.0) into copysign(1,X). */
217 /* Transform (X <= 0.0 ? -1.0 : 1.0) into copysign(1,X). */
218 (simplify
219 (cond (cmp @0 real_zerop) real_minus_onep real_onep@1)
220 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
221 && types_match (type, TREE_TYPE (@0)))
222 (switch
223 (if (types_match (type, float_type_node))
224 (BUILT_IN_COPYSIGNF @1 (outn @0)))
225 (if (types_match (type, double_type_node))
226 (BUILT_IN_COPYSIGN @1 (outn @0)))
227 (if (types_match (type, long_double_type_node))
228 (BUILT_IN_COPYSIGNL @1 (outn @0)))))))
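/* Editorial illustration (not part of the original sources): roughly, when
   NaNs and signed zeros need not be honored (e.g. under -ffast-math), the
   patterns above turn

     double f (double x) { return x > 0.0 ? 1.0 : -1.0; }

   into the branchless

     double f (double x) { return __builtin_copysign (1.0, x); }

   with the sign of the second copysign argument flipped for the <, <=,
   and negated-result variants.  */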
229
230 /* Transform X * copysign (1.0, X) into abs(X). */
231 (simplify
232 (mult:c @0 (COPYSIGN_ALL real_onep @0))
233 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
234 (abs @0)))
235
236 /* Transform X * copysign (1.0, -X) into -abs(X). */
237 (simplify
238 (mult:c @0 (COPYSIGN_ALL real_onep (negate @0)))
239 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
240 (negate (abs @0))))
241
242 /* Transform copysign (CST, X) into copysign (ABS(CST), X). */
243 (simplify
244 (COPYSIGN_ALL REAL_CST@0 @1)
245 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@0)))
246 (COPYSIGN_ALL (negate @0) @1)))
247
248 /* X * 1, X / 1 -> X. */
249 (for op (mult trunc_div ceil_div floor_div round_div exact_div)
250 (simplify
251 (op @0 integer_onep)
252 (non_lvalue @0)))
253
254 /* (A / (1 << B)) -> (A >> B).
255 Only for unsigned A. For signed A, this would not preserve rounding
256 toward zero.
257 For example: (-1 / (1 << B)) != -1 >> B. */
258 (simplify
259 (trunc_div @0 (lshift integer_onep@1 @2))
260 (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
261 && (!VECTOR_TYPE_P (type)
262 || target_supports_op_p (type, RSHIFT_EXPR, optab_vector)
263 || target_supports_op_p (type, RSHIFT_EXPR, optab_scalar)))
264 (rshift @0 @2)))
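/* Editorial note (not part of the original sources): the signedness
   restriction matters because truncating division rounds toward zero
   while an arithmetic right shift rounds toward negative infinity:

     -1 / (1 << 1)  ==  0
     -1 >> 1        ==  -1   (on the usual two's complement targets)

   so the rewrite is only applied when A is unsigned or known
   nonnegative.  */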
265
266 /* Preserve explicit divisions by 0: the C++ front-end wants to detect
267 undefined behavior in constexpr evaluation, and assuming that the division
268 traps enables better optimizations than these anyway. */
269 (for div (trunc_div ceil_div floor_div round_div exact_div)
270 /* 0 / X is always zero. */
271 (simplify
272 (div integer_zerop@0 @1)
273 /* But not for 0 / 0 so that we can get the proper warnings and errors. */
274 (if (!integer_zerop (@1))
275 @0))
276 /* X / -1 is -X. */
277 (simplify
278 (div @0 integer_minus_onep@1)
279 (if (!TYPE_UNSIGNED (type))
280 (negate @0)))
281 /* X / X is one. */
282 (simplify
283 (div @0 @0)
284 /* But not for 0 / 0 so that we can get the proper warnings and errors.
285 And not for _Fract types where we can't build 1. */
286 (if (!integer_zerop (@0) && !ALL_FRACT_MODE_P (TYPE_MODE (type)))
287 { build_one_cst (type); }))
288 /* X / abs (X) is X < 0 ? -1 : 1. */
289 (simplify
290 (div:C @0 (abs @0))
291 (if (INTEGRAL_TYPE_P (type)
292 && TYPE_OVERFLOW_UNDEFINED (type))
293 (cond (lt @0 { build_zero_cst (type); })
294 { build_minus_one_cst (type); } { build_one_cst (type); })))
295 /* X / -X is -1. */
296 (simplify
297 (div:C @0 (negate @0))
298 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
299 && TYPE_OVERFLOW_UNDEFINED (type))
300 { build_minus_one_cst (type); })))
301
302 /* For unsigned integral types, FLOOR_DIV_EXPR is the same as
303 TRUNC_DIV_EXPR. Rewrite into the latter in this case. */
304 (simplify
305 (floor_div @0 @1)
306 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
307 && TYPE_UNSIGNED (type))
308 (trunc_div @0 @1)))
309
310 /* Combine two successive divisions. Note that combining ceil_div
311 and floor_div is trickier and combining round_div even more so. */
312 (for div (trunc_div exact_div)
313 (simplify
314 (div (div @0 INTEGER_CST@1) INTEGER_CST@2)
315 (with {
316 wi::overflow_type overflow;
317 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
318 TYPE_SIGN (type), &overflow);
319 }
320 (if (!overflow)
321 (div @0 { wide_int_to_tree (type, mul); })
322 (if (TYPE_UNSIGNED (type)
323 || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
324 { build_zero_cst (type); })))))
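/* Editorial example (not part of the original sources): for 32-bit
   unsigned x, (x / 10) / 10 becomes x / 100.  When C1 * C2 overflows,
   e.g. (x / 65536) / 65537, the inner quotient x / 65536 is at most
   65535, which is smaller than 65537, so the overall result is known
   to be 0 (with an INT_MIN corner case excluded for signed types).  */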
325
326 /* Combine successive multiplications. Similar to above, but handling
327 overflow is different. */
328 (simplify
329 (mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
330 (with {
331 wi::overflow_type overflow;
332 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
333 TYPE_SIGN (type), &overflow);
334 }
335 /* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
336 otherwise undefined overflow implies that @0 must be zero. */
337 (if (!overflow || TYPE_OVERFLOW_WRAPS (type))
338 (mult @0 { wide_int_to_tree (type, mul); }))))
339
340 /* Optimize A / A to 1.0 if we don't care about
341 NaNs or Infinities. */
342 (simplify
343 (rdiv @0 @0)
344 (if (FLOAT_TYPE_P (type)
345 && ! HONOR_NANS (type)
346 && ! HONOR_INFINITIES (type))
347 { build_one_cst (type); }))
348
349 /* Optimize -A / A to -1.0 if we don't care about
350 NaNs or Infinities. */
351 (simplify
352 (rdiv:C @0 (negate @0))
353 (if (FLOAT_TYPE_P (type)
354 && ! HONOR_NANS (type)
355 && ! HONOR_INFINITIES (type))
356 { build_minus_one_cst (type); }))
357
358 /* PR71078: x / abs(x) -> copysign (1.0, x) */
359 (simplify
360 (rdiv:C (convert? @0) (convert? (abs @0)))
361 (if (SCALAR_FLOAT_TYPE_P (type)
362 && ! HONOR_NANS (type)
363 && ! HONOR_INFINITIES (type))
364 (switch
365 (if (types_match (type, float_type_node))
366 (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
367 (if (types_match (type, double_type_node))
368 (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
369 (if (types_match (type, long_double_type_node))
370 (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))
371
372 /* In IEEE floating point, x/1 is not equivalent to x for snans. */
373 (simplify
374 (rdiv @0 real_onep)
375 (if (!HONOR_SNANS (type))
376 (non_lvalue @0)))
377
378 /* In IEEE floating point, x/-1 is not equivalent to -x for snans. */
379 (simplify
380 (rdiv @0 real_minus_onep)
381 (if (!HONOR_SNANS (type))
382 (negate @0)))
383
384 (if (flag_reciprocal_math)
385 /* Convert (A/B)/C to A/(B*C). */
386 (simplify
387 (rdiv (rdiv:s @0 @1) @2)
388 (rdiv @0 (mult @1 @2)))
389
390 /* Canonicalize x / (C1 * y) to (x * C2) / y. */
391 (simplify
392 (rdiv @0 (mult:s @1 REAL_CST@2))
393 (with
394 { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @2); }
395 (if (tem)
396 (rdiv (mult @0 { tem; } ) @1))))
397
398 /* Convert A/(B/C) to (A/B)*C */
399 (simplify
400 (rdiv @0 (rdiv:s @1 @2))
401 (mult (rdiv @0 @1) @2)))
402
403 /* Simplify x / (- y) to -x / y. */
404 (simplify
405 (rdiv @0 (negate @1))
406 (rdiv (negate @0) @1))
407
408 /* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
409 (for div (trunc_div ceil_div floor_div round_div exact_div)
410 (simplify
411 (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
412 (if (integer_pow2p (@2)
413 && tree_int_cst_sgn (@2) > 0
414 && tree_nop_conversion_p (type, TREE_TYPE (@0))
415 && wi::to_wide (@2) + wi::to_wide (@1) == 0)
416 (rshift (convert @0)
417 { build_int_cst (integer_type_node,
418 wi::exact_log2 (wi::to_wide (@2))); }))))
419
420 /* If ARG1 is a constant, we can convert this to a multiply by the
421 reciprocal. This does not have the same rounding properties,
422 so only do this if -freciprocal-math. We can actually
423 always safely do it if ARG1 is a power of two, but it's hard to
424 tell if it is or not in a portable manner. */
425 (for cst (REAL_CST COMPLEX_CST VECTOR_CST)
426 (simplify
427 (rdiv @0 cst@1)
428 (if (optimize)
429 (if (flag_reciprocal_math
430 && !real_zerop (@1))
431 (with
432 { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
433 (if (tem)
434 (mult @0 { tem; } )))
435 (if (cst != COMPLEX_CST)
436 (with { tree inverse = exact_inverse (type, @1); }
437 (if (inverse)
438 (mult @0 { inverse; } ))))))))
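/* Editorial example (not part of the original sources): with
   -freciprocal-math, x / 5.0 becomes x * 0.2 even though the product
   may round differently from the division.  The exact_inverse branch
   covers constants such as 4.0 whose reciprocal 0.25 is exact in
   binary floating point, so x / 4.0 -> x * 0.25 is value-preserving
   without any flag.  */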
439
440 (for mod (ceil_mod floor_mod round_mod trunc_mod)
441 /* 0 % X is always zero. */
442 (simplify
443 (mod integer_zerop@0 @1)
444 /* But not for 0 % 0 so that we can get the proper warnings and errors. */
445 (if (!integer_zerop (@1))
446 @0))
447 /* X % 1 is always zero. */
448 (simplify
449 (mod @0 integer_onep)
450 { build_zero_cst (type); })
451 /* X % -1 is zero. */
452 (simplify
453 (mod @0 integer_minus_onep@1)
454 (if (!TYPE_UNSIGNED (type))
455 { build_zero_cst (type); }))
456 /* X % X is zero. */
457 (simplify
458 (mod @0 @0)
459 /* But not for 0 % 0 so that we can get the proper warnings and errors. */
460 (if (!integer_zerop (@0))
461 { build_zero_cst (type); }))
462 /* (X % Y) % Y is just X % Y. */
463 (simplify
464 (mod (mod@2 @0 @1) @1)
465 @2)
466 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2. */
467 (simplify
468 (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
469 (if (ANY_INTEGRAL_TYPE_P (type)
470 && TYPE_OVERFLOW_UNDEFINED (type)
471 && wi::multiple_of_p (wi::to_wide (@1), wi::to_wide (@2),
472 TYPE_SIGN (type)))
473 { build_zero_cst (type); }))
474 /* For (X % C) == 0, if X is signed and C is a power of 2, use unsigned
475 modulo and comparison, since it is simpler and equivalent. */
476 (for cmp (eq ne)
477 (simplify
478 (cmp (mod @0 integer_pow2p@2) integer_zerop@1)
479 (if (!TYPE_UNSIGNED (TREE_TYPE (@0)))
480 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
481 (cmp (mod (convert:utype @0) (convert:utype @2)) (convert:utype @1)))))))
482
483 /* X % -C is the same as X % C. */
484 (simplify
485 (trunc_mod @0 INTEGER_CST@1)
486 (if (TYPE_SIGN (type) == SIGNED
487 && !TREE_OVERFLOW (@1)
488 && wi::neg_p (wi::to_wide (@1))
489 && !TYPE_OVERFLOW_TRAPS (type)
490 /* Avoid this transformation if C is INT_MIN, i.e. C == -C. */
491 && !sign_bit_p (@1, @1))
492 (trunc_mod @0 (negate @1))))
493
494 /* X % -Y is the same as X % Y. */
495 (simplify
496 (trunc_mod @0 (convert? (negate @1)))
497 (if (INTEGRAL_TYPE_P (type)
498 && !TYPE_UNSIGNED (type)
499 && !TYPE_OVERFLOW_TRAPS (type)
500 && tree_nop_conversion_p (type, TREE_TYPE (@1))
501 /* Avoid this transformation if X might be INT_MIN or
502 Y might be -1, because we would then change valid
503 INT_MIN % -(-1) into invalid INT_MIN % -1. */
504 && (expr_not_equal_to (@0, wi::to_wide (TYPE_MIN_VALUE (type)))
505 || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
506 (TREE_TYPE (@1))))))
507 (trunc_mod @0 (convert @1))))
508
509 /* X - (X / Y) * Y is the same as X % Y. */
510 (simplify
511 (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1)))
512 (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
513 (convert (trunc_mod @0 @1))))
514
515 /* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
516 i.e. "X % C" into "X & (C - 1)", if X and C are positive.
517 Also optimize A % (C << N) where C is a power of 2,
518 to A & ((C << N) - 1). */
519 (match (power_of_two_cand @1)
520 INTEGER_CST@1)
521 (match (power_of_two_cand @1)
522 (lshift INTEGER_CST@1 @2))
523 (for mod (trunc_mod floor_mod)
524 (simplify
525 (mod @0 (convert?@3 (power_of_two_cand@1 @2)))
526 (if ((TYPE_UNSIGNED (type)
527 || tree_expr_nonnegative_p (@0))
528 && tree_nop_conversion_p (type, TREE_TYPE (@3))
529 && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
530 (bit_and @0 (convert (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))))
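/* Editorial example (not part of the original sources): for unsigned x,
   x % 16 becomes x & 15, and x % (4u << n) becomes x & ((4u << n) - 1),
   replacing a division-based operation with a single mask.  */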
531
532 /* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF. */
533 (simplify
534 (trunc_div (mult @0 integer_pow2p@1) @1)
535 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
536 (bit_and @0 { wide_int_to_tree
537 (type, wi::mask (TYPE_PRECISION (type)
538 - wi::exact_log2 (wi::to_wide (@1)),
539 false, TYPE_PRECISION (type))); })))
540
541 /* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1. */
542 (simplify
543 (mult (trunc_div @0 integer_pow2p@1) @1)
544 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
545 (bit_and @0 (negate @1))))
546
547 /* Simplify (t * 2) / 2 -> t. */
548 (for div (trunc_div ceil_div floor_div round_div exact_div)
549 (simplify
550 (div (mult:c @0 @1) @1)
551 (if (ANY_INTEGRAL_TYPE_P (type)
552 && TYPE_OVERFLOW_UNDEFINED (type))
553 @0)))
554
555 (for op (negate abs)
556 /* Simplify cos(-x) and cos(|x|) -> cos(x). Similarly for cosh. */
557 (for coss (COS COSH)
558 (simplify
559 (coss (op @0))
560 (coss @0)))
561 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer. */
562 (for pows (POW)
563 (simplify
564 (pows (op @0) REAL_CST@1)
565 (with { HOST_WIDE_INT n; }
566 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
567 (pows @0 @1)))))
568 /* Likewise for powi. */
569 (for pows (POWI)
570 (simplify
571 (pows (op @0) INTEGER_CST@1)
572 (if ((wi::to_wide (@1) & 1) == 0)
573 (pows @0 @1))))
574 /* Strip negate and abs from both operands of hypot. */
575 (for hypots (HYPOT)
576 (simplify
577 (hypots (op @0) @1)
578 (hypots @0 @1))
579 (simplify
580 (hypots @0 (op @1))
581 (hypots @0 @1)))
582 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y). */
583 (for copysigns (COPYSIGN_ALL)
584 (simplify
585 (copysigns (op @0) @1)
586 (copysigns @0 @1))))
587
588 /* abs(x)*abs(x) -> x*x. Should be valid for all types. */
589 (simplify
590 (mult (abs@1 @0) @1)
591 (mult @0 @0))
592
593 /* Convert absu(x)*absu(x) -> x*x. */
594 (simplify
595 (mult (absu@1 @0) @1)
596 (mult (convert@2 @0) @2))
597
598 /* cos(copysign(x, y)) -> cos(x). Similarly for cosh. */
599 (for coss (COS COSH)
600 copysigns (COPYSIGN)
601 (simplify
602 (coss (copysigns @0 @1))
603 (coss @0)))
604
605 /* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer. */
606 (for pows (POW)
607 copysigns (COPYSIGN)
608 (simplify
609 (pows (copysigns @0 @2) REAL_CST@1)
610 (with { HOST_WIDE_INT n; }
611 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
612 (pows @0 @1)))))
613 /* Likewise for powi. */
614 (for pows (POWI)
615 copysigns (COPYSIGN)
616 (simplify
617 (pows (copysigns @0 @2) INTEGER_CST@1)
618 (if ((wi::to_wide (@1) & 1) == 0)
619 (pows @0 @1))))
620
621 (for hypots (HYPOT)
622 copysigns (COPYSIGN)
623 /* hypot(copysign(x, y), z) -> hypot(x, z). */
624 (simplify
625 (hypots (copysigns @0 @1) @2)
626 (hypots @0 @2))
627 /* hypot(x, copysign(y, z)) -> hypot(x, y). */
628 (simplify
629 (hypots @0 (copysigns @1 @2))
630 (hypots @0 @1)))
631
632 /* copysign(x, CST) -> [-]abs (x). */
633 (for copysigns (COPYSIGN_ALL)
634 (simplify
635 (copysigns @0 REAL_CST@1)
636 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
637 (negate (abs @0))
638 (abs @0))))
639
640 /* copysign(copysign(x, y), z) -> copysign(x, z). */
641 (for copysigns (COPYSIGN_ALL)
642 (simplify
643 (copysigns (copysigns @0 @1) @2)
644 (copysigns @0 @2)))
645
646 /* copysign(x,y)*copysign(x,y) -> x*x. */
647 (for copysigns (COPYSIGN_ALL)
648 (simplify
649 (mult (copysigns@2 @0 @1) @2)
650 (mult @0 @0)))
651
652 /* ccos(-x) -> ccos(x). Similarly for ccosh. */
653 (for ccoss (CCOS CCOSH)
654 (simplify
655 (ccoss (negate @0))
656 (ccoss @0)))
657
658 /* cabs(-x) and cabs(conj(x)) -> cabs(x). */
659 (for ops (conj negate)
660 (for cabss (CABS)
661 (simplify
662 (cabss (ops @0))
663 (cabss @0))))
664
665 /* Fold (a * (1 << b)) into (a << b). */
666 (simplify
667 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
668 (if (! FLOAT_TYPE_P (type)
669 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
670 (lshift @0 @2)))
671
672 /* Fold (1 << (C - x)) where C = precision(type) - 1
673 into ((1 << C) >> x). */
674 (simplify
675 (lshift integer_onep@0 (minus@1 INTEGER_CST@2 @3))
676 (if (INTEGRAL_TYPE_P (type)
677 && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (type) - 1)
678 && single_use (@1))
679 (if (TYPE_UNSIGNED (type))
680 (rshift (lshift @0 @2) @3)
681 (with
682 { tree utype = unsigned_type_for (type); }
683 (convert (rshift (lshift (convert:utype @0) @2) @3))))))
684
685 /* Fold (C1/X)*C2 into (C1*C2)/X. */
686 (simplify
687 (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
688 (if (flag_associative_math
689 && single_use (@3))
690 (with
691 { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
692 (if (tem)
693 (rdiv { tem; } @1)))))
694
695 /* Simplify ~X & X as zero. */
696 (simplify
697 (bit_and:c (convert? @0) (convert? (bit_not @0)))
698 { build_zero_cst (type); })
699
700 /* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b); */
701 (simplify
702 (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep))
703 (if (TYPE_UNSIGNED (type))
704 (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1)))))
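/* Editorial note (not part of the original sources): both mask forms
   select the bits below position b; e.g. with b == 4, (1U << 4) - 1 and
   ~(~0U << 4) are both 0xf.  The ~(~0U << b) form avoids the +(-1) and
   is the shape that later patterns recognize more readily.  */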
705
706 (for bitop (bit_and bit_ior)
707 cmp (eq ne)
708 /* PR35691: Transform
709 (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0.
710 (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0. */
711 (simplify
712 (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop))
713 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
714 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
715 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
716 (cmp (bit_ior @0 (convert @1)) @2)))
717 /* Transform:
718 (x == -1 & y == -1) -> (x & typeof(x)(y)) == -1.
719 (x != -1 | y != -1) -> (x & typeof(x)(y)) != -1. */
720 (simplify
721 (bitop (cmp @0 integer_all_onesp@2) (cmp @1 integer_all_onesp))
722 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
723 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
724 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
725 (cmp (bit_and @0 (convert @1)) @2))))
726
727 /* Fold (A & ~B) - (A & B) into (A ^ B) - B. */
728 (simplify
729 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
730 (minus (bit_xor @0 @1) @1))
731 (simplify
732 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
733 (if (~wi::to_wide (@2) == wi::to_wide (@1))
734 (minus (bit_xor @0 @1) @1)))
735
736 /* Fold (A & B) - (A & ~B) into B - (A ^ B). */
737 (simplify
738 (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1)))
739 (minus @1 (bit_xor @0 @1)))
740
741 /* Simplify (X & ~Y) |^+ (~X & Y) -> X ^ Y. */
742 (for op (bit_ior bit_xor plus)
743 (simplify
744 (op (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1))
745 (bit_xor @0 @1))
746 (simplify
747 (op:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
748 (if (~wi::to_wide (@2) == wi::to_wide (@1))
749 (bit_xor @0 @1))))
750
751 /* PR53979: Transform ((a ^ b) | a) -> (a | b) */
752 (simplify
753 (bit_ior:c (bit_xor:c @0 @1) @0)
754 (bit_ior @0 @1))
755
756 /* (a & ~b) | (a ^ b) --> a ^ b */
757 (simplify
758 (bit_ior:c (bit_and:c @0 (bit_not @1)) (bit_xor:c@2 @0 @1))
759 @2)
760
761 /* (a & ~b) ^ ~a --> ~(a & b) */
762 (simplify
763 (bit_xor:c (bit_and:cs @0 (bit_not @1)) (bit_not @0))
764 (bit_not (bit_and @0 @1)))
765
766 /* (a | b) & ~(a ^ b) --> a & b */
767 (simplify
768 (bit_and:c (bit_ior @0 @1) (bit_not (bit_xor:c @0 @1)))
769 (bit_and @0 @1))
770
771 /* a | ~(a ^ b) --> a | ~b */
772 (simplify
773 (bit_ior:c @0 (bit_not:s (bit_xor:c @0 @1)))
774 (bit_ior @0 (bit_not @1)))
775
776 /* (a | b) | (a &^ b) --> a | b */
777 (for op (bit_and bit_xor)
778 (simplify
779 (bit_ior:c (bit_ior@2 @0 @1) (op:c @0 @1))
780 @2))
781
782 /* (a & b) | ~(a ^ b) --> ~(a ^ b) */
783 (simplify
784 (bit_ior:c (bit_and:c @0 @1) (bit_not@2 (bit_xor @0 @1)))
785 @2)
786
787 /* ~(~a & b) --> a | ~b */
788 (simplify
789 (bit_not (bit_and:cs (bit_not @0) @1))
790 (bit_ior @0 (bit_not @1)))
791
792 /* ~(~a | b) --> a & ~b */
793 (simplify
794 (bit_not (bit_ior:cs (bit_not @0) @1))
795 (bit_and @0 (bit_not @1)))
796
797 /* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0. */
798 #if GIMPLE
799 (simplify
800 (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
801 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
802 && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
803 (bit_xor @0 @1)))
804 #endif
805
806 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
807 ((A & N) + B) & M -> (A + B) & M
808 Similarly if (N & M) == 0,
809 ((A | N) + B) & M -> (A + B) & M
810 and for - instead of + (or unary - instead of +)
811 and/or ^ instead of |.
812 If B is constant and (B & M) == 0, fold into A & M. */
813 (for op (plus minus)
814 (for bitop (bit_and bit_ior bit_xor)
815 (simplify
816 (bit_and (op:s (bitop:s@0 @3 INTEGER_CST@4) @1) INTEGER_CST@2)
817 (with
818 { tree pmop[2];
819 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, bitop,
820 @3, @4, @1, ERROR_MARK, NULL_TREE,
821 NULL_TREE, pmop); }
822 (if (utype)
823 (convert (bit_and (op (convert:utype { pmop[0]; })
824 (convert:utype { pmop[1]; }))
825 (convert:utype @2))))))
826 (simplify
827 (bit_and (op:s @0 (bitop:s@1 @3 INTEGER_CST@4)) INTEGER_CST@2)
828 (with
829 { tree pmop[2];
830 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
831 NULL_TREE, NULL_TREE, @1, bitop, @3,
832 @4, pmop); }
833 (if (utype)
834 (convert (bit_and (op (convert:utype { pmop[0]; })
835 (convert:utype { pmop[1]; }))
836 (convert:utype @2)))))))
837 (simplify
838 (bit_and (op:s @0 @1) INTEGER_CST@2)
839 (with
840 { tree pmop[2];
841 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
842 NULL_TREE, NULL_TREE, @1, ERROR_MARK,
843 NULL_TREE, NULL_TREE, pmop); }
844 (if (utype)
845 (convert (bit_and (op (convert:utype { pmop[0]; })
846 (convert:utype { pmop[1]; }))
847 (convert:utype @2)))))))
848 (for bitop (bit_and bit_ior bit_xor)
849 (simplify
850 (bit_and (negate:s (bitop:s@0 @2 INTEGER_CST@3)) INTEGER_CST@1)
851 (with
852 { tree pmop[2];
853 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @1, NEGATE_EXPR, @0,
854 bitop, @2, @3, NULL_TREE, ERROR_MARK,
855 NULL_TREE, NULL_TREE, pmop); }
856 (if (utype)
857 (convert (bit_and (negate (convert:utype { pmop[0]; }))
858 (convert:utype @1)))))))
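/* Editorial example (not part of the original sources): with
   M == 0xf == (1 << 4) - 1 and N == 0xff, (N & M) == M holds, so
   ((a & 0xff) + b) & 0xf simplifies to (a + b) & 0xf: the low four
   bits of the sum depend only on the low four bits of the addends.  */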
859
860 /* X % Y is smaller than Y. */
861 (for cmp (lt ge)
862 (simplify
863 (cmp (trunc_mod @0 @1) @1)
864 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
865 { constant_boolean_node (cmp == LT_EXPR, type); })))
866 (for cmp (gt le)
867 (simplify
868 (cmp @1 (trunc_mod @0 @1))
869 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
870 { constant_boolean_node (cmp == GT_EXPR, type); })))
871
872 /* x | ~0 -> ~0 */
873 (simplify
874 (bit_ior @0 integer_all_onesp@1)
875 @1)
876
877 /* x | 0 -> x */
878 (simplify
879 (bit_ior @0 integer_zerop)
880 @0)
881
882 /* x & 0 -> 0 */
883 (simplify
884 (bit_and @0 integer_zerop@1)
885 @1)
886
887 /* ~x | x -> -1 */
888 /* ~x ^ x -> -1 */
889 /* ~x + x -> -1 */
890 (for op (bit_ior bit_xor plus)
891 (simplify
892 (op:c (convert? @0) (convert? (bit_not @0)))
893 (convert { build_all_ones_cst (TREE_TYPE (@0)); })))
894
895 /* x ^ x -> 0 */
896 (simplify
897 (bit_xor @0 @0)
898 { build_zero_cst (type); })
899
900 /* Canonicalize X ^ ~0 to ~X. */
901 (simplify
902 (bit_xor @0 integer_all_onesp@1)
903 (bit_not @0))
904
905 /* x & ~0 -> x */
906 (simplify
907 (bit_and @0 integer_all_onesp)
908 (non_lvalue @0))
909
910 /* x & x -> x, x | x -> x */
911 (for bitop (bit_and bit_ior)
912 (simplify
913 (bitop @0 @0)
914 (non_lvalue @0)))
915
916 /* x & C -> x if we know that x & ~C == 0. */
917 #if GIMPLE
918 (simplify
919 (bit_and SSA_NAME@0 INTEGER_CST@1)
920 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
921 && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
922 @0))
923 #endif
924
925 /* x + (x & 1) -> (x + 1) & ~1 */
926 (simplify
927 (plus:c @0 (bit_and:s @0 integer_onep@1))
928 (bit_and (plus @0 @1) (bit_not @1)))
929
930 /* x & ~(x & y) -> x & ~y */
931 /* x | ~(x | y) -> x | ~y */
932 (for bitop (bit_and bit_ior)
933 (simplify
934 (bitop:c @0 (bit_not (bitop:cs @0 @1)))
935 (bitop @0 (bit_not @1))))
936
937 /* (~x & y) | ~(x | y) -> ~x */
938 (simplify
939 (bit_ior:c (bit_and:c (bit_not@2 @0) @1) (bit_not (bit_ior:c @0 @1)))
940 @2)
941
942 /* (x | y) ^ (x | ~y) -> ~x */
943 (simplify
944 (bit_xor:c (bit_ior:c @0 @1) (bit_ior:c @0 (bit_not @1)))
945 (bit_not @0))
946
947 /* (x & y) | ~(x | y) -> ~(x ^ y) */
948 (simplify
949 (bit_ior:c (bit_and:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
950 (bit_not (bit_xor @0 @1)))
951
952 /* (~x | y) ^ (x ^ y) -> x | ~y */
953 (simplify
954 (bit_xor:c (bit_ior:cs (bit_not @0) @1) (bit_xor:s @0 @1))
955 (bit_ior @0 (bit_not @1)))
956
957 /* (x ^ y) | ~(x | y) -> ~(x & y) */
958 (simplify
959 (bit_ior:c (bit_xor:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
960 (bit_not (bit_and @0 @1)))
961
962 /* (x | y) & ~x -> y & ~x */
963 /* (x & y) | ~x -> y | ~x */
964 (for bitop (bit_and bit_ior)
965 rbitop (bit_ior bit_and)
966 (simplify
967 (bitop:c (rbitop:c @0 @1) (bit_not@2 @0))
968 (bitop @1 @2)))
969
970 /* (x & y) ^ (x | y) -> x ^ y */
971 (simplify
972 (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1))
973 (bit_xor @0 @1))
974
975 /* (x ^ y) ^ (x | y) -> x & y */
976 (simplify
977 (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1))
978 (bit_and @0 @1))
979
980 /* (x & y) + (x ^ y) -> x | y */
981 /* (x & y) | (x ^ y) -> x | y */
982 /* (x & y) ^ (x ^ y) -> x | y */
983 (for op (plus bit_ior bit_xor)
984 (simplify
985 (op:c (bit_and @0 @1) (bit_xor @0 @1))
986 (bit_ior @0 @1)))
987
988 /* (x & y) + (x | y) -> x + y */
989 (simplify
990 (plus:c (bit_and @0 @1) (bit_ior @0 @1))
991 (plus @0 @1))
992
993 /* (x + y) - (x | y) -> x & y */
994 (simplify
995 (minus (plus @0 @1) (bit_ior @0 @1))
996 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
997 && !TYPE_SATURATING (type))
998 (bit_and @0 @1)))
999
1000 /* (x + y) - (x & y) -> x | y */
1001 (simplify
1002 (minus (plus @0 @1) (bit_and @0 @1))
1003 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
1004 && !TYPE_SATURATING (type))
1005 (bit_ior @0 @1)))
1006
1007 /* (x | y) - (x ^ y) -> x & y */
1008 (simplify
1009 (minus (bit_ior @0 @1) (bit_xor @0 @1))
1010 (bit_and @0 @1))
1011
1012 /* (x | y) - (x & y) -> x ^ y */
1013 (simplify
1014 (minus (bit_ior @0 @1) (bit_and @0 @1))
1015 (bit_xor @0 @1))
1016
1017 /* (x | y) & ~(x & y) -> x ^ y */
1018 (simplify
1019 (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1)))
1020 (bit_xor @0 @1))
1021
1022 /* (x | y) & (~x ^ y) -> x & y */
1023 (simplify
1024 (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0)))
1025 (bit_and @0 @1))
1026
1027 /* (~x | y) & (x | ~y) -> ~(x ^ y) */
1028 (simplify
1029 (bit_and (bit_ior:cs (bit_not @0) @1) (bit_ior:cs @0 (bit_not @1)))
1030 (bit_not (bit_xor @0 @1)))
1031
1032 /* (~x | y) ^ (x | ~y) -> x ^ y */
1033 (simplify
1034 (bit_xor (bit_ior:c (bit_not @0) @1) (bit_ior:c @0 (bit_not @1)))
1035 (bit_xor @0 @1))
1036
1037 /* ~x & ~y -> ~(x | y)
1038 ~x | ~y -> ~(x & y) */
1039 (for op (bit_and bit_ior)
1040 rop (bit_ior bit_and)
1041 (simplify
1042 (op (convert1? (bit_not @0)) (convert2? (bit_not @1)))
1043 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1044 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
1045 (bit_not (rop (convert @0) (convert @1))))))
1046
1047 /* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing
1048 with a constant, and the two constants have no bits in common,
1049 we should treat this as a BIT_IOR_EXPR since this may produce more
1050 simplifications. */
1051 (for op (bit_xor plus)
1052 (simplify
1053 (op (convert1? (bit_and@4 @0 INTEGER_CST@1))
1054 (convert2? (bit_and@5 @2 INTEGER_CST@3)))
1055 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1056 && tree_nop_conversion_p (type, TREE_TYPE (@2))
1057 && (wi::to_wide (@1) & wi::to_wide (@3)) == 0)
1058 (bit_ior (convert @4) (convert @5)))))
1059
1060 /* (X | Y) ^ X -> Y & ~X. */
1061 (simplify
1062 (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0))
1063 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1064 (convert (bit_and @1 (bit_not @0)))))
1065
1066 /* Convert ~X ^ ~Y to X ^ Y. */
1067 (simplify
1068 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1)))
1069 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1070 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
1071 (bit_xor (convert @0) (convert @1))))
1072
1073 /* Convert ~X ^ C to X ^ ~C. */
1074 (simplify
1075 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1)
1076 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1077 (bit_xor (convert @0) (bit_not @1))))
1078
1079 /* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y. */
1080 (for opo (bit_and bit_xor)
1081 opi (bit_xor bit_and)
1082 (simplify
1083 (opo:c (opi:cs @0 @1) @1)
1084 (bit_and (bit_not @0) @1)))
1085
1086 /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
1087 operands are another bit-wise operation with a common input. If so,
1088 distribute the bit operations to save an operation and possibly two if
1089 constants are involved. For example, convert
1090 (A | B) & (A | C) into A | (B & C)
1091 Further simplification will occur if B and C are constants. */
1092 (for op (bit_and bit_ior bit_xor)
1093 rop (bit_ior bit_and bit_and)
1094 (simplify
1095 (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2)))
1096 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1097 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
1098 (rop (convert @0) (op (convert @1) (convert @2))))))
1099
1100 /* Some simple reassociation for bit operations, also handled in reassoc. */
1101 /* (X & Y) & Y -> X & Y
1102 (X | Y) | Y -> X | Y */
1103 (for op (bit_and bit_ior)
1104 (simplify
1105 (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1))
1106 @2))
1107 /* (X ^ Y) ^ Y -> X */
1108 (simplify
1109 (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1))
1110 (convert @0))
1111 /* (X & Y) & (X & Z) -> (X & Y) & Z
1112 (X | Y) | (X | Z) -> (X | Y) | Z */
1113 (for op (bit_and bit_ior)
1114 (simplify
1115 (op (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2)))
1116 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1117 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
1118 (if (single_use (@5) && single_use (@6))
1119 (op @3 (convert @2))
1120 (if (single_use (@3) && single_use (@4))
1121 (op (convert @1) @5))))))
1122 /* (X ^ Y) ^ (X ^ Z) -> Y ^ Z */
1123 (simplify
1124 (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2)))
1125 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1126 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
1127 (bit_xor (convert @1) (convert @2))))
1128
1129 /* Convert abs (abs (X)) into abs (X).
1130 Also absu (absu (X)) into absu (X). */
1131 (simplify
1132 (abs (abs@1 @0))
1133 @1)
1134
1135 (simplify
1136 (absu (convert@2 (absu@1 @0)))
1137 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@1)))
1138 @1))
1139
1140 /* Convert abs[u] (-X) -> abs[u] (X). */
1141 (simplify
1142 (abs (negate @0))
1143 (abs @0))
1144
1145 (simplify
1146 (absu (negate @0))
1147 (absu @0))
1148
1149 /* Convert abs[u] (X) where X is nonnegative -> (X). */
1150 (simplify
1151 (abs tree_expr_nonnegative_p@0)
1152 @0)
1153
1154 (simplify
1155 (absu tree_expr_nonnegative_p@0)
1156 (convert @0))
1157
1158 /* A few cases of fold-const.c negate_expr_p predicate. */
1159 (match negate_expr_p
1160 INTEGER_CST
1161 (if ((INTEGRAL_TYPE_P (type)
1162 && TYPE_UNSIGNED (type))
1163 || (!TYPE_OVERFLOW_SANITIZED (type)
1164 && may_negate_without_overflow_p (t)))))
1165 (match negate_expr_p
1166 FIXED_CST)
1167 (match negate_expr_p
1168 (negate @0)
1169 (if (!TYPE_OVERFLOW_SANITIZED (type))))
1170 (match negate_expr_p
1171 REAL_CST
1172 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)))))
1173 /* VECTOR_CST handling of non-wrapping types would recurse in unsupported
1174 ways. */
1175 (match negate_expr_p
1176 VECTOR_CST
1177 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))
1178 (match negate_expr_p
1179 (minus @0 @1)
1180 (if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type))
1181 || (FLOAT_TYPE_P (type)
1182 && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
1183 && !HONOR_SIGNED_ZEROS (type)))))
1184
1185 /* (-A) * (-B) -> A * B */
1186 (simplify
1187 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1))
1188 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1189 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
1190 (mult (convert @0) (convert (negate @1)))))
1191
1192 /* -(A + B) -> (-B) - A. */
1193 (simplify
1194 (negate (plus:c @0 negate_expr_p@1))
1195 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
1196 && !HONOR_SIGNED_ZEROS (element_mode (type)))
1197 (minus (negate @1) @0)))
1198
1199 /* -(A - B) -> B - A. */
1200 (simplify
1201 (negate (minus @0 @1))
1202 (if ((ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_SANITIZED (type))
1203 || (FLOAT_TYPE_P (type)
1204 && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
1205 && !HONOR_SIGNED_ZEROS (type)))
1206 (minus @1 @0)))
1207 (simplify
1208 (negate (pointer_diff @0 @1))
1209 (if (TYPE_OVERFLOW_UNDEFINED (type))
1210 (pointer_diff @1 @0)))
1211
1212 /* A - B -> A + (-B) if B is easily negatable. */
1213 (simplify
1214 (minus @0 negate_expr_p@1)
1215 (if (!FIXED_POINT_TYPE_P (type))
1216 (plus @0 (negate @1))))
1217
1218 /* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
1219 when profitable.
1220 For bitwise binary operations apply operand conversions to the
1221 binary operation result instead of to the operands. This allows
1222 combining successive conversions and bitwise binary operations.
1223 We combine the above two cases by using a conditional convert. */
1224 (for bitop (bit_and bit_ior bit_xor)
1225 (simplify
1226 (bitop (convert @0) (convert? @1))
1227 (if (((TREE_CODE (@1) == INTEGER_CST
1228 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
1229 && int_fits_type_p (@1, TREE_TYPE (@0)))
1230 || types_match (@0, @1))
1231 /* ??? This transform conflicts with fold-const.c doing
1232 Convert (T)(x & c) into (T)x & (T)c, if c is an integer
1233 constant (if x has signed type, the sign bit cannot be set
1234 in c). This folds extension into the BIT_AND_EXPR.
1235 Restrict it to GIMPLE to avoid endless recursions. */
1236 && (bitop != BIT_AND_EXPR || GIMPLE)
1237 && (/* That's a good idea if the conversion widens the operand, thus
1238 after hoisting the conversion the operation will be narrower. */
1239 TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
1240 /* It's also a good idea if the conversion is to a non-integer
1241 mode. */
1242 || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
1243 /* Or if the precision of TO is not the same as the precision
1244 of its mode. */
1245 || !type_has_mode_precision_p (type)))
1246 (convert (bitop @0 (convert @1))))))
1247
1248 (for bitop (bit_and bit_ior)
1249 rbitop (bit_ior bit_and)
1250 /* (x | y) & x -> x */
1251 /* (x & y) | x -> x */
1252 (simplify
1253 (bitop:c (rbitop:c @0 @1) @0)
1254 @0)
1255 /* (~x | y) & x -> x & y */
1256 /* (~x & y) | x -> x | y */
1257 (simplify
1258 (bitop:c (rbitop:c (bit_not @0) @1) @0)
1259 (bitop @0 @1)))
1260
1261 /* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
1262 (simplify
1263 (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
1264 (bit_ior (bit_and @0 @2) (bit_and @1 @2)))
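/* Editorial example (not part of the original sources): the pattern above
   distributes the mask over the constant, e.g.
   (x | 0xf0) & 0x3c -> (x & 0x3c) | 0x30, since 0xf0 & 0x3c == 0x30.  */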
1265
1266 /* Combine successive equal operations with constants. */
1267 (for bitop (bit_and bit_ior bit_xor)
1268 (simplify
1269 (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
1270 (if (!CONSTANT_CLASS_P (@0))
1271 /* This is the canonical form regardless of whether (bitop @1 @2) can be
1272 folded to a constant. */
1273 (bitop @0 (bitop @1 @2))
1274 /* In this case we have three constants and (bitop @0 @1) doesn't fold
1275 to a constant. This can happen if @0 or @1 is a POLY_INT_CST and if
1276 the values involved are such that the operation can't be decided at
1277 compile time. Try folding one of @0 or @1 with @2 to see whether
1278 that combination can be decided at compile time.
1279
1280 Keep the existing form if both folds fail, to avoid endless
1281 oscillation. */
1282 (with { tree cst1 = const_binop (bitop, type, @0, @2); }
1283 (if (cst1)
1284 (bitop @1 { cst1; })
1285 (with { tree cst2 = const_binop (bitop, type, @1, @2); }
1286 (if (cst2)
1287 (bitop @0 { cst2; }))))))))
1288
1289 /* Try simple folding for X op !X, and X op X with the help
1290 of the truth_valued_p and logical_inverted_value predicates. */
1291 (match truth_valued_p
1292 @0
1293 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
1294 (for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
1295 (match truth_valued_p
1296 (op @0 @1)))
1297 (match truth_valued_p
1298 (truth_not @0))
1299
1300 (match (logical_inverted_value @0)
1301 (truth_not @0))
1302 (match (logical_inverted_value @0)
1303 (bit_not truth_valued_p@0))
1304 (match (logical_inverted_value @0)
1305 (eq @0 integer_zerop))
1306 (match (logical_inverted_value @0)
1307 (ne truth_valued_p@0 integer_truep))
1308 (match (logical_inverted_value @0)
1309 (bit_xor truth_valued_p@0 integer_truep))
1310
1311 /* X & !X -> 0. */
1312 (simplify
1313 (bit_and:c @0 (logical_inverted_value @0))
1314 { build_zero_cst (type); })
1315 /* X | !X and X ^ !X -> 1, if X is truth-valued. */
1316 (for op (bit_ior bit_xor)
1317 (simplify
1318 (op:c truth_valued_p@0 (logical_inverted_value @0))
1319 { constant_boolean_node (true, type); }))
1320 /* X ==/!= !X is false/true. */
1321 (for op (eq ne)
1322 (simplify
1323 (op:c truth_valued_p@0 (logical_inverted_value @0))
1324 { constant_boolean_node (op == NE_EXPR ? true : false, type); }))
1325
1326 /* ~~x -> x */
1327 (simplify
1328 (bit_not (bit_not @0))
1329 @0)
1330
1331 /* Convert ~ (-A) to A - 1. */
1332 (simplify
1333 (bit_not (convert? (negate @0)))
1334 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1335 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
1336 (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); }))))
1337
1338 /* Convert - (~A) to A + 1. */
1339 (simplify
1340 (negate (nop_convert (bit_not @0)))
1341 (plus (view_convert @0) { build_each_one_cst (type); }))
1342
1343 /* Convert ~ (A - 1) or ~ (A + -1) to -A. */
1344 (simplify
1345 (bit_not (convert? (minus @0 integer_each_onep)))
1346 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1347 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
1348 (convert (negate @0))))
1349 (simplify
1350 (bit_not (convert? (plus @0 integer_all_onesp)))
1351 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1352 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
1353 (convert (negate @0))))
1354
1355 /* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify. */
1356 (simplify
1357 (bit_not (convert? (bit_xor @0 INTEGER_CST@1)))
1358 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1359 (convert (bit_xor @0 (bit_not @1)))))
1360 (simplify
1361 (bit_not (convert? (bit_xor:c (bit_not @0) @1)))
1362 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1363 (convert (bit_xor @0 @1))))
1364
1365 /* Otherwise prefer ~(X ^ Y) to ~X ^ Y as more canonical. */
1366 (simplify
1367 (bit_xor:c (nop_convert:s (bit_not:s @0)) @1)
1368 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1369 (bit_not (bit_xor (view_convert @0) @1))))
1370
1371 /* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
1372 (simplify
1373 (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2))
1374 (bit_xor (bit_and (bit_xor @0 @1) @2) @0))
1375
1376 /* Fold A - (A & B) into ~B & A. */
1377 (simplify
1378 (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1)))
1379 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1380 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
1381 (convert (bit_and (bit_not @1) @0))))
1382
1383 /* (m1 CMP m2) * d -> (m1 CMP m2) ? d : 0 */
1384 (for cmp (gt lt ge le)
1385 (simplify
1386 (mult (convert (cmp @0 @1)) @2)
1387 (cond (cmp @0 @1) @2 { build_zero_cst (type); })))
1388
1389 /* For integral types with undefined overflow and C != 0 fold
1390 x * C EQ/NE y * C into x EQ/NE y. */
1391 (for cmp (eq ne)
1392 (simplify
1393 (cmp (mult:c @0 @1) (mult:c @2 @1))
1394 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1395 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1396 && tree_expr_nonzero_p (@1))
1397 (cmp @0 @2))))
1398
1399 /* For integral types with wrapping overflow and C odd fold
1400 x * C EQ/NE y * C into x EQ/NE y. */
1401 (for cmp (eq ne)
1402 (simplify
1403 (cmp (mult @0 INTEGER_CST@1) (mult @2 @1))
1404 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1405 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
1406 && (TREE_INT_CST_LOW (@1) & 1) != 0)
1407 (cmp @0 @2))))
1408
1409 /* For integral types with undefined overflow and C != 0 fold
1410 x * C RELOP y * C into:
1411
1412 x RELOP y for nonnegative C
1413 y RELOP x for negative C */
1414 (for cmp (lt gt le ge)
1415 (simplify
1416 (cmp (mult:c @0 @1) (mult:c @2 @1))
1417 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1418 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1419 (if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1))
1420 (cmp @0 @2)
1421 (if (TREE_CODE (@1) == INTEGER_CST
1422 && wi::neg_p (wi::to_wide (@1), TYPE_SIGN (TREE_TYPE (@1))))
1423 (cmp @2 @0))))))
1424
1425 /* (X - 1U) <= INT_MAX-1U into (int) X > 0. */
1426 (for cmp (le gt)
1427 icmp (gt le)
1428 (simplify
1429 (cmp (plus @0 integer_minus_onep@1) INTEGER_CST@2)
1430 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1431 && TYPE_UNSIGNED (TREE_TYPE (@0))
1432 && TYPE_PRECISION (TREE_TYPE (@0)) > 1
1433 && (wi::to_wide (@2)
1434 == wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)), SIGNED) - 1))
1435 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
1436 (icmp (convert:stype @0) { build_int_cst (stype, 0); })))))
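/* Editorial example (not part of the original sources): for 32-bit
   unsigned x, x - 1 <= 0x7ffffffe (INT_MAX - 1) holds, with wraparound,
   exactly when 1 <= x <= 0x7fffffff, i.e. when (int) x > 0, so the
   unsigned subtract-and-compare collapses to a signed sign test.  */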
1437
1438 /* X / 4 < Y / 4 iff X < Y when the division is known to be exact. */
1439 (for cmp (simple_comparison)
1440 (simplify
1441 (cmp (exact_div @0 INTEGER_CST@2) (exact_div @1 @2))
1442 (if (wi::gt_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2))))
1443 (cmp @0 @1))))
1444
1445 /* X / C1 op C2 into a simple range test. */
1446 (for cmp (simple_comparison)
1447 (simplify
1448 (cmp (trunc_div:s @0 INTEGER_CST@1) INTEGER_CST@2)
1449 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1450 && integer_nonzerop (@1)
1451 && !TREE_OVERFLOW (@1)
1452 && !TREE_OVERFLOW (@2))
1453 (with { tree lo, hi; bool neg_overflow;
1454 enum tree_code code = fold_div_compare (cmp, @1, @2, &lo, &hi,
1455 &neg_overflow); }
1456 (switch
1457 (if (code == LT_EXPR || code == GE_EXPR)
1458 (if (TREE_OVERFLOW (lo))
1459 { build_int_cst (type, (code == LT_EXPR) ^ neg_overflow); }
1460 (if (code == LT_EXPR)
1461 (lt @0 { lo; })
1462 (ge @0 { lo; }))))
1463 (if (code == LE_EXPR || code == GT_EXPR)
1464 (if (TREE_OVERFLOW (hi))
1465 { build_int_cst (type, (code == LE_EXPR) ^ neg_overflow); }
1466 (if (code == LE_EXPR)
1467 (le @0 { hi; })
1468 (gt @0 { hi; }))))
1469 (if (!lo && !hi)
1470 { build_int_cst (type, code == NE_EXPR); })
1471 (if (code == EQ_EXPR && !hi)
1472 (ge @0 { lo; }))
1473 (if (code == EQ_EXPR && !lo)
1474 (le @0 { hi; }))
1475 (if (code == NE_EXPR && !hi)
1476 (lt @0 { lo; }))
1477 (if (code == NE_EXPR && !lo)
1478 (gt @0 { hi; }))
1479 (if (GENERIC)
1480 { build_range_check (UNKNOWN_LOCATION, type, @0, code == EQ_EXPR,
1481 lo, hi); })
1482 (with
1483 {
1484 tree etype = range_check_type (TREE_TYPE (@0));
1485 if (etype)
1486 {
1487 if (! TYPE_UNSIGNED (etype))
1488 etype = unsigned_type_for (etype);
1489 hi = fold_convert (etype, hi);
1490 lo = fold_convert (etype, lo);
1491 hi = const_binop (MINUS_EXPR, etype, hi, lo);
1492 }
1493 }
1494 (if (etype && hi && !TREE_OVERFLOW (hi))
1495 (if (code == EQ_EXPR)
1496 (le (minus (convert:etype @0) { lo; }) { hi; })
1497 (gt (minus (convert:etype @0) { lo; }) { hi; })))))))))
1498
1499 /* X + Z < Y + Z is the same as X < Y when there is no overflow. */
1500 (for op (lt le ge gt)
1501 (simplify
1502 (op (plus:c @0 @2) (plus:c @1 @2))
1503 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1504 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1505 (op @0 @1))))
1506 /* For equality and subtraction, this is also true with wrapping overflow. */
1507 (for op (eq ne minus)
1508 (simplify
1509 (op (plus:c @0 @2) (plus:c @1 @2))
1510 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1511 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1512 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1513 (op @0 @1))))
1514
1515 /* X - Z < Y - Z is the same as X < Y when there is no overflow. */
1516 (for op (lt le ge gt)
1517 (simplify
1518 (op (minus @0 @2) (minus @1 @2))
1519 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1520 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1521 (op @0 @1))))
1522 /* For equality and subtraction, this is also true with wrapping overflow. */
1523 (for op (eq ne minus)
1524 (simplify
1525 (op (minus @0 @2) (minus @1 @2))
1526 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1527 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1528 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1529 (op @0 @1))))
1530 /* And for pointers... */
1531 (for op (simple_comparison)
1532 (simplify
1533 (op (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
1534 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1535 (op @0 @1))))
1536 (simplify
1537 (minus (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
1538 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
1539 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1540 (pointer_diff @0 @1)))
1541
1542 /* Z - X < Z - Y is the same as Y < X when there is no overflow. */
1543 (for op (lt le ge gt)
1544 (simplify
1545 (op (minus @2 @0) (minus @2 @1))
1546 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1547 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1548 (op @1 @0))))
1549 /* For equality and subtraction, this is also true with wrapping overflow. */
1550 (for op (eq ne minus)
1551 (simplify
1552 (op (minus @2 @0) (minus @2 @1))
1553 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1554 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1555 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1556 (op @1 @0))))
1557 /* And for pointers... */
1558 (for op (simple_comparison)
1559 (simplify
1560 (op (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
1561 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1562 (op @1 @0))))
1563 (simplify
1564 (minus (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
1565 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
1566 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1567 (pointer_diff @1 @0)))
1568
1569 /* X + Y < Y is the same as X < 0 when there is no overflow. */
1570 (for op (lt le gt ge)
1571 (simplify
1572 (op:c (plus:c@2 @0 @1) @1)
1573 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1574 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1575 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
1576 && (CONSTANT_CLASS_P (@0) || single_use (@2)))
1577 (op @0 { build_zero_cst (TREE_TYPE (@0)); }))))
1578 /* For equality, this is also true with wrapping overflow. */
1579 (for op (eq ne)
1580 (simplify
1581 (op:c (nop_convert@3 (plus:c@2 @0 (convert1? @1))) (convert2? @1))
1582 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1583 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1584 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
1585 && (CONSTANT_CLASS_P (@0) || (single_use (@2) && single_use (@3)))
1586 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@2))
1587 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
1588 (op @0 { build_zero_cst (TREE_TYPE (@0)); })))
1589 (simplify
1590 (op:c (nop_convert@3 (pointer_plus@2 (convert1? @0) @1)) (convert2? @0))
1591 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
1592 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
1593 && (CONSTANT_CLASS_P (@1) || (single_use (@2) && single_use (@3))))
1594 (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
1595
1596 /* X - Y < X is the same as Y > 0 when there is no overflow.
1597 For equality, this is also true with wrapping overflow. */
1598 (for op (simple_comparison)
1599 (simplify
1600 (op:c @0 (minus@2 @0 @1))
1601 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1602 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1603 || ((op == EQ_EXPR || op == NE_EXPR)
1604 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1605 && (CONSTANT_CLASS_P (@1) || single_use (@2)))
1606 (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
1607
1608 /* Transform:
1609 (X / Y) == 0 -> X < Y if X, Y are unsigned.
1610 (X / Y) != 0 -> X >= Y, if X, Y are unsigned. */
1611 (for cmp (eq ne)
1612 ocmp (lt ge)
1613 (simplify
1614 (cmp (trunc_div @0 @1) integer_zerop)
1615 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
1616 /* Complex ==/!= is allowed, but not </>=. */
1617 && TREE_CODE (TREE_TYPE (@0)) != COMPLEX_TYPE
1618 && (VECTOR_TYPE_P (type) || !VECTOR_TYPE_P (TREE_TYPE (@0))))
1619 (ocmp @0 @1))))
1620
1621 /* X == C - X can never be true if C is odd. */
1622 (for cmp (eq ne)
1623 (simplify
1624 (cmp:c (convert? @0) (convert1? (minus INTEGER_CST@1 (convert2? @0))))
1625 (if (TREE_INT_CST_LOW (@1) & 1)
1626 { constant_boolean_node (cmp == NE_EXPR, type); })))
1627
1628 /* Arguments on which one can call get_nonzero_bits to get the bits
1629 possibly set. */
1630 (match with_possible_nonzero_bits
1631 INTEGER_CST@0)
1632 (match with_possible_nonzero_bits
1633 SSA_NAME@0
1634 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))))
1635 /* Slightly extended version, do not make it recursive to keep it cheap. */
1636 (match (with_possible_nonzero_bits2 @0)
1637 with_possible_nonzero_bits@0)
1638 (match (with_possible_nonzero_bits2 @0)
1639 (bit_and:c with_possible_nonzero_bits@0 @2))
1640
1641 /* Same for bits that are known to be set, but we do not have
1642 an equivalent to get_nonzero_bits yet. */
1643 (match (with_certain_nonzero_bits2 @0)
1644 INTEGER_CST@0)
1645 (match (with_certain_nonzero_bits2 @0)
1646 (bit_ior @1 INTEGER_CST@0))
1647
1648 /* X == C (or X & Z == Y | C) is impossible if ~nonzero(X) & C != 0. */
1649 (for cmp (eq ne)
1650 (simplify
1651 (cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1))
1652 (if (wi::bit_and_not (wi::to_wide (@1), get_nonzero_bits (@0)) != 0)
1653 { constant_boolean_node (cmp == NE_EXPR, type); })))
1654
1655 /* ((X inner_op C0) outer_op C1)
1656 With X being a tree where value_range has reasoned certain bits to always be
1657 zero throughout its computed value range,
1658 inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op,
1659 where zero_mask has 1's for all bits that are sure to be 0 in X
1660 and 0's otherwise.
1661 if (inner_op == '^') C0 &= ~C1;
1662 if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1))
1663 if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1))
1664 */
1665 (for inner_op (bit_ior bit_xor)
1666 outer_op (bit_xor bit_ior)
1667 (simplify
1668 (outer_op
1669 (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1)
1670 (with
1671 {
1672 bool fail = false;
1673 wide_int zero_mask_not;
1674 wide_int C0;
1675 wide_int cst_emit;
1676
1677 if (TREE_CODE (@2) == SSA_NAME)
1678 zero_mask_not = get_nonzero_bits (@2);
1679 else
1680 fail = true;
1681
1682 if (inner_op == BIT_XOR_EXPR)
1683 {
1684 C0 = wi::bit_and_not (wi::to_wide (@0), wi::to_wide (@1));
1685 cst_emit = C0 | wi::to_wide (@1);
1686 }
1687 else
1688 {
1689 C0 = wi::to_wide (@0);
1690 cst_emit = C0 ^ wi::to_wide (@1);
1691 }
1692 }
1693 (if (!fail && (C0 & zero_mask_not) == 0)
1694 (outer_op @2 { wide_int_to_tree (type, cst_emit); })
1695 (if (!fail && (wi::to_wide (@1) & zero_mask_not) == 0)
1696 (inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))
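/* For example, if get_nonzero_bits proves that only the low four bits of x
   can be set (zero_mask_not == 0xf), then (x | 0xf0) ^ 0x0f simplifies via
   the first branch above to x ^ 0xff, with cst_emit == 0xf0 ^ 0x0f.  */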
1697
1698 /* Associate (p +p off1) +p off2 as (p +p (off1 + off2)). */
1699 (simplify
1700 (pointer_plus (pointer_plus:s @0 @1) @3)
1701 (pointer_plus @0 (plus @1 @3)))
1702
1703 /* Pattern match
1704 tem1 = (long) ptr1;
1705 tem2 = (long) ptr2;
1706 tem3 = tem2 - tem1;
1707 tem4 = (unsigned long) tem3;
1708 tem5 = ptr1 + tem4;
1709 and produce
1710 tem5 = ptr2; */
1711 (simplify
1712 (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
1713 /* Conditionally look through a sign-changing conversion. */
1714 (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
1715 && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
1716 || (GENERIC && type == TREE_TYPE (@1))))
1717 @1))
1718 (simplify
1719 (pointer_plus @0 (convert?@2 (pointer_diff@3 @1 @@0)))
1720 (if (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (TREE_TYPE (@3)))
1721 (convert @1)))
1722
1723 /* Pattern match
1724 tem = (sizetype) ptr;
1725 tem = tem & algn;
1726 tem = -tem;
1727 ... = ptr p+ tem;
1728 and produce the simpler form, easier to analyze with respect to alignment,
1729 ... = ptr & ~algn; */
1730 (simplify
1731 (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
1732 (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), ~wi::to_wide (@1)); }
1733 (bit_and @0 { algn; })))
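/* For example, with an 8-byte alignment computation,
   ptr p+ -((sizetype) ptr & 7) becomes ptr & ~7, i.e. ptr rounded down
   to an 8-byte boundary.  */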
1734
1735 /* Try folding difference of addresses. */
1736 (simplify
1737 (minus (convert ADDR_EXPR@0) (convert @1))
1738 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1739 (with { poly_int64 diff; }
1740 (if (ptr_difference_const (@0, @1, &diff))
1741 { build_int_cst_type (type, diff); }))))
1742 (simplify
1743 (minus (convert @0) (convert ADDR_EXPR@1))
1744 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1745 (with { poly_int64 diff; }
1746 (if (ptr_difference_const (@0, @1, &diff))
1747 { build_int_cst_type (type, diff); }))))
1748 (simplify
1749 (pointer_diff (convert?@2 ADDR_EXPR@0) (convert1?@3 @1))
1750 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
1751 && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
1752 (with { poly_int64 diff; }
1753 (if (ptr_difference_const (@0, @1, &diff))
1754 { build_int_cst_type (type, diff); }))))
1755 (simplify
1756 (pointer_diff (convert?@2 @0) (convert1?@3 ADDR_EXPR@1))
1757 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
1758 && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
1759 (with { poly_int64 diff; }
1760 (if (ptr_difference_const (@0, @1, &diff))
1761 { build_int_cst_type (type, diff); }))))
1762
1763 /* If arg0 is derived from the address of an object or function, we may
1764 be able to fold this expression using the object or function's
1765 alignment. */
1766 (simplify
1767 (bit_and (convert? @0) INTEGER_CST@1)
1768 (if (POINTER_TYPE_P (TREE_TYPE (@0))
1769 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1770 (with
1771 {
1772 unsigned int align;
1773 unsigned HOST_WIDE_INT bitpos;
1774 get_pointer_alignment_1 (@0, &align, &bitpos);
1775 }
1776 (if (wi::ltu_p (wi::to_wide (@1), align / BITS_PER_UNIT))
1777 { wide_int_to_tree (type, (wi::to_wide (@1)
1778 & (bitpos / BITS_PER_UNIT))); }))))
1779
1780
1781 /* We can't reassociate at all for saturating types. */
1782 (if (!TYPE_SATURATING (type))
1783
1784 /* Contract negates. */
1785 /* A + (-B) -> A - B */
1786 (simplify
1787 (plus:c @0 (convert? (negate @1)))
1788 /* Apply STRIP_NOPS on the negate. */
1789 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1790 && !TYPE_OVERFLOW_SANITIZED (type))
1791 (with
1792 {
1793 tree t1 = type;
1794 if (INTEGRAL_TYPE_P (type)
1795 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
1796 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
1797 }
1798 (convert (minus (convert:t1 @0) (convert:t1 @1))))))
1799 /* A - (-B) -> A + B */
1800 (simplify
1801 (minus @0 (convert? (negate @1)))
1802 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1803 && !TYPE_OVERFLOW_SANITIZED (type))
1804 (with
1805 {
1806 tree t1 = type;
1807 if (INTEGRAL_TYPE_P (type)
1808 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
1809 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
1810 }
1811 (convert (plus (convert:t1 @0) (convert:t1 @1))))))
1812 /* -(T)(-A) -> (T)A
1813 Sign-extension is ok except for INT_MIN, which thankfully cannot
1814 happen without overflow. */
1815 (simplify
1816 (negate (convert (negate @1)))
1817 (if (INTEGRAL_TYPE_P (type)
1818 && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1))
1819 || (!TYPE_UNSIGNED (TREE_TYPE (@1))
1820 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
1821 && !TYPE_OVERFLOW_SANITIZED (type)
1822 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
1823 (convert @1)))
1824 (simplify
1825 (negate (convert negate_expr_p@1))
1826 (if (SCALAR_FLOAT_TYPE_P (type)
1827 && ((DECIMAL_FLOAT_TYPE_P (type)
1828 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))
1829 && TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (@1)))
1830 || !HONOR_SIGN_DEPENDENT_ROUNDING (type)))
1831 (convert (negate @1))))
1832 (simplify
1833 (negate (nop_convert (negate @1)))
1834 (if (!TYPE_OVERFLOW_SANITIZED (type)
1835 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
1836 (view_convert @1)))
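/* For example, a + (-b) becomes a - b and a - (-b) becomes a + b; when
   exactly one of the two types involved has wrapping overflow, the new
   subtraction or addition is carried out in that wrapping type.  */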
1837
1838 /* We can't reassociate floating-point unless -fassociative-math,
1839 nor fixed-point plus or minus because of saturation to +-Inf.  */
1840 (if ((!FLOAT_TYPE_P (type) || flag_associative_math)
1841 && !FIXED_POINT_TYPE_P (type))
1842
1843 /* Match patterns that allow contracting a plus-minus pair
1844 irrespective of overflow issues. */
1845 /* (A +- B) - A -> +- B */
1846 /* (A +- B) -+ B -> A */
1847 /* A - (A +- B) -> -+ B */
1848 /* A +- (B -+ A) -> +- B */
1849 (simplify
1850 (minus (plus:c @0 @1) @0)
1851 @1)
1852 (simplify
1853 (minus (minus @0 @1) @0)
1854 (negate @1))
1855 (simplify
1856 (plus:c (minus @0 @1) @1)
1857 @0)
1858 (simplify
1859 (minus @0 (plus:c @0 @1))
1860 (negate @1))
1861 (simplify
1862 (minus @0 (minus @0 @1))
1863 @1)
1864 /* (A +- B) + (C - A) -> C +- B */
1865 /* (A + B) - (A - C) -> B + C */
1866 /* More cases are handled with comparisons. */
1867 (simplify
1868 (plus:c (plus:c @0 @1) (minus @2 @0))
1869 (plus @2 @1))
1870 (simplify
1871 (plus:c (minus @0 @1) (minus @2 @0))
1872 (minus @2 @1))
1873 (simplify
1874 (plus:c (pointer_diff @0 @1) (pointer_diff @2 @0))
1875 (if (TYPE_OVERFLOW_UNDEFINED (type)
1876 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)))
1877 (pointer_diff @2 @1)))
1878 (simplify
1879 (minus (plus:c @0 @1) (minus @0 @2))
1880 (plus @1 @2))
1881
1882 /* (A +- CST1) +- CST2 -> A + CST3
1883 Use view_convert because it is safe for vectors and equivalent for
1884 scalars. */
1885 (for outer_op (plus minus)
1886 (for inner_op (plus minus)
1887 neg_inner_op (minus plus)
1888 (simplify
1889 (outer_op (nop_convert (inner_op @0 CONSTANT_CLASS_P@1))
1890 CONSTANT_CLASS_P@2)
1891 /* If one of the types wraps, use that one. */
1892 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
1893 /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
1894 forever if something doesn't simplify into a constant. */
1895 (if (!CONSTANT_CLASS_P (@0))
1896 (if (outer_op == PLUS_EXPR)
1897 (plus (view_convert @0) (inner_op @2 (view_convert @1)))
1898 (minus (view_convert @0) (neg_inner_op @2 (view_convert @1)))))
1899 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1900 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
1901 (if (outer_op == PLUS_EXPR)
1902 (view_convert (plus @0 (inner_op (view_convert @2) @1)))
1903 (view_convert (minus @0 (neg_inner_op (view_convert @2) @1))))
1904 /* If the constant operation overflows we cannot do the transform
1905 directly as we would introduce undefined overflow, for example
1906 with (a - 1) + INT_MIN. */
1907 (if (types_match (type, @0))
1908 (with { tree cst = const_binop (outer_op == inner_op
1909 ? PLUS_EXPR : MINUS_EXPR,
1910 type, @1, @2); }
1911 (if (cst && !TREE_OVERFLOW (cst))
1912 (inner_op @0 { cst; } )
1913 /* X+INT_MAX+1 is X-INT_MIN. */
1914 (if (INTEGRAL_TYPE_P (type) && cst
1915 && wi::to_wide (cst) == wi::min_value (type))
1916 (neg_inner_op @0 { wide_int_to_tree (type, wi::to_wide (cst)); })
1917 /* Last resort, use some unsigned type. */
1918 (with { tree utype = unsigned_type_for (type); }
1919 (if (utype)
1920 (view_convert (inner_op
1921 (view_convert:utype @0)
1922 (view_convert:utype
1923 { drop_tree_overflow (cst); }))))))))))))))
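/* For example, for a 32-bit signed int x, (x + 10) + 20 becomes x + 30.
   For (x - 1) + INT_MIN the merged constant 1 - INT_MIN overflows, so the
   unsigned last-resort branch above emits, in effect,
   (int) ((unsigned) x - 0x80000001u).  */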
1924
1925 /* (CST1 - A) +- CST2 -> CST3 - A */
1926 (for outer_op (plus minus)
1927 (simplify
1928 (outer_op (minus CONSTANT_CLASS_P@1 @0) CONSTANT_CLASS_P@2)
1929 (with { tree cst = const_binop (outer_op, type, @1, @2); }
1930 (if (cst && !TREE_OVERFLOW (cst))
1931 (minus { cst; } @0)))))
1932
1933 /* CST1 - (CST2 - A) -> CST3 + A */
1934 (simplify
1935 (minus CONSTANT_CLASS_P@1 (minus CONSTANT_CLASS_P@2 @0))
1936 (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
1937 (if (cst && !TREE_OVERFLOW (cst))
1938 (plus { cst; } @0))))
1939
1940 /* ~A + A -> -1 */
1941 (simplify
1942 (plus:c (bit_not @0) @0)
1943 (if (!TYPE_OVERFLOW_TRAPS (type))
1944 { build_all_ones_cst (type); }))
1945
1946 /* ~A + 1 -> -A */
1947 (simplify
1948 (plus (convert? (bit_not @0)) integer_each_onep)
1949 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1950 (negate (convert @0))))
1951
1952 /* -A - 1 -> ~A */
1953 (simplify
1954 (minus (convert? (negate @0)) integer_each_onep)
1955 (if (!TYPE_OVERFLOW_TRAPS (type)
1956 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1957 (bit_not (convert @0))))
1958
1959 /* -1 - A -> ~A */
1960 (simplify
1961 (minus integer_all_onesp @0)
1962 (bit_not @0))
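/* The four transforms above all follow from the two's complement identity
   -x == ~x + 1; e.g. ~x + x == -1 and -x - 1 == ~x for any integral x,
   modulo the trapping-overflow guards.  */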
1963
1964 /* (T)(P + A) - (T)P -> (T) A */
1965 (simplify
1966 (minus (convert (plus:c @@0 @1))
1967 (convert? @0))
1968 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1969 /* For integer types, if A has a smaller type
1970 than T the result depends on the possible
1971 overflow in P + A.
1972 E.g. T=size_t, A=(unsigned)4294967295, P>0.
1973 However, if an overflow in P + A would cause
1974 undefined behavior, we can assume that there
1975 is no overflow. */
1976 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1977 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
1978 (convert @1)))
1979 (simplify
1980 (minus (convert (pointer_plus @@0 @1))
1981 (convert @0))
1982 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1983 /* For pointer types, if the conversion of A to the
1984 final type requires a sign- or zero-extension,
1985 then we have to punt - it is not defined which
1986 one is correct. */
1987 || (POINTER_TYPE_P (TREE_TYPE (@0))
1988 && TREE_CODE (@1) == INTEGER_CST
1989 && tree_int_cst_sign_bit (@1) == 0))
1990 (convert @1)))
1991 (simplify
1992 (pointer_diff (pointer_plus @@0 @1) @0)
1993 /* The second argument of pointer_plus must be interpreted as signed, and
1994 thus sign-extended if necessary. */
1995 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
1996 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
1997 second arg is unsigned even when we need to consider it as signed;
1998 we don't want to diagnose overflow here. */
1999 (convert (view_convert:stype @1))))
2000
2001 /* (T)P - (T)(P + A) -> -(T) A */
2002 (simplify
2003 (minus (convert? @0)
2004 (convert (plus:c @@0 @1)))
2005 (if (INTEGRAL_TYPE_P (type)
2006 && TYPE_OVERFLOW_UNDEFINED (type)
2007 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2008 (with { tree utype = unsigned_type_for (type); }
2009 (convert (negate (convert:utype @1))))
2010 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2011 /* For integer types, if A has a smaller type
2012 than T the result depends on the possible
2013 overflow in P + A.
2014 E.g. T=size_t, A=(unsigned)4294967295, P>0.
2015 However, if an overflow in P + A would cause
2016 undefined behavior, we can assume that there
2017 is no overflow. */
2018 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2019 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
2020 (negate (convert @1)))))
2021 (simplify
2022 (minus (convert @0)
2023 (convert (pointer_plus @@0 @1)))
2024 (if (INTEGRAL_TYPE_P (type)
2025 && TYPE_OVERFLOW_UNDEFINED (type)
2026 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2027 (with { tree utype = unsigned_type_for (type); }
2028 (convert (negate (convert:utype @1))))
2029 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2030 /* For pointer types, if the conversion of A to the
2031 final type requires a sign- or zero-extension,
2032 then we have to punt - it is not defined which
2033 one is correct. */
2034 || (POINTER_TYPE_P (TREE_TYPE (@0))
2035 && TREE_CODE (@1) == INTEGER_CST
2036 && tree_int_cst_sign_bit (@1) == 0))
2037 (negate (convert @1)))))
2038 (simplify
2039 (pointer_diff @0 (pointer_plus @@0 @1))
2040 /* The second argument of pointer_plus must be interpreted as signed, and
2041 thus sign-extended if necessary. */
2042 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
2043 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2044 second arg is unsigned even when we need to consider it as signed;
2045 we don't want to diagnose overflow here. */
2046 (negate (convert (view_convert:stype @1)))))
2047
2048 /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
2049 (simplify
2050 (minus (convert (plus:c @@0 @1))
2051 (convert (plus:c @0 @2)))
2052 (if (INTEGRAL_TYPE_P (type)
2053 && TYPE_OVERFLOW_UNDEFINED (type)
2054 && element_precision (type) <= element_precision (TREE_TYPE (@1))
2055 && element_precision (type) <= element_precision (TREE_TYPE (@2)))
2056 (with { tree utype = unsigned_type_for (type); }
2057 (convert (minus (convert:utype @1) (convert:utype @2))))
2058 (if (((element_precision (type) <= element_precision (TREE_TYPE (@1)))
2059 == (element_precision (type) <= element_precision (TREE_TYPE (@2))))
2060 && (element_precision (type) <= element_precision (TREE_TYPE (@1))
2061 /* For integer types, if A has a smaller type
2062 than T the result depends on the possible
2063 overflow in P + A.
2064 E.g. T=size_t, A=(unsigned)4294967295, P>0.
2065 However, if an overflow in P + A would cause
2066 undefined behavior, we can assume that there
2067 is no overflow. */
2068 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2069 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
2070 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))
2071 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@2)))))
2072 (minus (convert @1) (convert @2)))))
2073 (simplify
2074 (minus (convert (pointer_plus @@0 @1))
2075 (convert (pointer_plus @0 @2)))
2076 (if (INTEGRAL_TYPE_P (type)
2077 && TYPE_OVERFLOW_UNDEFINED (type)
2078 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2079 (with { tree utype = unsigned_type_for (type); }
2080 (convert (minus (convert:utype @1) (convert:utype @2))))
2081 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2082 /* For pointer types, if the conversion of A to the
2083 final type requires a sign- or zero-extension,
2084 then we have to punt - it is not defined which
2085 one is correct. */
2086 || (POINTER_TYPE_P (TREE_TYPE (@0))
2087 && TREE_CODE (@1) == INTEGER_CST
2088 && tree_int_cst_sign_bit (@1) == 0
2089 && TREE_CODE (@2) == INTEGER_CST
2090 && tree_int_cst_sign_bit (@2) == 0))
2091 (minus (convert @1) (convert @2)))))
2092 (simplify
2093 (pointer_diff (pointer_plus @@0 @1) (pointer_plus @0 @2))
2094 /* The second argument of pointer_plus must be interpreted as signed, and
2095 thus sign-extended if necessary. */
2096 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
2097 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2098 second arg is unsigned even when we need to consider it as signed;
2099 we don't want to diagnose overflow here. */
2100 (minus (convert (view_convert:stype @1))
2101 (convert (view_convert:stype @2)))))))
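/* For example, the pointer_diff forms above turn (p p+ n) - p into n
   reinterpreted as signed, matching the C guarantee that (q + n) - q == n.  */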
2102
2103 /* (A * C) +- (B * C) -> (A+-B) * C and (A * C) +- A -> A * (C+-1).
2104 Modeled after fold_plusminus_mult_expr. */
2105 (if (!TYPE_SATURATING (type)
2106 && (!FLOAT_TYPE_P (type) || flag_associative_math))
2107 (for plusminus (plus minus)
2108 (simplify
2109 (plusminus (mult:cs@3 @0 @1) (mult:cs@4 @0 @2))
2110 (if ((!ANY_INTEGRAL_TYPE_P (type)
2111 || TYPE_OVERFLOW_WRAPS (type)
2112 || (INTEGRAL_TYPE_P (type)
2113 && tree_expr_nonzero_p (@0)
2114 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
2115 /* If @1 +- @2 is constant require a hard single-use on at least
2116 one of the original operands (but not necessarily on both). */
2117 && (single_use (@3) || single_use (@4)))
2118 (mult (plusminus @1 @2) @0)))
2119 /* We cannot generate constant 1 for fract. */
2120 (if (!ALL_FRACT_MODE_P (TYPE_MODE (type)))
2121 (simplify
2122 (plusminus @0 (mult:c@3 @0 @2))
2123 (if ((!ANY_INTEGRAL_TYPE_P (type)
2124 || TYPE_OVERFLOW_WRAPS (type)
2125 || (INTEGRAL_TYPE_P (type)
2126 && tree_expr_nonzero_p (@0)
2127 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
2128 && single_use (@3))
2129 (mult (plusminus { build_one_cst (type); } @2) @0)))
2130 (simplify
2131 (plusminus (mult:c@3 @0 @2) @0)
2132 (if ((!ANY_INTEGRAL_TYPE_P (type)
2133 || TYPE_OVERFLOW_WRAPS (type)
2134 || (INTEGRAL_TYPE_P (type)
2135 && tree_expr_nonzero_p (@0)
2136 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
2137 && single_use (@3))
2138 (mult (plusminus @2 { build_one_cst (type); }) @0))))))
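/* For example, given the single-use requirements, x*3 + x*5 becomes x*8
   and x*7 - x becomes x*6; for signed types the nonzero/not-minus-one
   checks above ensure the factored form cannot introduce a new overflow.  */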
2139
2140 /* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax(). */
2141
2142 (for minmax (min max FMIN_ALL FMAX_ALL)
2143 (simplify
2144 (minmax @0 @0)
2145 @0))
2146 /* min(max(x,y),y) -> y. */
2147 (simplify
2148 (min:c (max:c @0 @1) @1)
2149 @1)
2150 /* max(min(x,y),y) -> y. */
2151 (simplify
2152 (max:c (min:c @0 @1) @1)
2153 @1)
2154 /* max(a,-a) -> abs(a). */
2155 (simplify
2156 (max:c @0 (negate @0))
2157 (if (TREE_CODE (type) != COMPLEX_TYPE
2158 && (! ANY_INTEGRAL_TYPE_P (type)
2159 || TYPE_OVERFLOW_UNDEFINED (type)))
2160 (abs @0)))
2161 /* min(a,-a) -> -abs(a). */
2162 (simplify
2163 (min:c @0 (negate @0))
2164 (if (TREE_CODE (type) != COMPLEX_TYPE
2165 && (! ANY_INTEGRAL_TYPE_P (type)
2166 || TYPE_OVERFLOW_UNDEFINED (type)))
2167 (negate (abs @0))))
2168 (simplify
2169 (min @0 @1)
2170 (switch
2171 (if (INTEGRAL_TYPE_P (type)
2172 && TYPE_MIN_VALUE (type)
2173 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
2174 @1)
2175 (if (INTEGRAL_TYPE_P (type)
2176 && TYPE_MAX_VALUE (type)
2177 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
2178 @0)))
2179 (simplify
2180 (max @0 @1)
2181 (switch
2182 (if (INTEGRAL_TYPE_P (type)
2183 && TYPE_MAX_VALUE (type)
2184 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
2185 @1)
2186 (if (INTEGRAL_TYPE_P (type)
2187 && TYPE_MIN_VALUE (type)
2188 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
2189 @0)))
2190
2191 /* max (a, a + CST) -> a + CST where CST is positive. */
2192 /* max (a, a + CST) -> a where CST is negative. */
2193 (simplify
2194 (max:c @0 (plus@2 @0 INTEGER_CST@1))
2195 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2196 (if (tree_int_cst_sgn (@1) > 0)
2197 @2
2198 @0)))
2199
2200 /* min (a, a + CST) -> a where CST is positive. */
2201 /* min (a, a + CST) -> a + CST where CST is negative. */
2202 (simplify
2203 (min:c @0 (plus@2 @0 INTEGER_CST@1))
2204 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2205 (if (tree_int_cst_sgn (@1) > 0)
2206 @0
2207 @2)))
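/* For example, for signed a with undefined overflow, max (a, a + 4)
   becomes a + 4 and min (a, a + 4) becomes a; with a negative constant
   the results swap.  */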
2208
2209 /* (convert (minmax (convert x) c)) -> minmax (x c) if x is promoted
2210 and the outer convert demotes the expression back to x's type. */
2211 (for minmax (min max)
2212 (simplify
2213 (convert (minmax@0 (convert @1) INTEGER_CST@2))
2214 (if (INTEGRAL_TYPE_P (type)
2215 && types_match (@1, type) && int_fits_type_p (@2, type)
2216 && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type)
2217 && TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
2218 (minmax @1 (convert @2)))))
2219
2220 (for minmax (FMIN_ALL FMAX_ALL)
2221 /* If either argument is NaN, return the other one. Avoid the
2222 transformation if we get (and honor) a signalling NaN. */
2223 (simplify
2224 (minmax:c @0 REAL_CST@1)
2225 (if (real_isnan (TREE_REAL_CST_PTR (@1))
2226 && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling))
2227 @0)))
2228 /* Convert fmin/fmax to MIN_EXPR/MAX_EXPR. C99 requires these
2229 functions to return the numeric arg if the other one is NaN.
2230 MIN and MAX don't honor that, so only transform if -ffinite-math-only
2231 is set. C99 doesn't require -0.0 to be handled, so we don't have to
2232 worry about it either. */
2233 (if (flag_finite_math_only)
2234 (simplify
2235 (FMIN_ALL @0 @1)
2236 (min @0 @1))
2237 (simplify
2238 (FMAX_ALL @0 @1)
2239 (max @0 @1)))
2240 /* min (-A, -B) -> -max (A, B) */
2241 (for minmax (min max FMIN_ALL FMAX_ALL)
2242 maxmin (max min FMAX_ALL FMIN_ALL)
2243 (simplify
2244 (minmax (negate:s@2 @0) (negate:s@3 @1))
2245 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2246 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2247 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
2248 (negate (maxmin @0 @1)))))
2249 /* MIN (~X, ~Y) -> ~MAX (X, Y)
2250 MAX (~X, ~Y) -> ~MIN (X, Y) */
2251 (for minmax (min max)
2252 maxmin (max min)
2253 (simplify
2254 (minmax (bit_not:s@2 @0) (bit_not:s@3 @1))
2255 (bit_not (maxmin @0 @1))))
2256
2257 /* MIN (X, Y) == X -> X <= Y */
2258 (for minmax (min min max max)
2259 cmp (eq ne eq ne )
2260 out (le gt ge lt )
2261 (simplify
2262 (cmp:c (minmax:c @0 @1) @0)
2263 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)))
2264 (out @0 @1))))
2265 /* MIN (X, 5) == 0 -> X == 0
2266 MIN (X, 5) == 7 -> false */
2267 (for cmp (eq ne)
2268 (simplify
2269 (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
2270 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
2271 TYPE_SIGN (TREE_TYPE (@0))))
2272 { constant_boolean_node (cmp == NE_EXPR, type); }
2273 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
2274 TYPE_SIGN (TREE_TYPE (@0))))
2275 (cmp @0 @2)))))
2276 (for cmp (eq ne)
2277 (simplify
2278 (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
2279 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
2280 TYPE_SIGN (TREE_TYPE (@0))))
2281 { constant_boolean_node (cmp == NE_EXPR, type); }
2282 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
2283 TYPE_SIGN (TREE_TYPE (@0))))
2284 (cmp @0 @2)))))
2285 /* MIN (X, C1) < C2 -> X < C2 || C1 < C2 */
2286 (for minmax (min min max max min min max max )
2287 cmp (lt le gt ge gt ge lt le )
2288 comb (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and)
2289 (simplify
2290 (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2)
2291 (comb (cmp @0 @2) (cmp @1 @2))))
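/* For example, min (x, 5) < 7 becomes (x < 7) | (5 < 7), which folds to
   true, while max (x, 5) < 7 becomes (x < 7) & (5 < 7), i.e. x < 7.  */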
2292
2293 /* Simplifications of shift and rotates. */
2294
2295 (for rotate (lrotate rrotate)
2296 (simplify
2297 (rotate integer_all_onesp@0 @1)
2298 @0))
2299
2300 /* Optimize -1 >> x for arithmetic right shifts. */
2301 (simplify
2302 (rshift integer_all_onesp@0 @1)
2303 (if (!TYPE_UNSIGNED (type)
2304 && tree_expr_nonnegative_p (@1))
2305 @0))
2306
2307 /* Optimize (x >> c) << c into x & (-1<<c). */
2308 (simplify
2309 (lshift (rshift @0 INTEGER_CST@1) @1)
2310 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type)))
2311 (bit_and @0 (lshift { build_minus_one_cst (type); } @1))))
2312
2313 /* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned
2314 types. */
2315 (simplify
2316 (rshift (lshift @0 INTEGER_CST@1) @1)
2317 (if (TYPE_UNSIGNED (type)
2318 && (wi::ltu_p (wi::to_wide (@1), element_precision (type))))
2319 (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))
2320
2321 (for shiftrotate (lrotate rrotate lshift rshift)
2322 (simplify
2323 (shiftrotate @0 integer_zerop)
2324 (non_lvalue @0))
2325 (simplify
2326 (shiftrotate integer_zerop@0 @1)
2327 @0)
2328 /* Prefer vector1 << scalar to vector1 << vector2
2329 if vector2 is uniform. */
2330 (for vec (VECTOR_CST CONSTRUCTOR)
2331 (simplify
2332 (shiftrotate @0 vec@1)
2333 (with { tree tem = uniform_vector_p (@1); }
2334 (if (tem)
2335 (shiftrotate @0 { tem; }))))))
2336
2337 /* Simplify X << Y to X when the low ceil_log2 (precision) bits of Y are
2338 known zero, as the only valid Y is then 0.  Similarly for X >> Y. */
2339 #if GIMPLE
2340 (for shift (lshift rshift)
2341 (simplify
2342 (shift @0 SSA_NAME@1)
2343 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
2344 (with {
2345 int width = ceil_log2 (element_precision (TREE_TYPE (@0)));
2346 int prec = TYPE_PRECISION (TREE_TYPE (@1));
2347 }
2348 (if ((get_nonzero_bits (@1) & wi::mask (width, false, prec)) == 0)
2349 @0)))))
2350 #endif
2351
2352 /* Rewrite an LROTATE_EXPR by a constant into an
2353 RROTATE_EXPR by a new constant. */
2354 (simplify
2355 (lrotate @0 INTEGER_CST@1)
2356 (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1),
2357 build_int_cst (TREE_TYPE (@1),
2358 element_precision (type)), @1); }))
2359
2360 /* Turn (a OP c1) OP c2 into a OP (c1+c2). */
2361 (for op (lrotate rrotate rshift lshift)
2362 (simplify
2363 (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
2364 (with { unsigned int prec = element_precision (type); }
2365 (if (wi::ge_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1)))
2366 && wi::lt_p (wi::to_wide (@1), prec, TYPE_SIGN (TREE_TYPE (@1)))
2367 && wi::ge_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))
2368 && wi::lt_p (wi::to_wide (@2), prec, TYPE_SIGN (TREE_TYPE (@2))))
2369 (with { unsigned int low = (tree_to_uhwi (@1)
2370 + tree_to_uhwi (@2)); }
2371 /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
2372 being well defined. */
2373 (if (low >= prec)
2374 (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
2375 (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
2376 (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
2377 { build_zero_cst (type); }
2378 (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
2379 (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))
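/* For example, in a 32-bit type (x >> 3) >> 5 becomes x >> 8, while
   (x >> 28) >> 10, whose combined count 38 is out of range, becomes 0 for
   unsigned x and x >> 31 for signed x; rotates instead reduce the combined
   count modulo the precision.  */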
2380
2381
2382 /* ((1 << A) & 1) != 0 -> A == 0
2383 ((1 << A) & 1) == 0 -> A != 0 */
2384 (for cmp (ne eq)
2385 icmp (eq ne)
2386 (simplify
2387 (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop)
2388 (icmp @0 { build_zero_cst (TREE_TYPE (@0)); })))
2389
2390 /* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
2391 (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1)
2392 if CST2 != 0. */
2393 (for cmp (ne eq)
2394 (simplify
2395 (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
2396 (with { int cand = wi::ctz (wi::to_wide (@2)) - wi::ctz (wi::to_wide (@0)); }
2397 (if (cand < 0
2398 || (!integer_zerop (@2)
2399 && wi::lshift (wi::to_wide (@0), cand) != wi::to_wide (@2)))
2400 { constant_boolean_node (cmp == NE_EXPR, type); }
2401 (if (!integer_zerop (@2)
2402 && wi::lshift (wi::to_wide (@0), cand) == wi::to_wide (@2))
2403 (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
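/* For example, (4 << a) == 32 becomes a == 3 (ctz (32) - ctz (4)), whereas
   (4 << a) == 33 folds to false since no left shift of 4 can produce an
   odd value.  */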
2404
2405 /* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
2406 (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
2407 if the new mask might be further optimized. */
2408 (for shift (lshift rshift)
2409 (simplify
2410 (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
2411 INTEGER_CST@2)
2412 (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
2413 && TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
2414 && tree_fits_uhwi_p (@1)
2415 && tree_to_uhwi (@1) > 0
2416 && tree_to_uhwi (@1) < TYPE_PRECISION (type))
2417 (with
2418 {
2419 unsigned int shiftc = tree_to_uhwi (@1);
2420 unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
2421 unsigned HOST_WIDE_INT newmask, zerobits = 0;
2422 tree shift_type = TREE_TYPE (@3);
2423 unsigned int prec;
2424
2425 if (shift == LSHIFT_EXPR)
2426 zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
2427 else if (shift == RSHIFT_EXPR
2428 && type_has_mode_precision_p (shift_type))
2429 {
2430 prec = TYPE_PRECISION (TREE_TYPE (@3));
2431 tree arg00 = @0;
2432 /* See if more bits can be proven as zero because of
2433 zero extension. */
2434 if (@3 != @0
2435 && TYPE_UNSIGNED (TREE_TYPE (@0)))
2436 {
2437 tree inner_type = TREE_TYPE (@0);
2438 if (type_has_mode_precision_p (inner_type)
2439 && TYPE_PRECISION (inner_type) < prec)
2440 {
2441 prec = TYPE_PRECISION (inner_type);
2442 /* See if we can shorten the right shift. */
2443 if (shiftc < prec)
2444 shift_type = inner_type;
2445 /* Otherwise X >> C1 is all zeros, so we'll optimize
2446 it into (X, 0) later on by making sure zerobits
2447 is all ones. */
2448 }
2449 }
2450 zerobits = HOST_WIDE_INT_M1U;
2451 if (shiftc < prec)
2452 {
2453 zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
2454 zerobits <<= prec - shiftc;
2455 }
2456 /* For arithmetic shift if sign bit could be set, zerobits
2457 can actually contain sign bits, so no transformation is
2458 possible, unless MASK masks them all away. In that
2459 case the shift needs to be converted into logical shift. */
2460 if (!TYPE_UNSIGNED (TREE_TYPE (@3))
2461 && prec == TYPE_PRECISION (TREE_TYPE (@3)))
2462 {
2463 if ((mask & zerobits) == 0)
2464 shift_type = unsigned_type_for (TREE_TYPE (@3));
2465 else
2466 zerobits = 0;
2467 }
2468 }
2469 }
2470 /* ((X << 16) & 0xff00) is (X, 0). */
2471 (if ((mask & zerobits) == mask)
2472 { build_int_cst (type, 0); }
2473 (with { newmask = mask | zerobits; }
2474 (if (newmask != mask && (newmask & (newmask + 1)) == 0)
2475 (with
2476 {
2477 /* Only do the transformation if NEWMASK is some integer
2478 mode's mask. */
2479 for (prec = BITS_PER_UNIT;
2480 prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
2481 if (newmask == (HOST_WIDE_INT_1U << prec) - 1)
2482 break;
2483 }
2484 (if (prec < HOST_BITS_PER_WIDE_INT
2485 || newmask == HOST_WIDE_INT_M1U)
2486 (with
2487 { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
2488 (if (!tree_int_cst_equal (newmaskt, @2))
2489 (if (shift_type != TREE_TYPE (@3))
2490 (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
2491 (bit_and @4 { newmaskt; })))))))))))))
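/* For example, for a 32-bit unsigned x, (x << 8) & 0xffffff00 widens the
   mask with the eight known-zero low bits to 0xffffffff, after which the
   now redundant bit_and is dropped by other patterns, while
   (x << 16) & 0xff00 folds directly to 0.  */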
2492
2493 /* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
2494 (X {&,^,|} C2) >> C1 into (X >> C1) & (C2 >> C1). */
2495 (for shift (lshift rshift)
2496 (for bit_op (bit_and bit_xor bit_ior)
2497 (simplify
2498 (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
2499 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2500 (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
2501 (bit_op (shift (convert @0) @1) { mask; }))))))
2502
2503 /* ~(~X >> Y) -> X >> Y (for arithmetic shift). */
2504 (simplify
2505 (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
2506 (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
2507 && (element_precision (TREE_TYPE (@0))
2508 <= element_precision (TREE_TYPE (@1))
2509 || !TYPE_UNSIGNED (TREE_TYPE (@1))))
2510 (with
2511 { tree shift_type = TREE_TYPE (@0); }
2512 (convert (rshift (convert:shift_type @1) @2)))))
2513
2514 /* ~(~X >>r Y) -> X >>r Y
2515 ~(~X <<r Y) -> X <<r Y */
2516 (for rotate (lrotate rrotate)
2517 (simplify
2518 (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
2519 (if ((element_precision (TREE_TYPE (@0))
2520 <= element_precision (TREE_TYPE (@1))
2521 || !TYPE_UNSIGNED (TREE_TYPE (@1)))
2522 && (element_precision (type) <= element_precision (TREE_TYPE (@0))
2523 || !TYPE_UNSIGNED (TREE_TYPE (@0))))
2524 (with
2525 { tree rotate_type = TREE_TYPE (@0); }
2526 (convert (rotate (convert:rotate_type @1) @2))))))
2527
2528 /* Simplifications of conversions. */
2529
2530 /* Basic strip-useless-type-conversions / strip_nops. */
2531 (for cvt (convert view_convert float fix_trunc)
2532 (simplify
2533 (cvt @0)
2534 (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
2535 || (GENERIC && type == TREE_TYPE (@0)))
2536 @0)))
2537
2538 /* Contract view-conversions. */
2539 (simplify
2540 (view_convert (view_convert @0))
2541 (view_convert @0))
2542
2543 /* For integral conversions with the same precision or pointer
2544 conversions use a NOP_EXPR instead. */
2545 (simplify
2546 (view_convert @0)
2547 (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
2548 && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
2549 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
2550 (convert @0)))
2551
2552 /* Strip inner integral conversions that do not change precision or size, or
2553 zero-extend while keeping the same size (for bool-to-char). */
2554 (simplify
2555 (view_convert (convert@0 @1))
2556 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
2557 && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
2558 && TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))
2559 && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))
2560 || (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@1))
2561 && TYPE_UNSIGNED (TREE_TYPE (@1)))))
2562 (view_convert @1)))
2563
2564 /* Re-association barriers around constants and other re-association
2565 barriers can be removed. */
2566 (simplify
2567 (paren CONSTANT_CLASS_P@0)
2568 @0)
2569 (simplify
2570 (paren (paren@1 @0))
2571 @1)
2572
2573 /* Handle cases of two conversions in a row. */
2574 (for ocvt (convert float fix_trunc)
2575 (for icvt (convert float)
2576 (simplify
2577 (ocvt (icvt@1 @0))
2578 (with
2579 {
2580 tree inside_type = TREE_TYPE (@0);
2581 tree inter_type = TREE_TYPE (@1);
2582 int inside_int = INTEGRAL_TYPE_P (inside_type);
2583 int inside_ptr = POINTER_TYPE_P (inside_type);
2584 int inside_float = FLOAT_TYPE_P (inside_type);
2585 int inside_vec = VECTOR_TYPE_P (inside_type);
2586 unsigned int inside_prec = TYPE_PRECISION (inside_type);
2587 int inside_unsignedp = TYPE_UNSIGNED (inside_type);
2588 int inter_int = INTEGRAL_TYPE_P (inter_type);
2589 int inter_ptr = POINTER_TYPE_P (inter_type);
2590 int inter_float = FLOAT_TYPE_P (inter_type);
2591 int inter_vec = VECTOR_TYPE_P (inter_type);
2592 unsigned int inter_prec = TYPE_PRECISION (inter_type);
2593 int inter_unsignedp = TYPE_UNSIGNED (inter_type);
2594 int final_int = INTEGRAL_TYPE_P (type);
2595 int final_ptr = POINTER_TYPE_P (type);
2596 int final_float = FLOAT_TYPE_P (type);
2597 int final_vec = VECTOR_TYPE_P (type);
2598 unsigned int final_prec = TYPE_PRECISION (type);
2599 int final_unsignedp = TYPE_UNSIGNED (type);
2600 }
2601 (switch
2602 /* In addition to the cases of two conversions in a row
2603 handled below, if we are converting something to its own
2604 type via an object of identical or wider precision, neither
2605 conversion is needed. */
2606 (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
2607 || (GENERIC
2608 && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
2609 && (((inter_int || inter_ptr) && final_int)
2610 || (inter_float && final_float))
2611 && inter_prec >= final_prec)
2612 (ocvt @0))
2613
2614 /* Likewise, if the intermediate and initial types are either both
2615 float or both integer, we don't need the middle conversion if the
2616 former is wider than the latter and doesn't change the signedness
2617 (for integers). Avoid this if the final type is a pointer since
2618 then we sometimes need the middle conversion. */
2619 (if (((inter_int && inside_int) || (inter_float && inside_float))
2620 && (final_int || final_float)
2621 && inter_prec >= inside_prec
2622 && (inter_float || inter_unsignedp == inside_unsignedp))
2623 (ocvt @0))
2624
2625 /* If we have a sign-extension of a zero-extended value, we can
2626 replace that by a single zero-extension. Likewise if the
2627 final conversion does not change precision we can drop the
2628 intermediate conversion. */
2629 (if (inside_int && inter_int && final_int
2630 && ((inside_prec < inter_prec && inter_prec < final_prec
2631 && inside_unsignedp && !inter_unsignedp)
2632 || final_prec == inter_prec))
2633 (ocvt @0))
2634
2635 /* Two conversions in a row are not needed unless:
2636 - some conversion is floating-point (overstrict for now), or
2637 - some conversion is a vector (overstrict for now), or
2638 - the intermediate type is narrower than both initial and
2639 final, or
2640 - the intermediate type and innermost type differ in signedness,
2641 and the outermost type is wider than the intermediate, or
2642 - the initial type is a pointer type and the precisions of the
2643 intermediate and final types differ, or
2644 - the final type is a pointer type and the precisions of the
2645 initial and intermediate types differ. */
2646 (if (! inside_float && ! inter_float && ! final_float
2647 && ! inside_vec && ! inter_vec && ! final_vec
2648 && (inter_prec >= inside_prec || inter_prec >= final_prec)
2649 && ! (inside_int && inter_int
2650 && inter_unsignedp != inside_unsignedp
2651 && inter_prec < final_prec)
2652 && ((inter_unsignedp && inter_prec > inside_prec)
2653 == (final_unsignedp && final_prec > inter_prec))
2654 && ! (inside_ptr && inter_prec != final_prec)
2655 && ! (final_ptr && inside_prec != inter_prec))
2656 (ocvt @0))
2657
2658 /* A truncation to an unsigned type (a zero-extension) should be
2659 canonicalized as bitwise and of a mask. */
2660 (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion. */
2661 && final_int && inter_int && inside_int
2662 && final_prec == inside_prec
2663 && final_prec > inter_prec
2664 && inter_unsignedp)
2665 (convert (bit_and @0 { wide_int_to_tree
2666 (inside_type,
2667 wi::mask (inter_prec, false,
2668 TYPE_PRECISION (inside_type))); })))
2669
2670 /* If we are converting an integer to a floating-point type that can
2671 represent it exactly and back to an integer, we can skip the
2672 floating-point conversion. */
2673 (if (GIMPLE /* PR66211 */
2674 && inside_int && inter_float && final_int &&
2675 (unsigned) significand_size (TYPE_MODE (inter_type))
2676 >= inside_prec - !inside_unsignedp)
2677 (convert @0)))))))
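/* For example, (int) (double) i simplifies to i for a 32-bit int i, since
   a double's 53-bit significand represents every int exactly, whereas
   (int) (float) i must be kept because float's 24-bit significand cannot.
   Likewise (short) (long) s drops the widening conversion of a short s.  */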
2678
2679 /* If we have a narrowing conversion to an integral type that is fed by a
2680 BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
2681 masks off bits outside the final type (and nothing else). */
2682 (simplify
2683 (convert (bit_and @0 INTEGER_CST@1))
2684 (if (INTEGRAL_TYPE_P (type)
2685 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
2686 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
2687 && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
2688 TYPE_PRECISION (type)), 0))
2689 (convert @0)))
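/* For example, for a 32-bit int x, (short) (x & 0xffff) becomes (short) x,
   because the mask only clears bits that the narrowing conversion discards
   anyway.  */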
2690
2691
2692 /* (X /[ex] A) * A -> X. */
2693 (simplify
2694 (mult (convert1? (exact_div @0 @@1)) (convert2? @1))
2695 (convert @0))
2696
2697 /* ((X /[ex] A) +- B) * A --> X +- A * B. */
2698 (for op (plus minus)
2699 (simplify
2700 (mult (convert1? (op (convert2? (exact_div @0 INTEGER_CST@@1)) INTEGER_CST@2)) @1)
2701 (if (tree_nop_conversion_p (type, TREE_TYPE (@2))
2702 && tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2)))
2703 (with
2704 {
2705 wi::overflow_type overflow;
2706 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
2707 TYPE_SIGN (type), &overflow);
2708 }
2709 (if (types_match (type, TREE_TYPE (@2))
2710 && types_match (TREE_TYPE (@0), TREE_TYPE (@2)) && !overflow)
2711 (op @0 { wide_int_to_tree (type, mul); })
2712 (with { tree utype = unsigned_type_for (type); }
2713 (convert (op (convert:utype @0)
2714 (mult (convert:utype @1) (convert:utype @2))))))))))
2715
2716 /* Canonicalization of binary operations. */
2717
2718 /* Convert X + -C into X - C. */
2719 (simplify
2720 (plus @0 REAL_CST@1)
2721 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
2722 (with { tree tem = const_unop (NEGATE_EXPR, type, @1); }
2723 (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
2724 (minus @0 { tem; })))))
2725
2726 /* Convert x+x into x*2. */
2727 (simplify
2728 (plus @0 @0)
2729 (if (SCALAR_FLOAT_TYPE_P (type))
2730 (mult @0 { build_real (type, dconst2); })
2731 (if (INTEGRAL_TYPE_P (type))
2732 (mult @0 { build_int_cst (type, 2); }))))
2733
2734 /* 0 - X -> -X. */
2735 (simplify
2736 (minus integer_zerop @1)
2737 (negate @1))
2738 (simplify
2739 (pointer_diff integer_zerop @1)
2740 (negate (convert @1)))
2741
2742 /* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether
2743 ARG0 is zero and X + ARG0 reduces to X, since that would mean
2744 (-ARG1 + ARG0) reduces to -ARG1. */
2745 (simplify
2746 (minus real_zerop@0 @1)
2747 (if (fold_real_zero_addition_p (type, @0, 0))
2748 (negate @1)))
2749
2750 /* Transform x * -1 into -x. */
2751 (simplify
2752 (mult @0 integer_minus_onep)
2753 (negate @0))
2754
2755 /* Reassociate (X * CST) * Y to (X * Y) * CST. This does not introduce
2756 signed overflow for CST != 0 && CST != -1. */
2757 (simplify
2758 (mult:c (mult:s@3 @0 INTEGER_CST@1) @2)
2759 (if (TREE_CODE (@2) != INTEGER_CST
2760 && single_use (@3)
2761 && !integer_zerop (@1) && !integer_minus_onep (@1))
2762 (mult (mult @0 @2) @1)))
2763
2764 /* True if we can easily extract the real and imaginary parts of a complex
2765 number. */
2766 (match compositional_complex
2767 (convert? (complex @0 @1)))
2768
2769 /* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */
2770 (simplify
2771 (complex (realpart @0) (imagpart @0))
2772 @0)
2773 (simplify
2774 (realpart (complex @0 @1))
2775 @0)
2776 (simplify
2777 (imagpart (complex @0 @1))
2778 @1)
2779
2780 /* Sometimes we only care about half of a complex expression. */
2781 (simplify
2782 (realpart (convert?:s (conj:s @0)))
2783 (convert (realpart @0)))
2784 (simplify
2785 (imagpart (convert?:s (conj:s @0)))
2786 (convert (negate (imagpart @0))))
2787 (for part (realpart imagpart)
2788 (for op (plus minus)
2789 (simplify
2790 (part (convert?:s@2 (op:s @0 @1)))
2791 (convert (op (part @0) (part @1))))))
2792 (simplify
2793 (realpart (convert?:s (CEXPI:s @0)))
2794 (convert (COS @0)))
2795 (simplify
2796 (imagpart (convert?:s (CEXPI:s @0)))
2797 (convert (SIN @0)))
2798
2799 /* conj(conj(x)) -> x */
2800 (simplify
2801 (conj (convert? (conj @0)))
2802 (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
2803 (convert @0)))
2804
2805 /* conj({x,y}) -> {x,-y} */
2806 (simplify
2807 (conj (convert?:s (complex:s @0 @1)))
2808 (with { tree itype = TREE_TYPE (type); }
2809 (complex (convert:itype @0) (negate (convert:itype @1)))))
2810
2811 /* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. */
2812 (for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
2813 (simplify
2814 (bswap (bswap @0))
2815 @0)
2816 (simplify
2817 (bswap (bit_not (bswap @0)))
2818 (bit_not @0))
2819 (for bitop (bit_xor bit_ior bit_and)
2820 (simplify
2821 (bswap (bitop:c (bswap @0) @1))
2822 (bitop @0 (bswap @1)))))
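/* For example, __builtin_bswap32 (__builtin_bswap32 (x) | y) becomes
   x | __builtin_bswap32 (y): byte-swapping is an involution and
   distributes over bitwise operations.  */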
2823
2824
2825 /* Combine COND_EXPRs and VEC_COND_EXPRs. */
2826
2827 /* Simplify constant conditions.
2828 Only optimize constant conditions when the selected branch
2829 has the same type as the COND_EXPR. This avoids optimizing
2830 away "c ? x : throw", where the throw has a void type.
2831 Note that we cannot throw away the fold-const.c variant nor
2832 this one: we depend on doing this transform before A ? B : B -> B
2833 possibly triggers, and the fold-const.c one can optimize
2834 0 ? A : B to B even if A has side-effects, something
2835 genmatch cannot handle.  */
2836 (simplify
2837 (cond INTEGER_CST@0 @1 @2)
2838 (if (integer_zerop (@0))
2839 (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
2840 @2)
2841 (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
2842 @1)))
2843 (simplify
2844 (vec_cond VECTOR_CST@0 @1 @2)
2845 (if (integer_all_onesp (@0))
2846 @1
2847 (if (integer_zerop (@0))
2848 @2)))
2849
2850 /* Simplification moved from fold_cond_expr_with_comparison.  It may also
2851 be extended to cover more cases. */
2852 /* This pattern implements two kinds of simplification:
2853
2854 Case 1)
2855 (cond (cmp (convert1? x) c1) (convert2? x) c2) -> (minmax (x c)) if:
2856 1) Conversions are type widening from a smaller type.
2857 2) Const c1 equals c2 after canonicalizing the comparison.
2858 3) The comparison has tree code LT, LE, GT or GE.
2859 This specific pattern is needed when (cmp (convert x) c) may not
2860 be simplified by comparison patterns because of multiple uses of
2861 x.  It also makes sense here because simplifying across a
2862 multiply-referenced variable is always beneficial for complicated cases.
2863
2864 Case 2)
2865 (cond (eq (convert1? x) c1) (convert2? x) c2) -> (cond (eq x c1) c1 c2). */
2866 (for cmp (lt le gt ge eq)
2867 (simplify
2868 (cond (cmp (convert1? @1) INTEGER_CST@3) (convert2? @1) INTEGER_CST@2)
2869 (with
2870 {
2871 tree from_type = TREE_TYPE (@1);
2872 tree c1_type = TREE_TYPE (@3), c2_type = TREE_TYPE (@2);
2873 enum tree_code code = ERROR_MARK;
2874
2875 if (INTEGRAL_TYPE_P (from_type)
2876 && int_fits_type_p (@2, from_type)
2877 && (types_match (c1_type, from_type)
2878 || (TYPE_PRECISION (c1_type) > TYPE_PRECISION (from_type)
2879 && (TYPE_UNSIGNED (from_type)
2880 || TYPE_SIGN (c1_type) == TYPE_SIGN (from_type))))
2881 && (types_match (c2_type, from_type)
2882 || (TYPE_PRECISION (c2_type) > TYPE_PRECISION (from_type)
2883 && (TYPE_UNSIGNED (from_type)
2884 || TYPE_SIGN (c2_type) == TYPE_SIGN (from_type)))))
2885 {
2886 if (cmp != EQ_EXPR)
2887 {
2888 if (wi::to_widest (@3) == (wi::to_widest (@2) - 1))
2889 {
2890 /* X <= Y - 1 equals to X < Y. */
2891 if (cmp == LE_EXPR)
2892 code = LT_EXPR;
2893 /* X > Y - 1 equals to X >= Y. */
2894 if (cmp == GT_EXPR)
2895 code = GE_EXPR;
2896 }
2897 if (wi::to_widest (@3) == (wi::to_widest (@2) + 1))
2898 {
2899 /* X < Y + 1 equals to X <= Y. */
2900 if (cmp == LT_EXPR)
2901 code = LE_EXPR;
2902 /* X >= Y + 1 equals to X > Y. */
2903 if (cmp == GE_EXPR)
2904 code = GT_EXPR;
2905 }
2906 if (code != ERROR_MARK
2907 || wi::to_widest (@2) == wi::to_widest (@3))
2908 {
2909 if (cmp == LT_EXPR || cmp == LE_EXPR)
2910 code = MIN_EXPR;
2911 if (cmp == GT_EXPR || cmp == GE_EXPR)
2912 code = MAX_EXPR;
2913 }
2914 }
2915 /* Can do A == C1 ? A : C2 -> A == C1 ? C1 : C2? */
2916 else if (int_fits_type_p (@3, from_type))
2917 code = EQ_EXPR;
2918 }
2919 }
2920 (if (code == MAX_EXPR)
2921 (convert (max @1 (convert @2)))
2922 (if (code == MIN_EXPR)
2923 (convert (min @1 (convert @2)))
2924 (if (code == EQ_EXPR)
2925 (convert (cond (eq @1 (convert @3))
2926 (convert:from_type @3) (convert:from_type @2)))))))))
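/* Case 1 above fires, for example, for unsigned char x:
   (int) x <= 7 ? (int) x : 8 simplifies to (int) min (x, 8), using the
   X <= Y - 1 == X < Y adjustment so that c1 matches c2.  */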
2927
2928 /* (cond (cmp (convert? x) c1) (op x c2) c3) -> (op (minmax x c1) c2) if:
2929
2930 1) OP is PLUS or MINUS.
2931 2) CMP is LT, LE, GT or GE.
2932 3) C3 == (C1 op C2), and computation doesn't have undefined behavior.
2933
2934 This pattern also handles special cases like:
2935
2936 A) Operand x is an unsigned to signed type conversion and c1 is
2937 integer zero.  In this case,
2938 (signed type)x < 0 <=> x > MAX_VAL(signed type)
2939 (signed type)x >= 0 <=> x <= MAX_VAL(signed type)
2940 B) Const c1 may not equal (C3 op' C2).  In this case we also
2941 check equality for (c1+1) and (c1-1) by adjusting the comparison
2942 code.
2943
2944 TODO: Though the signed type is handled by this pattern, it cannot
2945 be simplified at the moment because the C standard requires additional
2946 type promotion.  In order to match&simplify it here, the IR needs
2947 to be cleaned up by other optimizers, i.e., VRP.  */
2948 (for op (plus minus)
2949 (for cmp (lt le gt ge)
2950 (simplify
2951 (cond (cmp (convert? @X) INTEGER_CST@1) (op @X INTEGER_CST@2) INTEGER_CST@3)
2952 (with { tree from_type = TREE_TYPE (@X), to_type = TREE_TYPE (@1); }
2953 (if (types_match (from_type, to_type)
2954 /* Check if it is special case A). */
2955 || (TYPE_UNSIGNED (from_type)
2956 && !TYPE_UNSIGNED (to_type)
2957 && TYPE_PRECISION (from_type) == TYPE_PRECISION (to_type)
2958 && integer_zerop (@1)
2959 && (cmp == LT_EXPR || cmp == GE_EXPR)))
2960 (with
2961 {
2962 wi::overflow_type overflow = wi::OVF_NONE;
2963 enum tree_code code, cmp_code = cmp;
2964 wide_int real_c1;
2965 wide_int c1 = wi::to_wide (@1);
2966 wide_int c2 = wi::to_wide (@2);
2967 wide_int c3 = wi::to_wide (@3);
2968 signop sgn = TYPE_SIGN (from_type);
2969
2970 /* Handle special case A), given x of unsigned type:
2971 ((signed type)x < 0) <=> (x > MAX_VAL(signed type))
2972 ((signed type)x >= 0) <=> (x <= MAX_VAL(signed type)) */
2973 if (!types_match (from_type, to_type))
2974 {
2975 if (cmp_code == LT_EXPR)
2976 cmp_code = GT_EXPR;
2977 if (cmp_code == GE_EXPR)
2978 cmp_code = LE_EXPR;
2979 c1 = wi::max_value (to_type);
2980 }
2981 /* To simplify this pattern, we require c3 = (c1 op c2). Here we
2982 compute (c3 op' c2) and check if it equals c1, with op' being
2983 the inverted operator of op. Make sure overflow doesn't happen
2984 if it is undefined. */
2985 if (op == PLUS_EXPR)
2986 real_c1 = wi::sub (c3, c2, sgn, &overflow);
2987 else
2988 real_c1 = wi::add (c3, c2, sgn, &overflow);
2989
2990 code = cmp_code;
2991 if (!overflow || !TYPE_OVERFLOW_UNDEFINED (from_type))
2992 {
2993 /* Check if c1 equals real_c1.  The boundary condition is handled
2994 by adjusting the comparison operation if necessary.  */
2995 if (!wi::cmp (wi::sub (real_c1, 1, sgn, &overflow), c1, sgn)
2996 && !overflow)
2997 {
2998 /* X <= Y - 1 equals to X < Y. */
2999 if (cmp_code == LE_EXPR)
3000 code = LT_EXPR;
3001 /* X > Y - 1 equals to X >= Y. */
3002 if (cmp_code == GT_EXPR)
3003 code = GE_EXPR;
3004 }
3005 if (!wi::cmp (wi::add (real_c1, 1, sgn, &overflow), c1, sgn)
3006 && !overflow)
3007 {
3008 /* X < Y + 1 equals to X <= Y. */
3009 if (cmp_code == LT_EXPR)
3010 code = LE_EXPR;
3011 /* X >= Y + 1 equals to X > Y. */
3012 if (cmp_code == GE_EXPR)
3013 code = GT_EXPR;
3014 }
3015 if (code != cmp_code || !wi::cmp (real_c1, c1, sgn))
3016 {
3017 if (cmp_code == LT_EXPR || cmp_code == LE_EXPR)
3018 code = MIN_EXPR;
3019 if (cmp_code == GT_EXPR || cmp_code == GE_EXPR)
3020 code = MAX_EXPR;
3021 }
3022 }
3023 }
3024 (if (code == MAX_EXPR)
3025 (op (max @X { wide_int_to_tree (from_type, real_c1); })
3026 { wide_int_to_tree (from_type, c2); })
3027 (if (code == MIN_EXPR)
3028 (op (min @X { wide_int_to_tree (from_type, real_c1); })
3029 { wide_int_to_tree (from_type, c2); })))))))))
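/* For example, for unsigned x, x < 10 ? x + 3 : 13 simplifies to
   min (x, 10) + 3, since c3 == 13 equals c1 + c2 and the comparison
   is LT.  */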
3030
3031 (for cnd (cond vec_cond)
3032 /* A ? B : (A ? X : C) -> A ? B : C. */
3033 (simplify
3034 (cnd @0 (cnd @0 @1 @2) @3)
3035 (cnd @0 @1 @3))
3036 (simplify
3037 (cnd @0 @1 (cnd @0 @2 @3))
3038 (cnd @0 @1 @3))
3039 /* A ? B : (!A ? C : X) -> A ? B : C. */
3040 /* ??? This matches embedded conditions open-coded because genmatch
3041 would generate matching code for conditions in separate stmts only.
3042 The following is still important for merging the then and else arm
3043 cases from if-conversion. */
3044 (simplify
3045 (cnd @0 @1 (cnd @2 @3 @4))
3046 (if (inverse_conditions_p (@0, @2))
3047 (cnd @0 @1 @3)))
3048 (simplify
3049 (cnd @0 (cnd @1 @2 @3) @4)
3050 (if (inverse_conditions_p (@0, @1))
3051 (cnd @0 @3 @4)))
3052
3053 /* A ? B : B -> B. */
3054 (simplify
3055 (cnd @0 @1 @1)
3056 @1)
3057
3058 /* !A ? B : C -> A ? C : B. */
3059 (simplify
3060 (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
3061 (cnd @0 @2 @1)))
3062
3063 /* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons
3064 return all -1 or all 0 results. */
3065 /* ??? We could instead convert all instances of the vec_cond to negate,
3066 but that isn't necessarily a win on its own. */
3067 (simplify
3068 (plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
3069 (if (VECTOR_TYPE_P (type)
3070 && known_eq (TYPE_VECTOR_SUBPARTS (type),
3071 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
3072 && (TYPE_MODE (TREE_TYPE (type))
3073 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
3074 (minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
3075
3076 /* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0). */
3077 (simplify
3078 (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
3079 (if (VECTOR_TYPE_P (type)
3080 && known_eq (TYPE_VECTOR_SUBPARTS (type),
3081 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
3082 && (TYPE_MODE (TREE_TYPE (type))
3083 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
3084 (plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
3085
3086
3087 /* Simplifications of comparisons. */
3088
3089 /* See if we can reduce the magnitude of a constant involved in a
3090 comparison by changing the comparison code. This is a canonicalization
3091 formerly done by maybe_canonicalize_comparison_1. */
3092 (for cmp (le gt)
3093 acmp (lt ge)
3094 (simplify
3095 (cmp @0 INTEGER_CST@1)
3096 (if (tree_int_cst_sgn (@1) == -1)
3097 (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); }))))
3098 (for cmp (ge lt)
3099 acmp (gt le)
3100 (simplify
3101 (cmp @0 INTEGER_CST@1)
3102 (if (tree_int_cst_sgn (@1) == 1)
3103 (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); }))))
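/* For example, x <= -5 becomes x < -4 and x >= 5 becomes x > 4, reducing
   the magnitude of the constant involved by one in each case.  */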
3104
3105
3106 /* We can simplify a logical negation of a comparison to the
3107 inverted comparison. As we cannot compute an expression
3108 operator using invert_tree_comparison, we have to simulate
3109 that with expression code iteration. */
3110 (for cmp (tcc_comparison)
3111 icmp (inverted_tcc_comparison)
3112 ncmp (inverted_tcc_comparison_with_nans)
3113 /* Ideally we'd like to combine the following two patterns
3114 and handle some more cases by using
3115 (logical_inverted_value (cmp @0 @1))
3116 here but for that genmatch would need to "inline" that.
3117 For now implement what forward_propagate_comparison did. */
3118 (simplify
3119 (bit_not (cmp @0 @1))
3120 (if (VECTOR_TYPE_P (type)
3121 || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
3122 /* Comparison inversion may be impossible for trapping math,
3123 invert_tree_comparison will tell us. But we can't use
3124 a computed operator in the replacement tree thus we have
3125 to play the trick below. */
3126 (with { enum tree_code ic = invert_tree_comparison
3127 (cmp, HONOR_NANS (@0)); }
3128 (if (ic == icmp)
3129 (icmp @0 @1)
3130 (if (ic == ncmp)
3131 (ncmp @0 @1))))))
3132 (simplify
3133 (bit_xor (cmp @0 @1) integer_truep)
3134 (with { enum tree_code ic = invert_tree_comparison
3135 (cmp, HONOR_NANS (@0)); }
3136 (if (ic == icmp)
3137 (icmp @0 @1)
3138 (if (ic == ncmp)
3139 (ncmp @0 @1))))))
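/* For example, !(a < b) becomes a >= b when a and b cannot be NaN, and
   the NaN-safe a unge b when they can.  */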
3140
3141 /* Transform comparisons of the form X - Y CMP 0 to X CMP Y.
3142 ??? The transformation is valid for the other operators if overflow
3143 is undefined for the type, but performing it here badly interacts
3144 with the transformation in fold_cond_expr_with_comparison which
3145 attempts to synthesize ABS_EXPR.  */
3146 (for cmp (eq ne)
3147 (for sub (minus pointer_diff)
3148 (simplify
3149 (cmp (sub@2 @0 @1) integer_zerop)
3150 (if (single_use (@2))
3151 (cmp @0 @1)))))
3152
3153 /* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the
3154 signed arithmetic case. That form is created by the compiler
3155 often enough for folding it to be of value. One example is in
3156 computing loop trip counts after Operator Strength Reduction. */
3157 (for cmp (simple_comparison)
3158 scmp (swapped_simple_comparison)
3159 (simplify
3160 (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2)
3161 /* Handle unfolded multiplication by zero. */
3162 (if (integer_zerop (@1))
3163 (cmp @1 @2)
3164 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3165 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
3166 && single_use (@3))
3167 /* If @1 is negative we swap the sense of the comparison. */
3168 (if (tree_int_cst_sgn (@1) < 0)
3169 (scmp @0 @2)
3170 (cmp @0 @2))))))
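/* For example, for signed x with undefined overflow, x * 4 < 0 becomes
   x < 0 and x * -4 < 0 becomes x > 0, the sense of the comparison being
   swapped for the negative multiplier.  */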
3171
3172 /* Simplify comparison of something with itself. For IEEE
3173 floating-point, we can only do some of these simplifications. */
3174 (for cmp (eq ge le)
3175 (simplify
3176 (cmp @0 @0)
3177 (if (! FLOAT_TYPE_P (TREE_TYPE (@0))
3178 || ! HONOR_NANS (@0))
3179 { constant_boolean_node (true, type); }
3180 (if (cmp != EQ_EXPR)
3181 (eq @0 @0)))))
3182 (for cmp (ne gt lt)
3183 (simplify
3184 (cmp @0 @0)
3185 (if (cmp != NE_EXPR
3186 || ! FLOAT_TYPE_P (TREE_TYPE (@0))
3187 || ! HONOR_NANS (@0))
3188 { constant_boolean_node (false, type); })))
3189 (for cmp (unle unge uneq)
3190 (simplify
3191 (cmp @0 @0)
3192 { constant_boolean_node (true, type); }))
3193 (for cmp (unlt ungt)
3194 (simplify
3195 (cmp @0 @0)
3196 (unordered @0 @0)))
3197 (simplify
3198 (ltgt @0 @0)
3199 (if (!flag_trapping_math)
3200 { constant_boolean_node (false, type); }))
3201
3202 /* Fold ~X op ~Y as Y op X. */
3203 (for cmp (simple_comparison)
3204 (simplify
3205 (cmp (bit_not@2 @0) (bit_not@3 @1))
3206 (if (single_use (@2) && single_use (@3))
3207 (cmp @1 @0))))
3208
3209 /* Fold ~X op C as X op' ~C, where op' is the swapped comparison. */
3210 (for cmp (simple_comparison)
3211 scmp (swapped_simple_comparison)
3212 (simplify
3213 (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1)
3214 (if (single_use (@2)
3215 && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST))
3216 (scmp @0 (bit_not @1)))))
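/* e.g. (a sketch, signed int x):  ~x < 5  becomes  x > ~5, i.e. x > -6,
   folding the BIT_NOT into the constant.  */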
3217
3218 (for cmp (simple_comparison)
3219 /* Fold (double)float1 CMP (double)float2 into float1 CMP float2. */
3220 (simplify
3221 (cmp (convert@2 @0) (convert? @1))
3222 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3223 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
3224 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
3225 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
3226 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))))
3227 (with
3228 {
3229 tree type1 = TREE_TYPE (@1);
3230 if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
3231 {
3232 REAL_VALUE_TYPE orig = TREE_REAL_CST (@1);
3233 if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
3234 && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
3235 type1 = float_type_node;
3236 if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
3237 && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
3238 type1 = double_type_node;
3239 }
3240 tree newtype
3241 = (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1)
3242 ? TREE_TYPE (@0) : type1);
3243 }
3244 (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype))
3245 (cmp (convert:newtype @0) (convert:newtype @1))))))
3246
3247 (simplify
3248 (cmp @0 REAL_CST@1)
3249 /* IEEE doesn't distinguish +0 and -0 in comparisons. */
3250 (switch
3251 /* a CMP (-0) -> a CMP 0 */
3252 (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
3253 (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
3254 /* x != NaN is always true, other ops are always false. */
3255 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
3256 && ! HONOR_SNANS (@1))
3257 { constant_boolean_node (cmp == NE_EXPR, type); })
3258 /* Fold comparisons against infinity. */
3259 (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
3260 && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
3261 (with
3262 {
3263 REAL_VALUE_TYPE max;
3264 enum tree_code code = cmp;
3265 bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
3266 if (neg)
3267 code = swap_tree_comparison (code);
3268 }
3269 (switch
3270 /* x > +Inf is always false, if we ignore NaNs or exceptions. */
3271 (if (code == GT_EXPR
3272 && !(HONOR_NANS (@0) && flag_trapping_math))
3273 { constant_boolean_node (false, type); })
3274 (if (code == LE_EXPR)
3275 /* x <= +Inf is always true, if we don't care about NaNs. */
3276 (if (! HONOR_NANS (@0))
3277 { constant_boolean_node (true, type); }
3278 /* x <= +Inf is the same as x == x, i.e. !isnan(x), but this loses
3279 an "invalid" exception. */
3280 (if (!flag_trapping_math)
3281 (eq @0 @0))))
3282 /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX, but
3283 for == this introduces an exception for x a NaN. */
3284 (if ((code == EQ_EXPR && !(HONOR_NANS (@0) && flag_trapping_math))
3285 || code == GE_EXPR)
3286 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3287 (if (neg)
3288 (lt @0 { build_real (TREE_TYPE (@0), max); })
3289 (gt @0 { build_real (TREE_TYPE (@0), max); }))))
3290 /* x < +Inf is always equal to x <= DBL_MAX. */
3291 (if (code == LT_EXPR)
3292 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3293 (if (neg)
3294 (ge @0 { build_real (TREE_TYPE (@0), max); })
3295 (le @0 { build_real (TREE_TYPE (@0), max); }))))
3296 /* x != +Inf is always equal to !(x > DBL_MAX), but this introduces
3297 an exception for x a NaN so use an unordered comparison. */
3298 (if (code == NE_EXPR)
3299 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3300 (if (! HONOR_NANS (@0))
3301 (if (neg)
3302 (ge @0 { build_real (TREE_TYPE (@0), max); })
3303 (le @0 { build_real (TREE_TYPE (@0), max); }))
3304 (if (neg)
3305 (unge @0 { build_real (TREE_TYPE (@0), max); })
3306 (unle @0 { build_real (TREE_TYPE (@0), max); }))))))))))
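/* A sketch of the effect for double (DBL_MAX from <float.h>), when the
   respective NaN/exception conditions above hold:

     x >  INFINITY   becomes   0
     x <= INFINITY   becomes   1             (or x == x when NaNs matter)
     x <  INFINITY   becomes   x <= DBL_MAX
     x != INFINITY   becomes   x <= DBL_MAX  (unle if NaNs are honored)  */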
3307
3308 /* If this is a comparison of a real constant with a PLUS_EXPR
3309 or a MINUS_EXPR of a real constant, we can convert it into a
3310 comparison with a revised real constant as long as no overflow
3311 occurs when unsafe_math_optimizations are enabled. */
3312 (if (flag_unsafe_math_optimizations)
3313 (for op (plus minus)
3314 (simplify
3315 (cmp (op @0 REAL_CST@1) REAL_CST@2)
3316 (with
3317 {
3318 tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
3319 TREE_TYPE (@1), @2, @1);
3320 }
3321 (if (tem && !TREE_OVERFLOW (tem))
3322 (cmp @0 { tem; }))))))
3323
3324 /* Likewise, we can simplify a comparison of a real constant with
3325 a MINUS_EXPR whose first operand is also a real constant, i.e.
3326 (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
3327 floating-point types only if -fassociative-math is set. */
3328 (if (flag_associative_math)
3329 (simplify
3330 (cmp (minus REAL_CST@0 @1) REAL_CST@2)
3331 (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
3332 (if (tem && !TREE_OVERFLOW (tem))
3333 (cmp { tem; } @1)))))
3334
3335 /* Fold comparisons against built-in math functions. */
3336 (if (flag_unsafe_math_optimizations
3337 && ! flag_errno_math)
3338 (for sq (SQRT)
3339 (simplify
3340 (cmp (sq @0) REAL_CST@1)
3341 (switch
3342 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
3343 (switch
3344 	 /* sqrt(x) <, <= or == y is always false, if y is negative.  */
3345 (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
3346 { constant_boolean_node (false, type); })
3347 /* sqrt(x) > y is always true, if y is negative and we
3348 don't care about NaNs, i.e. negative values of x. */
3349 (if (cmp == NE_EXPR || !HONOR_NANS (@0))
3350 { constant_boolean_node (true, type); })
3351 /* sqrt(x) > y is the same as x >= 0, if y is negative. */
3352 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
3353 (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0))
3354 (switch
3355 /* sqrt(x) < 0 is always false. */
3356 (if (cmp == LT_EXPR)
3357 { constant_boolean_node (false, type); })
3358 /* sqrt(x) >= 0 is always true if we don't care about NaNs. */
3359 (if (cmp == GE_EXPR && !HONOR_NANS (@0))
3360 { constant_boolean_node (true, type); })
3361 /* sqrt(x) <= 0 -> x == 0. */
3362 (if (cmp == LE_EXPR)
3363 (eq @0 @1))
3364 /* Otherwise sqrt(x) cmp 0 -> x cmp 0. Here cmp can be >=, >,
3365 == or !=. In the last case:
3366
3367 (sqrt(x) != 0) == (NaN != 0) == true == (x != 0)
3368
3369 if x is negative or NaN. Due to -funsafe-math-optimizations,
3370 the results for other x follow from natural arithmetic. */
3371 (cmp @0 @1)))
3372 (if (cmp == GT_EXPR || cmp == GE_EXPR)
3373 (with
3374 {
3375 REAL_VALUE_TYPE c2;
3376 real_arithmetic (&c2, MULT_EXPR,
3377 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
3378 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
3379 }
3380 (if (REAL_VALUE_ISINF (c2))
3381 /* sqrt(x) > y is x == +Inf, when y is very large. */
3382 (if (HONOR_INFINITIES (@0))
3383 (eq @0 { build_real (TREE_TYPE (@0), c2); })
3384 { constant_boolean_node (false, type); })
3385 /* sqrt(x) > c is the same as x > c*c. */
3386 (cmp @0 { build_real (TREE_TYPE (@0), c2); }))))
3387 (if (cmp == LT_EXPR || cmp == LE_EXPR)
3388 (with
3389 {
3390 REAL_VALUE_TYPE c2;
3391 real_arithmetic (&c2, MULT_EXPR,
3392 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
3393 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
3394 }
3395 (if (REAL_VALUE_ISINF (c2))
3396 (switch
3397 /* sqrt(x) < y is always true, when y is a very large
3398 value and we don't care about NaNs or Infinities. */
3399 (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
3400 { constant_boolean_node (true, type); })
3401 /* sqrt(x) < y is x != +Inf when y is very large and we
3402 don't care about NaNs. */
3403 (if (! HONOR_NANS (@0))
3404 (ne @0 { build_real (TREE_TYPE (@0), c2); }))
3405 /* sqrt(x) < y is x >= 0 when y is very large and we
3406 don't care about Infinities. */
3407 (if (! HONOR_INFINITIES (@0))
3408 (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
3409 /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */
3410 (if (GENERIC)
3411 (truth_andif
3412 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
3413 (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
3414 /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */
3415 (if (! HONOR_NANS (@0))
3416 (cmp @0 { build_real (TREE_TYPE (@0), c2); })
3417 /* sqrt(x) < c is the same as x >= 0 && x < c*c. */
3418 (if (GENERIC)
3419 (truth_andif
3420 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
3421 (cmp @0 { build_real (TREE_TYPE (@0), c2); })))))))))
3422 /* Transform sqrt(x) cmp sqrt(y) -> x cmp y. */
3423 (simplify
3424 (cmp (sq @0) (sq @1))
3425 (if (! HONOR_NANS (@0))
3426 (cmp @0 @1))))))
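/* A sketch of the common finite cases (requires -funsafe-math-optimizations
   and -fno-math-errno; sqrt from <math.h>):

     sqrt (x) >  3.0        becomes   x > 9.0
     sqrt (x) <= 2.0        becomes   x <= 4.0   (if NaNs are ignored)
     sqrt (x) <  sqrt (y)   becomes   x < y      (if NaNs are ignored)  */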
3427
3428 /* Optimize various special cases of (FTYPE) N CMP (FTYPE) M. */
3429 (for cmp (lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
3430 icmp (lt le eq ne ge gt unordered ordered lt le gt ge eq ne)
3431 (simplify
3432 (cmp (float@0 @1) (float @2))
3433 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@0))
3434 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
3435 (with
3436 {
3437 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@0))));
3438 tree type1 = TREE_TYPE (@1);
3439 bool type1_signed_p = TYPE_SIGN (type1) == SIGNED;
3440 tree type2 = TREE_TYPE (@2);
3441 bool type2_signed_p = TYPE_SIGN (type2) == SIGNED;
3442 }
3443 (if (fmt.can_represent_integral_type_p (type1)
3444 && fmt.can_represent_integral_type_p (type2))
3445 (if (cmp == ORDERED_EXPR || cmp == UNORDERED_EXPR)
3446 { constant_boolean_node (cmp == ORDERED_EXPR, type); }
3447 (if (TYPE_PRECISION (type1) > TYPE_PRECISION (type2)
3448 && type1_signed_p >= type2_signed_p)
3449 (icmp @1 (convert @2))
3450 (if (TYPE_PRECISION (type1) < TYPE_PRECISION (type2)
3451 && type1_signed_p <= type2_signed_p)
3452 (icmp (convert:type2 @1) @2)
3453 (if (TYPE_PRECISION (type1) == TYPE_PRECISION (type2)
3454 && type1_signed_p == type2_signed_p)
3455 (icmp @1 @2))))))))))
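/* e.g. (a sketch): for 32-bit ints i and j, whose values double
   represents exactly,

     (double) i == (double) j   becomes   i == j
     (double) i <  (double) j   becomes   i < j

   avoiding two int-to-double conversions.  */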
3456
3457 /* Optimize various special cases of (FTYPE) N CMP CST. */
3458 (for cmp (lt le eq ne ge gt)
3459 icmp (le le eq ne ge ge)
3460 (simplify
3461 (cmp (float @0) REAL_CST@1)
3462 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@1))
3463 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1)))
3464 (with
3465 {
3466 tree itype = TREE_TYPE (@0);
3467 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@1))));
3468 const REAL_VALUE_TYPE *cst = TREE_REAL_CST_PTR (@1);
3469 /* Be careful to preserve any potential exceptions due to
3470 NaNs. qNaNs are ok in == or != context.
3471 TODO: relax under -fno-trapping-math or
3472 -fno-signaling-nans. */
3473 bool exception_p
3474 = real_isnan (cst) && (cst->signalling
3475 || (cmp != EQ_EXPR && cmp != NE_EXPR));
3476 }
3477 /* TODO: allow non-fitting itype and SNaNs when
3478 -fno-trapping-math. */
3479 (if (fmt.can_represent_integral_type_p (itype) && ! exception_p)
3480 (with
3481 {
3482 signop isign = TYPE_SIGN (itype);
3483 REAL_VALUE_TYPE imin, imax;
3484 real_from_integer (&imin, fmt, wi::min_value (itype), isign);
3485 real_from_integer (&imax, fmt, wi::max_value (itype), isign);
3486
3487 REAL_VALUE_TYPE icst;
3488 if (cmp == GT_EXPR || cmp == GE_EXPR)
3489 real_ceil (&icst, fmt, cst);
3490 else if (cmp == LT_EXPR || cmp == LE_EXPR)
3491 real_floor (&icst, fmt, cst);
3492 else
3493 real_trunc (&icst, fmt, cst);
3494
3495 bool cst_int_p = !real_isnan (cst) && real_identical (&icst, cst);
3496
3497 bool overflow_p = false;
3498 wide_int icst_val
3499 = real_to_integer (&icst, &overflow_p, TYPE_PRECISION (itype));
3500 }
3501 (switch
3502 /* Optimize cases when CST is outside of ITYPE's range. */
3503 (if (real_compare (LT_EXPR, cst, &imin))
3504 { constant_boolean_node (cmp == GT_EXPR || cmp == GE_EXPR || cmp == NE_EXPR,
3505 type); })
3506 (if (real_compare (GT_EXPR, cst, &imax))
3507 { constant_boolean_node (cmp == LT_EXPR || cmp == LE_EXPR || cmp == NE_EXPR,
3508 type); })
3509 /* Remove cast if CST is an integer representable by ITYPE. */
3510 (if (cst_int_p)
3511 (cmp @0 { gcc_assert (!overflow_p);
3512 wide_int_to_tree (itype, icst_val); })
3513 )
3514 /* When CST is fractional, optimize
3515 (FTYPE) N == CST -> 0
3516 (FTYPE) N != CST -> 1. */
3517 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
3518 { constant_boolean_node (cmp == NE_EXPR, type); })
3519 /* Otherwise replace with sensible integer constant. */
3520 (with
3521 {
3522 gcc_checking_assert (!overflow_p);
3523 }
3524 (icmp @0 { wide_int_to_tree (itype, icst_val); })))))))))
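/* e.g. (a sketch, 32-bit int i, double constants):

     (double) i <  1.5    becomes   i <= 1
     (double) i == 1.5    becomes   0
     (double) i <  2.0    becomes   i < 2
     (double) i >  1e20   becomes   0       (1e20 is above INT_MAX)  */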
3525
3526 /* Fold A /[ex] B CMP C to A CMP B * C. */
3527 (for cmp (eq ne)
3528 (simplify
3529 (cmp (exact_div @0 @1) INTEGER_CST@2)
3530 (if (!integer_zerop (@1))
3531 (if (wi::to_wide (@2) == 0)
3532 (cmp @0 @2)
3533 (if (TREE_CODE (@1) == INTEGER_CST)
3534 (with
3535 {
3536 wi::overflow_type ovf;
3537 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
3538 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
3539 }
3540 (if (ovf)
3541 { constant_boolean_node (cmp == NE_EXPR, type); }
3542 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))))
3543 (for cmp (lt le gt ge)
3544 (simplify
3545 (cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2)
3546 (if (wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
3547 (with
3548 {
3549 wi::overflow_type ovf;
3550 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
3551 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
3552 }
3553 (if (ovf)
3554 { constant_boolean_node (wi::lt_p (wi::to_wide (@2), 0,
3555 TYPE_SIGN (TREE_TYPE (@2)))
3556 != (cmp == LT_EXPR || cmp == LE_EXPR), type); }
3557 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))
3558
3559 /* Unordered tests if either argument is a NaN. */
3560 (simplify
3561 (bit_ior (unordered @0 @0) (unordered @1 @1))
3562 (if (types_match (@0, @1))
3563 (unordered @0 @1)))
3564 (simplify
3565 (bit_and (ordered @0 @0) (ordered @1 @1))
3566 (if (types_match (@0, @1))
3567 (ordered @0 @1)))
3568 (simplify
3569 (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1))
3570 @2)
3571 (simplify
3572 (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1))
3573 @2)
3574
3575 /* Simple range test simplifications. */
3576 /* A < B || A >= B -> true. */
3577 (for test1 (lt le le le ne ge)
3578 test2 (ge gt ge ne eq ne)
3579 (simplify
3580 (bit_ior:c (test1 @0 @1) (test2 @0 @1))
3581 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3582 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
3583 { constant_boolean_node (true, type); })))
3584 /* A < B && A >= B -> false. */
3585 (for test1 (lt lt lt le ne eq)
3586 test2 (ge gt eq gt eq gt)
3587 (simplify
3588 (bit_and:c (test1 @0 @1) (test2 @0 @1))
3589 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3590 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
3591 { constant_boolean_node (false, type); })))
3592
3593 /* A & (2**N - 1) <= 2**K - 1 -> A & (2**N - 2**K) == 0
3594 A & (2**N - 1) > 2**K - 1 -> A & (2**N - 2**K) != 0
3595
3596 Note that comparisons
3597 A & (2**N - 1) < 2**K -> A & (2**N - 2**K) == 0
3598 A & (2**N - 1) >= 2**K -> A & (2**N - 2**K) != 0
3599 will be canonicalized to above so there's no need to
3600 consider them here.
3601 */
3602
3603 (for cmp (le gt)
3604 eqcmp (eq ne)
3605 (simplify
3606 (cmp (bit_and@0 @1 INTEGER_CST@2) INTEGER_CST@3)
3607 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
3608 (with
3609 {
3610 tree ty = TREE_TYPE (@0);
3611 unsigned prec = TYPE_PRECISION (ty);
3612 wide_int mask = wi::to_wide (@2, prec);
3613 wide_int rhs = wi::to_wide (@3, prec);
3614 signop sgn = TYPE_SIGN (ty);
3615 }
3616 (if ((mask & (mask + 1)) == 0 && wi::gt_p (rhs, 0, sgn)
3617 && (rhs & (rhs + 1)) == 0 && wi::ge_p (mask, rhs, sgn))
3618 (eqcmp (bit_and @1 { wide_int_to_tree (ty, mask - rhs); })
3619 { build_zero_cst (ty); }))))))
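/* e.g. (a sketch, unsigned x, with N = 4 and K = 2 in the notation above):

     (x & 15) <= 3   becomes   (x & 12) == 0
     (x & 15) >  3   becomes   (x & 12) != 0  */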
3620
3621 /* -A CMP -B -> B CMP A. */
3622 (for cmp (tcc_comparison)
3623 scmp (swapped_tcc_comparison)
3624 (simplify
3625 (cmp (negate @0) (negate @1))
3626 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3627 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3628 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
3629 (scmp @0 @1)))
3630 (simplify
3631 (cmp (negate @0) CONSTANT_CLASS_P@1)
3632 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3633 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3634 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
3635 (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); }
3636 (if (tem && !TREE_OVERFLOW (tem))
3637 (scmp @0 { tem; }))))))
3638
3639 /* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0. */
3640 (for op (eq ne)
3641 (simplify
3642 (op (abs @0) zerop@1)
3643 (op @0 @1)))
3644
3645 /* From fold_sign_changed_comparison and fold_widened_comparison.
3646 FIXME: the lack of symmetry is disturbing. */
3647 (for cmp (simple_comparison)
3648 (simplify
3649 (cmp (convert@0 @00) (convert?@1 @10))
3650 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3651 /* Disable this optimization if we're casting a function pointer
3652 type on targets that require function pointer canonicalization. */
3653 && !(targetm.have_canonicalize_funcptr_for_compare ()
3654 && ((POINTER_TYPE_P (TREE_TYPE (@00))
3655 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@00))))
3656 || (POINTER_TYPE_P (TREE_TYPE (@10))
3657 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@10))))))
3658 && single_use (@0))
3659 (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
3660 && (TREE_CODE (@10) == INTEGER_CST
3661 || @1 != @10)
3662 && (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
3663 || cmp == NE_EXPR
3664 || cmp == EQ_EXPR)
3665 && !POINTER_TYPE_P (TREE_TYPE (@00)))
3666    /* ??? The special-casing of INTEGER_CST conversion was in the original
3667       code and is here to avoid a spurious overflow flag on the resulting
3668       constant, which fold_convert produces.  */
3669 (if (TREE_CODE (@1) == INTEGER_CST)
3670 (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
3671 TREE_OVERFLOW (@1)); })
3672 (cmp @00 (convert @1)))
3673
3674 (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
3675 /* If possible, express the comparison in the shorter mode. */
3676 (if ((cmp == EQ_EXPR || cmp == NE_EXPR
3677 || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00))
3678 || (!TYPE_UNSIGNED (TREE_TYPE (@0))
3679 && TYPE_UNSIGNED (TREE_TYPE (@00))))
3680 && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
3681 || ((TYPE_PRECISION (TREE_TYPE (@00))
3682 >= TYPE_PRECISION (TREE_TYPE (@10)))
3683 && (TYPE_UNSIGNED (TREE_TYPE (@00))
3684 == TYPE_UNSIGNED (TREE_TYPE (@10))))
3685 || (TREE_CODE (@10) == INTEGER_CST
3686 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
3687 && int_fits_type_p (@10, TREE_TYPE (@00)))))
3688 (cmp @00 (convert @10))
3689 (if (TREE_CODE (@10) == INTEGER_CST
3690 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
3691 && !int_fits_type_p (@10, TREE_TYPE (@00)))
3692 (with
3693 {
3694 tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
3695 tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
3696 bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
3697 bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
3698 }
3699 (if (above || below)
3700 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
3701 { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
3702 (if (cmp == LT_EXPR || cmp == LE_EXPR)
3703 { constant_boolean_node (above ? true : false, type); }
3704 (if (cmp == GT_EXPR || cmp == GE_EXPR)
3705 { constant_boolean_node (above ? false : true, type); }))))))))))))
3706
3707 (for cmp (eq ne)
3708 /* A local variable can never be pointed to by
3709 the default SSA name of an incoming parameter.
3710 SSA names are canonicalized to 2nd place. */
3711 (simplify
3712 (cmp addr@0 SSA_NAME@1)
3713 (if (SSA_NAME_IS_DEFAULT_DEF (@1)
3714 && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL)
3715 (with { tree base = get_base_address (TREE_OPERAND (@0, 0)); }
3716 (if (TREE_CODE (base) == VAR_DECL
3717 && auto_var_in_fn_p (base, current_function_decl))
3718 (if (cmp == NE_EXPR)
3719 { constant_boolean_node (true, type); }
3720 { constant_boolean_node (false, type); }))))))
3721
3722 /* Equality compare simplifications from fold_binary */
3723 (for cmp (eq ne)
3724
3725 /* If we have (A | C) == D where C & ~D != 0, convert this into 0.
3726 Similarly for NE_EXPR. */
3727 (simplify
3728 (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
3729 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
3730 && wi::bit_and_not (wi::to_wide (@1), wi::to_wide (@2)) != 0)
3731 { constant_boolean_node (cmp == NE_EXPR, type); }))
3732
3733 /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y. */
3734 (simplify
3735 (cmp (bit_xor @0 @1) integer_zerop)
3736 (cmp @0 @1))
3737
3738 /* (X ^ Y) == Y becomes X == 0.
3739 Likewise (X ^ Y) == X becomes Y == 0. */
3740 (simplify
3741 (cmp:c (bit_xor:c @0 @1) @0)
3742 (cmp @1 { build_zero_cst (TREE_TYPE (@1)); }))
3743
3744 /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2). */
3745 (simplify
3746 (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
3747 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
3748 (cmp @0 (bit_xor @1 (convert @2)))))
3749
3750 (simplify
3751 (cmp (convert? addr@0) integer_zerop)
3752 (if (tree_single_nonzero_warnv_p (@0, NULL))
3753 { constant_boolean_node (cmp == NE_EXPR, type); })))
3754
3755 /* If we have (A & C) == C where C is a power of 2, convert this into
3756 (A & C) != 0. Similarly for NE_EXPR. */
3757 (for cmp (eq ne)
3758 icmp (ne eq)
3759 (simplify
3760 (cmp (bit_and@2 @0 integer_pow2p@1) @1)
3761 (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))
3762
3763 /* If we have (A & C) != 0 ? D : 0 where C and D are powers of 2,
3764 convert this into a shift followed by ANDing with D. */
3765 (simplify
3766 (cond
3767 (ne (bit_and @0 integer_pow2p@1) integer_zerop)
3768 INTEGER_CST@2 integer_zerop)
3769 (if (integer_pow2p (@2))
3770 (with {
3771 int shift = (wi::exact_log2 (wi::to_wide (@2))
3772 - wi::exact_log2 (wi::to_wide (@1)));
3773 }
3774 (if (shift > 0)
3775 (bit_and
3776 (lshift (convert @0) { build_int_cst (integer_type_node, shift); }) @2)
3777 (bit_and
3778 (convert (rshift @0 { build_int_cst (integer_type_node, -shift); }))
3779 @2)))))
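/* e.g. (a sketch):

     (x & 4)  != 0 ? 16 : 0   becomes   (x << 2) & 16
     (x & 16) != 0 ?  4 : 0   becomes   (x >> 2) & 4  */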
3780
3781 /* If we have (A & C) != 0 where C is the sign bit of A, convert
3782 this into A < 0. Similarly for (A & C) == 0 into A >= 0. */
3783 (for cmp (eq ne)
3784 ncmp (ge lt)
3785 (simplify
3786 (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
3787 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3788 && type_has_mode_precision_p (TREE_TYPE (@0))
3789 && element_precision (@2) >= element_precision (@0)
3790 && wi::only_sign_bit_p (wi::to_wide (@1), element_precision (@0)))
3791 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
3792 (ncmp (convert:stype @0) { build_zero_cst (stype); })))))
3793
3794 /* If we have A < 0 ? C : 0 where C is a power of 2, convert
3795 this into a right shift or sign extension followed by ANDing with C. */
3796 (simplify
3797 (cond
3798 (lt @0 integer_zerop)
3799 INTEGER_CST@1 integer_zerop)
3800 (if (integer_pow2p (@1)
3801 && !TYPE_UNSIGNED (TREE_TYPE (@0)))
3802 (with {
3803 int shift = element_precision (@0) - wi::exact_log2 (wi::to_wide (@1)) - 1;
3804 }
3805 (if (shift >= 0)
3806 (bit_and
3807 (convert (rshift @0 { build_int_cst (integer_type_node, shift); }))
3808 @1)
3809 /* Otherwise ctype must be wider than TREE_TYPE (@0) and pure
3810 sign extension followed by AND with C will achieve the effect. */
3811 (bit_and (convert @0) @1)))))
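/* e.g. (a sketch, signed 32-bit x):  x < 0 ? 16 : 0  becomes
   (x >> 27) & 16, since the arithmetic shift replicates the sign bit
   into bit 4.  */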
3812
3813 /* When the addresses are not directly of decls, compare base and offset.
3814 This implements some remaining parts of fold_comparison's address
3815 comparisons but is still not a complete replacement for it.  Still, it is
3816 good enough to keep fold_stmt from regressing when not dispatching to fold_binary.  */
3817 (for cmp (simple_comparison)
3818 (simplify
3819 (cmp (convert1?@2 addr@0) (convert2? addr@1))
3820 (with
3821 {
3822 poly_int64 off0, off1;
3823 tree base0 = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off0);
3824 tree base1 = get_addr_base_and_unit_offset (TREE_OPERAND (@1, 0), &off1);
3825 if (base0 && TREE_CODE (base0) == MEM_REF)
3826 {
3827 off0 += mem_ref_offset (base0).force_shwi ();
3828 base0 = TREE_OPERAND (base0, 0);
3829 }
3830 if (base1 && TREE_CODE (base1) == MEM_REF)
3831 {
3832 off1 += mem_ref_offset (base1).force_shwi ();
3833 base1 = TREE_OPERAND (base1, 0);
3834 }
3835 }
3836 (if (base0 && base1)
3837 (with
3838 {
3839 int equal = 2;
3840 /* Punt in GENERIC on variables with value expressions;
3841 the value expressions might point to fields/elements
3842 of other vars etc. */
3843 if (GENERIC
3844 && ((VAR_P (base0) && DECL_HAS_VALUE_EXPR_P (base0))
3845 || (VAR_P (base1) && DECL_HAS_VALUE_EXPR_P (base1))))
3846 ;
3847 else if (decl_in_symtab_p (base0)
3848 && decl_in_symtab_p (base1))
3849 equal = symtab_node::get_create (base0)
3850 ->equal_address_to (symtab_node::get_create (base1));
3851 else if ((DECL_P (base0)
3852 || TREE_CODE (base0) == SSA_NAME
3853 || TREE_CODE (base0) == STRING_CST)
3854 && (DECL_P (base1)
3855 || TREE_CODE (base1) == SSA_NAME
3856 || TREE_CODE (base1) == STRING_CST))
3857 equal = (base0 == base1);
3858 }
3859 (if (equal == 1
3860 && (cmp == EQ_EXPR || cmp == NE_EXPR
3861 /* If the offsets are equal we can ignore overflow. */
3862 || known_eq (off0, off1)
3863 || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
3864 /* Or if we compare using pointers to decls or strings. */
3865 || (POINTER_TYPE_P (TREE_TYPE (@2))
3866 && (DECL_P (base0) || TREE_CODE (base0) == STRING_CST))))
3867 (switch
3868 (if (cmp == EQ_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
3869 { constant_boolean_node (known_eq (off0, off1), type); })
3870 (if (cmp == NE_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
3871 { constant_boolean_node (known_ne (off0, off1), type); })
3872 (if (cmp == LT_EXPR && (known_lt (off0, off1) || known_ge (off0, off1)))
3873 { constant_boolean_node (known_lt (off0, off1), type); })
3874 (if (cmp == LE_EXPR && (known_le (off0, off1) || known_gt (off0, off1)))
3875 { constant_boolean_node (known_le (off0, off1), type); })
3876 (if (cmp == GE_EXPR && (known_ge (off0, off1) || known_lt (off0, off1)))
3877 { constant_boolean_node (known_ge (off0, off1), type); })
3878 (if (cmp == GT_EXPR && (known_gt (off0, off1) || known_le (off0, off1)))
3879 { constant_boolean_node (known_gt (off0, off1), type); }))
3880 (if (equal == 0
3881 && DECL_P (base0) && DECL_P (base1)
3882 /* If we compare this as integers require equal offset. */
3883 && (!INTEGRAL_TYPE_P (TREE_TYPE (@2))
3884 || known_eq (off0, off1)))
3885 (switch
3886 (if (cmp == EQ_EXPR)
3887 { constant_boolean_node (false, type); })
3888 (if (cmp == NE_EXPR)
3889 { constant_boolean_node (true, type); })))))))))
3890
3891 /* Simplify pointer equality compares using PTA. */
3892 (for neeq (ne eq)
3893 (simplify
3894 (neeq @0 @1)
3895 (if (POINTER_TYPE_P (TREE_TYPE (@0))
3896 && ptrs_compare_unequal (@0, @1))
3897 { constant_boolean_node (neeq != EQ_EXPR, type); })))
3898
3899 /* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST,
3900 and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST.
3901 Disable the transform if either operand is a pointer to function.
3902 This broke pr22051-2.c for arm, where function pointer
3903 canonicalization is not wanted.  */
3904
3905 (for cmp (ne eq)
3906 (simplify
3907 (cmp (convert @0) INTEGER_CST@1)
3908 (if (((POINTER_TYPE_P (TREE_TYPE (@0))
3909 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0)))
3910 && INTEGRAL_TYPE_P (TREE_TYPE (@1)))
3911 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3912 && POINTER_TYPE_P (TREE_TYPE (@1))
3913 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1)))))
3914 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
3915 (cmp @0 (convert @1)))))
3916
3917 /* Non-equality compare simplifications from fold_binary */
3918 (for cmp (lt gt le ge)
3919 /* Comparisons with the highest or lowest possible integer of
3920 the specified precision will have known values. */
3921 (simplify
3922 (cmp (convert?@2 @0) INTEGER_CST@1)
3923 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
3924 && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)))
3925 (with
3926 {
3927 tree arg1_type = TREE_TYPE (@1);
3928 unsigned int prec = TYPE_PRECISION (arg1_type);
3929 wide_int max = wi::max_value (arg1_type);
3930 wide_int signed_max = wi::max_value (prec, SIGNED);
3931 wide_int min = wi::min_value (arg1_type);
3932 }
3933 (switch
3934 (if (wi::to_wide (@1) == max)
3935 (switch
3936 (if (cmp == GT_EXPR)
3937 { constant_boolean_node (false, type); })
3938 (if (cmp == GE_EXPR)
3939 (eq @2 @1))
3940 (if (cmp == LE_EXPR)
3941 { constant_boolean_node (true, type); })
3942 (if (cmp == LT_EXPR)
3943 (ne @2 @1))))
3944 (if (wi::to_wide (@1) == min)
3945 (switch
3946 (if (cmp == LT_EXPR)
3947 { constant_boolean_node (false, type); })
3948 (if (cmp == LE_EXPR)
3949 (eq @2 @1))
3950 (if (cmp == GE_EXPR)
3951 { constant_boolean_node (true, type); })
3952 (if (cmp == GT_EXPR)
3953 (ne @2 @1))))
3954 (if (wi::to_wide (@1) == max - 1)
3955 (switch
3956 (if (cmp == GT_EXPR)
3957 (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); }))
3958 (if (cmp == LE_EXPR)
3959 (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) + 1); }))))
3960 (if (wi::to_wide (@1) == min + 1)
3961 (switch
3962 (if (cmp == GE_EXPR)
3963 (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); }))
3964 (if (cmp == LT_EXPR)
3965 (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::to_wide (@1) - 1); }))))
3966 (if (wi::to_wide (@1) == signed_max
3967 && TYPE_UNSIGNED (arg1_type)
3968 /* We will flip the signedness of the comparison operator
3969 associated with the mode of @1, so the sign bit is
3970 specified by this mode. Check that @1 is the signed
3971 max associated with this sign bit. */
3972 && prec == GET_MODE_PRECISION (SCALAR_INT_TYPE_MODE (arg1_type))
3973 /* signed_type does not work on pointer types. */
3974 && INTEGRAL_TYPE_P (arg1_type))
3975       /* The following case also applies to X < signed_max+1
3976 	 and X >= signed_max+1 because of previous transformations.  */
3977 (if (cmp == LE_EXPR || cmp == GT_EXPR)
3978 (with { tree st = signed_type_for (arg1_type); }
3979 (if (cmp == LE_EXPR)
3980 (ge (convert:st @0) { build_zero_cst (st); })
3981 (lt (convert:st @0) { build_zero_cst (st); }))))))))))
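/* e.g. (a sketch, unsigned 32-bit x):

     x <  0xffffffffu   becomes   x != 0xffffffffu
     x >= 0xffffffffu   becomes   x == 0xffffffffu
     x >  0x7fffffffu   becomes   (int) x <  0
     x <= 0x7fffffffu   becomes   (int) x >= 0  */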
3982
3983 (for cmp (unordered ordered unlt unle ungt unge uneq ltgt)
3984 /* If the second operand is NaN, the result is constant. */
3985 (simplify
3986 (cmp @0 REAL_CST@1)
3987 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
3988 && (cmp != LTGT_EXPR || ! flag_trapping_math))
3989 { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR
3990 ? false : true, type); })))
3991
3992 /* bool_var != 0 becomes bool_var. */
3993 (simplify
3994 (ne @0 integer_zerop)
3995 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
3996 && types_match (type, TREE_TYPE (@0)))
3997 (non_lvalue @0)))
3998 /* bool_var == 1 becomes bool_var. */
3999 (simplify
4000 (eq @0 integer_onep)
4001 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
4002 && types_match (type, TREE_TYPE (@0)))
4003 (non_lvalue @0)))
4004 /* Do not handle
4005 bool_var == 0 becomes !bool_var or
4006 bool_var != 1 becomes !bool_var
4007 here because that only is good in assignment context as long
4008 as we require a tcc_comparison in GIMPLE_CONDs where we'd
4009 replace if (x == 0) with tem = ~x; if (tem != 0) which is
4010 clearly less optimal and which we'll transform again in forwprop. */
4011
4012 /* When one argument is a constant, overflow detection can be simplified.
4013 Currently restricted to single use so as not to interfere too much with
4014 ADD_OVERFLOW detection in tree-ssa-math-opts.c.
4015 A + CST CMP A -> A CMP' CST' */
4016 (for cmp (lt le ge gt)
4017 out (gt gt le le)
4018 (simplify
4019 (cmp:c (plus@2 @0 INTEGER_CST@1) @0)
4020 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4021 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
4022 && wi::to_wide (@1) != 0
4023 && single_use (@2))
4024 (with { unsigned int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
4025 (out @0 { wide_int_to_tree (TREE_TYPE (@0),
4026 wi::max_value (prec, UNSIGNED)
4027 - wi::to_wide (@1)); })))))
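/* e.g. (a sketch, 32-bit unsigned a):  a + 10 < a, the idiomatic test
   for wrap-around, becomes  a > 4294967285u  (UINT_MAX - 10).  */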
4028
4029 /* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
4030 However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.c
4031 expects the long form, so we restrict the transformation for now. */
4032 (for cmp (gt le)
4033 (simplify
4034 (cmp:c (minus@2 @0 @1) @0)
4035 (if (single_use (@2)
4036 && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
4037 && TYPE_UNSIGNED (TREE_TYPE (@0))
4038 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
4039 (cmp @1 @0))))
4040
4041 /* Testing for overflow is unnecessary if we already know the result. */
4042 /* A - B > A */
4043 (for cmp (gt le)
4044 out (ne eq)
4045 (simplify
4046 (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0)
4047 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4048 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
4049 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
4050 /* A + B < A */
4051 (for cmp (lt ge)
4052 out (ne eq)
4053 (simplify
4054 (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0)
4055 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4056 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
4057 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
4058
4059 /* For unsigned operands, -1 / B < A checks whether A * B would overflow.
4060 Simplify it to __builtin_mul_overflow (A, B, <unused>). */
4061 (for cmp (lt ge)
4062 out (ne eq)
4063 (simplify
4064 (cmp:c (trunc_div:s integer_all_onesp @1) @0)
4065 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0)))
4066 (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
4067 (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
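/* e.g. (a sketch, 32-bit unsigned a and b):  the idiom

     0xffffffffu / b < a

   becomes the equivalent of  __builtin_mul_overflow_p (a, b, 0u),
   i.e. a single IFN_MUL_OVERFLOW whose overflow flag is tested.  */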
4068
4069 /* Simplification of math builtins. These rules must all be optimizations
4070 as well as IL simplifications. If there is a possibility that the new
4071 form could be a pessimization, the rule should go in the canonicalization
4072 section that follows this one.
4073
4074 Rules can generally go in this section if they satisfy one of
4075 the following:
4076
4077 - the rule describes an identity
4078
4079 - the rule replaces calls with something as simple as addition or
4080 multiplication
4081
4082 - the rule contains unary calls only and simplifies the surrounding
4083 arithmetic. (The idea here is to exclude non-unary calls in which
4084 one operand is constant and in which the call is known to be cheap
4085 when the operand has that value.) */
4086
4087 (if (flag_unsafe_math_optimizations)
4088 /* Simplify sqrt(x) * sqrt(x) -> x. */
4089 (simplify
4090 (mult (SQRT_ALL@1 @0) @1)
4091 (if (!HONOR_SNANS (type))
4092 @0))
4093
4094 (for op (plus minus)
4095 /* Simplify (A / C) +- (B / C) -> (A +- B) / C. */
4096 (simplify
4097 (op (rdiv @0 @1)
4098 (rdiv @2 @1))
4099 (rdiv (op @0 @2) @1)))
4100
4101 /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y). */
4102 (for root (SQRT CBRT)
4103 (simplify
4104 (mult (root:s @0) (root:s @1))
4105 (root (mult @0 @1))))
4106
4107 /* Simplify expN(x) * expN(y) -> expN(x+y). */
4108 (for exps (EXP EXP2 EXP10 POW10)
4109 (simplify
4110 (mult (exps:s @0) (exps:s @1))
4111 (exps (plus @0 @1))))
4112
4113 /* Simplify a/root(b/c) into a*root(c/b). */
4114 (for root (SQRT CBRT)
4115 (simplify
4116 (rdiv @0 (root:s (rdiv:s @1 @2)))
4117 (mult @0 (root (rdiv @2 @1)))))
4118
4119 /* Simplify x/expN(y) into x*expN(-y). */
4120 (for exps (EXP EXP2 EXP10 POW10)
4121 (simplify
4122 (rdiv @0 (exps:s @1))
4123 (mult @0 (exps (negate @1)))))
4124
4125 (for logs (LOG LOG2 LOG10 LOG10)
4126 exps (EXP EXP2 EXP10 POW10)
4127 /* logN(expN(x)) -> x. */
4128 (simplify
4129 (logs (exps @0))
4130 @0)
4131 /* expN(logN(x)) -> x. */
4132 (simplify
4133 (exps (logs @0))
4134 @0))
4135
4136 /* Optimize logN(func()) for various exponential functions. We
4137 want to determine the value "x" and the power "exponent" in
4138 order to transform logN(x**exponent) into exponent*logN(x). */
4139 (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10)
4140 exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2)
4141 (simplify
4142 (logs (exps @0))
4143 (if (SCALAR_FLOAT_TYPE_P (type))
4144 (with {
4145 tree x;
4146 switch (exps)
4147 {
4148 CASE_CFN_EXP:
4149 /* Prepare to do logN(exp(exponent)) -> exponent*logN(e). */
4150 x = build_real_truncate (type, dconst_e ());
4151 break;
4152 CASE_CFN_EXP2:
4153 /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2). */
4154 x = build_real (type, dconst2);
4155 break;
4156 CASE_CFN_EXP10:
4157 CASE_CFN_POW10:
4158 /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10). */
4159 {
4160 REAL_VALUE_TYPE dconst10;
4161 real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
4162 x = build_real (type, dconst10);
4163 }
4164 break;
4165 default:
4166 gcc_unreachable ();
4167 }
4168 }
4169 (mult (logs { x; }) @0)))))
4170
4171 (for logs (LOG LOG
4172 LOG2 LOG2
4173 LOG10 LOG10)
4174 exps (SQRT CBRT)
4175 (simplify
4176 (logs (exps @0))
4177 (if (SCALAR_FLOAT_TYPE_P (type))
4178 (with {
4179 tree x;
4180 switch (exps)
4181 {
4182 CASE_CFN_SQRT:
4183 /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x). */
4184 x = build_real (type, dconsthalf);
4185 break;
4186 CASE_CFN_CBRT:
4187 /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x). */
4188 x = build_real_truncate (type, dconst_third ());
4189 break;
4190 default:
4191 gcc_unreachable ();
4192 }
4193 }
4194 (mult { x; } (logs @0))))))
4195
4196 /* logN(pow(x,exponent)) -> exponent*logN(x). */
4197 (for logs (LOG LOG2 LOG10)
4198 pows (POW)
4199 (simplify
4200 (logs (pows @0 @1))
4201 (mult @1 (logs @0))))
4202
4203 /* pow(C,x) -> exp(log(C)*x) if C > 0,
4204 or if C is a positive power of 2,
4205 pow(C,x) -> exp2(log2(C)*x). */
4206 #if GIMPLE
4207 (for pows (POW)
4208 exps (EXP)
4209 logs (LOG)
4210 exp2s (EXP2)
4211 log2s (LOG2)
4212 (simplify
4213 (pows REAL_CST@0 @1)
4214 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
4215 && real_isfinite (TREE_REAL_CST_PTR (@0))
4216 /* As libmvec doesn't have a vectorized exp2, defer optimizing
4217 	 the use_exp2 case until after vectorization.  It actually seems
4218 	 beneficial for all constants to postpone this until later,
4219 	 because exp(log(C)*x), while faster, will have worse precision,
4220 	 and if x folds into a constant too, that is an unnecessary
4221 	 pessimization.  */
4222 && canonicalize_math_after_vectorization_p ())
4223 (with {
4224 const REAL_VALUE_TYPE *const value = TREE_REAL_CST_PTR (@0);
4225 bool use_exp2 = false;
4226 if (targetm.libc_has_function (function_c99_misc)
4227 && value->cl == rvc_normal)
4228 {
4229 REAL_VALUE_TYPE frac_rvt = *value;
4230 SET_REAL_EXP (&frac_rvt, 1);
4231 if (real_equal (&frac_rvt, &dconst1))
4232 use_exp2 = true;
4233 }
4234 }
4235 (if (!use_exp2)
4236 (if (optimize_pow_to_exp (@0, @1))
4237 (exps (mult (logs @0) @1)))
4238 (exp2s (mult (log2s @0) @1)))))))
4239 #endif
4240
4241 /* pow(C,x)*expN(y) -> expN(logN(C)*x+y) if C > 0. */
4242 (for pows (POW)
4243 exps (EXP EXP2 EXP10 POW10)
4244 logs (LOG LOG2 LOG10 LOG10)
4245 (simplify
4246 (mult:c (pows:s REAL_CST@0 @1) (exps:s @2))
4247 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
4248 && real_isfinite (TREE_REAL_CST_PTR (@0)))
4249 (exps (plus (mult (logs @0) @1) @2)))))
4250
4251 (for sqrts (SQRT)
4252 cbrts (CBRT)
4253 pows (POW)
4254 exps (EXP EXP2 EXP10 POW10)
4255 /* sqrt(expN(x)) -> expN(x*0.5). */
4256 (simplify
4257 (sqrts (exps @0))
4258 (exps (mult @0 { build_real (type, dconsthalf); })))
4259 /* cbrt(expN(x)) -> expN(x/3). */
4260 (simplify
4261 (cbrts (exps @0))
4262 (exps (mult @0 { build_real_truncate (type, dconst_third ()); })))
4263 /* pow(expN(x), y) -> expN(x*y). */
4264 (simplify
4265 (pows (exps @0) @1)
4266 (exps (mult @0 @1))))
4267
4268 /* tan(atan(x)) -> x. */
4269 (for tans (TAN)
4270 atans (ATAN)
4271 (simplify
4272 (tans (atans @0))
4273 @0)))
4274
4275 /* Simplify sin(atan(x)) -> x / sqrt(x*x + 1). */
4276 (for sins (SIN)
4277 atans (ATAN)
4278 sqrts (SQRT)
4279 copysigns (COPYSIGN)
4280 (simplify
4281 (sins (atans:s @0))
4282 (with
4283 {
4284 REAL_VALUE_TYPE r_cst;
4285 build_sinatan_real (&r_cst, type);
4286 tree t_cst = build_real (type, r_cst);
4287 tree t_one = build_one_cst (type);
4288 }
4289 (if (SCALAR_FLOAT_TYPE_P (type))
4290 (cond (le (abs @0) { t_cst; })
4291 (rdiv @0 (sqrts (plus (mult @0 @0) { t_one; })))
4292 (copysigns { t_one; } @0))))))
4293
4294 /* Simplify cos(atan(x)) -> 1 / sqrt(x*x + 1). */
4295 (for coss (COS)
4296 atans (ATAN)
4297 sqrts (SQRT)
4298 copysigns (COPYSIGN)
4299 (simplify
4300 (coss (atans:s @0))
4301 (with
4302 {
4303 REAL_VALUE_TYPE r_cst;
4304 build_sinatan_real (&r_cst, type);
4305 tree t_cst = build_real (type, r_cst);
4306 tree t_one = build_one_cst (type);
4307 tree t_zero = build_zero_cst (type);
4308 }
4309 (if (SCALAR_FLOAT_TYPE_P (type))
4310 (cond (le (abs @0) { t_cst; })
4311 (rdiv { t_one; } (sqrts (plus (mult @0 @0) { t_one; })))
4312 (copysigns { t_zero; } @0))))))
4313
4314 /* cabs(x+0i) or cabs(0+xi) -> abs(x). */
4315 (simplify
4316 (CABS (complex:C @0 real_zerop@1))
4317 (abs @0))
4318
4319 /* trunc(trunc(x)) -> trunc(x), etc. */
4320 (for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
4321 (simplify
4322 (fns (fns @0))
4323 (fns @0)))
4324 /* f(x) -> x if x is integer valued and f does nothing for such values. */
4325 (for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
4326 (simplify
4327 (fns integer_valued_real_p@0)
4328 @0))
4329
4330 /* hypot(x,0) and hypot(0,x) -> abs(x). */
4331 (simplify
4332 (HYPOT:c @0 real_zerop@1)
4333 (abs @0))
4334
4335 /* pow(1,x) -> 1. */
4336 (simplify
4337 (POW real_onep@0 @1)
4338 @0)
4339
4340 (simplify
4341 /* copysign(x,x) -> x. */
4342 (COPYSIGN_ALL @0 @0)
4343 @0)
4344
4345 (simplify
4346 /* copysign(x,y) -> fabs(x) if y is nonnegative. */
4347 (COPYSIGN_ALL @0 tree_expr_nonnegative_p@1)
4348 (abs @0))
4349
4350 (for scale (LDEXP SCALBN SCALBLN)
4351 /* ldexp(0, x) -> 0. */
4352 (simplify
4353 (scale real_zerop@0 @1)
4354 @0)
4355 /* ldexp(x, 0) -> x. */
4356 (simplify
4357 (scale @0 integer_zerop@1)
4358 @0)
4359 /* ldexp(x, y) -> x if x is +-Inf or NaN. */
4360 (simplify
4361 (scale REAL_CST@0 @1)
4362 (if (!real_isfinite (TREE_REAL_CST_PTR (@0)))
4363 @0)))
4364
4365 /* Canonicalization of sequences of math builtins. These rules represent
4366 IL simplifications but are not necessarily optimizations.
4367
4368 The sincos pass is responsible for picking "optimal" implementations
4369 of math builtins, which may be more complicated and can sometimes go
4370 the other way, e.g. converting pow into a sequence of sqrts.
4371 We only want to do these canonicalizations before the pass has run. */
4372
4373 (if (flag_unsafe_math_optimizations && canonicalize_math_p ())
4374 /* Simplify tan(x) * cos(x) -> sin(x). */
4375 (simplify
4376 (mult:c (TAN:s @0) (COS:s @0))
4377 (SIN @0))
4378
4379 /* Simplify x * pow(x,c) -> pow(x,c+1). */
4380 (simplify
4381 (mult:c @0 (POW:s @0 REAL_CST@1))
4382 (if (!TREE_OVERFLOW (@1))
4383 (POW @0 (plus @1 { build_one_cst (type); }))))
4384
4385 /* Simplify sin(x) / cos(x) -> tan(x). */
4386 (simplify
4387 (rdiv (SIN:s @0) (COS:s @0))
4388 (TAN @0))
4389
4390 /* Simplify cos(x) / sin(x) -> 1 / tan(x). */
4391 (simplify
4392 (rdiv (COS:s @0) (SIN:s @0))
4393 (rdiv { build_one_cst (type); } (TAN @0)))
4394
4395 /* Simplify sin(x) / tan(x) -> cos(x). */
4396 (simplify
4397 (rdiv (SIN:s @0) (TAN:s @0))
4398 (if (! HONOR_NANS (@0)
4399 && ! HONOR_INFINITIES (@0))
4400 (COS @0)))
4401
4402 /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */
4403 (simplify
4404 (rdiv (TAN:s @0) (SIN:s @0))
4405 (if (! HONOR_NANS (@0)
4406 && ! HONOR_INFINITIES (@0))
4407 (rdiv { build_one_cst (type); } (COS @0))))
4408
4409 /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */
4410 (simplify
4411 (mult (POW:s @0 @1) (POW:s @0 @2))
4412 (POW @0 (plus @1 @2)))
4413
4414 /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */
4415 (simplify
4416 (mult (POW:s @0 @1) (POW:s @2 @1))
4417 (POW (mult @0 @2) @1))
4418
4419 /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y). */
4420 (simplify
4421 (mult (POWI:s @0 @1) (POWI:s @2 @1))
4422 (POWI (mult @0 @2) @1))
4423
4424 /* Simplify pow(x,c) / x -> pow(x,c-1). */
4425 (simplify
4426 (rdiv (POW:s @0 REAL_CST@1) @0)
4427 (if (!TREE_OVERFLOW (@1))
4428 (POW @0 (minus @1 { build_one_cst (type); }))))
4429
4430 /* Simplify x / pow (y,z) -> x * pow(y,-z). */
4431 (simplify
4432 (rdiv @0 (POW:s @1 @2))
4433 (mult @0 (POW @1 (negate @2))))
4434
4435 (for sqrts (SQRT)
4436 cbrts (CBRT)
4437 pows (POW)
4438 /* sqrt(sqrt(x)) -> pow(x,1/4). */
4439 (simplify
4440 (sqrts (sqrts @0))
4441 (pows @0 { build_real (type, dconst_quarter ()); }))
4442 /* sqrt(cbrt(x)) -> pow(x,1/6). */
4443 (simplify
4444 (sqrts (cbrts @0))
4445 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
4446 /* cbrt(sqrt(x)) -> pow(x,1/6). */
4447 (simplify
4448 (cbrts (sqrts @0))
4449 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
4450 /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative. */
4451 (simplify
4452 (cbrts (cbrts tree_expr_nonnegative_p@0))
4453 (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
4454 /* sqrt(pow(x,y)) -> pow(|x|,y*0.5). */
4455 (simplify
4456 (sqrts (pows @0 @1))
4457 (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
4458 /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative. */
4459 (simplify
4460 (cbrts (pows tree_expr_nonnegative_p@0 @1))
4461 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
4462 /* pow(sqrt(x),y) -> pow(x,y*0.5). */
4463 (simplify
4464 (pows (sqrts @0) @1)
4465 (pows @0 (mult @1 { build_real (type, dconsthalf); })))
4466 /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative. */
4467 (simplify
4468 (pows (cbrts tree_expr_nonnegative_p@0) @1)
4469 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
4470 /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative. */
4471 (simplify
4472 (pows (pows tree_expr_nonnegative_p@0 @1) @2)
4473 (pows @0 (mult @1 @2))))
4474
4475 /* cabs(x+xi) -> fabs(x)*sqrt(2). */
4476 (simplify
4477 (CABS (complex @0 @0))
4478 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
4479
4480 /* hypot(x,x) -> fabs(x)*sqrt(2). */
4481 (simplify
4482 (HYPOT @0 @0)
4483 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
4484
4485 /* cexp(x+yi) -> exp(x)*cexpi(y). */
4486 (for cexps (CEXP)
4487 exps (EXP)
4488 cexpis (CEXPI)
4489 (simplify
4490 (cexps compositional_complex@0)
4491 (if (targetm.libc_has_function (function_c99_math_complex))
4492 (complex
4493 (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0))))
4494 (mult @1 (imagpart @2)))))))
4495
4496 (if (canonicalize_math_p ())
4497 /* floor(x) -> trunc(x) if x is nonnegative. */
4498 (for floors (FLOOR_ALL)
4499 truncs (TRUNC_ALL)
4500 (simplify
4501 (floors tree_expr_nonnegative_p@0)
4502 (truncs @0))))
4503
4504 (match double_value_p
4505 @0
4506 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
4507 (for froms (BUILT_IN_TRUNCL
4508 BUILT_IN_FLOORL
4509 BUILT_IN_CEILL
4510 BUILT_IN_ROUNDL
4511 BUILT_IN_NEARBYINTL
4512 BUILT_IN_RINTL)
4513 tos (BUILT_IN_TRUNC
4514 BUILT_IN_FLOOR
4515 BUILT_IN_CEIL
4516 BUILT_IN_ROUND
4517 BUILT_IN_NEARBYINT
4518 BUILT_IN_RINT)
4519 /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double. */
4520 (if (optimize && canonicalize_math_p ())
4521 (simplify
4522 (froms (convert double_value_p@0))
4523 (convert (tos @0)))))
4524
4525 (match float_value_p
4526 @0
4527 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
4528 (for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
4529 BUILT_IN_FLOORL BUILT_IN_FLOOR
4530 BUILT_IN_CEILL BUILT_IN_CEIL
4531 BUILT_IN_ROUNDL BUILT_IN_ROUND
4532 BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
4533 BUILT_IN_RINTL BUILT_IN_RINT)
4534 tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
4535 BUILT_IN_FLOORF BUILT_IN_FLOORF
4536 BUILT_IN_CEILF BUILT_IN_CEILF
4537 BUILT_IN_ROUNDF BUILT_IN_ROUNDF
4538 BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
4539 BUILT_IN_RINTF BUILT_IN_RINTF)
4540 /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc.,
4541 if x is a float. */
4542 (if (optimize && canonicalize_math_p ()
4543 && targetm.libc_has_function (function_c99_misc))
4544 (simplify
4545 (froms (convert float_value_p@0))
4546 (convert (tos @0)))))
4547
4548 (for froms (XFLOORL XCEILL XROUNDL XRINTL)
4549 tos (XFLOOR XCEIL XROUND XRINT)
4550 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double. */
4551 (if (optimize && canonicalize_math_p ())
4552 (simplify
4553 (froms (convert double_value_p@0))
4554 (tos @0))))
4555
4556 (for froms (XFLOORL XCEILL XROUNDL XRINTL
4557 XFLOOR XCEIL XROUND XRINT)
4558 tos (XFLOORF XCEILF XROUNDF XRINTF)
4559 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc.,
4560 if x is a float. */
4561 (if (optimize && canonicalize_math_p ())
4562 (simplify
4563 (froms (convert float_value_p@0))
4564 (tos @0))))
4565
4566 (if (canonicalize_math_p ())
4567 /* xfloor(x) -> fix_trunc(x) if x is nonnegative. */
4568 (for floors (IFLOOR LFLOOR LLFLOOR)
4569 (simplify
4570 (floors tree_expr_nonnegative_p@0)
4571 (fix_trunc @0))))
4572
4573 (if (canonicalize_math_p ())
4574 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued. */
4575 (for fns (IFLOOR LFLOOR LLFLOOR
4576 ICEIL LCEIL LLCEIL
4577 IROUND LROUND LLROUND)
4578 (simplify
4579 (fns integer_valued_real_p@0)
4580 (fix_trunc @0)))
4581 (if (!flag_errno_math)
4582 /* xrint(x) -> fix_trunc(x), etc., if x is integer valued. */
4583 (for rints (IRINT LRINT LLRINT)
4584 (simplify
4585 (rints integer_valued_real_p@0)
4586 (fix_trunc @0)))))
4587
4588 (if (canonicalize_math_p ())
4589 (for ifn (IFLOOR ICEIL IROUND IRINT)
4590 lfn (LFLOOR LCEIL LROUND LRINT)
4591 llfn (LLFLOOR LLCEIL LLROUND LLRINT)
4592 /* Canonicalize iround (x) to lround (x) on ILP32 targets where
4593 sizeof (int) == sizeof (long). */
4594 (if (TYPE_PRECISION (integer_type_node)
4595 == TYPE_PRECISION (long_integer_type_node))
4596 (simplify
4597 (ifn @0)
4598 (lfn:long_integer_type_node @0)))
4599 /* Canonicalize llround (x) to lround (x) on LP64 targets where
4600 sizeof (long long) == sizeof (long). */
4601 (if (TYPE_PRECISION (long_long_integer_type_node)
4602 == TYPE_PRECISION (long_integer_type_node))
4603 (simplify
4604 (llfn @0)
4605 (lfn:long_integer_type_node @0)))))
4606
4607 /* cproj(x) -> x if we're ignoring infinities. */
4608 (simplify
4609 (CPROJ @0)
4610 (if (!HONOR_INFINITIES (type))
4611 @0))
4612
4613 /* If the real part is inf and the imag part is known to be
4614 nonnegative, return (inf + 0i). */
4615 (simplify
4616 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
4617 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
4618 { build_complex_inf (type, false); }))
4619
4620 /* If the imag part is inf, return (inf+I*copysign(0,imag)). */
4621 (simplify
4622 (CPROJ (complex @0 REAL_CST@1))
4623 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
4624 { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))
4625
4626 (for pows (POW)
4627 sqrts (SQRT)
4628 cbrts (CBRT)
4629 (simplify
4630 (pows @0 REAL_CST@1)
4631 (with {
4632 const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
4633 REAL_VALUE_TYPE tmp;
4634 }
4635 (switch
4636 /* pow(x,0) -> 1. */
4637 (if (real_equal (value, &dconst0))
4638 { build_real (type, dconst1); })
4639 /* pow(x,1) -> x. */
4640 (if (real_equal (value, &dconst1))
4641 @0)
4642 /* pow(x,-1) -> 1/x. */
4643 (if (real_equal (value, &dconstm1))
4644 (rdiv { build_real (type, dconst1); } @0))
4645 /* pow(x,0.5) -> sqrt(x). */
4646 (if (flag_unsafe_math_optimizations
4647 && canonicalize_math_p ()
4648 && real_equal (value, &dconsthalf))
4649 (sqrts @0))
4650 /* pow(x,1/3) -> cbrt(x). */
4651 (if (flag_unsafe_math_optimizations
4652 && canonicalize_math_p ()
4653 && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
4654 real_equal (value, &tmp)))
4655 (cbrts @0))))))
4656
4657 /* powi(1,x) -> 1. */
4658 (simplify
4659 (POWI real_onep@0 @1)
4660 @0)
4661
4662 (simplify
4663 (POWI @0 INTEGER_CST@1)
4664 (switch
4665 /* powi(x,0) -> 1. */
4666 (if (wi::to_wide (@1) == 0)
4667 { build_real (type, dconst1); })
4668 /* powi(x,1) -> x. */
4669 (if (wi::to_wide (@1) == 1)
4670 @0)
4671 /* powi(x,-1) -> 1/x. */
4672 (if (wi::to_wide (@1) == -1)
4673 (rdiv { build_real (type, dconst1); } @0))))
4674
4675 /* Narrowing of arithmetic and logical operations.
4676
4677 These are conceptually similar to the transformations performed for
4678 the C/C++ front-ends by shorten_binary_op and shorten_compare. Long
4679 term we want to move all that code out of the front-ends into here. */
4680
4681 /* If we have a narrowing conversion of an arithmetic operation where
4682 both operands are widening conversions from the same type as the outer
4683 narrowing conversion, then convert the innermost operands to a suitable
4684 unsigned type (to avoid introducing undefined behavior), perform the
4685 operation and convert the result to the desired type. */
4686 (for op (plus minus)
4687 (simplify
4688 (convert (op:s (convert@2 @0) (convert?@3 @1)))
4689 (if (INTEGRAL_TYPE_P (type)
4690 /* We check for type compatibility between @0 and @1 below,
4691 so there's no need to check that @1/@3 are integral types. */
4692 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
4693 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
4694 /* The precision of the type of each operand must match the
4695 precision of the mode of each operand, similarly for the
4696 result. */
4697 && type_has_mode_precision_p (TREE_TYPE (@0))
4698 && type_has_mode_precision_p (TREE_TYPE (@1))
4699 && type_has_mode_precision_p (type)
4700 /* The inner conversion must be a widening conversion. */
4701 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
4702 && types_match (@0, type)
4703 && (types_match (@0, @1)
4704 	   /* Or the second operand is a constant integer, or a constant
4705 	      integer converted by valueize.  */
4706 || TREE_CODE (@1) == INTEGER_CST))
4707 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
4708 (op @0 (convert @1))
4709 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
4710 (convert (op (convert:utype @0)
4711 (convert:utype @1))))))))
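/* A sketch of the shape this matches, assuming 16-bit short and
   32-bit int:

     short f (short a, short b)
     {
       return (short) ((int) a + (int) b);
     }

   The addition is rewritten to be performed directly in unsigned
   short (unlike C, GIMPLE can add in the narrow type), roughly

     (short) ((unsigned short) a + (unsigned short) b)

   The unsigned intermediate type avoids introducing undefined signed
   overflow in the narrower type.  */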
4712
4713 /* This is another case of narrowing, specifically when there's an outer
4714 BIT_AND_EXPR which masks off bits outside the type of the innermost
4715 operands. Like the previous case we have to convert the operands
4716 to unsigned types to avoid introducing undefined behavior for the
4717 arithmetic operation. */
4718 (for op (minus plus)
4719 (simplify
4720 (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
4721 (if (INTEGRAL_TYPE_P (type)
4722 /* We check for type compatibility between @0 and @1 below,
4723 so there's no need to check that @1/@3 are integral types. */
4724 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
4725 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
4726 /* The precision of the type of each operand must match the
4727 precision of the mode of each operand, similarly for the
4728 result. */
4729 && type_has_mode_precision_p (TREE_TYPE (@0))
4730 && type_has_mode_precision_p (TREE_TYPE (@1))
4731 && type_has_mode_precision_p (type)
4732 /* The inner conversion must be a widening conversion. */
4733 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
4734 && types_match (@0, @1)
4735 && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
4736 <= TYPE_PRECISION (TREE_TYPE (@0)))
4737 && (wi::to_wide (@4)
4738 & wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
4739 true, TYPE_PRECISION (type))) == 0)
4740 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
4741 (with { tree ntype = TREE_TYPE (@0); }
4742 (convert (bit_and (op @0 @1) (convert:ntype @4))))
4743 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
4744 (convert (bit_and (op (convert:utype @0) (convert:utype @1))
4745 (convert:utype @4))))))))
4746
4747 /* Transform (@0 < @1 and @0 < @2) to use min, (@0 > @1 and @0 > @2)
4748 to use max; with "or" instead of "and", min and max swap roles.  */
4749 (for logic (bit_and bit_and bit_and bit_and bit_ior bit_ior bit_ior bit_ior)
4750 op (lt le gt ge lt le gt ge )
4751 ext (min min max max max max min min )
4752 (simplify
4753 (logic (op:cs @0 @1) (op:cs @0 @2))
4754 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4755 && TREE_CODE (@0) != INTEGER_CST)
4756 (op @0 (ext @1 @2)))))
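
/* E.g. (x < a && x < b) becomes x < MIN (a, b), while
   (x < a || x < b) becomes x < MAX (a, b).  */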
4757
4758 (simplify
4759 /* signbit(x) -> 0 if x is nonnegative. */
4760 (SIGNBIT tree_expr_nonnegative_p@0)
4761 { integer_zero_node; })
4762
4763 (simplify
4764 /* signbit(x) -> x<0 if x doesn't have signed zeros. */
4765 (SIGNBIT @0)
4766 (if (!HONOR_SIGNED_ZEROS (@0))
4767 (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); }))))
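
/* For instance, when HONOR_SIGNED_ZEROS is false (e.g. under
   -fno-signed-zeros), signbit (x) can be rewritten as x < 0.0.
   With signed zeros the fold would be wrong: signbit (-0.0) is
   nonzero even though -0.0 < 0.0 is false.  */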
4768
4769 /* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1. */
4770 (for cmp (eq ne)
4771 (for op (plus minus)
4772 rop (minus plus)
4773 (simplify
4774 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
4775 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
4776 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
4777 && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0))
4778 && !TYPE_SATURATING (TREE_TYPE (@0)))
4779 (with { tree res = int_const_binop (rop, @2, @1); }
4780 (if (TREE_OVERFLOW (res)
4781 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
4782 { constant_boolean_node (cmp == NE_EXPR, type); }
4783 (if (single_use (@3))
4784 (cmp @0 { TREE_OVERFLOW (res)
4785 ? drop_tree_overflow (res) : res; }))))))))
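
/* For the eq/ne form above, e.g. x + 10 == 30 becomes x == 20.  If
   computing C2 -+ C1 overflows and signed overflow is undefined, no
   valid x can satisfy the equality, so e.g. x - 10 == INT_MAX folds
   directly to false (and the ne form to true).  */
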
4786 (for cmp (lt le gt ge)
4787 (for op (plus minus)
4788 rop (minus plus)
4789 (simplify
4790 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
4791 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
4792 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
4793 (with { tree res = int_const_binop (rop, @2, @1); }
4794 (if (TREE_OVERFLOW (res))
4795 {
4796 fold_overflow_warning (("assuming signed overflow does not occur "
4797 "when simplifying conditional to constant"),
4798 WARN_STRICT_OVERFLOW_CONDITIONAL);
4799 bool less = cmp == LE_EXPR || cmp == LT_EXPR;
4800 /* wi::ges_p (@2, 0) should be sufficient for a signed type. */
4801 bool ovf_high = wi::lt_p (wi::to_wide (@1), 0,
4802 TYPE_SIGN (TREE_TYPE (@1)))
4803 != (op == MINUS_EXPR);
4804 constant_boolean_node (less == ovf_high, type);
4805 }
4806 (if (single_use (@3))
4807 (with
4808 {
4809 fold_overflow_warning (("assuming signed overflow does not occur "
4810 "when changing X +- C1 cmp C2 to "
4811 "X cmp C2 -+ C1"),
4812 WARN_STRICT_OVERFLOW_COMPARISON);
4813 }
4814 (cmp @0 { res; })))))))))
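
/* A worked case for the relational form above: x - 1 < INT_MAX would
   need the constant INT_MAX + 1, which overflows; with undefined
   signed overflow x - 1 cannot exceed INT_MAX, so the comparison
   folds to true (after the strict-overflow warning).  */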
4815
4816 /* Canonicalizations of BIT_FIELD_REFs. */
4817
4818 (simplify
4819 (BIT_FIELD_REF (BIT_FIELD_REF @0 @1 @2) @3 @4)
4820 (BIT_FIELD_REF @0 @3 { const_binop (PLUS_EXPR, bitsizetype, @2, @4); }))
4821
4822 (simplify
4823 (BIT_FIELD_REF (view_convert @0) @1 @2)
4824 (BIT_FIELD_REF @0 @1 @2))
4825
4826 (simplify
4827 (BIT_FIELD_REF @0 @1 integer_zerop)
4828 (if (tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (@0))))
4829 (view_convert @0)))
4830
4831 (simplify
4832 (BIT_FIELD_REF @0 @1 @2)
4833 (switch
4834 (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE
4835 && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
4836 (switch
4837 (if (integer_zerop (@2))
4838 (view_convert (realpart @0)))
4839 (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
4840 (view_convert (imagpart @0)))))
4841 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4842 && INTEGRAL_TYPE_P (type)
4843 /* On GIMPLE this should only apply to register arguments. */
4844 && (! GIMPLE || is_gimple_reg (@0))
4845 /* A BIT_FIELD_REF that references the full argument can be stripped. */
4846 && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0
4847 && integer_zerop (@2))
4848 /* Low-parts can be reduced to integral conversions.
4849 ??? The following doesn't work for PDP endian. */
4850 || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
4851 /* Don't even think about BITS_BIG_ENDIAN. */
4852 && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0
4853 && TYPE_PRECISION (type) % BITS_PER_UNIT == 0
4854 && compare_tree_int (@2, (BYTES_BIG_ENDIAN
4855 ? (TYPE_PRECISION (TREE_TYPE (@0))
4856 - TYPE_PRECISION (type))
4857 : 0)) == 0)))
4858 (convert @0))))
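
/* As an illustration of the low-part case above: on a little-endian
   target, BIT_FIELD_REF <int_var, 8, 0> taking the low byte can be
   rewritten as a plain (unsigned char) conversion of int_var.  */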
4859
4860 /* Simplify vector extracts. */
4861
4862 (simplify
4863 (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2)
4864 (if (VECTOR_TYPE_P (TREE_TYPE (@0))
4865 && (types_match (type, TREE_TYPE (TREE_TYPE (@0)))
4866 || (VECTOR_TYPE_P (type)
4867 && types_match (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
4868 (with
4869 {
4870 tree ctor = (TREE_CODE (@0) == SSA_NAME
4871 ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
4872 tree eltype = TREE_TYPE (TREE_TYPE (ctor));
4873 unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype));
4874 unsigned HOST_WIDE_INT n = tree_to_uhwi (@1);
4875 unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2);
4876 }
4877 (if (n != 0
4878 && (idx % width) == 0
4879 && (n % width) == 0
4880 && known_le ((idx + n) / width,
4881 TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor))))
4882 (with
4883 {
4884 idx = idx / width;
4885 n = n / width;
4886 /* Constructor elements can be subvectors. */
4887 poly_uint64 k = 1;
4888 if (CONSTRUCTOR_NELTS (ctor) != 0)
4889 {
4890 tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value);
4891 if (TREE_CODE (cons_elem) == VECTOR_TYPE)
4892 k = TYPE_VECTOR_SUBPARTS (cons_elem);
4893 }
4894 unsigned HOST_WIDE_INT elt, count, const_k;
4895 }
4896 (switch
4897 /* We keep an exact subset of the constructor elements. */
4898 (if (multiple_p (idx, k, &elt) && multiple_p (n, k, &count))
4899 (if (CONSTRUCTOR_NELTS (ctor) == 0)
4900 { build_constructor (type, NULL); }
4901 (if (count == 1)
4902 (if (elt < CONSTRUCTOR_NELTS (ctor))
4903 (view_convert { CONSTRUCTOR_ELT (ctor, elt)->value; })
4904 { build_zero_cst (type); })
4905 {
4906 vec<constructor_elt, va_gc> *vals;
4907 vec_alloc (vals, count);
4908 for (unsigned i = 0;
4909 i < count && elt + i < CONSTRUCTOR_NELTS (ctor); ++i)
4910 CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE,
4911 CONSTRUCTOR_ELT (ctor, elt + i)->value);
4912 build_constructor (type, vals);
4913 })))
4914 /* The bitfield references a single constructor element. */
4915 (if (k.is_constant (&const_k)
4916 && idx + n <= (idx / const_k + 1) * const_k)
4917 (switch
4918 (if (CONSTRUCTOR_NELTS (ctor) <= idx / const_k)
4919 { build_zero_cst (type); })
4920 (if (n == const_k)
4921 (view_convert { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }))
4922 (BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }
4923 @1 { bitsize_int ((idx % const_k) * width); })))))))))
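
/* E.g. extracting lane 2 of a four-element CONSTRUCTOR {a, b, c, d}
   with a matching BIT_FIELD_REF yields c directly (via a
   VIEW_CONVERT_EXPR), while an extraction beyond the listed elements
   yields a zero of the requested type.  */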
4924
4925 /* Simplify a bit extraction from a bit insertion for the cases where
4926    the inserted element fully covers the extraction or the insertion
4927    does not touch the extraction at all. */
4928 (simplify
4929 (BIT_FIELD_REF (bit_insert @0 @1 @ipos) @rsize @rpos)
4930 (with
4931 {
4932 unsigned HOST_WIDE_INT isize;
4933 if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
4934 isize = TYPE_PRECISION (TREE_TYPE (@1));
4935 else
4936 isize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (@1)));
4937 }
4938 (switch
4939 (if (wi::leu_p (wi::to_wide (@ipos), wi::to_wide (@rpos))
4940 && wi::leu_p (wi::to_wide (@rpos) + wi::to_wide (@rsize),
4941 wi::to_wide (@ipos) + isize))
4942 (BIT_FIELD_REF @1 @rsize { wide_int_to_tree (bitsizetype,
4943 wi::to_wide (@rpos)
4944 - wi::to_wide (@ipos)); }))
4945 (if (wi::geu_p (wi::to_wide (@ipos),
4946 wi::to_wide (@rpos) + wi::to_wide (@rsize))
4947 || wi::geu_p (wi::to_wide (@rpos),
4948 wi::to_wide (@ipos) + isize))
4949 (BIT_FIELD_REF @0 @rsize @rpos)))))
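
/* E.g. reading back exactly the inserted lane,
     BIT_FIELD_REF <BIT_INSERT_EXPR <v, x, 32>, 32, 32>
   simplifies back to x (possibly through a VIEW_CONVERT_EXPR), while
   a read disjoint from the insertion window simplifies to the same
   read from the original vector v.  */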
4950
4951 (if (canonicalize_math_after_vectorization_p ())
4952 (for fmas (FMA)
4953 (simplify
4954 (fmas:c (negate @0) @1 @2)
4955 (IFN_FNMA @0 @1 @2))
4956 (simplify
4957 (fmas @0 @1 (negate @2))
4958 (IFN_FMS @0 @1 @2))
4959 (simplify
4960 (fmas:c (negate @0) @1 (negate @2))
4961 (IFN_FNMS @0 @1 @2))
4962 (simplify
4963 (negate (fmas@3 @0 @1 @2))
4964 (if (single_use (@3))
4965 (IFN_FNMS @0 @1 @2))))
4966
4967 (simplify
4968 (IFN_FMS:c (negate @0) @1 @2)
4969 (IFN_FNMS @0 @1 @2))
4970 (simplify
4971 (IFN_FMS @0 @1 (negate @2))
4972 (IFN_FMA @0 @1 @2))
4973 (simplify
4974 (IFN_FMS:c (negate @0) @1 (negate @2))
4975 (IFN_FNMA @0 @1 @2))
4976 (simplify
4977 (negate (IFN_FMS@3 @0 @1 @2))
4978 (if (single_use (@3))
4979 (IFN_FNMA @0 @1 @2)))
4980
4981 (simplify
4982 (IFN_FNMA:c (negate @0) @1 @2)
4983 (IFN_FMA @0 @1 @2))
4984 (simplify
4985 (IFN_FNMA @0 @1 (negate @2))
4986 (IFN_FNMS @0 @1 @2))
4987 (simplify
4988 (IFN_FNMA:c (negate @0) @1 (negate @2))
4989 (IFN_FMS @0 @1 @2))
4990 (simplify
4991 (negate (IFN_FNMA@3 @0 @1 @2))
4992 (if (single_use (@3))
4993 (IFN_FMS @0 @1 @2)))
4994
4995 (simplify
4996 (IFN_FNMS:c (negate @0) @1 @2)
4997 (IFN_FMS @0 @1 @2))
4998 (simplify
4999 (IFN_FNMS @0 @1 (negate @2))
5000 (IFN_FNMA @0 @1 @2))
5001 (simplify
5002 (IFN_FNMS:c (negate @0) @1 (negate @2))
5003 (IFN_FMA @0 @1 @2))
5004 (simplify
5005 (negate (IFN_FNMS@3 @0 @1 @2))
5006 (if (single_use (@3))
5007 (IFN_FMA @0 @1 @2))))
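
/* Summarising the sign-flip rules above in C-like terms (a sketch):
     fma (-a, b, c)  -> fnma (a, b, c)   i.e. c - a * b
     fma (a, b, -c)  -> fms (a, b, c)    i.e. a * b - c
     -fma (a, b, c)  -> fnms (a, b, c)   i.e. -(a * b) - c
   and analogously when starting from FMS, FNMA or FNMS.  */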
5008
5009 /* POPCOUNT simplifications. */
5010 (for popcount (BUILT_IN_POPCOUNT BUILT_IN_POPCOUNTL BUILT_IN_POPCOUNTLL
5011 BUILT_IN_POPCOUNTIMAX)
5012 /* popcount(X&1) is a plain conversion of X&1, since at most one bit is set. */
5013 (simplify
5014 (popcount @0)
5015 (if (tree_nonzero_bits (@0) == 1)
5016 (convert @0)))
5017 /* popcount(X) + popcount(Y) is popcount(X|Y) when X&Y must be zero. */
5018 (simplify
5019 (plus (popcount:s @0) (popcount:s @1))
5020 (if (wi::bit_and (tree_nonzero_bits (@0), tree_nonzero_bits (@1)) == 0)
5021 (popcount (bit_ior @0 @1))))
5022 /* popcount(X) == 0 is X == 0, and related (in)equalities. */
5023 (for cmp (le eq ne gt)
5024 rep (eq eq ne ne)
5025 (simplify
5026 (cmp (popcount @0) integer_zerop)
5027 (rep @0 { build_zero_cst (TREE_TYPE (@0)); }))))
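
/* For example, with disjoint known-nonzero bits,
     __builtin_popcount (x & 0xf0) + __builtin_popcount (x & 0x0f)
   becomes __builtin_popcount ((x & 0xf0) | (x & 0x0f)), which can
   then fold further to __builtin_popcount (x & 0xff); and
   __builtin_popcount (x) == 0 becomes x == 0.  */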
5028
5029 /* Simplify:
5030
5031 a = a1 op a2
5032 r = c ? a : b;
5033
5034 to:
5035
5036 r = c ? a1 op a2 : b;
5037
5038 if the target can do it in one go. This makes the operation conditional
5039 on c, so it could drop potentially-trapping arithmetic, but that's a valid
5040 simplification if the result of the operation isn't needed. */
5041 (for uncond_op (UNCOND_BINARY)
5042 cond_op (COND_BINARY)
5043 (simplify
5044 (vec_cond @0 (view_convert? (uncond_op@4 @1 @2)) @3)
5045 (with { tree op_type = TREE_TYPE (@4); }
5046 (if (element_precision (type) == element_precision (op_type))
5047 (view_convert (cond_op @0 @1 @2 (view_convert:op_type @3))))))
5048 (simplify
5049 (vec_cond @0 @1 (view_convert? (uncond_op@4 @2 @3)))
5050 (with { tree op_type = TREE_TYPE (@4); }
5051 (if (element_precision (type) == element_precision (op_type))
5052 (view_convert (cond_op (bit_not @0) @2 @3 (view_convert:op_type @1)))))))
5053
5054 /* Same for ternary operations. */
5055 (for uncond_op (UNCOND_TERNARY)
5056 cond_op (COND_TERNARY)
5057 (simplify
5058 (vec_cond @0 (view_convert? (uncond_op@5 @1 @2 @3)) @4)
5059 (with { tree op_type = TREE_TYPE (@5); }
5060 (if (element_precision (type) == element_precision (op_type))
5061 (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @4))))))
5062 (simplify
5063 (vec_cond @0 @1 (view_convert? (uncond_op@5 @2 @3 @4)))
5064 (with { tree op_type = TREE_TYPE (@5); }
5065 (if (element_precision (type) == element_precision (op_type))
5066 (view_convert (cond_op (bit_not @0) @2 @3 @4
5067 (view_convert:op_type @1)))))))
5068
5069 /* Detect cases in which a VEC_COND_EXPR effectively replaces the
5070 "else" value of an IFN_COND_*. */
5071 (for cond_op (COND_BINARY)
5072 (simplify
5073 (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3)) @4)
5074 (with { tree op_type = TREE_TYPE (@3); }
5075 (if (element_precision (type) == element_precision (op_type))
5076 (view_convert (cond_op @0 @1 @2 (view_convert:op_type @4))))))
5077 (simplify
5078 (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5)))
5079 (with { tree op_type = TREE_TYPE (@5); }
5080 (if (inverse_conditions_p (@0, @2)
5081 && element_precision (type) == element_precision (op_type))
5082 (view_convert (cond_op @2 @3 @4 (view_convert:op_type @1)))))))
5083
5084 /* Same for ternary operations. */
5085 (for cond_op (COND_TERNARY)
5086 (simplify
5087 (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3 @4)) @5)
5088 (with { tree op_type = TREE_TYPE (@4); }
5089 (if (element_precision (type) == element_precision (op_type))
5090 (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @5))))))
5091 (simplify
5092 (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5 @6)))
5093 (with { tree op_type = TREE_TYPE (@6); }
5094 (if (inverse_conditions_p (@0, @2)
5095 && element_precision (type) == element_precision (op_type))
5096 (view_convert (cond_op @2 @3 @4 @5 (view_convert:op_type @1)))))))
5097
5098 /* For pointers @0 and @2 and nonnegative constant offset @1, look for
5099 expressions like:
5100
5101 A: (@0 + @1 < @2) | (@2 + @1 < @0)
5102 B: (@0 + @1 <= @2) | (@2 + @1 <= @0)
5103
5104 If pointers are known not to wrap, B checks whether @1 bytes starting
5105 at @0 and @2 do not overlap, while A tests the same thing for @1 + 1
5106 bytes. A is more efficiently tested as:
5107
5108 A: (sizetype) (@0 + @1 - @2) > @1 * 2
5109
5110 The equivalent expression for B is given by replacing @1 with @1 - 1:
5111
5112 B: (sizetype) (@0 + (@1 - 1) - @2) > (@1 - 1) * 2
5113
5114 @0 and @2 can be swapped in both expressions without changing the result.
5115
5116 The folds rely on sizetype's being unsigned (which is always true)
5117 and on its being the same width as the pointer (which we have to check).
5118
5119 The fold replaces two pointer_plus expressions, two comparisons and
5120 an IOR with a pointer_plus, a pointer_diff, and a comparison, so in
5121 the best case it's a saving of two operations. The A fold retains one
5122 of the original pointer_pluses, so is a win even if both pointer_pluses
5123 are used elsewhere. The B fold is a wash if both pointer_pluses are
5124 used elsewhere, since all we end up doing is replacing a comparison with
5125 a pointer_plus. We do still apply the fold under those circumstances
5126 though, in case applying it to other conditions eventually makes one of the
5127 pointer_pluses dead. */
5128 (for ior (truth_orif truth_or bit_ior)
5129 (for cmp (le lt)
5130 (simplify
5131 (ior (cmp:cs (pointer_plus@3 @0 INTEGER_CST@1) @2)
5132 (cmp:cs (pointer_plus@4 @2 @1) @0))
5133 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
5134 && TYPE_OVERFLOW_WRAPS (sizetype)
5135 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (sizetype))
5136 /* Calculate the rhs constant. */
5137 (with { offset_int off = wi::to_offset (@1) - (cmp == LE_EXPR ? 1 : 0);
5138 offset_int rhs = off * 2; }
5139 /* Always fails for negative values. */
5140 (if (wi::min_precision (rhs, UNSIGNED) <= TYPE_PRECISION (sizetype))
5141 /* Since the order of @0 and @2 doesn't matter, let tree_swap_operands_p
5142 pick a canonical order. This increases the chances of using the
5143 same pointer_plus in multiple checks. */
5144 (with { bool swap_p = tree_swap_operands_p (@0, @2);
5145 tree rhs_tree = wide_int_to_tree (sizetype, rhs); }
5146 (if (cmp == LT_EXPR)
5147 (gt (convert:sizetype
5148 (pointer_diff:ssizetype { swap_p ? @4 : @3; }
5149 { swap_p ? @0 : @2; }))
5150 { rhs_tree; })
5151 (gt (convert:sizetype
5152 (pointer_diff:ssizetype
5153 (pointer_plus { swap_p ? @2 : @0; }
5154 { wide_int_to_tree (sizetype, off); })
5155 { swap_p ? @0 : @2; }))
5156 { rhs_tree; })))))))))
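
/* A worked instance of the A form (an editorial example): for
     char *p, *q;  if (p + 16 < q || q + 16 < p) ...
   the test becomes
     if ((sizetype) (p + 16 - q) > 32) ...
   which checks that the 17-byte ranges at p and q do not overlap
   using a single comparison, assuming pointer overflow is undefined
   and sizetype matches the pointer width.  */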