1 /* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
2 This file is consumed by genmatch which produces gimple-match.c
3 and generic-match.c from it.
5 Copyright (C) 2014-2015 Free Software Foundation, Inc.
6 Contributed by Richard Biener <rguenther@suse.de>
7 and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
26 /* Generic tree predicates we inherit. */
28 integer_onep integer_zerop integer_all_onesp integer_minus_onep
29 integer_each_onep integer_truep integer_nonzerop
30 real_zerop real_onep real_minus_onep
33 tree_expr_nonnegative_p
39 (define_operator_list tcc_comparison
40 lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
41 (define_operator_list inverted_tcc_comparison
42 ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
43 (define_operator_list inverted_tcc_comparison_with_nans
44 unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
45 (define_operator_list swapped_tcc_comparison
46 gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
47 (define_operator_list simple_comparison lt le eq ne ge gt)
48 (define_operator_list swapped_simple_comparison gt ge eq ne le lt)
50 /* Define an operand list for math function FN, with float, double and
51 long double variants (in that order). */
52 #define DEFINE_MATH_FN(FN) \
53 (define_operator_list FN BUILT_IN_##FN##F BUILT_IN_##FN BUILT_IN_##FN##L)
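/* For illustration (not a pattern of its own): DEFINE_MATH_FN (COPYSIGN)
   expands to
     (define_operator_list COPYSIGN
       BUILT_IN_COPYSIGNF BUILT_IN_COPYSIGN BUILT_IN_COPYSIGNL)
   so a single COPYSIGN operator in the patterns below matches all three
   precisions. */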
55 /* Define operand lists for math rounding functions {,i,l,ll}FN,
56 where the versions prefixed with "i" return an int, those prefixed with
57 "l" return a long and those prefixed with "ll" return a long long.
59 Also define operand lists:
61 X<FN>F for all float functions, in the order i, l, ll
62 X<FN> for all double functions, in the same order
63 X<FN>L for all long double functions, in the same order. */
64 #define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
66 DEFINE_MATH_FN (I##FN) \
67 DEFINE_MATH_FN (L##FN) \
68 DEFINE_MATH_FN (LL##FN) \
69 (define_operator_list X##FN##F BUILT_IN_I##FN##F \
72 (define_operator_list X##FN BUILT_IN_I##FN \
75 (define_operator_list X##FN##L BUILT_IN_I##FN##L \
83 DEFINE_MATH_FN (LOG10)
84 DEFINE_MATH_FN (EXP10)
86 DEFINE_MATH_FN (POW10)
96 DEFINE_MATH_FN (CEXPI)
97 DEFINE_MATH_FN (CPROJ)
99 DEFINE_MATH_FN (CCOSH)
100 DEFINE_MATH_FN (HYPOT)
101 DEFINE_MATH_FN (COPYSIGN)
102 DEFINE_MATH_FN (CABS)
103 DEFINE_MATH_FN (TRUNC)
104 DEFINE_MATH_FN (NEARBYINT)
105 DEFINE_MATH_FN (SIGNBIT)
106 DEFINE_MATH_FN (FMIN)
107 DEFINE_MATH_FN (FMAX)
108 DEFINE_MATH_FN (LDEXP)
109 DEFINE_MATH_FN (SCALBN)
110 DEFINE_MATH_FN (SCALBLN)
112 DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
113 DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
114 DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
115 DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
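/* For example, DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR) above provides the
   operator lists FLOOR, IFLOOR, LFLOOR and LLFLOOR (each covering the float,
   double and long double built-ins) plus XFLOORF, XFLOOR and XFLOORL, which
   group the int-, long- and long long-returning variants by argument type. */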
117 /* Simplifications of operations with one constant operand and
118 simplifications to constants or single values. */
120 (for op (plus pointer_plus minus bit_ior bit_xor)
122 (op @0 integer_zerop)
125 /* 0 +p index -> (type)index */
127 (pointer_plus integer_zerop @1)
128 (non_lvalue (convert @1)))
130 /* See if ARG1 is zero and X + ARG1 reduces to X.
131 Likewise if the operands are reversed. */
133 (plus:c @0 real_zerop@1)
134 (if (fold_real_zero_addition_p (type, @1, 0))
137 /* See if ARG1 is zero and X - ARG1 reduces to X. */
139 (minus @0 real_zerop@1)
140 (if (fold_real_zero_addition_p (type, @1, 1))
144 /* Simplify x - x to 0. This is unsafe for certain floats even in non-IEEE formats.
145 In IEEE, it is unsafe because it does wrong for NaNs.
146 Also note that operand_equal_p is always false if an operand
150 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
151 { build_zero_cst (type); }))
154 (mult @0 integer_zerop@1)
157 /* Maybe fold x * 0 to 0. The expressions aren't the same
158 when x is NaN, since x * 0 is also NaN. Nor are they the
159 same in modes with signed zeros, since multiplying a
160 negative value by 0 gives -0, not +0. */
162 (mult @0 real_zerop@1)
163 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
166 /* In IEEE floating point, x*1 is not equivalent to x for snans.
167 Likewise for complex arithmetic with signed zeros. */
170 (if (!HONOR_SNANS (type)
171 && (!HONOR_SIGNED_ZEROS (type)
172 || !COMPLEX_FLOAT_TYPE_P (type)))
175 /* Transform x * -1.0 into -x. */
177 (mult @0 real_minus_onep)
178 (if (!HONOR_SNANS (type)
179 && (!HONOR_SIGNED_ZEROS (type)
180 || !COMPLEX_FLOAT_TYPE_P (type)))
183 /* Make sure to preserve divisions by zero. This is the reason why
184 we don't simplify x / x to 1 or 0 / x to 0. */
185 (for op (mult trunc_div ceil_div floor_div round_div exact_div)
191 (for div (trunc_div ceil_div floor_div round_div exact_div)
193 (div @0 integer_minus_onep@1)
194 (if (!TYPE_UNSIGNED (type))
197 /* For unsigned integral types, FLOOR_DIV_EXPR is the same as
198 TRUNC_DIV_EXPR. Rewrite into the latter in this case. */
201 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
202 && TYPE_UNSIGNED (type))
205 /* Combine two successive divisions. Note that combining ceil_div
206 and floor_div is trickier and combining round_div even more so. */
207 (for div (trunc_div exact_div)
209 (div (div @0 INTEGER_CST@1) INTEGER_CST@2)
212 wide_int mul = wi::mul (@1, @2, TYPE_SIGN (type), &overflow_p);
215 (div @0 { wide_int_to_tree (type, mul); })
216 (if (TYPE_UNSIGNED (type)
217 || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
218 { build_zero_cst (type); })))))
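/* For instance, (X / 3) / 5 becomes X / 15. If the product of the two
   divisors overflows the type, the quotient must already be zero, e.g.
   (X / 65536) / 65536 folds to 0 for a 32-bit unsigned X. */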
220 /* Optimize A / A to 1.0 if we don't care about
221 NaNs or Infinities. */
224 (if (FLOAT_TYPE_P (type)
225 && ! HONOR_NANS (type)
226 && ! HONOR_INFINITIES (type))
227 { build_one_cst (type); }))
229 /* Optimize -A / A to -1.0 if we don't care about
230 NaNs or Infinities. */
232 (rdiv:c @0 (negate @0))
233 (if (FLOAT_TYPE_P (type)
234 && ! HONOR_NANS (type)
235 && ! HONOR_INFINITIES (type))
236 { build_minus_one_cst (type); }))
238 /* In IEEE floating point, x/1 is not equivalent to x for snans. */
241 (if (!HONOR_SNANS (type))
244 /* In IEEE floating point, x/-1 is not equivalent to -x for snans. */
246 (rdiv @0 real_minus_onep)
247 (if (!HONOR_SNANS (type))
250 (if (flag_reciprocal_math)
251 /* Convert (A/B)/C to A/(B*C) */
253 (rdiv (rdiv:s @0 @1) @2)
254 (rdiv @0 (mult @1 @2)))
256 /* Convert A/(B/C) to (A/B)*C */
258 (rdiv @0 (rdiv:s @1 @2))
259 (mult (rdiv @0 @1) @2)))
261 /* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
262 (for div (trunc_div ceil_div floor_div round_div exact_div)
264 (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
265 (if (integer_pow2p (@2)
266 && tree_int_cst_sgn (@2) > 0
267 && wi::add (@2, @1) == 0
268 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
269 (rshift (convert @0) { build_int_cst (integer_type_node,
270 wi::exact_log2 (@2)); }))))
272 /* If ARG1 is a constant, we can convert this to a multiply by the
273 reciprocal. This does not have the same rounding properties,
274 so only do this if -freciprocal-math. We can actually
275 always safely do it if ARG1 is a power of two, but it's hard to
276 tell if it is or not in a portable manner. */
277 (for cst (REAL_CST COMPLEX_CST VECTOR_CST)
281 (if (flag_reciprocal_math
284 { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
286 (mult @0 { tem; } )))
287 (if (cst != COMPLEX_CST)
288 (with { tree inverse = exact_inverse (type, @1); }
290 (mult @0 { inverse; } ))))))))
292 /* Same applies to modulo operations, but fold is inconsistent here
293 and simplifies 0 % x to 0, only preserving literal 0 % 0. */
294 (for mod (ceil_mod floor_mod round_mod trunc_mod)
295 /* 0 % X is always zero. */
297 (mod integer_zerop@0 @1)
298 /* But not for 0 % 0 so that we can get the proper warnings and errors. */
299 (if (!integer_zerop (@1))
301 /* X % 1 is always zero. */
303 (mod @0 integer_onep)
304 { build_zero_cst (type); })
305 /* X % -1 is zero. */
307 (mod @0 integer_minus_onep@1)
308 (if (!TYPE_UNSIGNED (type))
309 { build_zero_cst (type); }))
310 /* (X % Y) % Y is just X % Y. */
312 (mod (mod@2 @0 @1) @1)
314 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2. */
316 (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
317 (if (ANY_INTEGRAL_TYPE_P (type)
318 && TYPE_OVERFLOW_UNDEFINED (type)
319 && wi::multiple_of_p (@1, @2, TYPE_SIGN (type)))
320 { build_zero_cst (type); })))
322 /* X % -C is the same as X % C. */
324 (trunc_mod @0 INTEGER_CST@1)
325 (if (TYPE_SIGN (type) == SIGNED
326 && !TREE_OVERFLOW (@1)
328 && !TYPE_OVERFLOW_TRAPS (type)
329 /* Avoid this transformation if C is INT_MIN, i.e. C == -C. */
330 && !sign_bit_p (@1, @1))
331 (trunc_mod @0 (negate @1))))
333 /* X % -Y is the same as X % Y. */
335 (trunc_mod @0 (convert? (negate @1)))
336 (if (!TYPE_UNSIGNED (type)
337 && !TYPE_OVERFLOW_TRAPS (type)
338 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
339 (trunc_mod @0 (convert @1))))
341 /* X - (X / Y) * Y is the same as X % Y. */
343 (minus (convert1? @2) (convert2? (mult:c (trunc_div @0 @1) @1)))
344 /* We cannot use matching captures here, since in the case of
345 constants we really want the type of @0, not @2. */
346 (if (operand_equal_p (@0, @2, 0)
347 && (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type)))
348 (convert (trunc_mod @0 @1))))
350 /* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
351 i.e. "X % C" into "X & (C - 1)", if X and C are positive.
352 Also optimize A % (C << N) where C is a power of 2,
353 to A & ((C << N) - 1). */
354 (match (power_of_two_cand @1)
356 (match (power_of_two_cand @1)
357 (lshift INTEGER_CST@1 @2))
358 (for mod (trunc_mod floor_mod)
360 (mod @0 (convert?@3 (power_of_two_cand@1 @2)))
361 (if ((TYPE_UNSIGNED (type)
362 || tree_expr_nonnegative_p (@0))
363 && tree_nop_conversion_p (type, TREE_TYPE (@3))
364 && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
365 (bit_and @0 (convert (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))))
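/* E.g. for unsigned (or known non-negative) X, X % 16 becomes X & 15 and
   X % (4 << N) becomes X & ((4 << N) - 1). */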
367 /* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF. */
369 (trunc_div (mult @0 integer_pow2p@1) @1)
370 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
371 (bit_and @0 { wide_int_to_tree
372 (type, wi::mask (TYPE_PRECISION (type) - wi::exact_log2 (@1),
373 false, TYPE_PRECISION (type))); })))
375 /* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1. */
377 (mult (trunc_div @0 integer_pow2p@1) @1)
378 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
379 (bit_and @0 (negate @1))))
381 /* Simplify (t * 2) / 2 -> t. */
382 (for div (trunc_div ceil_div floor_div round_div exact_div)
384 (div (mult @0 @1) @1)
385 (if (ANY_INTEGRAL_TYPE_P (type)
386 && TYPE_OVERFLOW_UNDEFINED (type))
390 /* Simplify cos(-x) and cos(|x|) -> cos(x). Similarly for cosh. */
395 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer. */
398 (pows (op @0) REAL_CST@1)
399 (with { HOST_WIDE_INT n; }
400 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
402 /* Strip negate and abs from both operands of hypot. */
410 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y). */
411 (for copysigns (COPYSIGN)
413 (copysigns (op @0) @1)
416 /* abs(x)*abs(x) -> x*x. Should be valid for all types. */
421 /* cos(copysign(x, y)) -> cos(x). Similarly for cosh. */
425 (coss (copysigns @0 @1))
428 /* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer. */
432 (pows (copysigns @0 @2) REAL_CST@1)
433 (with { HOST_WIDE_INT n; }
434 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
439 /* hypot(copysign(x, y), z) -> hypot(x, z). */
441 (hypots (copysigns @0 @1) @2)
443 /* hypot(x, copysign(y, z)) -> hypot(x, y). */
445 (hypots @0 (copysigns @1 @2))
448 /* copysign(copysign(x, y), z) -> copysign(x, z). */
449 (for copysigns (COPYSIGN)
451 (copysigns (copysigns @0 @1) @2)
454 /* copysign(x,y)*copysign(x,y) -> x*x. */
455 (for copysigns (COPYSIGN)
457 (mult (copysigns@2 @0 @1) @2)
460 /* ccos(-x) -> ccos(x). Similarly for ccosh. */
461 (for ccoss (CCOS CCOSH)
466 /* cabs(-x) and cabs(conj(x)) -> cabs(x). */
467 (for ops (conj negate)
473 /* Fold (a * (1 << b)) into (a << b) */
475 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
476 (if (! FLOAT_TYPE_P (type)
477 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
480 /* Fold (C1/X)*C2 into (C1*C2)/X. */
482 (mult (rdiv:s REAL_CST@0 @1) REAL_CST@2)
483 (if (flag_associative_math)
485 { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
487 (rdiv { tem; } @1)))))
489 /* Convert C1/(X*C2) into (C1/C2)/X */
491 (rdiv REAL_CST@0 (mult @1 REAL_CST@2))
492 (if (flag_reciprocal_math)
494 { tree tem = const_binop (RDIV_EXPR, type, @0, @2); }
496 (rdiv { tem; } @1)))))
498 /* Simplify ~X & X as zero. */
500 (bit_and:c (convert? @0) (convert? (bit_not @0)))
501 { build_zero_cst (type); })
503 /* Fold (A & ~B) - (A & B) into (A ^ B) - B. */
505 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
506 (minus (bit_xor @0 @1) @1))
508 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
509 (if (wi::bit_not (@2) == @1)
510 (minus (bit_xor @0 @1) @1)))
512 /* Fold (A & B) - (A & ~B) into B - (A ^ B). */
514 (minus (bit_and:s @0 @1) (bit_and:cs @0 (bit_not @1)))
515 (minus @1 (bit_xor @0 @1)))
517 /* Simplify (X & ~Y) | (~X & Y) -> X ^ Y. */
519 (bit_ior (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1))
522 (bit_ior:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
523 (if (wi::bit_not (@2) == @1)
526 /* X % Y is smaller than Y. */
529 (cmp (trunc_mod @0 @1) @1)
530 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
531 { constant_boolean_node (cmp == LT_EXPR, type); })))
534 (cmp @1 (trunc_mod @0 @1))
535 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
536 { constant_boolean_node (cmp == GT_EXPR, type); })))
540 (bit_ior @0 integer_all_onesp@1)
545 (bit_and @0 integer_zerop@1)
551 (for op (bit_ior bit_xor plus)
553 (op:c (convert? @0) (convert? (bit_not @0)))
554 (convert { build_all_ones_cst (TREE_TYPE (@0)); })))
559 { build_zero_cst (type); })
561 /* Canonicalize X ^ ~0 to ~X. */
563 (bit_xor @0 integer_all_onesp@1)
568 (bit_and @0 integer_all_onesp)
571 /* x & x -> x, x | x -> x */
572 (for bitop (bit_and bit_ior)
577 /* x + (x & 1) -> (x + 1) & ~1 */
579 (plus:c @0 (bit_and:s @0 integer_onep@1))
580 (bit_and (plus @0 @1) (bit_not @1)))
582 /* x & ~(x & y) -> x & ~y */
583 /* x | ~(x | y) -> x | ~y */
584 (for bitop (bit_and bit_ior)
586 (bitop:c @0 (bit_not (bitop:cs @0 @1)))
587 (bitop @0 (bit_not @1))))
589 /* (x | y) & ~x -> y & ~x */
590 /* (x & y) | ~x -> y | ~x */
591 (for bitop (bit_and bit_ior)
592 rbitop (bit_ior bit_and)
594 (bitop:c (rbitop:c @0 @1) (bit_not@2 @0))
597 /* (x & y) ^ (x | y) -> x ^ y */
599 (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1))
602 /* (x ^ y) ^ (x | y) -> x & y */
604 (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1))
607 /* (x & y) + (x ^ y) -> x | y */
608 /* (x & y) | (x ^ y) -> x | y */
609 /* (x & y) ^ (x ^ y) -> x | y */
610 (for op (plus bit_ior bit_xor)
612 (op:c (bit_and @0 @1) (bit_xor @0 @1))
615 /* (x & y) + (x | y) -> x + y */
617 (plus:c (bit_and @0 @1) (bit_ior @0 @1))
620 /* (x + y) - (x | y) -> x & y */
622 (minus (plus @0 @1) (bit_ior @0 @1))
623 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
624 && !TYPE_SATURATING (type))
627 /* (x + y) - (x & y) -> x | y */
629 (minus (plus @0 @1) (bit_and @0 @1))
630 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
631 && !TYPE_SATURATING (type))
634 /* (x | y) - (x ^ y) -> x & y */
636 (minus (bit_ior @0 @1) (bit_xor @0 @1))
639 /* (x | y) - (x & y) -> x ^ y */
641 (minus (bit_ior @0 @1) (bit_and @0 @1))
644 /* (x | y) & ~(x & y) -> x ^ y */
646 (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1)))
649 /* (x | y) & (~x ^ y) -> x & y */
651 (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0)))
654 /* ~x & ~y -> ~(x | y)
655 ~x | ~y -> ~(x & y) */
656 (for op (bit_and bit_ior)
657 rop (bit_ior bit_and)
659 (op (convert1? (bit_not @0)) (convert2? (bit_not @1)))
660 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
661 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
662 (bit_not (rop (convert @0) (convert @1))))))
664 /* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing
665 with a constant, and the two constants have no bits in common,
666 we should treat this as a BIT_IOR_EXPR since this may produce more simplifications. */
668 (for op (bit_xor plus)
670 (op (convert1? (bit_and@4 @0 INTEGER_CST@1))
671 (convert2? (bit_and@5 @2 INTEGER_CST@3)))
672 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
673 && tree_nop_conversion_p (type, TREE_TYPE (@2))
674 && wi::bit_and (@1, @3) == 0)
675 (bit_ior (convert @4) (convert @5)))))
677 /* (X | Y) ^ X -> Y & ~X */
679 (bit_xor:c (convert? (bit_ior:c @0 @1)) (convert? @0))
680 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
681 (convert (bit_and @1 (bit_not @0)))))
683 /* Convert ~X ^ ~Y to X ^ Y. */
685 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1)))
686 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
687 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
688 (bit_xor (convert @0) (convert @1))))
690 /* Convert ~X ^ C to X ^ ~C. */
692 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1)
693 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
694 (bit_xor (convert @0) (bit_not @1))))
696 /* Fold (X & Y) ^ Y as ~X & Y. */
698 (bit_xor:c (bit_and:c @0 @1) @1)
699 (bit_and (bit_not @0) @1))
701 /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
702 operands are another bit-wise operation with a common input. If so,
703 distribute the bit operations to save an operation and possibly two if
704 constants are involved. For example, convert
705 (A | B) & (A | C) into A | (B & C)
706 Further simplification will occur if B and C are constants. */
707 (for op (bit_and bit_ior)
708 rop (bit_ior bit_and)
710 (op (convert? (rop:c @0 @1)) (convert? (rop @0 @2)))
711 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
712 (rop (convert @0) (op (convert @1) (convert @2))))))
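/* For example, (x | 3) & (x | 5) becomes x | (3 & 5), i.e. x | 1, and
   (x & 3) | (x & 5) becomes x & (3 | 5), i.e. x & 7. */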
722 (abs tree_expr_nonnegative_p@0)
725 /* A few cases of fold-const.c negate_expr_p predicate. */
728 (if ((INTEGRAL_TYPE_P (type)
729 && TYPE_OVERFLOW_WRAPS (type))
730 || (!TYPE_OVERFLOW_SANITIZED (type)
731 && may_negate_without_overflow_p (t)))))
736 (if (!TYPE_OVERFLOW_SANITIZED (type))))
739 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)))))
740 /* VECTOR_CST handling of non-wrapping types would recurse in unsupported ways. */
744 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))
746 /* (-A) * (-B) -> A * B */
748 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1))
749 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
750 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
751 (mult (convert @0) (convert (negate @1)))))
753 /* -(A + B) -> (-B) - A. */
755 (negate (plus:c @0 negate_expr_p@1))
756 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
757 && !HONOR_SIGNED_ZEROS (element_mode (type)))
758 (minus (negate @1) @0)))
760 /* A - B -> A + (-B) if B is easily negatable. */
762 (minus @0 negate_expr_p@1)
763 (if (!FIXED_POINT_TYPE_P (type))
764 (plus @0 (negate @1))))
766 /* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
768 For bitwise binary operations apply operand conversions to the
769 binary operation result instead of to the operands. This allows
770 to combine successive conversions and bitwise binary operations.
771 We combine the above two cases by using a conditional convert. */
772 (for bitop (bit_and bit_ior bit_xor)
774 (bitop (convert @0) (convert? @1))
775 (if (((TREE_CODE (@1) == INTEGER_CST
776 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
777 && int_fits_type_p (@1, TREE_TYPE (@0)))
778 || types_match (@0, @1))
779 /* ??? This transform conflicts with fold-const.c doing
780 Convert (T)(x & c) into (T)x & (T)c, if c is an integer
781 constant (if x has signed type, the sign bit cannot be set
782 in c). This folds extension into the BIT_AND_EXPR.
783 Restrict it to GIMPLE to avoid endless recursions. */
784 && (bitop != BIT_AND_EXPR || GIMPLE)
785 && (/* That's a good idea if the conversion widens the operand, thus
786 after hoisting the conversion the operation will be narrower. */
787 TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
788 /* It's also a good idea if the conversion is to a non-integer mode. */
790 || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
791 /* Or if the precision of TO is not the same as the precision of its mode. */
793 || TYPE_PRECISION (type) != GET_MODE_PRECISION (TYPE_MODE (type))))
794 (convert (bitop @0 (convert @1))))))
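/* As an example (on GIMPLE only for bit_and, as noted above): with x an
   unsigned char, (int) x & 15 becomes (int) (x & 15), hoisting the widening
   conversion so the bitwise operation is carried out in the narrower type. */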
796 (for bitop (bit_and bit_ior)
797 rbitop (bit_ior bit_and)
798 /* (x | y) & x -> x */
799 /* (x & y) | x -> x */
801 (bitop:c (rbitop:c @0 @1) @0)
803 /* (~x | y) & x -> x & y */
804 /* (~x & y) | x -> x | y */
806 (bitop:c (rbitop:c (bit_not @0) @1) @0)
809 /* Simplify (A & B) OP0 (C & B) to (A OP0 C) & B. */
810 (for bitop (bit_and bit_ior bit_xor)
812 (bitop (bit_and:c @0 @1) (bit_and @2 @1))
813 (bit_and (bitop @0 @2) @1)))
815 /* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
817 (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
818 (bit_ior (bit_and @0 @2) (bit_and @1 @2)))
820 /* Combine successive equal operations with constants. */
821 (for bitop (bit_and bit_ior bit_xor)
823 (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
824 (bitop @0 (bitop @1 @2))))
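/* E.g. (x & 12) & 10 becomes x & 8, (x | 1) | 4 becomes x | 5 and
   (x ^ 3) ^ 5 becomes x ^ 6. */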
826 /* Try simple folding for X op !X, and X op X with the help
827 of the truth_valued_p and logical_inverted_value predicates. */
828 (match truth_valued_p
830 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
831 (for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
832 (match truth_valued_p
834 (match truth_valued_p
837 (match (logical_inverted_value @0)
839 (match (logical_inverted_value @0)
840 (bit_not truth_valued_p@0))
841 (match (logical_inverted_value @0)
842 (eq @0 integer_zerop))
843 (match (logical_inverted_value @0)
844 (ne truth_valued_p@0 integer_truep))
845 (match (logical_inverted_value @0)
846 (bit_xor truth_valued_p@0 integer_truep))
850 (bit_and:c @0 (logical_inverted_value @0))
851 { build_zero_cst (type); })
852 /* X | !X and X ^ !X -> 1, if X is truth-valued. */
853 (for op (bit_ior bit_xor)
855 (op:c truth_valued_p@0 (logical_inverted_value @0))
856 { constant_boolean_node (true, type); }))
857 /* X ==/!= !X is false/true. */
860 (op:c truth_valued_p@0 (logical_inverted_value @0))
861 { constant_boolean_node (op == NE_EXPR ? true : false, type); }))
863 /* If arg1 and arg2 are booleans (or any single bit type)
864 then try to simplify:
871 But only do this if our result feeds into a comparison as
872 this transformation is not always a win, particularly on
873 targets with and-not instructions.
874 -> simplify_bitwise_binary_boolean */
876 (ne (bit_and:c (bit_not @0) @1) integer_zerop)
877 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
878 && TYPE_PRECISION (TREE_TYPE (@1)) == 1)
881 (ne (bit_ior:c (bit_not @0) @1) integer_zerop)
882 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
883 && TYPE_PRECISION (TREE_TYPE (@1)) == 1)
888 (bit_not (bit_not @0))
891 /* Convert ~ (-A) to A - 1. */
893 (bit_not (convert? (negate @0)))
894 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
895 (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); }))))
897 /* Convert ~ (A - 1) or ~ (A + -1) to -A. */
899 (bit_not (convert? (minus @0 integer_each_onep)))
900 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
901 (convert (negate @0))))
903 (bit_not (convert? (plus @0 integer_all_onesp)))
904 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
905 (convert (negate @0))))
907 /* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify. */
909 (bit_not (convert? (bit_xor @0 INTEGER_CST@1)))
910 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
911 (convert (bit_xor @0 (bit_not @1)))))
913 (bit_not (convert? (bit_xor:c (bit_not @0) @1)))
914 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
915 (convert (bit_xor @0 @1))))
917 /* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
919 (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2))
920 (bit_xor (bit_and (bit_xor @0 @1) @2) @0))
922 /* Fold A - (A & B) into ~B & A. */
924 (minus (convert? @0) (convert?:s (bit_and:cs @0 @1)))
925 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
926 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
927 (convert (bit_and (bit_not @1) @0))))
931 /* ((X inner_op C0) outer_op C1)
932 With X being a tree where value_range has reasoned certain bits to always be
933 zero throughout its computed value range,
934 inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op
935 where zero_mask has 1's for all bits that are sure to be 0 in X and 0's otherwise.
937 if (inner_op == '^') C0 &= ~C1;
938 if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1))
939 if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1)) */
941 (for inner_op (bit_ior bit_xor)
942 outer_op (bit_xor bit_ior)
945 (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1)
949 wide_int zero_mask_not;
953 if (TREE_CODE (@2) == SSA_NAME)
954 zero_mask_not = get_nonzero_bits (@2);
958 if (inner_op == BIT_XOR_EXPR)
960 C0 = wi::bit_and_not (@0, @1);
961 cst_emit = wi::bit_or (C0, @1);
966 cst_emit = wi::bit_xor (@0, @1);
969 (if (!fail && wi::bit_and (C0, zero_mask_not) == 0)
970 (outer_op @2 { wide_int_to_tree (type, cst_emit); })
971 (if (!fail && wi::bit_and (@1, zero_mask_not) == 0)
972 (inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))
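/* As a hypothetical example: if value-range information shows that X has
   nonzero bits only within 0xff, then (X | 0x100) ^ 0x200 can be emitted as
   X ^ 0x300, since the constants only touch bits known to be zero in X. */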
974 /* Associate (p +p off1) +p off2 as (p +p (off1 + off2)). */
976 (pointer_plus (pointer_plus:s @0 @1) @3)
977 (pointer_plus @0 (plus @1 @3)))
983 tem4 = (unsigned long) tem3;
988 (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
989 /* Conditionally look through a sign-changing conversion. */
990 (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
991 && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
992 || (GENERIC && type == TREE_TYPE (@1))))
996 tem = (sizetype) ptr;
1000 and produce the simpler and easier to analyze with respect to alignment
1001 ... = ptr & ~algn; */
1003 (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
1004 (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), wi::bit_not (@1)); }
1005 (bit_and @0 { algn; })))
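/* E.g. ptr + (-((sizetype) ptr & 15)) becomes ptr & ~15, i.e. ptr aligned
   down to a 16-byte boundary. */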
1007 /* Try folding difference of addresses. */
1009 (minus (convert ADDR_EXPR@0) (convert @1))
1010 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1011 (with { HOST_WIDE_INT diff; }
1012 (if (ptr_difference_const (@0, @1, &diff))
1013 { build_int_cst_type (type, diff); }))))
1015 (minus (convert @0) (convert ADDR_EXPR@1))
1016 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1017 (with { HOST_WIDE_INT diff; }
1018 (if (ptr_difference_const (@0, @1, &diff))
1019 { build_int_cst_type (type, diff); }))))
1021 /* If arg0 is derived from the address of an object or function, we may
1022 be able to fold this expression using the object or function's alignment. */
1025 (bit_and (convert? @0) INTEGER_CST@1)
1026 (if (POINTER_TYPE_P (TREE_TYPE (@0))
1027 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1031 unsigned HOST_WIDE_INT bitpos;
1032 get_pointer_alignment_1 (@0, &align, &bitpos);
1034 (if (wi::ltu_p (@1, align / BITS_PER_UNIT))
1035 { wide_int_to_tree (type, wi::bit_and (@1, bitpos / BITS_PER_UNIT)); }))))
1038 /* We can't reassociate at all for saturating types. */
1039 (if (!TYPE_SATURATING (type))
1041 /* Contract negates. */
1042 /* A + (-B) -> A - B */
1044 (plus:c (convert1? @0) (convert2? (negate @1)))
1045 /* Apply STRIP_NOPS on @0 and the negate. */
1046 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1047 && tree_nop_conversion_p (type, TREE_TYPE (@1))
1048 && !TYPE_OVERFLOW_SANITIZED (type))
1049 (minus (convert @0) (convert @1))))
1050 /* A - (-B) -> A + B */
1052 (minus (convert1? @0) (convert2? (negate @1)))
1053 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1054 && tree_nop_conversion_p (type, TREE_TYPE (@1))
1055 && !TYPE_OVERFLOW_SANITIZED (type))
1056 (plus (convert @0) (convert @1))))
1059 (negate (convert? (negate @1)))
1060 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1061 && !TYPE_OVERFLOW_SANITIZED (type))
1064 /* We can't reassociate floating-point unless -fassociative-math is given,
1065 nor fixed-point plus or minus because of saturation to +-Inf. */
1066 (if ((!FLOAT_TYPE_P (type) || flag_associative_math)
1067 && !FIXED_POINT_TYPE_P (type))
1069 /* Match patterns that allow contracting a plus-minus pair
1070 irrespective of overflow issues. */
1071 /* (A +- B) - A -> +- B */
1072 /* (A +- B) -+ B -> A */
1073 /* A - (A +- B) -> -+ B */
1074 /* A +- (B -+ A) -> +- B */
1076 (minus (plus:c @0 @1) @0)
1079 (minus (minus @0 @1) @0)
1082 (plus:c (minus @0 @1) @1)
1085 (minus @0 (plus:c @0 @1))
1088 (minus @0 (minus @0 @1))
1091 /* (A +- CST) +- CST -> A + CST */
1092 (for outer_op (plus minus)
1093 (for inner_op (plus minus)
1095 (outer_op (inner_op @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
1096 /* If the constant operation overflows we cannot do the transform
1097 as we would introduce undefined overflow, for example
1098 with (a - 1) + INT_MIN. */
1099 (with { tree cst = fold_binary (outer_op == inner_op
1100 ? PLUS_EXPR : MINUS_EXPR, type, @1, @2); }
1101 (if (cst && !TREE_OVERFLOW (cst))
1102 (inner_op @0 { cst; } ))))))
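/* E.g. (a + 2) + 5 becomes a + 7, and (a + 10) - 3 likewise becomes a + 7;
   as noted above, the fold is skipped if combining the two constants would
   itself overflow. */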
1104 /* (CST - A) +- CST -> CST - A */
1105 (for outer_op (plus minus)
1107 (outer_op (minus CONSTANT_CLASS_P@1 @0) CONSTANT_CLASS_P@2)
1108 (with { tree cst = fold_binary (outer_op, type, @1, @2); }
1109 (if (cst && !TREE_OVERFLOW (cst))
1110 (minus { cst; } @0)))))
1114 (plus:c (bit_not @0) @0)
1115 (if (!TYPE_OVERFLOW_TRAPS (type))
1116 { build_all_ones_cst (type); }))
1120 (plus (convert? (bit_not @0)) integer_each_onep)
1121 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1122 (negate (convert @0))))
1126 (minus (convert? (negate @0)) integer_each_onep)
1127 (if (!TYPE_OVERFLOW_TRAPS (type)
1128 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1129 (bit_not (convert @0))))
1133 (minus integer_all_onesp @0)
1136 /* (T)(P + A) - (T)P -> (T) A */
1137 (for add (plus pointer_plus)
1139 (minus (convert (add @0 @1))
1141 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1142 /* For integer types, if A has a smaller type
1143 than T the result depends on the possible overflow in P + A.
1145 E.g. T=size_t, A=(unsigned)429497295, P>0.
1146 However, if an overflow in P + A would cause
1147 undefined behavior, we can assume that there is no overflow. */
1149 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1150 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1151 /* For pointer types, if the conversion of A to the
1152 final type requires a sign- or zero-extension,
1153 then we have to punt - it is not defined which one is correct. */
1155 || (POINTER_TYPE_P (TREE_TYPE (@0))
1156 && TREE_CODE (@1) == INTEGER_CST
1157 && tree_int_cst_sign_bit (@1) == 0))
1160 /* (T)P - (T)(P + A) -> -(T) A */
1161 (for add (plus pointer_plus)
1164 (convert (add @0 @1)))
1165 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1166 /* For integer types, if A has a smaller type
1167 than T the result depends on the possible overflow in P + A.
1169 E.g. T=size_t, A=(unsigned)429497295, P>0.
1170 However, if an overflow in P + A would cause
1171 undefined behavior, we can assume that there is no overflow. */
1173 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1174 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1175 /* For pointer types, if the conversion of A to the
1176 final type requires a sign- or zero-extension,
1177 then we have to punt - it is not defined which one is correct. */
1179 || (POINTER_TYPE_P (TREE_TYPE (@0))
1180 && TREE_CODE (@1) == INTEGER_CST
1181 && tree_int_cst_sign_bit (@1) == 0))
1182 (negate (convert @1)))))
1184 /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
1185 (for add (plus pointer_plus)
1187 (minus (convert (add @0 @1))
1188 (convert (add @0 @2)))
1189 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1190 /* For integer types, if A has a smaller type
1191 than T the result depends on the possible overflow in P + A.
1193 E.g. T=size_t, A=(unsigned)429497295, P>0.
1194 However, if an overflow in P + A would cause
1195 undefined behavior, we can assume that there is no overflow. */
1197 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1198 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1199 /* For pointer types, if the conversion of A to the
1200 final type requires a sign- or zero-extension,
1201 then we have to punt - it is not defined which one is correct. */
1203 || (POINTER_TYPE_P (TREE_TYPE (@0))
1204 && TREE_CODE (@1) == INTEGER_CST
1205 && tree_int_cst_sign_bit (@1) == 0
1206 && TREE_CODE (@2) == INTEGER_CST
1207 && tree_int_cst_sign_bit (@2) == 0))
1208 (minus (convert @1) (convert @2)))))))
1211 /* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax(). */
1213 (for minmax (min max FMIN FMAX)
1217 /* min(max(x,y),y) -> y. */
1219 (min:c (max:c @0 @1) @1)
1221 /* max(min(x,y),y) -> y. */
1223 (max:c (min:c @0 @1) @1)
1227 (if (INTEGRAL_TYPE_P (type)
1228 && TYPE_MIN_VALUE (type)
1229 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
1233 (if (INTEGRAL_TYPE_P (type)
1234 && TYPE_MAX_VALUE (type)
1235 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
1237 (for minmax (FMIN FMAX)
1238 /* If either argument is NaN, return the other one. Avoid the
1239 transformation if we get (and honor) a signalling NaN. */
1241 (minmax:c @0 REAL_CST@1)
1242 (if (real_isnan (TREE_REAL_CST_PTR (@1))
1243 && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling))
1245 /* Convert fmin/fmax to MIN_EXPR/MAX_EXPR. C99 requires these
1246 functions to return the numeric arg if the other one is NaN.
1247 MIN and MAX don't honor that, so only transform if -ffinite-math-only
1248 is set. C99 doesn't require -0.0 to be handled, so we don't have to
1249 worry about it either. */
1250 (if (flag_finite_math_only)
1258 /* Simplifications of shift and rotates. */
1260 (for rotate (lrotate rrotate)
1262 (rotate integer_all_onesp@0 @1)
1265 /* Optimize -1 >> x for arithmetic right shifts. */
1267 (rshift integer_all_onesp@0 @1)
1268 (if (!TYPE_UNSIGNED (type)
1269 && tree_expr_nonnegative_p (@1))
1272 /* Optimize (x >> c) << c into x & (-1<<c). */
1274 (lshift (rshift @0 INTEGER_CST@1) @1)
1275 (if (wi::ltu_p (@1, element_precision (type)))
1276 (bit_and @0 (lshift { build_minus_one_cst (type); } @1))))
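/* For a 32-bit x this folds e.g. (x >> 4) << 4 to x & 0xfffffff0. */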
1278 /* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned types. */
1281 (rshift (lshift @0 INTEGER_CST@1) @1)
1282 (if (TYPE_UNSIGNED (type)
1283 && (wi::ltu_p (@1, element_precision (type))))
1284 (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))
1286 (for shiftrotate (lrotate rrotate lshift rshift)
1288 (shiftrotate @0 integer_zerop)
1291 (shiftrotate integer_zerop@0 @1)
1293 /* Prefer vector1 << scalar to vector1 << vector2
1294 if vector2 is uniform. */
1295 (for vec (VECTOR_CST CONSTRUCTOR)
1297 (shiftrotate @0 vec@1)
1298 (with { tree tem = uniform_vector_p (@1); }
1300 (shiftrotate @0 { tem; }))))))
1302 /* Rewrite an LROTATE_EXPR by a constant into an
1303 RROTATE_EXPR by a new constant. */
1305 (lrotate @0 INTEGER_CST@1)
1306 (rrotate @0 { fold_binary (MINUS_EXPR, TREE_TYPE (@1),
1307 build_int_cst (TREE_TYPE (@1),
1308 element_precision (type)), @1); }))
1310 /* Turn (a OP c1) OP c2 into a OP (c1+c2). */
1311 (for op (lrotate rrotate rshift lshift)
1313 (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
1314 (with { unsigned int prec = element_precision (type); }
1315 (if (wi::ge_p (@1, 0, TYPE_SIGN (TREE_TYPE (@1)))
1316 && wi::lt_p (@1, prec, TYPE_SIGN (TREE_TYPE (@1)))
1317 && wi::ge_p (@2, 0, TYPE_SIGN (TREE_TYPE (@2)))
1318 && wi::lt_p (@2, prec, TYPE_SIGN (TREE_TYPE (@2))))
1319 (with { unsigned int low = wi::add (@1, @2).to_uhwi (); }
1320 /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
1321 being well defined. */
1323 (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
1324 (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
1325 (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
1326 { build_zero_cst (type); }
1327 (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
1328 (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))
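/* For a 32-bit type this turns e.g. (x >> 2) >> 3 into x >> 5; two right
   rotates by 20 combine into a single rotate by 8 (rotate counts wrap around
   the precision); and an unsigned (x << 20) << 20 shifts everything out and
   folds to 0. */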
1331 /* ((1 << A) & 1) != 0 -> A == 0
1332 ((1 << A) & 1) == 0 -> A != 0 */
1336 (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop)
1337 (icmp @0 { build_zero_cst (TREE_TYPE (@0)); })))
1339 /* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
1340 (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1), if CST2 != 0. */
1344 (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
1345 (with { int cand = wi::ctz (@2) - wi::ctz (@0); }
1347 || (!integer_zerop (@2)
1348 && wi::ne_p (wi::lshift (@0, cand), @2)))
1349 { constant_boolean_node (cmp == NE_EXPR, type); }
1350 (if (!integer_zerop (@2)
1351 && wi::eq_p (wi::lshift (@0, cand), @2))
1352 (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
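/* E.g. (4 << A) == 32 becomes A == 3 (ctz (32) - ctz (4)), while
   (4 << A) == 6 can never hold and folds to false (the != form to true). */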
1354 /* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
1355 (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
1356 if the new mask might be further optimized. */
1357 (for shift (lshift rshift)
1359 (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
1361 (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
1362 && TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
1363 && tree_fits_uhwi_p (@1)
1364 && tree_to_uhwi (@1) > 0
1365 && tree_to_uhwi (@1) < TYPE_PRECISION (type))
1368 unsigned int shiftc = tree_to_uhwi (@1);
1369 unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
1370 unsigned HOST_WIDE_INT newmask, zerobits = 0;
1371 tree shift_type = TREE_TYPE (@3);
1374 if (shift == LSHIFT_EXPR)
1375 zerobits = ((((unsigned HOST_WIDE_INT) 1) << shiftc) - 1);
1376 else if (shift == RSHIFT_EXPR
1377 && (TYPE_PRECISION (shift_type)
1378 == GET_MODE_PRECISION (TYPE_MODE (shift_type))))
1380 prec = TYPE_PRECISION (TREE_TYPE (@3));
1382 /* See if more bits can be proven as zero because of zero extension. */
1385 && TYPE_UNSIGNED (TREE_TYPE (@0)))
1387 tree inner_type = TREE_TYPE (@0);
1388 if ((TYPE_PRECISION (inner_type)
1389 == GET_MODE_PRECISION (TYPE_MODE (inner_type)))
1390 && TYPE_PRECISION (inner_type) < prec)
1392 prec = TYPE_PRECISION (inner_type);
1393 /* See if we can shorten the right shift. */
1395 shift_type = inner_type;
1396 /* Otherwise X >> C1 is all zeros, so we'll optimize
1397 it into (X, 0) later on by making sure zerobits is all ones. */
1401 zerobits = ~(unsigned HOST_WIDE_INT) 0;
1404 zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
1405 zerobits <<= prec - shiftc;
1407 /* For arithmetic shift if sign bit could be set, zerobits
1408 can actually contain sign bits, so no transformation is
1409 possible, unless MASK masks them all away. In that
1410 case the shift needs to be converted into logical shift. */
1411 if (!TYPE_UNSIGNED (TREE_TYPE (@3))
1412 && prec == TYPE_PRECISION (TREE_TYPE (@3)))
1414 if ((mask & zerobits) == 0)
1415 shift_type = unsigned_type_for (TREE_TYPE (@3));
1421 /* ((X << 16) & 0xff00) is (X, 0). */
1422 (if ((mask & zerobits) == mask)
1423 { build_int_cst (type, 0); }
1424 (with { newmask = mask | zerobits; }
1425 (if (newmask != mask && (newmask & (newmask + 1)) == 0)
1428 /* Only do the transformation if NEWMASK is some integer mode's mask. */
1430 for (prec = BITS_PER_UNIT;
1431 prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
1432 if (newmask == (((unsigned HOST_WIDE_INT) 1) << prec) - 1)
1435 (if (prec < HOST_BITS_PER_WIDE_INT
1436 || newmask == ~(unsigned HOST_WIDE_INT) 0)
1438 { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
1439 (if (!tree_int_cst_equal (newmaskt, @2))
1440 (if (shift_type != TREE_TYPE (@3))
1441 (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
1442 (bit_and @4 { newmaskt; })))))))))))))
1444 /* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
1445 (X {&,^,|} C2) >> C1 into (X >> C1) {&,^,|} (C2 >> C1). */
1446 (for shift (lshift rshift)
1447 (for bit_op (bit_and bit_xor bit_ior)
1449 (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
1450 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1451 (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
1452 (bit_op (shift (convert @0) @1) { mask; }))))))
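/* E.g. (x & 0xff) << 8 becomes (x << 8) & 0xff00, and for unsigned x
   (x | 7) >> 2 becomes (x >> 2) | 1. */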
1455 /* Simplifications of conversions. */
1457 /* Basic strip-useless-type-conversions / strip_nops. */
1458 (for cvt (convert view_convert float fix_trunc)
1461 (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
1462 || (GENERIC && type == TREE_TYPE (@0)))
1465 /* Contract view-conversions. */
1467 (view_convert (view_convert @0))
1470 /* For integral conversions with the same precision or pointer
1471 conversions use a NOP_EXPR instead. */
1474 (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
1475 && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
1476 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
1479 /* Strip inner integral conversions that do not change precision or size. */
1481 (view_convert (convert@0 @1))
1482 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
1483 && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
1484 && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
1485 && (TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))))
1488 /* Re-association barriers around constants and other re-association
1489 barriers can be removed. */
1491 (paren CONSTANT_CLASS_P@0)
1494 (paren (paren@1 @0))
1497 /* Handle cases of two conversions in a row. */
1498 (for ocvt (convert float fix_trunc)
1499 (for icvt (convert float)
1504 tree inside_type = TREE_TYPE (@0);
1505 tree inter_type = TREE_TYPE (@1);
1506 int inside_int = INTEGRAL_TYPE_P (inside_type);
1507 int inside_ptr = POINTER_TYPE_P (inside_type);
1508 int inside_float = FLOAT_TYPE_P (inside_type);
1509 int inside_vec = VECTOR_TYPE_P (inside_type);
1510 unsigned int inside_prec = TYPE_PRECISION (inside_type);
1511 int inside_unsignedp = TYPE_UNSIGNED (inside_type);
1512 int inter_int = INTEGRAL_TYPE_P (inter_type);
1513 int inter_ptr = POINTER_TYPE_P (inter_type);
1514 int inter_float = FLOAT_TYPE_P (inter_type);
1515 int inter_vec = VECTOR_TYPE_P (inter_type);
1516 unsigned int inter_prec = TYPE_PRECISION (inter_type);
1517 int inter_unsignedp = TYPE_UNSIGNED (inter_type);
1518 int final_int = INTEGRAL_TYPE_P (type);
1519 int final_ptr = POINTER_TYPE_P (type);
1520 int final_float = FLOAT_TYPE_P (type);
1521 int final_vec = VECTOR_TYPE_P (type);
1522 unsigned int final_prec = TYPE_PRECISION (type);
1523 int final_unsignedp = TYPE_UNSIGNED (type);
1526 /* In addition to the cases of two conversions in a row
1527 handled below, if we are converting something to its own
1528 type via an object of identical or wider precision, neither
1529 conversion is needed. */
1530 (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
1532 && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
1533 && (((inter_int || inter_ptr) && final_int)
1534 || (inter_float && final_float))
1535 && inter_prec >= final_prec)
1538 /* Likewise, if the intermediate and initial types are either both
1539 float or both integer, we don't need the middle conversion if the
1540 former is wider than the latter and doesn't change the signedness
1541 (for integers). Avoid this if the final type is a pointer since
1542 then we sometimes need the middle conversion. Likewise if the
1543 final type has a precision not equal to the size of its mode. */
1544 (if (((inter_int && inside_int) || (inter_float && inside_float))
1545 && (final_int || final_float)
1546 && inter_prec >= inside_prec
1547 && (inter_float || inter_unsignedp == inside_unsignedp)
1548 && ! (final_prec != GET_MODE_PRECISION (TYPE_MODE (type))
1549 && TYPE_MODE (type) == TYPE_MODE (inter_type)))
1552 /* If we have a sign-extension of a zero-extended value, we can
1553 replace that by a single zero-extension. Likewise if the
1554 final conversion does not change precision we can drop the
1555 intermediate conversion. */
1556 (if (inside_int && inter_int && final_int
1557 && ((inside_prec < inter_prec && inter_prec < final_prec
1558 && inside_unsignedp && !inter_unsignedp)
1559 || final_prec == inter_prec))
1562 /* Two conversions in a row are not needed unless:
1563 - some conversion is floating-point (overstrict for now), or
1564 - some conversion is a vector (overstrict for now), or
1565 - the intermediate type is narrower than both initial and final, or
1567 - the intermediate type and innermost type differ in signedness,
1568 and the outermost type is wider than the intermediate, or
1569 - the initial type is a pointer type and the precisions of the
1570 intermediate and final types differ, or
1571 - the final type is a pointer type and the precisions of the
1572 initial and intermediate types differ. */
1573 (if (! inside_float && ! inter_float && ! final_float
1574 && ! inside_vec && ! inter_vec && ! final_vec
1575 && (inter_prec >= inside_prec || inter_prec >= final_prec)
1576 && ! (inside_int && inter_int
1577 && inter_unsignedp != inside_unsignedp
1578 && inter_prec < final_prec)
1579 && ((inter_unsignedp && inter_prec > inside_prec)
1580 == (final_unsignedp && final_prec > inter_prec))
1581 && ! (inside_ptr && inter_prec != final_prec)
1582 && ! (final_ptr && inside_prec != inter_prec)
1583 && ! (final_prec != GET_MODE_PRECISION (TYPE_MODE (type))
1584 && TYPE_MODE (type) == TYPE_MODE (inter_type)))
1587 /* A truncation to an unsigned type (a zero-extension) should be
1588 canonicalized as bitwise and of a mask. */
1589 (if (final_int && inter_int && inside_int
1590 && final_prec == inside_prec
1591 && final_prec > inter_prec
1593 (convert (bit_and @0 { wide_int_to_tree
1595 wi::mask (inter_prec, false,
1596 TYPE_PRECISION (inside_type))); })))
1598 /* If we are converting an integer to a floating-point that can
1599 represent it exactly and back to an integer, we can skip the
1600 floating-point conversion. */
1601 (if (GIMPLE /* PR66211 */
1602 && inside_int && inter_float && final_int &&
1603 (unsigned) significand_size (TYPE_MODE (inter_type))
1604 >= inside_prec - !inside_unsignedp)
1607 /* If we have a narrowing conversion to an integral type that is fed by a
1608 BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
1609 masks off bits outside the final type (and nothing else). */
1611 (convert (bit_and @0 INTEGER_CST@1))
1612 (if (INTEGRAL_TYPE_P (type)
1613 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
1614 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
1615 && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
1616 TYPE_PRECISION (type)), 0))
1620 /* (X /[ex] A) * A -> X. */
1622 (mult (convert? (exact_div @0 @1)) @1)
1623 /* Look through a sign-changing conversion. */
1626 /* Canonicalization of binary operations. */
1628 /* Convert X + -C into X - C. */
1630 (plus @0 REAL_CST@1)
1631 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
1632 (with { tree tem = fold_unary (NEGATE_EXPR, type, @1); }
1633 (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
1634 (minus @0 { tem; })))))
1636 /* Convert x+x into x*2.0. */
1639 (if (SCALAR_FLOAT_TYPE_P (type))
1640 (mult @0 { build_real (type, dconst2); })))
1643 (minus integer_zerop @1)
1646 /* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether
1647 ARG0 is zero and X + ARG0 reduces to X, since that would mean
1648 (-ARG1 + ARG0) reduces to -ARG1. */
1650 (minus real_zerop@0 @1)
1651 (if (fold_real_zero_addition_p (type, @0, 0))
1654 /* Transform x * -1 into -x. */
1656 (mult @0 integer_minus_onep)
1659 /* True if we can easily extract the real and imaginary parts of a complex number. */
1661 (match compositional_complex
1662 (convert? (complex @0 @1)))
1664 /* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */
1666 (complex (realpart @0) (imagpart @0))
1669 (realpart (complex @0 @1))
1672 (imagpart (complex @0 @1))
1675 /* Sometimes we only care about half of a complex expression. */
1677 (realpart (convert?:s (conj:s @0)))
1678 (convert (realpart @0)))
1680 (imagpart (convert?:s (conj:s @0)))
1681 (convert (negate (imagpart @0))))
1682 (for part (realpart imagpart)
1683 (for op (plus minus)
1685 (part (convert?:s@2 (op:s @0 @1)))
1686 (convert (op (part @0) (part @1))))))
1688 (realpart (convert?:s (CEXPI:s @0)))
1691 (imagpart (convert?:s (CEXPI:s @0)))
1694 /* conj(conj(x)) -> x */
1696 (conj (convert? (conj @0)))
1697 (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
1700 /* conj({x,y}) -> {x,-y} */
1702 (conj (convert?:s (complex:s @0 @1)))
1703 (with { tree itype = TREE_TYPE (type); }
1704 (complex (convert:itype @0) (negate (convert:itype @1)))))
1706 /* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. */
1707 (for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
1712 (bswap (bit_not (bswap @0)))
1714 (for bitop (bit_xor bit_ior bit_and)
1716 (bswap (bitop:c (bswap @0) @1))
1717 (bitop @0 (bswap @1)))))
1720 /* Combine COND_EXPRs and VEC_COND_EXPRs. */
1722 /* Simplify constant conditions.
1723 Only optimize constant conditions when the selected branch
1724 has the same type as the COND_EXPR. This avoids optimizing
1725 away "c ? x : throw", where the throw has a void type.
1726 Note that we cannot throw away the fold-const.c variant nor
1727 this one as we depend on doing this transform before possibly
1728 A ? B : B -> B triggers and the fold-const.c one can optimize
1729 0 ? A : B to B even if A has side-effects, something
1730 genmatch cannot handle. */
1732 (cond INTEGER_CST@0 @1 @2)
1733 (if (integer_zerop (@0))
1734 (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
1736 (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
1739 (vec_cond VECTOR_CST@0 @1 @2)
1740 (if (integer_all_onesp (@0))
1742 (if (integer_zerop (@0))
1745 (for cnd (cond vec_cond)
1746 /* A ? B : (A ? X : C) -> A ? B : C. */
1748 (cnd @0 (cnd @0 @1 @2) @3)
1751 (cnd @0 @1 (cnd @0 @2 @3))
1754 /* A ? B : B -> B. */
1759 /* !A ? B : C -> A ? C : B. */
1761 (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
1764 /* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C), since vector comparisons
1765 return all-1 or all-0 results. */
1766 /* ??? We could instead convert all instances of the vec_cond to negate,
1767 but that isn't necessarily a win on its own. */
1769 (plus:c @3 (view_convert? (vec_cond @0 integer_each_onep@1 integer_zerop@2)))
1770 (if (VECTOR_TYPE_P (type)
1771 && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0))
1772 && (TYPE_MODE (TREE_TYPE (type))
1773 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@0)))))
1774 (minus @3 (view_convert @0))))
1776 /* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C). */
1778 (minus @3 (view_convert? (vec_cond @0 integer_each_onep@1 integer_zerop@2)))
1779 (if (VECTOR_TYPE_P (type)
1780 && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0))
1781 && (TYPE_MODE (TREE_TYPE (type))
1782 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@0)))))
1783 (plus @3 (view_convert @0))))
1786 /* Simplifications of comparisons. */
1788 /* See if we can reduce the magnitude of a constant involved in a
1789 comparison by changing the comparison code. This is a canonicalization
1790 formerly done by maybe_canonicalize_comparison_1. */
1794 (cmp @0 INTEGER_CST@1)
1795 (if (tree_int_cst_sgn (@1) == -1)
1796 (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))))
1800 (cmp @0 INTEGER_CST@1)
1801 (if (tree_int_cst_sgn (@1) == 1)
1802 (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))))
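/* E.g. x <= -5 becomes x < -4 and x >= 5 becomes x > 4, shrinking the
   magnitude of the constant while keeping the comparison equivalent. */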
1805 /* We can simplify a logical negation of a comparison to the
1806 inverted comparison. As we cannot compute an expression
1807 operator using invert_tree_comparison we have to simulate
1808 that with expression code iteration. */
1809 (for cmp (tcc_comparison)
1810 icmp (inverted_tcc_comparison)
1811 ncmp (inverted_tcc_comparison_with_nans)
1812 /* Ideally we'd like to combine the following two patterns
1813 and handle some more cases by using
1814 (logical_inverted_value (cmp @0 @1))
1815 here but for that genmatch would need to "inline" that.
1816 For now implement what forward_propagate_comparison did. */
1818 (bit_not (cmp @0 @1))
1819 (if (VECTOR_TYPE_P (type)
1820 || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
1821 /* Comparison inversion may be impossible for trapping math,
1822 invert_tree_comparison will tell us. But we can't use
1823 a computed operator in the replacement tree thus we have
1824 to play the trick below. */
1825 (with { enum tree_code ic = invert_tree_comparison
1826 (cmp, HONOR_NANS (@0)); }
1832 (bit_xor (cmp @0 @1) integer_truep)
1833 (with { enum tree_code ic = invert_tree_comparison
1834 (cmp, HONOR_NANS (@0)); }
1840 /* Transform comparisons of the form X - Y CMP 0 to X CMP Y.
1841 ??? The transformation is valid for the other operators if overflow
1842 is undefined for the type, but performing it here badly interacts
1843 with the transformation in fold_cond_expr_with_comparison which
1844 attempts to synthesize ABS_EXPR. */
1847 (cmp (minus@2 @0 @1) integer_zerop)
1848 (if (single_use (@2))
1851 /* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the
1852 signed arithmetic case. That form is created by the compiler
1853 often enough for folding it to be of value. One example is in
1854 computing loop trip counts after Operator Strength Reduction. */
1855 (for cmp (simple_comparison)
1856 scmp (swapped_simple_comparison)
1858 (cmp (mult @0 INTEGER_CST@1) integer_zerop@2)
1859 /* Handle unfolded multiplication by zero. */
1860 (if (integer_zerop (@1))
1862 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1863 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1864 /* If @1 is negative we swap the sense of the comparison. */
1865 (if (tree_int_cst_sgn (@1) < 0)
1869 /* Simplify comparison of something with itself. For IEEE
1870 floating-point, we can only do some of these simplifications. */
1873 (if (! FLOAT_TYPE_P (TREE_TYPE (@0))
1874 || ! HONOR_NANS (TYPE_MODE (TREE_TYPE (@0))))
1875 { constant_boolean_node (true, type); }))
1884 || ! FLOAT_TYPE_P (TREE_TYPE (@0))
1885 || ! HONOR_NANS (TYPE_MODE (TREE_TYPE (@0))))
1886 { constant_boolean_node (false, type); })))
1887 (for cmp (unle unge uneq)
1890 { constant_boolean_node (true, type); }))
1893 (if (!flag_trapping_math)
1894 { constant_boolean_node (false, type); }))
1896 /* Fold ~X op ~Y as Y op X. */
1897 (for cmp (simple_comparison)
1899 (cmp (bit_not @0) (bit_not @1))
1902 /* Fold ~X op C as X op' ~C, where op' is the swapped comparison. */
1903 (for cmp (simple_comparison)
1904 scmp (swapped_simple_comparison)
1906 (cmp (bit_not @0) CONSTANT_CLASS_P@1)
1907 (if (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST)
1908 (scmp @0 (bit_not @1)))))
1910 (for cmp (simple_comparison)
1911 /* Fold (double)float1 CMP (double)float2 into float1 CMP float2. */
1913 (cmp (convert@2 @0) (convert? @1))
1914 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
1915 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
1916 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
1917 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
1918 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))))
1921 tree type1 = TREE_TYPE (@1);
1922 if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
1924 REAL_VALUE_TYPE orig = TREE_REAL_CST (@1);
1925 if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
1926 && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
1927 type1 = float_type_node;
1928 if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
1929 && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
1930 type1 = double_type_node;
1933 = (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1)
1934 ? TREE_TYPE (@0) : type1);
1936 (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype))
1937 (cmp (convert:newtype @0) (convert:newtype @1))))))
1941 /* IEEE doesn't distinguish +0 and -0 in comparisons. */
1943 /* a CMP (-0) -> a CMP 0 */
1944 (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
1945 (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
1946 /* x != NaN is always true, other ops are always false. */
1947 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
1948 && ! HONOR_SNANS (@1))
1949 { constant_boolean_node (cmp == NE_EXPR, type); })
1950 /* Fold comparisons against infinity. */
1951 (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
1952 && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
1955 REAL_VALUE_TYPE max;
1956 enum tree_code code = cmp;
1957 bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
1959 code = swap_tree_comparison (code);
1962 /* x > +Inf is always false, if we ignore sNaNs. */
1963 (if (code == GT_EXPR
1964 && ! HONOR_SNANS (@0))
1965 { constant_boolean_node (false, type); })
1966 (if (code == LE_EXPR)
1967 /* x <= +Inf is always true, if we don't care about NaNs. */
1968 (if (! HONOR_NANS (@0))
1969 { constant_boolean_node (true, type); }
1970 /* x <= +Inf is the same as x == x, i.e. !isnan(x). */
1972 /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX. */
1973 (if (code == EQ_EXPR || code == GE_EXPR)
1974 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
1976 (lt @0 { build_real (TREE_TYPE (@0), max); })
1977 (gt @0 { build_real (TREE_TYPE (@0), max); }))))
1978 /* x < +Inf is always equal to x <= DBL_MAX. */
1979 (if (code == LT_EXPR)
1980 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
1982 (ge @0 { build_real (TREE_TYPE (@0), max); })
1983 (le @0 { build_real (TREE_TYPE (@0), max); }))))
1984 /* x != +Inf is always equal to !(x > DBL_MAX). */
1985 (if (code == NE_EXPR)
1986 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
1987 (if (! HONOR_NANS (@0))
1989 (ge @0 { build_real (TREE_TYPE (@0), max); })
1990 (le @0 { build_real (TREE_TYPE (@0), max); }))
1992 (bit_xor (lt @0 { build_real (TREE_TYPE (@0), max); })
1993 { build_one_cst (type); })
1994 (bit_xor (gt @0 { build_real (TREE_TYPE (@0), max); })
1995 { build_one_cst (type); }))))))))))
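/* Illustrative sketch, not a pattern: for double x the rules above give e.g.
   x < -0.0 -> x < 0.0, x <= +Inf -> x == x (false only for NaN), and
   x < +Inf -> x <= DBL_MAX, where DBL_MAX stands for the largest finite
   value of the type. */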
1997 /* If this is a comparison of a real constant with a PLUS_EXPR
1998 or a MINUS_EXPR of a real constant, we can convert it into a
1999 comparison with a revised real constant, provided unsafe math
2000 optimizations are enabled and computing the new constant does not overflow. */
2001 (if (flag_unsafe_math_optimizations)
2002 (for op (plus minus)
2004 (cmp (op @0 REAL_CST@1) REAL_CST@2)
2007 tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
2008 TREE_TYPE (@1), @2, @1);
2010 (if (tem && !TREE_OVERFLOW (tem))
2011 (cmp @0 { tem; }))))))
2013 /* Likewise, we can simplify a comparison of a real constant with
2014 a MINUS_EXPR whose first operand is also a real constant, i.e.
2015 (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
2016 floating-point types only if -fassociative-math is set. */
2017 (if (flag_associative_math)
2019 (cmp (minus REAL_CST@0 @1) REAL_CST@2)
2020 (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
2021 (if (tem && !TREE_OVERFLOW (tem))
2022 (cmp { tem; } @1)))))
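/* Illustrative sketch, not a pattern: with -funsafe-math-optimizations
   x + 1.0 < 3.0 becomes x < 2.0, and with -fassociative-math
   (10.0 - x) < 4.0 becomes 6.0 < x, provided the new constant is computed
   without overflow. */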
2024 /* Fold comparisons against built-in math functions. */
2025 (if (flag_unsafe_math_optimizations
2026 && ! flag_errno_math)
2029 (cmp (sq @0) REAL_CST@1)
2031 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
2033 /* sqrt(x) < y is always false, if y is negative. */
2034 (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
2035 { constant_boolean_node (false, type); })
2036 /* sqrt(x) > y is always true, if y is negative and we
2037 don't care about NaNs, i.e. negative values of x. */
2038 (if (cmp == NE_EXPR || !HONOR_NANS (@0))
2039 { constant_boolean_node (true, type); })
2040 /* sqrt(x) > y is the same as x >= 0, if y is negative. */
2041 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
2042 (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0))
2044 /* sqrt(x) < 0 is always false. */
2045 (if (cmp == LT_EXPR)
2046 { constant_boolean_node (false, type); })
2047 /* sqrt(x) >= 0 is always true if we don't care about NaNs. */
2048 (if (cmp == GE_EXPR && !HONOR_NANS (@0))
2049 { constant_boolean_node (true, type); })
2050 /* sqrt(x) <= 0 -> x == 0. */
2051 (if (cmp == LE_EXPR)
2053 /* Otherwise sqrt(x) cmp 0 -> x cmp 0. Here cmp can be >=, >,
2054 == or !=. In the last case:
2056 (sqrt(x) != 0) == (NaN != 0) == true == (x != 0)
2058 if x is negative or NaN. Due to -funsafe-math-optimizations,
2059 the results for other x follow from natural arithmetic. */
2061 (if (cmp == GT_EXPR || cmp == GE_EXPR)
2065 real_arithmetic (&c2, MULT_EXPR,
2066 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
2067 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
2069 (if (REAL_VALUE_ISINF (c2))
2070 /* sqrt(x) > y is x == +Inf, when y is very large. */
2071 (if (HONOR_INFINITIES (@0))
2072 (eq @0 { build_real (TREE_TYPE (@0), c2); })
2073 { constant_boolean_node (false, type); })
2074 /* sqrt(x) > c is the same as x > c*c. */
2075 (cmp @0 { build_real (TREE_TYPE (@0), c2); }))))
2076 (if (cmp == LT_EXPR || cmp == LE_EXPR)
2080 real_arithmetic (&c2, MULT_EXPR,
2081 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
2082 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
2084 (if (REAL_VALUE_ISINF (c2))
2086 /* sqrt(x) < y is always true, when y is a very large
2087 value and we don't care about NaNs or Infinities. */
2088 (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
2089 { constant_boolean_node (true, type); })
2090 /* sqrt(x) < y is x != +Inf when y is very large and we
2091 don't care about NaNs. */
2092 (if (! HONOR_NANS (@0))
2093 (ne @0 { build_real (TREE_TYPE (@0), c2); }))
2094 /* sqrt(x) < y is x >= 0 when y is very large and we
2095 don't care about Infinities. */
2096 (if (! HONOR_INFINITIES (@0))
2097 (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
2098 /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */
2101 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
2102 (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
2103 /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */
2104 (if (! HONOR_NANS (@0))
2105 (cmp @0 { build_real (TREE_TYPE (@0), c2); })
2106 /* sqrt(x) < c is the same as x >= 0 && x < c*c. */
2109 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
2110 (cmp @0 { build_real (TREE_TYPE (@0), c2); }))))))))))))
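/* Illustrative sketch, not a pattern: with -funsafe-math-optimizations and
   -fno-math-errno the rules above give e.g. sqrt(x) > 2.0 -> x > 4.0,
   sqrt(x) < -1.0 -> false and sqrt(x) <= 0.0 -> x == 0.0, with extra
   guards where NaNs or infinities have to be honored. */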
2112 /* Unordered tests if either argument is a NaN. */
2114 (bit_ior (unordered @0 @0) (unordered @1 @1))
2115 (if (types_match (@0, @1))
2118 (bit_and (ordered @0 @0) (ordered @1 @1))
2119 (if (types_match (@0, @1))
2122 (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1))
2125 (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1))
2128 /* -A CMP -B -> B CMP A. */
2129 (for cmp (tcc_comparison)
2130 scmp (swapped_tcc_comparison)
2132 (cmp (negate @0) (negate @1))
2133 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2134 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2135 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
2138 (cmp (negate @0) CONSTANT_CLASS_P@1)
2139 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2140 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2141 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
2142 (with { tree tem = fold_unary (NEGATE_EXPR, TREE_TYPE (@0), @1); }
2143 (if (tem && !TREE_OVERFLOW (tem))
2144 (scmp @0 { tem; }))))))
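/* Illustrative sketch, not a pattern: for signed int a, b with undefined
   overflow the rules above rewrite -a < -b as b < a and -a < 3 as a > -3. */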
2146 /* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0. */
2149 (op (abs @0) zerop@1)
2152 /* From fold_sign_changed_comparison and fold_widened_comparison. */
2153 (for cmp (simple_comparison)
2155 (cmp (convert@0 @00) (convert?@1 @10))
2156 (if (TREE_CODE (TREE_TYPE (@0)) == INTEGER_TYPE
2157 /* Disable this optimization if we're casting a function pointer
2158 type on targets that require function pointer canonicalization. */
2159 && !(targetm.have_canonicalize_funcptr_for_compare ()
2160 && TREE_CODE (TREE_TYPE (@00)) == POINTER_TYPE
2161 && TREE_CODE (TREE_TYPE (TREE_TYPE (@00))) == FUNCTION_TYPE)
2163 (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
2164 && (TREE_CODE (@10) == INTEGER_CST
2165 || (@1 != @10 && types_match (TREE_TYPE (@10), TREE_TYPE (@00))))
2166 && (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
2169 && (POINTER_TYPE_P (TREE_TYPE (@00)) == POINTER_TYPE_P (TREE_TYPE (@0))))
2170 /* ??? The special-casing of INTEGER_CST conversion was in the original
2171 code and is kept here to avoid a spurious overflow flag on the resulting
2172 constant which fold_convert produces. */
2173 (if (TREE_CODE (@1) == INTEGER_CST)
2174 (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
2175 TREE_OVERFLOW (@1)); })
2176 (cmp @00 (convert @1)))
2178 (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
2179 /* If possible, express the comparison in the shorter mode. */
2180 (if ((cmp == EQ_EXPR || cmp == NE_EXPR
2181 || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00)))
2182 && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
2183 || ((TYPE_PRECISION (TREE_TYPE (@00))
2184 >= TYPE_PRECISION (TREE_TYPE (@10)))
2185 && (TYPE_UNSIGNED (TREE_TYPE (@00))
2186 == TYPE_UNSIGNED (TREE_TYPE (@10))))
2187 || (TREE_CODE (@10) == INTEGER_CST
2188 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
2189 && int_fits_type_p (@10, TREE_TYPE (@00)))))
2190 (cmp @00 (convert @10))
2191 (if (TREE_CODE (@10) == INTEGER_CST
2192 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
2193 && !int_fits_type_p (@10, TREE_TYPE (@00)))
2196 tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
2197 tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
2198 bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
2199 bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
2201 (if (above || below)
2202 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
2203 { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
2204 (if (cmp == LT_EXPR || cmp == LE_EXPR)
2205 { constant_boolean_node (above ? true : false, type); }
2206 (if (cmp == GT_EXPR || cmp == GE_EXPR)
2207 { constant_boolean_node (above ? false : true, type); }))))))))))))
2210 /* A local variable can never be pointed to by
2211 the default SSA name of an incoming parameter.
2212 SSA names are canonicalized to 2nd place. */
2214 (cmp addr@0 SSA_NAME@1)
2215 (if (SSA_NAME_IS_DEFAULT_DEF (@1)
2216 && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL)
2217 (with { tree base = get_base_address (TREE_OPERAND (@0, 0)); }
2218 (if (TREE_CODE (base) == VAR_DECL
2219 && auto_var_in_fn_p (base, current_function_decl))
2220 (if (cmp == NE_EXPR)
2221 { constant_boolean_node (true, type); }
2222 { constant_boolean_node (false, type); }))))))
2224 /* Equality compare simplifications from fold_binary */
2227 /* If we have (A | C) == D where C & ~D != 0, convert this into 0.
2228 Similarly for NE_EXPR. */
2230 (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
2231 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
2232 && wi::bit_and_not (@1, @2) != 0)
2233 { constant_boolean_node (cmp == NE_EXPR, type); }))
2235 /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y. */
2237 (cmp (bit_xor @0 @1) integer_zerop)
2240 /* (X ^ Y) == Y becomes X == 0.
2241 Likewise (X ^ Y) == X becomes Y == 0. */
2243 (cmp:c (bit_xor:c @0 @1) @0)
2244 (cmp @1 { build_zero_cst (TREE_TYPE (@1)); }))
2246 /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2). */
2248 (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
2249 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
2250 (cmp @0 (bit_xor @1 (convert @2)))))
2253 (cmp (convert? addr@0) integer_zerop)
2254 (if (tree_single_nonzero_warnv_p (@0, NULL))
2255 { constant_boolean_node (cmp == NE_EXPR, type); })))
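/* Illustrative sketch, not a pattern: the rules above give e.g.
   (a | 4) == 3 -> false, (a ^ b) != 0 -> a != b, (a ^ 5) == 7 -> a == 2,
   and &decl != 0 -> true when the address is known to be nonzero. */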
2257 /* If we have (A & C) == C where C is a power of 2, convert this into
2258 (A & C) != 0. Similarly for NE_EXPR. */
2262 (cmp (bit_and@2 @0 integer_pow2p@1) @1)
2263 (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))
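/* Illustrative sketch, not a pattern: (a & 8) == 8 becomes (a & 8) != 0 and
   (a & 8) != 8 becomes (a & 8) == 0, since 8 is a power of two. */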
2265 /* If we have (A & C) != 0 where C is the sign bit of A, convert
2266 this into A < 0. Similarly for (A & C) == 0 into A >= 0. */
2270 (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
2271 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2272 && (TYPE_PRECISION (TREE_TYPE (@0))
2273 == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@0))))
2274 && element_precision (@2) >= element_precision (@0)
2275 && wi::only_sign_bit_p (@1, element_precision (@0)))
2276 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
2277 (ncmp (convert:stype @0) { build_zero_cst (stype); })))))
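/* Illustrative sketch, not a pattern: for a 32-bit unsigned int a,
   (a & 0x80000000) != 0 becomes (int) a < 0 and
   (a & 0x80000000) == 0 becomes (int) a >= 0. */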
2279 /* When the addresses are not directly of decls compare base and offset.
2280 This implements some remaining parts of fold_comparison address
2281 comparisons but is not yet a complete replacement. Still it is good
2282 enough to keep fold_stmt from regressing when not dispatching to fold_binary. */
2283 (for cmp (simple_comparison)
2285 (cmp (convert1?@2 addr@0) (convert2? addr@1))
2288 HOST_WIDE_INT off0, off1;
2289 tree base0 = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off0);
2290 tree base1 = get_addr_base_and_unit_offset (TREE_OPERAND (@1, 0), &off1);
2291 if (base0 && TREE_CODE (base0) == MEM_REF)
2293 off0 += mem_ref_offset (base0).to_short_addr ();
2294 base0 = TREE_OPERAND (base0, 0);
2296 if (base1 && TREE_CODE (base1) == MEM_REF)
2298 off1 += mem_ref_offset (base1).to_short_addr ();
2299 base1 = TREE_OPERAND (base1, 0);
2302 (if (base0 && base1)
2306 if (decl_in_symtab_p (base0)
2307 && decl_in_symtab_p (base1))
2308 equal = symtab_node::get_create (base0)
2309 ->equal_address_to (symtab_node::get_create (base1));
2310 else if ((DECL_P (base0)
2311 || TREE_CODE (base0) == SSA_NAME
2312 || TREE_CODE (base0) == STRING_CST)
2314 || TREE_CODE (base1) == SSA_NAME
2315 || TREE_CODE (base1) == STRING_CST))
2316 equal = (base0 == base1);
2319 && (cmp == EQ_EXPR || cmp == NE_EXPR
2320 /* If the offsets are equal we can ignore overflow. */
2322 || POINTER_TYPE_OVERFLOW_UNDEFINED
2323 /* Or if we compare using pointers to decls or strings. */
2324 || (POINTER_TYPE_P (TREE_TYPE (@2))
2325 && (DECL_P (base0) || TREE_CODE (base0) == STRING_CST))))
2327 (if (cmp == EQ_EXPR)
2328 { constant_boolean_node (off0 == off1, type); })
2329 (if (cmp == NE_EXPR)
2330 { constant_boolean_node (off0 != off1, type); })
2331 (if (cmp == LT_EXPR)
2332 { constant_boolean_node (off0 < off1, type); })
2333 (if (cmp == LE_EXPR)
2334 { constant_boolean_node (off0 <= off1, type); })
2335 (if (cmp == GE_EXPR)
2336 { constant_boolean_node (off0 >= off1, type); })
2337 (if (cmp == GT_EXPR)
2338 { constant_boolean_node (off0 > off1, type); }))
2340 && DECL_P (base0) && DECL_P (base1)
2341 /* If we compare these as integers, require equal offsets. */
2342 && (!INTEGRAL_TYPE_P (TREE_TYPE (@2))
2345 (if (cmp == EQ_EXPR)
2346 { constant_boolean_node (false, type); })
2347 (if (cmp == NE_EXPR)
2348 { constant_boolean_node (true, type); })))))))))
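/* Illustrative sketch, not a pattern: for two distinct local variables a
   and b, &a == &b folds to false and &a != &b to true; for a common base,
   e.g. &arr[1] < &arr[2], the result is decided by the constant offsets. */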
2350 /* Non-equality compare simplifications from fold_binary */
2351 (for cmp (lt gt le ge)
2352 /* Comparisons with the highest or lowest possible integer of
2353 the specified precision will have known values. */
2355 (cmp (convert?@2 @0) INTEGER_CST@1)
2356 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
2357 && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)))
2360 tree arg1_type = TREE_TYPE (@1);
2361 unsigned int prec = TYPE_PRECISION (arg1_type);
2362 wide_int max = wi::max_value (arg1_type);
2363 wide_int signed_max = wi::max_value (prec, SIGNED);
2364 wide_int min = wi::min_value (arg1_type);
2367 (if (wi::eq_p (@1, max))
2369 (if (cmp == GT_EXPR)
2370 { constant_boolean_node (false, type); })
2371 (if (cmp == GE_EXPR)
2373 (if (cmp == LE_EXPR)
2374 { constant_boolean_node (true, type); })
2375 (if (cmp == LT_EXPR)
2377 (if (wi::eq_p (@1, min))
2379 (if (cmp == LT_EXPR)
2380 { constant_boolean_node (false, type); })
2381 (if (cmp == LE_EXPR)
2383 (if (cmp == GE_EXPR)
2384 { constant_boolean_node (true, type); })
2385 (if (cmp == GT_EXPR)
2387 (if (wi::eq_p (@1, max - 1))
2389 (if (cmp == GT_EXPR)
2390 (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))
2391 (if (cmp == LE_EXPR)
2392 (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))))
2393 (if (wi::eq_p (@1, min + 1))
2395 (if (cmp == GE_EXPR)
2396 (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))
2397 (if (cmp == LT_EXPR)
2398 (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))))
2399 (if (wi::eq_p (@1, signed_max)
2400 && TYPE_UNSIGNED (arg1_type)
2401 /* We will flip the signedness of the comparison operator
2402 associated with the mode of @1, so the sign bit is
2403 specified by this mode. Check that @1 is the signed
2404 max associated with this sign bit. */
2405 && prec == GET_MODE_PRECISION (TYPE_MODE (arg1_type))
2406 /* signed_type does not work on pointer types. */
2407 && INTEGRAL_TYPE_P (arg1_type))
2408 /* The following case also applies to X < signed_max+1
2409 and X >= signed_max+1 because of previous transformations. */
2410 (if (cmp == LE_EXPR || cmp == GT_EXPR)
2411 (with { tree st = signed_type_for (arg1_type); }
2412 (if (cmp == LE_EXPR)
2413 (ge (convert:st @0) { build_zero_cst (st); })
2414 (lt (convert:st @0) { build_zero_cst (st); }))))))))))
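/* Illustrative sketch, not a pattern: for unsigned char x the rules above
   give e.g. x > 255 -> false, x >= 255 -> x == 255, x > 254 -> x == 255,
   and x <= 127 -> (signed char) x >= 0. */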
2416 (for cmp (unordered ordered unlt unle ungt unge uneq ltgt)
2417 /* If the second operand is NaN, the result is constant. */
2420 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
2421 && (cmp != LTGT_EXPR || ! flag_trapping_math))
2422 { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR
2423 ? false : true, type); })))
2425 /* bool_var != 0 becomes bool_var. */
2427 (ne @0 integer_zerop)
2428 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
2429 && types_match (type, TREE_TYPE (@0)))
2431 /* bool_var == 1 becomes bool_var. */
2433 (eq @0 integer_onep)
2434 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
2435 && types_match (type, TREE_TYPE (@0)))
2438 /* Do not handle bool_var == 0 becomes !bool_var or
2439 bool_var != 1 becomes !bool_var
2440 here because that is only good in assignment context as long
2441 as we require a tcc_comparison in GIMPLE_CONDs where we'd
2442 replace if (x == 0) with tem = ~x; if (tem != 0), which is
2443 clearly less optimal and which we'll transform again in forwprop. */
2446 /* Simplification of math builtins. These rules must all be optimizations
2447 as well as IL simplifications. If there is a possibility that the new
2448 form could be a pessimization, the rule should go in the canonicalization
2449 section that follows this one.
2451 Rules can generally go in this section if they satisfy one of the following:
2454 - the rule describes an identity
2456 - the rule replaces calls with something as simple as addition or multiplication
2459 - the rule contains unary calls only and simplifies the surrounding
2460 arithmetic. (The idea here is to exclude non-unary calls in which
2461 one operand is constant and in which the call is known to be cheap
2462 when the operand has that value.) */
2464 (if (flag_unsafe_math_optimizations)
2465 /* Simplify sqrt(x) * sqrt(x) -> x. */
2467 (mult (SQRT@1 @0) @1)
2468 (if (!HONOR_SNANS (type))
2471 /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y). */
2472 (for root (SQRT CBRT)
2474 (mult (root:s @0) (root:s @1))
2475 (root (mult @0 @1))))
2477 /* Simplify expN(x) * expN(y) -> expN(x+y). */
2478 (for exps (EXP EXP2 EXP10 POW10)
2480 (mult (exps:s @0) (exps:s @1))
2481 (exps (plus @0 @1))))
2483 /* Simplify a/root(b/c) into a*root(c/b). */
2484 (for root (SQRT CBRT)
2486 (rdiv @0 (root:s (rdiv:s @1 @2)))
2487 (mult @0 (root (rdiv @2 @1)))))
2489 /* Simplify x/expN(y) into x*expN(-y). */
2490 (for exps (EXP EXP2 EXP10 POW10)
2492 (rdiv @0 (exps:s @1))
2493 (mult @0 (exps (negate @1)))))
2495 (for logs (LOG LOG2 LOG10 LOG10)
2496 exps (EXP EXP2 EXP10 POW10)
2497 /* logN(expN(x)) -> x. */
2501 /* expN(logN(x)) -> x. */
2506 /* Optimize logN(func()) for various exponential functions. We
2507 want to determine the value "x" and the power "exponent" in
2508 order to transform logN(x**exponent) into exponent*logN(x). */
2509 (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10)
2510 exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2)
2517 CASE_FLT_FN (BUILT_IN_EXP):
2518 /* Prepare to do logN(exp(exponent)) -> exponent*logN(e). */
2519 x = build_real_truncate (type, dconst_e ());
2521 CASE_FLT_FN (BUILT_IN_EXP2):
2522 /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2). */
2523 x = build_real (type, dconst2);
2525 CASE_FLT_FN (BUILT_IN_EXP10):
2526 CASE_FLT_FN (BUILT_IN_POW10):
2527 /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10). */
2529 REAL_VALUE_TYPE dconst10;
2530 real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
2531 x = build_real (type, dconst10);
2538 (mult (logs { x; }) @0))))
2550 CASE_FLT_FN (BUILT_IN_SQRT):
2551 /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x). */
2552 x = build_real (type, dconsthalf);
2554 CASE_FLT_FN (BUILT_IN_CBRT):
2555 /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x). */
2556 x = build_real_truncate (type, dconst_third ());
2562 (mult { x; } (logs @0)))))
2564 /* logN(pow(x,exponent)) -> exponent*logN(x). */
2565 (for logs (LOG LOG2 LOG10)
2569 (mult @1 (logs @0))))
2574 exps (EXP EXP2 EXP10 POW10)
2575 /* sqrt(expN(x)) -> expN(x*0.5). */
2578 (exps (mult @0 { build_real (type, dconsthalf); })))
2579 /* cbrt(expN(x)) -> expN(x/3). */
2582 (exps (mult @0 { build_real_truncate (type, dconst_third ()); })))
2583 /* pow(expN(x), y) -> expN(x*y). */
2586 (exps (mult @0 @1))))
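/* Illustrative sketch, not a pattern: under -funsafe-math-optimizations the
   rules above give e.g. log(exp(x)) -> x, log(sqrt(x)) -> 0.5*log(x) and
   pow(exp(x), y) -> exp(x*y). */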
2588 /* tan(atan(x)) -> x. */
2595 /* cabs(x+0i) or cabs(0+xi) -> abs(x). */
2597 (CABS (complex:c @0 real_zerop@1))
2600 /* trunc(trunc(x)) -> trunc(x), etc. */
2601 (for fns (TRUNC FLOOR CEIL ROUND NEARBYINT RINT)
2605 /* f(x) -> x if x is integer valued and f does nothing for such values. */
2606 (for fns (TRUNC FLOOR CEIL ROUND NEARBYINT)
2608 (fns integer_valued_real_p@0)
2610 /* Same for rint. We have to check flag_errno_math because
2611 integer_valued_real_p accepts +Inf, -Inf and NaNs as integers. */
2612 (if (!flag_errno_math)
2614 (RINT integer_valued_real_p@0)
2617 /* hypot(x,0) and hypot(0,x) -> abs(x). */
2619 (hypot:c @0 real_zerop@1)
2622 /* pow(1,x) -> 1. */
2624 (POW real_onep@0 @1)
2628 /* copysign(x,x) -> x. */
2633 /* copysign(x,y) -> fabs(x) if y is nonnegative. */
2634 (COPYSIGN @0 tree_expr_nonnegative_p@1)
2637 (for scale (LDEXP SCALBN SCALBLN)
2638 /* ldexp(0, x) -> 0. */
2640 (scale real_zerop@0 @1)
2642 /* ldexp(x, 0) -> x. */
2644 (scale @0 integer_zerop@1)
2646 /* ldexp(x, y) -> x if x is +-Inf or NaN. */
2648 (scale REAL_CST@0 @1)
2649 (if (!real_isfinite (TREE_REAL_CST_PTR (@0)))
2652 /* Canonicalization of sequences of math builtins. These rules represent
2653 IL simplifications but are not necessarily optimizations.
2655 The sincos pass is responsible for picking "optimal" implementations
2656 of math builtins, which may be more complicated and can sometimes go
2657 the other way, e.g. converting pow into a sequence of sqrts.
2658 We only want to do these canonicalizations before the pass has run. */
2660 (if (flag_unsafe_math_optimizations && canonicalize_math_p ())
2661 /* Simplify tan(x) * cos(x) -> sin(x). */
2663 (mult:c (TAN:s @0) (COS:s @0))
2666 /* Simplify x * pow(x,c) -> pow(x,c+1). */
2668 (mult @0 (POW:s @0 REAL_CST@1))
2669 (if (!TREE_OVERFLOW (@1))
2670 (POW @0 (plus @1 { build_one_cst (type); }))))
2672 /* Simplify sin(x) / cos(x) -> tan(x). */
2674 (rdiv (SIN:s @0) (COS:s @0))
2677 /* Simplify cos(x) / sin(x) -> 1 / tan(x). */
2679 (rdiv (COS:s @0) (SIN:s @0))
2680 (rdiv { build_one_cst (type); } (TAN @0)))
2682 /* Simplify sin(x) / tan(x) -> cos(x). */
2684 (rdiv (SIN:s @0) (TAN:s @0))
2685 (if (! HONOR_NANS (@0)
2686 && ! HONOR_INFINITIES (@0))
2689 /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */
2691 (rdiv (TAN:s @0) (SIN:s @0))
2692 (if (! HONOR_NANS (@0)
2693 && ! HONOR_INFINITIES (@0))
2694 (rdiv { build_one_cst (type); } (COS @0))))
2696 /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */
2698 (mult (POW:s @0 @1) (POW:s @0 @2))
2699 (POW @0 (plus @1 @2)))
2701 /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */
2703 (mult (POW:s @0 @1) (POW:s @2 @1))
2704 (POW (mult @0 @2) @1))
2706 /* Simplify pow(x,c) / x -> pow(x,c-1). */
2708 (rdiv (POW:s @0 REAL_CST@1) @0)
2709 (if (!TREE_OVERFLOW (@1))
2710 (POW @0 (minus @1 { build_one_cst (type); }))))
2712 /* Simplify x / pow (y,z) -> x * pow(y,-z). */
2714 (rdiv @0 (POW:s @1 @2))
2715 (mult @0 (POW @1 (negate @2))))
2720 /* sqrt(sqrt(x)) -> pow(x,1/4). */
2723 (pows @0 { build_real (type, dconst_quarter ()); }))
2724 /* sqrt(cbrt(x)) -> pow(x,1/6). */
2727 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
2728 /* cbrt(sqrt(x)) -> pow(x,1/6). */
2731 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
2732 /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative. */
2734 (cbrts (cbrts tree_expr_nonnegative_p@0))
2735 (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
2736 /* sqrt(pow(x,y)) -> pow(|x|,y*0.5). */
2738 (sqrts (pows @0 @1))
2739 (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
2740 /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative. */
2742 (cbrts (pows tree_expr_nonnegative_p@0 @1))
2743 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
2744 /* pow(sqrt(x),y) -> pow(x,y*0.5). */
2746 (pows (sqrts @0) @1)
2747 (pows @0 (mult @1 { build_real (type, dconsthalf); })))
2748 /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative. */
2750 (pows (cbrts tree_expr_nonnegative_p@0) @1)
2751 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
2752 /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative. */
2754 (pows (pows tree_expr_nonnegative_p@0 @1) @2)
2755 (pows @0 (mult @1 @2))))
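/* Illustrative sketch, not a pattern: under the same flags the rules above
   give e.g. sqrt(sqrt(x)) -> pow(x, 0.25) and, for nonnegative x,
   pow(pow(x, 2.0), 3.0) -> pow(x, 6.0). */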
2757 /* cabs(x+xi) -> fabs(x)*sqrt(2). */
2759 (CABS (complex @0 @0))
2760 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
2762 /* hypot(x,x) -> fabs(x)*sqrt(2). */
2765 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
2767 /* cexp(x+yi) -> exp(x)*cexpi(y). */
2772 (cexps compositional_complex@0)
2773 (if (targetm.libc_has_function (function_c99_math_complex))
2775 (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0))))
2776 (mult @1 (imagpart @2)))))))
2778 (if (canonicalize_math_p ())
2779 /* floor(x) -> trunc(x) if x is nonnegative. */
2783 (floors tree_expr_nonnegative_p@0)
2786 (match double_value_p
2788 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
2789 (for froms (BUILT_IN_TRUNCL
2801 /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double. */
2802 (if (optimize && canonicalize_math_p ())
2804 (froms (convert double_value_p@0))
2805 (convert (tos @0)))))
2807 (match float_value_p
2809 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
2810 (for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
2811 BUILT_IN_FLOORL BUILT_IN_FLOOR
2812 BUILT_IN_CEILL BUILT_IN_CEIL
2813 BUILT_IN_ROUNDL BUILT_IN_ROUND
2814 BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
2815 BUILT_IN_RINTL BUILT_IN_RINT)
2816 tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
2817 BUILT_IN_FLOORF BUILT_IN_FLOORF
2818 BUILT_IN_CEILF BUILT_IN_CEILF
2819 BUILT_IN_ROUNDF BUILT_IN_ROUNDF
2820 BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
2821 BUILT_IN_RINTF BUILT_IN_RINTF)
2822 /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc., if x is a float. */
2824 (if (optimize && canonicalize_math_p ())
2826 (froms (convert float_value_p@0))
2827 (convert (tos @0)))))
2829 (for froms (XFLOORL XCEILL XROUNDL XRINTL)
2830 tos (XFLOOR XCEIL XROUND XRINT)
2831 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double. */
2832 (if (optimize && canonicalize_math_p ())
2834 (froms (convert double_value_p@0))
2837 (for froms (XFLOORL XCEILL XROUNDL XRINTL
2838 XFLOOR XCEIL XROUND XRINT)
2839 tos (XFLOORF XCEILF XROUNDF XRINTF)
2840 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc., if x is a float. */
2842 (if (optimize && canonicalize_math_p ())
2844 (froms (convert float_value_p@0))
2847 (if (canonicalize_math_p ())
2848 /* xfloor(x) -> fix_trunc(x) if x is nonnegative. */
2849 (for floors (IFLOOR LFLOOR LLFLOOR)
2851 (floors tree_expr_nonnegative_p@0)
2854 (if (canonicalize_math_p ())
2855 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued. */
2856 (for fns (IFLOOR LFLOOR LLFLOOR
2858 IROUND LROUND LLROUND)
2860 (fns integer_valued_real_p@0)
2862 (if (!flag_errno_math)
2863 /* xrint(x) -> fix_trunc(x), etc., if x is integer valued. */
2864 (for rints (IRINT LRINT LLRINT)
2866 (rints integer_valued_real_p@0)
2869 (if (canonicalize_math_p ())
2870 (for ifn (IFLOOR ICEIL IROUND IRINT)
2871 lfn (LFLOOR LCEIL LROUND LRINT)
2872 llfn (LLFLOOR LLCEIL LLROUND LLRINT)
2873 /* Canonicalize iround (x) to lround (x) on ILP32 targets where
2874 sizeof (int) == sizeof (long). */
2875 (if (TYPE_PRECISION (integer_type_node)
2876 == TYPE_PRECISION (long_integer_type_node))
2879 (lfn:long_integer_type_node @0)))
2880 /* Canonicalize llround (x) to lround (x) on LP64 targets where
2881 sizeof (long long) == sizeof (long). */
2882 (if (TYPE_PRECISION (long_long_integer_type_node)
2883 == TYPE_PRECISION (long_integer_type_node))
2886 (lfn:long_integer_type_node @0)))))
2888 /* cproj(x) -> x if we're ignoring infinities. */
2891 (if (!HONOR_INFINITIES (type))
2894 /* If the real part is inf and the imag part is known to be
2895 nonnegative, return (inf + 0i). */
2897 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
2898 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
2899 { build_complex_inf (type, false); }))
2901 /* If the imag part is inf, return (inf+I*copysign(0,imag)). */
2903 (CPROJ (complex @0 REAL_CST@1))
2904 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
2905 { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))
2911 (pows @0 REAL_CST@1)
2913 const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
2914 REAL_VALUE_TYPE tmp;
2917 /* pow(x,0) -> 1. */
2918 (if (real_equal (value, &dconst0))
2919 { build_real (type, dconst1); })
2920 /* pow(x,1) -> x. */
2921 (if (real_equal (value, &dconst1))
2923 /* pow(x,-1) -> 1/x. */
2924 (if (real_equal (value, &dconstm1))
2925 (rdiv { build_real (type, dconst1); } @0))
2926 /* pow(x,0.5) -> sqrt(x). */
2927 (if (flag_unsafe_math_optimizations
2928 && canonicalize_math_p ()
2929 && real_equal (value, &dconsthalf))
2931 /* pow(x,1/3) -> cbrt(x). */
2932 (if (flag_unsafe_math_optimizations
2933 && canonicalize_math_p ()
2934 && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
2935 real_equal (value, &tmp)))
2938 /* powi(1,x) -> 1. */
2940 (POWI real_onep@0 @1)
2944 (POWI @0 INTEGER_CST@1)
2946 /* powi(x,0) -> 1. */
2947 (if (wi::eq_p (@1, 0))
2948 { build_real (type, dconst1); })
2949 /* powi(x,1) -> x. */
2950 (if (wi::eq_p (@1, 1))
2952 /* powi(x,-1) -> 1/x. */
2953 (if (wi::eq_p (@1, -1))
2954 (rdiv { build_real (type, dconst1); } @0))))
2956 /* Narrowing of arithmetic and logical operations.
2958 These are conceptually similar to the transformations performed for
2959 the C/C++ front-ends by shorten_binary_op and shorten_compare. Long
2960 term we want to move all that code out of the front-ends into here. */
2962 /* If we have a narrowing conversion of an arithmetic operation where
2963 both operands are widening conversions from the same type as the outer
2964 narrowing conversion, convert the innermost operands to a suitable
2965 unsigned type (to avoid introducing undefined behaviour), perform the
2966 operation and convert the result to the desired type. */
2967 (for op (plus minus)
2969 (convert (op:s (convert@2 @0) (convert@3 @1)))
2970 (if (INTEGRAL_TYPE_P (type)
2971 /* We check for type compatibility between @0 and @1 below,
2972 so there's no need to check that @1/@3 are integral types. */
2973 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
2974 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
2975 /* The precision of the type of each operand must match the
2976 precision of the mode of each operand, similarly for the result. */
2978 && (TYPE_PRECISION (TREE_TYPE (@0))
2979 == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@0))))
2980 && (TYPE_PRECISION (TREE_TYPE (@1))
2981 == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@1))))
2982 && TYPE_PRECISION (type) == GET_MODE_PRECISION (TYPE_MODE (type))
2983 /* The inner conversion must be a widening conversion. */
2984 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
2985 && types_match (@0, @1)
2986 && types_match (@0, type))
2987 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
2988 (convert (op @0 @1))
2989 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
2990 (convert (op (convert:utype @0) (convert:utype @1))))))))
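/* Illustrative sketch, not a pattern: for short a, b promoted to int, the
   rule above rewrites (short) ((int) a + (int) b) as
   (short) ((unsigned short) a + (unsigned short) b), doing the addition in
   the narrow unsigned type so no undefined signed overflow is introduced. */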
2992 /* This is another case of narrowing, specifically when there's an outer
2993 BIT_AND_EXPR which masks off bits outside the type of the innermost
2994 operands. Like the previous case we have to convert the operands
2995 to unsigned types to avoid introducing undefined behaviour for the
2996 arithmetic operation. */
2997 (for op (minus plus)
2999 (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
3000 (if (INTEGRAL_TYPE_P (type)
3001 /* We check for type compatibility between @0 and @1 below,
3002 so there's no need to check that @1/@3 are integral types. */
3003 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
3004 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
3005 /* The precision of the type of each operand must match the
3006 precision of the mode of each operand, similarly for the result. */
3008 && (TYPE_PRECISION (TREE_TYPE (@0))
3009 == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@0))))
3010 && (TYPE_PRECISION (TREE_TYPE (@1))
3011 == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@1))))
3012 && TYPE_PRECISION (type) == GET_MODE_PRECISION (TYPE_MODE (type))
3013 /* The inner conversion must be a widening conversion. */
3014 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
3015 && types_match (@0, @1)
3016 && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
3017 <= TYPE_PRECISION (TREE_TYPE (@0)))
3018 && (wi::bit_and (@4, wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
3019 true, TYPE_PRECISION (type))) == 0))
3020 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
3021 (with { tree ntype = TREE_TYPE (@0); }
3022 (convert (bit_and (op @0 @1) (convert:ntype @4))))
3023 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
3024 (convert (bit_and (op (convert:utype @0) (convert:utype @1))
3025 (convert:utype @4))))))))
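/* Illustrative sketch, not a pattern: for unsigned char a, b promoted to
   int, the rule above rewrites ((int) a + (int) b) & 0xff so that the
   addition is done in unsigned char and only then widened, which is
   equivalent to (int) (unsigned char) (a + b). */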
3027 /* Transform (@0 < @1 and @0 < @2) to use min,
3028 (@0 > @1 and @0 > @2) to use max. */
3029 (for op (lt le gt ge)
3030 ext (min min max max)
3032 (bit_and (op:s @0 @1) (op:s @0 @2))
3033 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
3034 (op @0 (ext @1 @2)))))
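/* Illustrative sketch, not a pattern: for integral x, a, b the rule above
   turns x < a && x < b into x < MIN (a, b) and x > a && x > b into
   x > MAX (a, b), once the && has been lowered to a BIT_AND_EXPR. */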
3037 /* signbit(x) -> 0 if x is nonnegative. */
3038 (SIGNBIT tree_expr_nonnegative_p@0)
3039 { integer_zero_node; })
3042 /* signbit(x) -> x<0 if x doesn't have signed zeros. */
3044 (if (!HONOR_SIGNED_ZEROS (@0))
3045 (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); }))))