/* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
   This file is consumed by genmatch which produces gimple-match.c
   and generic-match.c from it.

   Copyright (C) 2014-2019 Free Software Foundation, Inc.
   Contributed by Richard Biener <rguenther@suse.de>
   and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */


/* Generic tree predicates we inherit.  */
(define_predicates
   integer_onep integer_zerop integer_all_onesp integer_minus_onep
   integer_each_onep integer_truep integer_nonzerop
   real_zerop real_onep real_minus_onep
   zerop
   initializer_each_zero_or_onep
   CONSTANT_CLASS_P
   tree_expr_nonnegative_p
   tree_expr_nonzero_p
   integer_valued_real_p
   integer_pow2p
   uniform_integer_cst_p
   HONOR_NANS
   uniform_vector_p)

/* Operator lists.  */
(define_operator_list tcc_comparison
  lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
(define_operator_list inverted_tcc_comparison
  ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
(define_operator_list inverted_tcc_comparison_with_nans
  unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
(define_operator_list swapped_tcc_comparison
  gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
(define_operator_list simple_comparison lt le eq ne ge gt)
(define_operator_list swapped_simple_comparison gt ge eq ne le lt)

#include "cfn-operators.pd"

/* Define operand lists for math rounding functions {,i,l,ll}FN,
   where the versions prefixed with "i" return an int, those prefixed with
   "l" return a long and those prefixed with "ll" return a long long.

   Also define operand lists:

     X<FN>F for all float functions, in the order i, l, ll
     X<FN> for all double functions, in the same order
     X<FN>L for all long double functions, in the same order.  */
#define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
  (define_operator_list X##FN##F BUILT_IN_I##FN##F \
                                 BUILT_IN_L##FN##F \
                                 BUILT_IN_LL##FN##F) \
  (define_operator_list X##FN BUILT_IN_I##FN \
                              BUILT_IN_L##FN \
                              BUILT_IN_LL##FN) \
  (define_operator_list X##FN##L BUILT_IN_I##FN##L \
                                 BUILT_IN_L##FN##L \
                                 BUILT_IN_LL##FN##L)

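/* As an illustration derived from the macro above,
   DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR) expands to
     (define_operator_list XFLOORF BUILT_IN_IFLOORF
                                   BUILT_IN_LFLOORF
                                   BUILT_IN_LLFLOORF)
   plus the analogous XFLOOR and XFLOORL lists, so XFLOOR matches the
   double rounding functions returning int, long and long long.  */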
DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)

/* Binary operations and their associated IFN_COND_* function.  */
(define_operator_list UNCOND_BINARY
  plus minus
  mult trunc_div trunc_mod rdiv
  min max
  bit_and bit_ior bit_xor
  lshift rshift)
(define_operator_list COND_BINARY
  IFN_COND_ADD IFN_COND_SUB
  IFN_COND_MUL IFN_COND_DIV IFN_COND_MOD IFN_COND_RDIV
  IFN_COND_MIN IFN_COND_MAX
  IFN_COND_AND IFN_COND_IOR IFN_COND_XOR
  IFN_COND_SHL IFN_COND_SHR)

/* Same for ternary operations.  */
(define_operator_list UNCOND_TERNARY
  IFN_FMA IFN_FMS IFN_FNMA IFN_FNMS)
(define_operator_list COND_TERNARY
  IFN_COND_FMA IFN_COND_FMS IFN_COND_FNMA IFN_COND_FNMS)

/* As opposed to convert?, this still creates a single pattern, so
   it is not a suitable replacement for convert? in all cases.  */
(match (nop_convert @0)
 (convert @0)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))))
(match (nop_convert @0)
 (view_convert @0)
 (if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0))
      && known_eq (TYPE_VECTOR_SUBPARTS (type),
                   TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0)))
      && tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
/* This one has to be last, or it shadows the others.  */
(match (nop_convert @0)
 @0)

/* Transform the likes of (char) ABS_EXPR <(int) x> into (char) ABSU_EXPR <x>.
   ABSU_EXPR returns the unsigned absolute value of its operand, and the
   operand of the ABSU_EXPR will have the corresponding signed type.  */
(simplify (abs (convert @0))
 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && !TYPE_UNSIGNED (TREE_TYPE (@0))
      && element_precision (type) > element_precision (TREE_TYPE (@0)))
  (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
   (convert (absu:utype @0)))))


/* Simplifications of operations with one constant operand and
   simplifications to constants or single values.  */

(for op (plus pointer_plus minus bit_ior bit_xor)
  (simplify
    (op @0 integer_zerop)
    (non_lvalue @0)))

/* 0 +p index -> (type)index */
(simplify
 (pointer_plus integer_zerop @1)
 (non_lvalue (convert @1)))

/* ptr - 0 -> (type)ptr */
(simplify
 (pointer_diff @0 integer_zerop)
 (convert @0))

/* See if ARG1 is zero and X + ARG1 reduces to X.
   Likewise if the operands are reversed.  */
(simplify
 (plus:c @0 real_zerop@1)
 (if (fold_real_zero_addition_p (type, @1, 0))
  (non_lvalue @0)))

/* See if ARG1 is zero and X - ARG1 reduces to X.  */
(simplify
 (minus @0 real_zerop@1)
 (if (fold_real_zero_addition_p (type, @1, 1))
  (non_lvalue @0)))

/* Even if the fold_real_zero_addition_p can't simplify X + 0.0
   into X, we can optimize (X + 0.0) + 0.0 or (X + 0.0) - 0.0
   or (X - 0.0) + 0.0 into X + 0.0 and (X - 0.0) - 0.0 into X - 0.0
   if not -frounding-math.  For sNaNs the first operation would raise
   exceptions but turn the result into a qNaN, so the second operation
   would not raise it.  */
(for inner_op (plus minus)
 (for outer_op (plus minus)
  (simplify
   (outer_op (inner_op@3 @0 REAL_CST@1) REAL_CST@2)
   (if (real_zerop (@1)
        && real_zerop (@2)
        && !HONOR_SIGN_DEPENDENT_ROUNDING (type))
    (with { bool inner_plus = ((inner_op == PLUS_EXPR)
                               ^ REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)));
            bool outer_plus
              = ((outer_op == PLUS_EXPR)
                 ^ REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@2))); }
     (if (outer_plus && !inner_plus)
      (outer_op @0 @2)
      @3))))))

/* Simplify x - x.
   This is unsafe for certain floats even in non-IEEE formats.
   In IEEE, it is unsafe because it gives the wrong result for NaNs.
   Also note that operand_equal_p is always false if an operand
   is volatile.  */
(simplify
 (minus @0 @0)
 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
  { build_zero_cst (type); }))
(simplify
 (pointer_diff @@0 @0)
 { build_zero_cst (type); })

(simplify
 (mult @0 integer_zerop@1)
 @1)

/* Maybe fold x * 0 to 0.  The expressions aren't the same
   when x is NaN, since x * 0 is also NaN.  Nor are they the
   same in modes with signed zeros, since multiplying a
   negative value by 0 gives -0, not +0.  */
(simplify
 (mult @0 real_zerop@1)
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  @1))

/* In IEEE floating point, x*1 is not equivalent to x for snans.
   Likewise for complex arithmetic with signed zeros.  */
(simplify
 (mult @0 real_onep)
 (if (!HONOR_SNANS (type)
      && (!HONOR_SIGNED_ZEROS (type)
          || !COMPLEX_FLOAT_TYPE_P (type)))
  (non_lvalue @0)))

/* Transform x * -1.0 into -x.  */
(simplify
 (mult @0 real_minus_onep)
 (if (!HONOR_SNANS (type)
      && (!HONOR_SIGNED_ZEROS (type)
          || !COMPLEX_FLOAT_TYPE_P (type)))
  (negate @0)))

/* Transform { 0 or 1 } * { 0 or 1 } into { 0 or 1 } & { 0 or 1 }.  */
(simplify
 (mult SSA_NAME@1 SSA_NAME@2)
 (if (INTEGRAL_TYPE_P (type)
      && get_nonzero_bits (@1) == 1
      && get_nonzero_bits (@2) == 1)
  (bit_and @1 @2)))

/* Transform x * { 0 or 1, 0 or 1, ... } into x & { 0 or -1, 0 or -1, ...},
   unless the target has native support for the former but not the latter.  */
(simplify
 (mult @0 VECTOR_CST@1)
 (if (initializer_each_zero_or_onep (@1)
      && !HONOR_SNANS (type)
      && !HONOR_SIGNED_ZEROS (type))
  (with { tree itype = FLOAT_TYPE_P (type) ? unsigned_type_for (type) : type; }
   (if (itype
        && (!VECTOR_MODE_P (TYPE_MODE (type))
            || (VECTOR_MODE_P (TYPE_MODE (itype))
                && optab_handler (and_optab,
                                  TYPE_MODE (itype)) != CODE_FOR_nothing)))
    (view_convert (bit_and:itype (view_convert @0)
                                 (ne @1 { build_zero_cst (type); })))))))

(for cmp (gt ge lt le)
     outp (convert convert negate negate)
     outn (negate negate convert convert)
 /* Transform (X > 0.0 ? 1.0 : -1.0) into copysign(1, X).  */
 /* Transform (X >= 0.0 ? 1.0 : -1.0) into copysign(1, X).  */
 /* Transform (X < 0.0 ? 1.0 : -1.0) into copysign(1,-X).  */
 /* Transform (X <= 0.0 ? 1.0 : -1.0) into copysign(1,-X).  */
 (simplify
  (cond (cmp @0 real_zerop) real_onep@1 real_minus_onep)
  (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
       && types_match (type, TREE_TYPE (@0)))
   (switch
    (if (types_match (type, float_type_node))
     (BUILT_IN_COPYSIGNF @1 (outp @0)))
    (if (types_match (type, double_type_node))
     (BUILT_IN_COPYSIGN @1 (outp @0)))
    (if (types_match (type, long_double_type_node))
     (BUILT_IN_COPYSIGNL @1 (outp @0))))))
 /* Transform (X > 0.0 ? -1.0 : 1.0) into copysign(1,-X).  */
 /* Transform (X >= 0.0 ? -1.0 : 1.0) into copysign(1,-X).  */
 /* Transform (X < 0.0 ? -1.0 : 1.0) into copysign(1,X).  */
 /* Transform (X <= 0.0 ? -1.0 : 1.0) into copysign(1,X).  */
 (simplify
  (cond (cmp @0 real_zerop) real_minus_onep real_onep@1)
  (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
       && types_match (type, TREE_TYPE (@0)))
   (switch
    (if (types_match (type, float_type_node))
     (BUILT_IN_COPYSIGNF @1 (outn @0)))
    (if (types_match (type, double_type_node))
     (BUILT_IN_COPYSIGN @1 (outn @0)))
    (if (types_match (type, long_double_type_node))
     (BUILT_IN_COPYSIGNL @1 (outn @0)))))))

/* Transform X * copysign (1.0, X) into abs(X).  */
(simplify
 (mult:c @0 (COPYSIGN_ALL real_onep @0))
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  (abs @0)))

/* Transform X * copysign (1.0, -X) into -abs(X).  */
(simplify
 (mult:c @0 (COPYSIGN_ALL real_onep (negate @0)))
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  (negate (abs @0))))

/* Transform copysign (CST, X) into copysign (ABS(CST), X).  */
(simplify
 (COPYSIGN_ALL REAL_CST@0 @1)
 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@0)))
  (COPYSIGN_ALL (negate @0) @1)))

/* X * 1, X / 1 -> X.  */
(for op (mult trunc_div ceil_div floor_div round_div exact_div)
  (simplify
    (op @0 integer_onep)
    (non_lvalue @0)))

/* (A / (1 << B)) -> (A >> B).
   Only for unsigned A.  For signed A, this would not preserve rounding
   toward zero.
   For example: (-1 / ( 1 << B)) != -1 >> B.
   Also handle widening conversions, like:
   (A / (unsigned long long) (1U << B)) -> (A >> B)
   or
   (A / (unsigned long long) (1 << B)) -> (A >> B).
   If the left shift is signed, it can be done only if the upper bits
   of A starting from shift's type sign bit are zero, as
   (unsigned long long) (1 << 31) is -2147483648ULL, not 2147483648ULL,
   so it is valid only if A >> 31 is zero.  */
(simplify
 (trunc_div @0 (convert? (lshift integer_onep@1 @2)))
 (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
      && (!VECTOR_TYPE_P (type)
          || target_supports_op_p (type, RSHIFT_EXPR, optab_vector)
          || target_supports_op_p (type, RSHIFT_EXPR, optab_scalar))
      && (useless_type_conversion_p (type, TREE_TYPE (@1))
          || (element_precision (type) >= element_precision (TREE_TYPE (@1))
              && (TYPE_UNSIGNED (TREE_TYPE (@1))
                  || (element_precision (type)
                      == element_precision (TREE_TYPE (@1)))
                  || (INTEGRAL_TYPE_P (type)
                      && (tree_nonzero_bits (@0)
                          & wi::mask (element_precision (TREE_TYPE (@1)) - 1,
                                      true,
                                      element_precision (type))) == 0)))))
  (rshift @0 @2)))

/* Preserve explicit divisions by 0: the C++ front-end wants to detect
   undefined behavior in constexpr evaluation, and assuming that the division
   traps enables better optimizations than these anyway.  */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 /* 0 / X is always zero.  */
 (simplify
  (div integer_zerop@0 @1)
  /* But not for 0 / 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@1))
   @0))
 /* X / -1 is -X.  */
 (simplify
  (div @0 integer_minus_onep@1)
  (if (!TYPE_UNSIGNED (type))
   (negate @0)))
 /* X / X is one.  */
 (simplify
  (div @0 @0)
  /* But not for 0 / 0 so that we can get the proper warnings and errors.
     And not for _Fract types where we can't build 1.  */
  (if (!integer_zerop (@0) && !ALL_FRACT_MODE_P (TYPE_MODE (type)))
   { build_one_cst (type); }))
 /* X / abs (X) is X < 0 ? -1 : 1.  */
 (simplify
  (div:C @0 (abs @0))
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type))
   (cond (lt @0 { build_zero_cst (type); })
         { build_minus_one_cst (type); } { build_one_cst (type); })))
 /* X / -X is -1.  */
 (simplify
  (div:C @0 (negate @0))
  (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
       && TYPE_OVERFLOW_UNDEFINED (type))
   { build_minus_one_cst (type); })))

/* For unsigned integral types, FLOOR_DIV_EXPR is the same as
   TRUNC_DIV_EXPR.  Rewrite into the latter in this case.  */
(simplify
 (floor_div @0 @1)
 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
      && TYPE_UNSIGNED (type))
  (trunc_div @0 @1)))

/* Combine two successive divisions.  Note that combining ceil_div
   and floor_div is trickier and combining round_div even more so.  */
(for div (trunc_div exact_div)
 (simplify
  (div (div@3 @0 INTEGER_CST@1) INTEGER_CST@2)
  (with {
    wi::overflow_type overflow;
    wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
                            TYPE_SIGN (type), &overflow);
   }
   (if (div == EXACT_DIV_EXPR
        || optimize_successive_divisions_p (@2, @3))
    (if (!overflow)
     (div @0 { wide_int_to_tree (type, mul); })
     (if (TYPE_UNSIGNED (type)
          || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
      { build_zero_cst (type); }))))))
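/* For instance, for unsigned X, (X / 10) / 10 becomes X / 100.  When the
   two divisors multiply to something that overflows the type, X divided
   by the first constant is already smaller than the second, so the
   combined quotient is known to be zero.  */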

/* Combine successive multiplications.  Similar to above, but handling
   overflow is different.  */
(simplify
 (mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
 (with {
   wi::overflow_type overflow;
   wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
                           TYPE_SIGN (type), &overflow);
  }
  /* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
     otherwise undefined overflow implies that @0 must be zero.  */
  (if (!overflow || TYPE_OVERFLOW_WRAPS (type))
   (mult @0 { wide_int_to_tree (type, mul); }))))
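/* E.g. (X * 6) * 7 becomes X * 42.  With a wrapping 8-bit type the
   constants combine modulo 256, so (X * 16) * 16 becomes X * 0.  */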

/* Optimize A / A to 1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv @0 @0)
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  { build_one_cst (type); }))

/* Optimize -A / A to -1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv:C @0 (negate @0))
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  { build_minus_one_cst (type); }))

/* PR71078: x / abs(x) -> copysign (1.0, x) */
(simplify
 (rdiv:C (convert? @0) (convert? (abs @0)))
 (if (SCALAR_FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  (switch
   (if (types_match (type, float_type_node))
    (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
   (if (types_match (type, double_type_node))
    (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
   (if (types_match (type, long_double_type_node))
    (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))

/* In IEEE floating point, x/1 is not equivalent to x for snans.  */
(simplify
 (rdiv @0 real_onep)
 (if (!HONOR_SNANS (type))
  (non_lvalue @0)))

/* In IEEE floating point, x/-1 is not equivalent to -x for snans.  */
(simplify
 (rdiv @0 real_minus_onep)
 (if (!HONOR_SNANS (type))
  (negate @0)))

(if (flag_reciprocal_math)
 /* Convert (A/B)/C to A/(B*C).  */
 (simplify
  (rdiv (rdiv:s @0 @1) @2)
  (rdiv @0 (mult @1 @2)))

 /* Canonicalize x / (C1 * y) to (x * C2) / y.  */
 (simplify
  (rdiv @0 (mult:s @1 REAL_CST@2))
  (with
   { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @2); }
   (if (tem)
    (rdiv (mult @0 { tem; }) @1))))

 /* Convert A/(B/C) to (A/B)*C  */
 (simplify
  (rdiv @0 (rdiv:s @1 @2))
  (mult (rdiv @0 @1) @2)))

/* Simplify x / (- y) to -x / y.  */
(simplify
 (rdiv @0 (negate @1))
 (rdiv (negate @0) @1))

(if (flag_unsafe_math_optimizations)
 /* Simplify (C / x op 0.0) to x op 0.0 for C != 0, C != Inf/Nan.
    Since C / x may underflow to zero, do this only for unsafe math.  */
 (for op (lt le gt ge)
      neg_op (gt ge lt le)
  (simplify
   (op (rdiv REAL_CST@0 @1) real_zerop@2)
   (if (!HONOR_SIGNED_ZEROS (@1) && !HONOR_INFINITIES (@1))
    (switch
     (if (real_less (&dconst0, TREE_REAL_CST_PTR (@0)))
      (op @1 @2))
     /* For C < 0, use the inverted operator.  */
     (if (real_less (TREE_REAL_CST_PTR (@0), &dconst0))
      (neg_op @1 @2)))))))

/* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
  (if (integer_pow2p (@2)
       && tree_int_cst_sgn (@2) > 0
       && tree_nop_conversion_p (type, TREE_TYPE (@0))
       && wi::to_wide (@2) + wi::to_wide (@1) == 0)
   (rshift (convert @0)
           { build_int_cst (integer_type_node,
                            wi::exact_log2 (wi::to_wide (@2))); }))))
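/* For example, (X & -16) / 16 becomes X >> 4: the AND clears exactly the
   low four bits, which the shift discards anyway.  */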

/* If ARG1 is a constant, we can convert this to a multiply by the
   reciprocal.  This does not have the same rounding properties,
   so only do this if -freciprocal-math.  We can actually
   always safely do it if ARG1 is a power of two, but it's hard to
   tell if it is or not in a portable manner.  */
(for cst (REAL_CST COMPLEX_CST VECTOR_CST)
 (simplify
  (rdiv @0 cst@1)
  (if (optimize)
   (if (flag_reciprocal_math
        && !real_zerop (@1))
    (with
     { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
     (if (tem)
      (mult @0 { tem; })))
    (if (cst != COMPLEX_CST)
     (with { tree inverse = exact_inverse (type, @1); }
      (if (inverse)
       (mult @0 { inverse; }))))))))

(for mod (ceil_mod floor_mod round_mod trunc_mod)
 /* 0 % X is always zero.  */
 (simplify
  (mod integer_zerop@0 @1)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@1))
   @0))
 /* X % 1 is always zero.  */
 (simplify
  (mod @0 integer_onep)
  { build_zero_cst (type); })
 /* X % -1 is zero.  */
 (simplify
  (mod @0 integer_minus_onep@1)
  (if (!TYPE_UNSIGNED (type))
   { build_zero_cst (type); }))
 /* X % X is zero.  */
 (simplify
  (mod @0 @0)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@0))
   { build_zero_cst (type); }))
 /* (X % Y) % Y is just X % Y.  */
 (simplify
  (mod (mod@2 @0 @1) @1)
  @2)
 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2.  */
 (simplify
  (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (ANY_INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type)
       && wi::multiple_of_p (wi::to_wide (@1), wi::to_wide (@2),
                             TYPE_SIGN (type)))
   { build_zero_cst (type); }))
 /* For (X % C) == 0, if X is signed and C is power of 2, use unsigned
    modulo and comparison, since it is simpler and equivalent.  */
 (for cmp (eq ne)
  (simplify
   (cmp (mod @0 integer_pow2p@2) integer_zerop@1)
   (if (!TYPE_UNSIGNED (TREE_TYPE (@0)))
    (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
     (cmp (mod (convert:utype @0) (convert:utype @2)) (convert:utype @1)))))))
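/* E.g. for int X, X % 4 == 0 becomes (unsigned) X % 4U == 0U, which the
   power-of-two pattern below then reduces to ((unsigned) X & 3U) == 0U.  */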

/* X % -C is the same as X % C.  */
(simplify
 (trunc_mod @0 INTEGER_CST@1)
 (if (TYPE_SIGN (type) == SIGNED
      && !TREE_OVERFLOW (@1)
      && wi::neg_p (wi::to_wide (@1))
      && !TYPE_OVERFLOW_TRAPS (type)
      /* Avoid this transformation if C is INT_MIN, i.e. C == -C.  */
      && !sign_bit_p (@1, @1))
  (trunc_mod @0 (negate @1))))

/* X % -Y is the same as X % Y.  */
(simplify
 (trunc_mod @0 (convert? (negate @1)))
 (if (INTEGRAL_TYPE_P (type)
      && !TYPE_UNSIGNED (type)
      && !TYPE_OVERFLOW_TRAPS (type)
      && tree_nop_conversion_p (type, TREE_TYPE (@1))
      /* Avoid this transformation if X might be INT_MIN or
         Y might be -1, because we would then change valid
         INT_MIN % -(-1) into invalid INT_MIN % -1.  */
      && (expr_not_equal_to (@0, wi::to_wide (TYPE_MIN_VALUE (type)))
          || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
                                                   (TREE_TYPE (@1))))))
  (trunc_mod @0 (convert @1))))

/* X - (X / Y) * Y is the same as X % Y.  */
(simplify
 (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1)))
 (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
  (convert (trunc_mod @0 @1))))

/* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
   i.e. "X % C" into "X & (C - 1)", if X and C are positive.
   Also optimize A % (C << N) where C is a power of 2,
   to A & ((C << N) - 1).  */
(match (power_of_two_cand @1)
 INTEGER_CST@1)
(match (power_of_two_cand @1)
 (lshift INTEGER_CST@1 @2))
(for mod (trunc_mod floor_mod)
 (simplify
  (mod @0 (convert?@3 (power_of_two_cand@1 @2)))
  (if ((TYPE_UNSIGNED (type)
        || tree_expr_nonnegative_p (@0))
       && tree_nop_conversion_p (type, TREE_TYPE (@3))
       && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
   (bit_and @0 (convert (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))))
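/* For example, for unsigned X, X % 8 becomes X & 7, and
   X % (4 << N) becomes X & ((4 << N) - 1).  */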

/* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF.  */
(simplify
 (trunc_div (mult @0 integer_pow2p@1) @1)
 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
  (bit_and @0 { wide_int_to_tree
                (type, wi::mask (TYPE_PRECISION (type)
                                 - wi::exact_log2 (wi::to_wide (@1)),
                                 false, TYPE_PRECISION (type))); })))

/* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1.  */
(simplify
 (mult (trunc_div @0 integer_pow2p@1) @1)
 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
  (bit_and @0 (negate @1))))
/* Simplify (t * 2) / 2 -> t.  */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (div (mult:c @0 @1) @1)
  (if (ANY_INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type))
   @0)))

(for op (negate abs)
 /* Simplify cos(-x) and cos(|x|) -> cos(x).  Similarly for cosh.  */
 (for coss (COS COSH)
  (simplify
   (coss (op @0))
   (coss @0)))
 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer.  */
 (for pows (POW)
  (simplify
   (pows (op @0) REAL_CST@1)
   (with { HOST_WIDE_INT n; }
    (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
     (pows @0 @1)))))
 /* Likewise for powi.  */
 (for pows (POWI)
  (simplify
   (pows (op @0) INTEGER_CST@1)
   (if ((wi::to_wide (@1) & 1) == 0)
    (pows @0 @1))))
 /* Strip negate and abs from both operands of hypot.  */
 (for hypots (HYPOT)
  (simplify
   (hypots (op @0) @1)
   (hypots @0 @1))
  (simplify
   (hypots @0 (op @1))
   (hypots @0 @1)))
 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y).  */
 (for copysigns (COPYSIGN_ALL)
  (simplify
   (copysigns (op @0) @1)
   (copysigns @0 @1))))

/* abs(x)*abs(x) -> x*x.  Should be valid for all types.  */
(simplify
 (mult (abs@1 @0) @1)
 (mult @0 @0))

/* Convert absu(x)*absu(x) -> x*x.  */
(simplify
 (mult (absu@1 @0) @1)
 (mult (convert@2 @0) @2))

/* cos(copysign(x, y)) -> cos(x).  Similarly for cosh.  */
(for coss (COS COSH)
     copysigns (COPYSIGN)
 (simplify
  (coss (copysigns @0 @1))
  (coss @0)))

/* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer.  */
(for pows (POW)
     copysigns (COPYSIGN)
 (simplify
  (pows (copysigns @0 @2) REAL_CST@1)
  (with { HOST_WIDE_INT n; }
   (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
    (pows @0 @1)))))
/* Likewise for powi.  */
(for pows (POWI)
     copysigns (COPYSIGN)
 (simplify
  (pows (copysigns @0 @2) INTEGER_CST@1)
  (if ((wi::to_wide (@1) & 1) == 0)
   (pows @0 @1))))

(for hypots (HYPOT)
     copysigns (COPYSIGN)
 /* hypot(copysign(x, y), z) -> hypot(x, z).  */
 (simplify
  (hypots (copysigns @0 @1) @2)
  (hypots @0 @2))
 /* hypot(x, copysign(y, z)) -> hypot(x, y).  */
 (simplify
  (hypots @0 (copysigns @1 @2))
  (hypots @0 @1)))

/* copysign(x, CST) -> [-]abs (x).  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (copysigns @0 REAL_CST@1)
  (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
   (negate (abs @0))
   (abs @0))))

/* copysign(copysign(x, y), z) -> copysign(x, z).  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (copysigns (copysigns @0 @1) @2)
  (copysigns @0 @2)))

/* copysign(x,y)*copysign(x,y) -> x*x.  */
(for copysigns (COPYSIGN_ALL)
 (simplify
  (mult (copysigns@2 @0 @1) @2)
  (mult @0 @0)))

/* ccos(-x) -> ccos(x).  Similarly for ccosh.  */
(for ccoss (CCOS CCOSH)
 (simplify
  (ccoss (negate @0))
  (ccoss @0)))

/* cabs(-x) and cabs(conj(x)) -> cabs(x).  */
(for ops (conj negate)
 (for cabss (CABS)
  (simplify
   (cabss (ops @0))
   (cabss @0))))

/* Fold (a * (1 << b)) into (a << b)  */
(simplify
 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
 (if (! FLOAT_TYPE_P (type)
      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
  (lshift @0 @2)))

/* Fold (1 << (C - x)) where C = precision(type) - 1
   into ((1 << C) >> x).  */
(simplify
 (lshift integer_onep@0 (minus@1 INTEGER_CST@2 @3))
 (if (INTEGRAL_TYPE_P (type)
      && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (type) - 1)
      && single_use (@1))
  (if (TYPE_UNSIGNED (type))
   (rshift (lshift @0 @2) @3)
   (with
    { tree utype = unsigned_type_for (type); }
    (convert (rshift (lshift (convert:utype @0) @2) @3))))))
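/* E.g. in a 32-bit unsigned type, 1 << (31 - x) becomes 0x80000000 >> x,
   replacing the subtraction with a constant shift operand.  */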

/* Fold (C1/X)*C2 into (C1*C2)/X.  */
(simplify
 (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
 (if (flag_associative_math
      && single_use (@3))
  (with
   { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
   (if (tem)
    (rdiv { tem; } @1)))))

/* Simplify ~X & X as zero.  */
(simplify
 (bit_and:c (convert? @0) (convert? (bit_not @0)))
 { build_zero_cst (type); })

/* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b);  */
(simplify
 (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep))
 (if (TYPE_UNSIGNED (type))
  (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1)))))

(for bitop (bit_and bit_ior)
     cmp (eq ne)
 /* PR35691: Transform
    (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0.
    (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0.  */
 (simplify
  (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
   (cmp (bit_ior @0 (convert @1)) @2)))
 /* Transform:
    (x == -1 & y == -1) -> (x & typeof(x)(y)) == -1.
    (x != -1 | y != -1) -> (x & typeof(x)(y)) != -1.  */
 (simplify
  (bitop (cmp @0 integer_all_onesp@2) (cmp @1 integer_all_onesp))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
   (cmp (bit_and @0 (convert @1)) @2))))

/* Fold (A & ~B) - (A & B) into (A ^ B) - B.  */
(simplify
 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
 (minus (bit_xor @0 @1) @1))
(simplify
 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
 (if (~wi::to_wide (@2) == wi::to_wide (@1))
  (minus (bit_xor @0 @1) @1)))

/* Fold (A & B) - (A & ~B) into B - (A ^ B).  */
(simplify
 (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1)))
 (minus @1 (bit_xor @0 @1)))

/* Simplify (X & ~Y) |^+ (~X & Y) -> X ^ Y.  */
(for op (bit_ior bit_xor plus)
 (simplify
  (op (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1))
  (bit_xor @0 @1))
 (simplify
  (op:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
  (if (~wi::to_wide (@2) == wi::to_wide (@1))
   (bit_xor @0 @1))))

/* PR53979: Transform ((a ^ b) | a) -> (a | b) */
(simplify
 (bit_ior:c (bit_xor:c @0 @1) @0)
 (bit_ior @0 @1))

/* (a & ~b) | (a ^ b)  -->  a ^ b  */
(simplify
 (bit_ior:c (bit_and:c @0 (bit_not @1)) (bit_xor:c@2 @0 @1))
 @2)

/* (a & ~b) ^ ~a  -->  ~(a & b)  */
(simplify
 (bit_xor:c (bit_and:cs @0 (bit_not @1)) (bit_not @0))
 (bit_not (bit_and @0 @1)))

/* (~a & b) ^ a  -->  (a | b)  */
(simplify
 (bit_xor:c (bit_and:cs (bit_not @0) @1) @0)
 (bit_ior @0 @1))

/* (a | b) & ~(a ^ b)  -->  a & b  */
(simplify
 (bit_and:c (bit_ior @0 @1) (bit_not (bit_xor:c @0 @1)))
 (bit_and @0 @1))

/* a | ~(a ^ b)  -->  a | ~b  */
(simplify
 (bit_ior:c @0 (bit_not:s (bit_xor:c @0 @1)))
 (bit_ior @0 (bit_not @1)))

/* (a | b) | (a &^ b)  -->  a | b  */
(for op (bit_and bit_xor)
 (simplify
  (bit_ior:c (bit_ior@2 @0 @1) (op:c @0 @1))
  @2))

/* (a & b) | ~(a ^ b)  -->  ~(a ^ b)  */
(simplify
 (bit_ior:c (bit_and:c @0 @1) (bit_not@2 (bit_xor @0 @1)))
 @2)

/* ~(~a & b)  -->  a | ~b  */
(simplify
 (bit_not (bit_and:cs (bit_not @0) @1))
 (bit_ior @0 (bit_not @1)))

/* ~(~a | b) --> a & ~b */
(simplify
 (bit_not (bit_ior:cs (bit_not @0) @1))
 (bit_and @0 (bit_not @1)))

/* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0.  */
#if GIMPLE
(simplify
 (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
  (bit_xor @0 @1)))
#endif

/* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
   ((A & N) + B) & M -> (A + B) & M
   Similarly if (N & M) == 0,
   ((A | N) + B) & M -> (A + B) & M
   and for - instead of + (or unary - instead of +)
   and/or ^ instead of |.
   If B is constant and (B & M) == 0, fold into A & M.  */
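/* For instance, with M == 0xff and N == 0xffff,
   ((A & 0xffff) + B) & 0xff folds to (A + B) & 0xff: masking with 0xffff
   cannot change the low eight bits of the sum.  */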
(for op (plus minus)
 (for bitop (bit_and bit_ior bit_xor)
  (simplify
   (bit_and (op:s (bitop:s@0 @3 INTEGER_CST@4) @1) INTEGER_CST@2)
   (with
    { tree pmop[2];
      tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, bitop,
                                      @3, @4, @1, ERROR_MARK, NULL_TREE,
                                      NULL_TREE, pmop); }
    (if (utype)
     (convert (bit_and (op (convert:utype { pmop[0]; })
                           (convert:utype { pmop[1]; }))
                       (convert:utype @2))))))
  (simplify
   (bit_and (op:s @0 (bitop:s@1 @3 INTEGER_CST@4)) INTEGER_CST@2)
   (with
    { tree pmop[2];
      tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
                                      NULL_TREE, NULL_TREE, @1, bitop, @3,
                                      @4, pmop); }
    (if (utype)
     (convert (bit_and (op (convert:utype { pmop[0]; })
                           (convert:utype { pmop[1]; }))
                       (convert:utype @2)))))))
 (simplify
  (bit_and (op:s @0 @1) INTEGER_CST@2)
  (with
   { tree pmop[2];
     tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
                                     NULL_TREE, NULL_TREE, @1, ERROR_MARK,
                                     NULL_TREE, NULL_TREE, pmop); }
   (if (utype)
    (convert (bit_and (op (convert:utype { pmop[0]; })
                          (convert:utype { pmop[1]; }))
                      (convert:utype @2)))))))
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bit_and (negate:s (bitop:s@0 @2 INTEGER_CST@3)) INTEGER_CST@1)
  (with
   { tree pmop[2];
     tree utype = fold_bit_and_mask (TREE_TYPE (@0), @1, NEGATE_EXPR, @0,
                                     bitop, @2, @3, NULL_TREE, ERROR_MARK,
                                     NULL_TREE, NULL_TREE, pmop); }
   (if (utype)
    (convert (bit_and (negate (convert:utype { pmop[0]; }))
                      (convert:utype @1)))))))

/* X % Y is smaller than Y.  */
(for cmp (lt ge)
 (simplify
  (cmp (trunc_mod @0 @1) @1)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == LT_EXPR, type); })))
(for cmp (gt le)
 (simplify
  (cmp @1 (trunc_mod @0 @1))
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == GT_EXPR, type); })))

/* x | ~0 -> ~0  */
(simplify
 (bit_ior @0 integer_all_onesp@1)
 @1)

/* x | 0 -> x  */
(simplify
 (bit_ior @0 integer_zerop)
 @0)

/* x & 0 -> 0  */
(simplify
 (bit_and @0 integer_zerop@1)
 @1)

/* ~x | x -> -1 */
/* ~x ^ x -> -1 */
/* ~x + x -> -1 */
(for op (bit_ior bit_xor plus)
 (simplify
  (op:c (convert? @0) (convert? (bit_not @0)))
  (convert { build_all_ones_cst (TREE_TYPE (@0)); })))

/* x ^ x -> 0 */
(simplify
 (bit_xor @0 @0)
 { build_zero_cst (type); })

/* Canonicalize X ^ ~0 to ~X.  */
(simplify
 (bit_xor @0 integer_all_onesp@1)
 (bit_not @0))

/* x & ~0 -> x  */
(simplify
 (bit_and @0 integer_all_onesp)
 (non_lvalue @0))

/* x & x -> x,  x | x -> x  */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop @0 @0)
  (non_lvalue @0)))

/* x & C -> x if we know that x & ~C == 0.  */
#if GIMPLE
(simplify
 (bit_and SSA_NAME@0 INTEGER_CST@1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
  @0))
#endif

/* x + (x & 1) -> (x + 1) & ~1 */
(simplify
 (plus:c @0 (bit_and:s @0 integer_onep@1))
 (bit_and (plus @0 @1) (bit_not @1)))

/* x & ~(x & y) -> x & ~y */
/* x | ~(x | y) -> x | ~y  */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop:c @0 (bit_not (bitop:cs @0 @1)))
  (bitop @0 (bit_not @1))))

/* (~x & y) | ~(x | y) -> ~x */
(simplify
 (bit_ior:c (bit_and:c (bit_not@2 @0) @1) (bit_not (bit_ior:c @0 @1)))
 @2)

/* (x | y) ^ (x | ~y) -> ~x */
(simplify
 (bit_xor:c (bit_ior:c @0 @1) (bit_ior:c @0 (bit_not @1)))
 (bit_not @0))

/* (x & y) | ~(x | y) -> ~(x ^ y) */
(simplify
 (bit_ior:c (bit_and:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
 (bit_not (bit_xor @0 @1)))

/* (~x | y) ^ (x ^ y) -> x | ~y */
(simplify
 (bit_xor:c (bit_ior:cs (bit_not @0) @1) (bit_xor:s @0 @1))
 (bit_ior @0 (bit_not @1)))

/* (x ^ y) | ~(x | y) -> ~(x & y) */
(simplify
 (bit_ior:c (bit_xor:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
 (bit_not (bit_and @0 @1)))

/* (x | y) & ~x -> y & ~x */
/* (x & y) | ~x -> y | ~x */
(for bitop (bit_and bit_ior)
     rbitop (bit_ior bit_and)
 (simplify
  (bitop:c (rbitop:c @0 @1) (bit_not@2 @0))
  (bitop @1 @2)))

/* (x & y) ^ (x | y) -> x ^ y */
(simplify
 (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1))
 (bit_xor @0 @1))

/* (x ^ y) ^ (x | y) -> x & y */
(simplify
 (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1))
 (bit_and @0 @1))

/* (x & y) + (x ^ y) -> x | y */
/* (x & y) | (x ^ y) -> x | y */
/* (x & y) ^ (x ^ y) -> x | y */
(for op (plus bit_ior bit_xor)
 (simplify
  (op:c (bit_and @0 @1) (bit_xor @0 @1))
  (bit_ior @0 @1)))

/* (x & y) + (x | y) -> x + y */
(simplify
 (plus:c (bit_and @0 @1) (bit_ior @0 @1))
 (plus @0 @1))

/* (x + y) - (x | y) -> x & y */
(simplify
 (minus (plus @0 @1) (bit_ior @0 @1))
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type))
  (bit_and @0 @1)))

/* (x + y) - (x & y) -> x | y */
(simplify
 (minus (plus @0 @1) (bit_and @0 @1))
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type))
  (bit_ior @0 @1)))

/* (x | y) - (x ^ y) -> x & y */
(simplify
 (minus (bit_ior @0 @1) (bit_xor @0 @1))
 (bit_and @0 @1))

/* (x | y) - (x & y) -> x ^ y */
(simplify
 (minus (bit_ior @0 @1) (bit_and @0 @1))
 (bit_xor @0 @1))

/* (x | y) & ~(x & y) -> x ^ y */
(simplify
 (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1)))
 (bit_xor @0 @1))

/* (x | y) & (~x ^ y) -> x & y */
(simplify
 (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0)))
 (bit_and @0 @1))

/* (~x | y) & (x | ~y) -> ~(x ^ y) */
(simplify
 (bit_and (bit_ior:cs (bit_not @0) @1) (bit_ior:cs @0 (bit_not @1)))
 (bit_not (bit_xor @0 @1)))

/* (~x | y) ^ (x | ~y) -> x ^ y */
(simplify
 (bit_xor (bit_ior:c (bit_not @0) @1) (bit_ior:c @0 (bit_not @1)))
 (bit_xor @0 @1))

/* ~x & ~y -> ~(x | y)
   ~x | ~y -> ~(x & y) */
(for op (bit_and bit_ior)
     rop (bit_ior bit_and)
 (simplify
  (op (convert1? (bit_not @0)) (convert2? (bit_not @1)))
  (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
       && element_precision (type) <= element_precision (TREE_TYPE (@1)))
   (bit_not (rop (convert @0) (convert @1))))))

/* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing
   with a constant, and the two constants have no bits in common,
   we should treat this as a BIT_IOR_EXPR since this may produce more
   simplifications.  */
(for op (bit_xor plus)
 (simplify
  (op (convert1? (bit_and@4 @0 INTEGER_CST@1))
      (convert2? (bit_and@5 @2 INTEGER_CST@3)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
       && tree_nop_conversion_p (type, TREE_TYPE (@2))
       && (wi::to_wide (@1) & wi::to_wide (@3)) == 0)
   (bit_ior (convert @4) (convert @5)))))
/* (X | Y) ^ X -> Y & ~X.  */
(simplify
 (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_and @1 (bit_not @0)))))

/* Convert ~X ^ ~Y to X ^ Y.  */
(simplify
 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      && element_precision (type) <= element_precision (TREE_TYPE (@1)))
  (bit_xor (convert @0) (convert @1))))

/* Convert ~X ^ C to X ^ ~C.  */
(simplify
 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (bit_xor (convert @0) (bit_not @1))))

/* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y.  */
(for opo (bit_and bit_xor)
     opi (bit_xor bit_and)
 (simplify
  (opo:c (opi:cs @0 @1) @1)
  (bit_and (bit_not @0) @1)))

/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
   operands are another bit-wise operation with a common input.  If so,
   distribute the bit operations to save an operation and possibly two if
   constants are involved.  For example, convert
     (A | B) & (A | C) into A | (B & C)
   Further simplification will occur if B and C are constants.  */
(for op (bit_and bit_ior bit_xor)
     rop (bit_ior bit_and bit_and)
 (simplify
  (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && tree_nop_conversion_p (type, TREE_TYPE (@2)))
   (rop (convert @0) (op (convert @1) (convert @2))))))

/* Some simple reassociation for bit operations, also handled in reassoc.  */
/* (X & Y) & Y -> X & Y
   (X | Y) | Y -> X | Y  */
(for op (bit_and bit_ior)
 (simplify
  (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1))
  @2))
/* (X ^ Y) ^ Y -> X  */
(simplify
 (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1))
 (convert @0))
/* (X & Y) & (X & Z) -> (X & Y) & Z
   (X | Y) | (X | Z) -> (X | Y) | Z  */
(for op (bit_and bit_ior)
 (simplify
  (op (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && tree_nop_conversion_p (type, TREE_TYPE (@2)))
   (if (single_use (@5) && single_use (@6))
    (op @3 (convert @2))
    (if (single_use (@3) && single_use (@4))
     (op (convert @1) @5))))))
/* (X ^ Y) ^ (X ^ Z) -> Y ^ Z  */
(simplify
 (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
      && tree_nop_conversion_p (type, TREE_TYPE (@2)))
  (bit_xor (convert @1) (convert @2))))
/* Convert abs (abs (X)) into abs (X).
   Likewise absu (absu (X)) into absu (X).  */
(simplify
 (abs (abs@1 @0))
 @1)

(simplify
 (absu (convert@2 (absu@1 @0)))
 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@1)))
  @1))

/* Convert abs[u] (-X) -> abs[u] (X).  */
(simplify
 (abs (negate @0))
 (abs @0))

(simplify
 (absu (negate @0))
 (absu @0))

/* Convert abs[u] (X)  where X is nonnegative -> (X).  */
(simplify
 (abs tree_expr_nonnegative_p@0)
 @0)

(simplify
 (absu tree_expr_nonnegative_p@0)
 (convert @0))

/* A few cases of fold-const.c negate_expr_p predicate.  */
(match negate_expr_p
 INTEGER_CST
 (if ((INTEGRAL_TYPE_P (type)
       && TYPE_UNSIGNED (type))
      || (!TYPE_OVERFLOW_SANITIZED (type)
          && may_negate_without_overflow_p (t)))))
(match negate_expr_p
 FIXED_CST)
(match negate_expr_p
 (negate @0)
 (if (!TYPE_OVERFLOW_SANITIZED (type))))
(match negate_expr_p
 REAL_CST
 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)))))
/* VECTOR_CST handling of non-wrapping types would recurse in unsupported
   ways.  */
(match negate_expr_p
 VECTOR_CST
 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))
(match negate_expr_p
 (minus @0 @1)
 (if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type))
      || (FLOAT_TYPE_P (type)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
          && !HONOR_SIGNED_ZEROS (type)))))

/* (-A) * (-B) -> A * B  */
(simplify
 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
  (mult (convert @0) (convert (negate @1)))))

/* -(A + B) -> (-B) - A.  */
(simplify
 (negate (plus:c @0 negate_expr_p@1))
 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
      && !HONOR_SIGNED_ZEROS (element_mode (type)))
  (minus (negate @1) @0)))

/* -(A - B) -> B - A.  */
(simplify
 (negate (minus @0 @1))
 (if ((ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_SANITIZED (type))
      || (FLOAT_TYPE_P (type)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
          && !HONOR_SIGNED_ZEROS (type)))
  (minus @1 @0)))
(simplify
 (negate (pointer_diff @0 @1))
 (if (TYPE_OVERFLOW_UNDEFINED (type))
  (pointer_diff @1 @0)))

/* A - B -> A + (-B) if B is easily negatable.  */
(simplify
 (minus @0 negate_expr_p@1)
 (if (!FIXED_POINT_TYPE_P (type))
  (plus @0 (negate @1))))

/* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
   when profitable.
   For bitwise binary operations apply operand conversions to the
   binary operation result instead of to the operands.  This allows
   to combine successive conversions and bitwise binary operations.
   We combine the above two cases by using a conditional convert.  */
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bitop (convert @0) (convert? @1))
  (if (((TREE_CODE (@1) == INTEGER_CST
         && INTEGRAL_TYPE_P (TREE_TYPE (@0))
         && int_fits_type_p (@1, TREE_TYPE (@0)))
        || types_match (@0, @1))
       /* ???  This transform conflicts with fold-const.c doing
          Convert (T)(x & c) into (T)x & (T)c, if c is an integer
          constants (if x has signed type, the sign bit cannot be set
          in c).  This folds extension into the BIT_AND_EXPR.
          Restrict it to GIMPLE to avoid endless recursions.  */
       && (bitop != BIT_AND_EXPR || GIMPLE)
       && (/* That's a good idea if the conversion widens the operand, thus
              after hoisting the conversion the operation will be narrower.  */
           TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
           /* It's also a good idea if the conversion is to a non-integer
              mode.  */
           || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
           /* Or if the precision of TO is not the same as the precision
              of its mode.  */
           || !type_has_mode_precision_p (type)))
   (convert (bitop @0 (convert @1))))))

(for bitop (bit_and bit_ior)
     rbitop (bit_ior bit_and)
 /* (x | y) & x -> x */
 /* (x & y) | x -> x */
 (simplify
  (bitop:c (rbitop:c @0 @1) @0)
  @0)
 /* (~x | y) & x -> x & y */
 /* (~x & y) | x -> x | y */
 (simplify
  (bitop:c (rbitop:c (bit_not @0) @1) @0)
  (bitop @0 @1)))

/* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
(simplify
 (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
 (bit_ior (bit_and @0 @2) (bit_and @1 @2)))

/* Combine successive equal operations with constants.  */
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
  (if (!CONSTANT_CLASS_P (@0))
   /* This is the canonical form regardless of whether (bitop @1 @2) can be
      folded to a constant.  */
   (bitop @0 (bitop @1 @2))
   /* In this case we have three constants and (bitop @0 @1) doesn't fold
      to a constant.  This can happen if @0 or @1 is a POLY_INT_CST and if
      the values involved are such that the operation can't be decided at
      compile time.  Try folding one of @0 or @1 with @2 to see whether
      that combination can be decided at compile time.

      Keep the existing form if both folds fail, to avoid endless
      oscillation.  */
   (with { tree cst1 = const_binop (bitop, type, @0, @2); }
    (if (cst1)
     (bitop @1 { cst1; })
     (with { tree cst2 = const_binop (bitop, type, @1, @2); }
      (if (cst2)
       (bitop @0 { cst2; }))))))))
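/* E.g. (X & 0xf0) & 0x3c becomes X & 0x30, and (X | 1) | 4 becomes X | 5.  */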

/* Try simple folding for X op !X, and X op X with the help
   of the truth_valued_p and logical_inverted_value predicates.  */
(match truth_valued_p
 @0
 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
(for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
 (match truth_valued_p
  (op @0 @1)))
(match truth_valued_p
 (truth_not @0))

(match (logical_inverted_value @0)
 (truth_not @0))
(match (logical_inverted_value @0)
 (bit_not truth_valued_p@0))
(match (logical_inverted_value @0)
 (eq @0 integer_zerop))
(match (logical_inverted_value @0)
 (ne truth_valued_p@0 integer_truep))
(match (logical_inverted_value @0)
 (bit_xor truth_valued_p@0 integer_truep))

/* X & !X -> 0.  */
(simplify
 (bit_and:c @0 (logical_inverted_value @0))
 { build_zero_cst (type); })
/* X | !X and X ^ !X -> 1, if X is truth-valued.  */
(for op (bit_ior bit_xor)
 (simplify
  (op:c truth_valued_p@0 (logical_inverted_value @0))
  { constant_boolean_node (true, type); }))
/* X ==/!= !X is false/true.  */
(for op (eq ne)
 (simplify
  (op:c truth_valued_p@0 (logical_inverted_value @0))
  { constant_boolean_node (op == NE_EXPR ? true : false, type); }))

/* ~~x -> x */
(simplify
 (bit_not (bit_not @0))
 @0)

/* Convert ~ (-A) to A - 1.  */
(simplify
 (bit_not (convert? (negate @0)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      || !TYPE_UNSIGNED (TREE_TYPE (@0)))
  (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); }))))

/* Convert - (~A) to A + 1.  */
(simplify
 (negate (nop_convert (bit_not @0)))
 (plus (view_convert @0) { build_each_one_cst (type); }))

/* Convert ~ (A - 1) or ~ (A + -1) to -A.  */
(simplify
 (bit_not (convert? (minus @0 integer_each_onep)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      || !TYPE_UNSIGNED (TREE_TYPE (@0)))
  (convert (negate @0))))
(simplify
 (bit_not (convert? (plus @0 integer_all_onesp)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      || !TYPE_UNSIGNED (TREE_TYPE (@0)))
  (convert (negate @0))))

/* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify.  */
(simplify
 (bit_not (convert? (bit_xor @0 INTEGER_CST@1)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_xor @0 (bit_not @1)))))
(simplify
 (bit_not (convert? (bit_xor:c (bit_not @0) @1)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_xor @0 @1))))

/* Otherwise prefer ~(X ^ Y) to ~X ^ Y as more canonical.  */
(simplify
 (bit_xor:c (nop_convert:s (bit_not:s @0)) @1)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (bit_not (bit_xor (view_convert @0) @1))))

/* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
(simplify
 (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2))
 (bit_xor (bit_and (bit_xor @0 @1) @2) @0))

/* Fold A - (A & B) into ~B & A.  */
(simplify
 (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
  (convert (bit_and (bit_not @1) @0))))

/* (m1 CMP m2) * d -> (m1 CMP m2) ? d : 0  */
(for cmp (gt lt ge le)
 (simplify
  (mult (convert (cmp @0 @1)) @2)
  (cond (cmp @0 @1) @2 { build_zero_cst (type); })))

/* For integral types with undefined overflow and C != 0 fold
   x * C EQ/NE y * C into x EQ/NE y.  */
(for cmp (eq ne)
 (simplify
  (cmp (mult:c @0 @1) (mult:c @2 @1))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
       && tree_expr_nonzero_p (@1))
   (cmp @0 @2))))

/* For integral types with wrapping overflow and C odd fold
   x * C EQ/NE y * C into x EQ/NE y.  */
(for cmp (eq ne)
 (simplify
  (cmp (mult @0 INTEGER_CST@1) (mult @2 @1))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
       && (TREE_INT_CST_LOW (@1) & 1) != 0)
   (cmp @0 @2))))

/* For integral types with undefined overflow and C != 0 fold
   x * C RELOP y * C into:

   x RELOP y for nonnegative C
   y RELOP x for negative C  */
(for cmp (lt gt le ge)
 (simplify
  (cmp (mult:c @0 @1) (mult:c @2 @1))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1))
    (cmp @0 @2)
    (if (TREE_CODE (@1) == INTEGER_CST
         && wi::neg_p (wi::to_wide (@1), TYPE_SIGN (TREE_TYPE (@1))))
     (cmp @2 @0))))))

/* (X - 1U) <= INT_MAX-1U into (int) X > 0.  */
(for cmp (le gt)
     icmp (gt le)
 (simplify
  (cmp (plus @0 integer_minus_onep@1) INTEGER_CST@2)
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TYPE_UNSIGNED (TREE_TYPE (@0))
       && TYPE_PRECISION (TREE_TYPE (@0)) > 1
       && (wi::to_wide (@2)
           == wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)), SIGNED) - 1))
   (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
    (icmp (convert:stype @0) { build_int_cst (stype, 0); })))))

/* X / 4 < Y / 4 iff X < Y when the division is known to be exact.  */
(for cmp (simple_comparison)
 (simplify
  (cmp (convert?@3 (exact_div @0 INTEGER_CST@2)) (convert? (exact_div @1 @2)))
  (if (element_precision (@3) >= element_precision (@0)
       && types_match (@0, @1))
   (if (wi::lt_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2))))
    (if (!TYPE_UNSIGNED (TREE_TYPE (@3)))
     (cmp @1 @0)
     (if (tree_expr_nonzero_p (@0) && tree_expr_nonzero_p (@1))
      (with
       {
         tree utype = unsigned_type_for (TREE_TYPE (@0));
       }
       (cmp (convert:utype @1) (convert:utype @0)))))
    (if (wi::gt_p (wi::to_wide (@2), 1, TYPE_SIGN (TREE_TYPE (@2))))
     (if (TYPE_UNSIGNED (TREE_TYPE (@0)) || !TYPE_UNSIGNED (TREE_TYPE (@3)))
      (cmp @0 @1)
      (with
       {
         tree utype = unsigned_type_for (TREE_TYPE (@0));
       }
       (cmp (convert:utype @0) (convert:utype @1)))))))))

/* X / C1 op C2 into a simple range test.  */
(for cmp (simple_comparison)
 (simplify
  (cmp (trunc_div:s @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && integer_nonzerop (@1)
       && !TREE_OVERFLOW (@1)
       && !TREE_OVERFLOW (@2))
   (with { tree lo, hi; bool neg_overflow;
           enum tree_code code = fold_div_compare (cmp, @1, @2, &lo, &hi,
                                                   &neg_overflow); }
    (switch
     (if (code == LT_EXPR || code == GE_EXPR)
      (if (TREE_OVERFLOW (lo))
       { build_int_cst (type, (code == LT_EXPR) ^ neg_overflow); }
       (if (code == LT_EXPR)
        (lt @0 { lo; })
        (ge @0 { lo; }))))
     (if (code == LE_EXPR || code == GT_EXPR)
      (if (TREE_OVERFLOW (hi))
       { build_int_cst (type, (code == LE_EXPR) ^ neg_overflow); }
       (if (code == LE_EXPR)
        (le @0 { hi; })
        (gt @0 { hi; }))))
     (if (!lo && !hi)
      { build_int_cst (type, code == NE_EXPR); })
     (if (code == EQ_EXPR && !hi)
      (ge @0 { lo; }))
     (if (code == EQ_EXPR && !lo)
      (le @0 { hi; }))
     (if (code == NE_EXPR && !hi)
      (lt @0 { lo; }))
     (if (code == NE_EXPR && !lo)
      (gt @0 { hi; }))
     (if (GENERIC)
      { build_range_check (UNKNOWN_LOCATION, type, @0, code == EQ_EXPR,
                           lo, hi); })
     (with
      {
        tree etype = range_check_type (TREE_TYPE (@0));
        if (etype)
          {
            hi = fold_convert (etype, hi);
            lo = fold_convert (etype, lo);
            hi = const_binop (MINUS_EXPR, etype, hi, lo);
          }
      }
      (if (etype && hi && !TREE_OVERFLOW (hi))
       (if (code == EQ_EXPR)
        (le (minus (convert:etype @0) { lo; }) { hi; })
        (gt (minus (convert:etype @0) { lo; }) { hi; })))))))))
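/* For instance, for unsigned X, X / 4 == 3 is equivalent to
   12 <= X && X <= 15, which the last case above emits as the single
   unsigned test X - 12 <= 3.  */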
1604
1605 /* X + Z < Y + Z is the same as X < Y when there is no overflow. */
1606 (for op (lt le ge gt)
1607 (simplify
1608 (op (plus:c @0 @2) (plus:c @1 @2))
1609 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1610 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1611 (op @0 @1))))
1612 /* For equality and subtraction, this is also true with wrapping overflow. */
1613 (for op (eq ne minus)
1614 (simplify
1615 (op (plus:c @0 @2) (plus:c @1 @2))
1616 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1617 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1618 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1619 (op @0 @1))))
1620
1621 /* X - Z < Y - Z is the same as X < Y when there is no overflow. */
1622 (for op (lt le ge gt)
1623 (simplify
1624 (op (minus @0 @2) (minus @1 @2))
1625 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1626 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1627 (op @0 @1))))
1628 /* For equality and subtraction, this is also true with wrapping overflow. */
1629 (for op (eq ne minus)
1630 (simplify
1631 (op (minus @0 @2) (minus @1 @2))
1632 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1633 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1634 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1635 (op @0 @1))))
1636 /* And for pointers... */
1637 (for op (simple_comparison)
1638 (simplify
1639 (op (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
1640 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1641 (op @0 @1))))
1642 (simplify
1643 (minus (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
1644 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
1645 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1646 (pointer_diff @0 @1)))
1647
1648 /* Z - X < Z - Y is the same as Y < X when there is no overflow. */
1649 (for op (lt le ge gt)
1650 (simplify
1651 (op (minus @2 @0) (minus @2 @1))
1652 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1653 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1654 (op @1 @0))))
1655 /* For equality and subtraction, this is also true with wrapping overflow. */
1656 (for op (eq ne minus)
1657 (simplify
1658 (op (minus @2 @0) (minus @2 @1))
1659 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1660 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1661 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1662 (op @1 @0))))
1663 /* And for pointers... */
1664 (for op (simple_comparison)
1665 (simplify
1666 (op (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
1667 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1668 (op @1 @0))))
1669 (simplify
1670 (minus (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
1671 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
1672 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1673 (pointer_diff @1 @0)))
1674
1675 /* X + Y < Y is the same as X < 0 when there is no overflow. */
1676 (for op (lt le gt ge)
1677 (simplify
1678 (op:c (plus:c@2 @0 @1) @1)
1679 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1680 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1681 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
1682 && (CONSTANT_CLASS_P (@0) || single_use (@2)))
1683 (op @0 { build_zero_cst (TREE_TYPE (@0)); }))))
1684 /* For equality, this is also true with wrapping overflow. */
1685 (for op (eq ne)
1686 (simplify
1687 (op:c (nop_convert@3 (plus:c@2 @0 (convert1? @1))) (convert2? @1))
1688 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1689 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1690 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
1691 && (CONSTANT_CLASS_P (@0) || (single_use (@2) && single_use (@3)))
1692 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@2))
1693 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
1694 (op @0 { build_zero_cst (TREE_TYPE (@0)); })))
1695 (simplify
1696 (op:c (nop_convert@3 (pointer_plus@2 (convert1? @0) @1)) (convert2? @0))
1697 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
1698 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
1699 && (CONSTANT_CLASS_P (@1) || (single_use (@2) && single_use (@3))))
1700 (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
1701
1702 /* X - Y < X is the same as Y > 0 when there is no overflow.
1703 For equality, this is also true with wrapping overflow. */
1704 (for op (simple_comparison)
1705 (simplify
1706 (op:c @0 (minus@2 @0 @1))
1707 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1708 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1709 || ((op == EQ_EXPR || op == NE_EXPR)
1710 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1711 && (CONSTANT_CLASS_P (@1) || single_use (@2)))
1712 (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
1713
1714 /* Transform:
1715 (X / Y) == 0 -> X < Y if X, Y are unsigned.
1716 (X / Y) != 0 -> X >= Y if X, Y are unsigned.  */
1717 (for cmp (eq ne)
1718 ocmp (lt ge)
1719 (simplify
1720 (cmp (trunc_div @0 @1) integer_zerop)
1721 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
1722 /* Complex ==/!= is allowed, but not </>=. */
1723 && TREE_CODE (TREE_TYPE (@0)) != COMPLEX_TYPE
1724 && (VECTOR_TYPE_P (type) || !VECTOR_TYPE_P (TREE_TYPE (@0))))
1725 (ocmp @0 @1))))
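/* E.g. for unsigned X and Y, X / Y == 0 holds exactly when X < Y (with
Y == 0 the division is undefined anyway), so the division disappears.  */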
1726
1727 /* X == C - X can never be true if C is odd. */
1728 (for cmp (eq ne)
1729 (simplify
1730 (cmp:c (convert? @0) (convert1? (minus INTEGER_CST@1 (convert2? @0))))
1731 (if (TREE_INT_CST_LOW (@1) & 1)
1732 { constant_boolean_node (cmp == NE_EXPR, type); })))
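/* E.g. X == 7 - X would require 2 * X == 7, and twice anything has its
low bit clear even in modular arithmetic, so the equality folds to false
(and the inequality to true).  */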
1733
1734 /* Arguments on which one can call get_nonzero_bits to get the bits
1735 possibly set. */
1736 (match with_possible_nonzero_bits
1737 INTEGER_CST@0)
1738 (match with_possible_nonzero_bits
1739 SSA_NAME@0
1740 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))))
1741 /* Slightly extended version, do not make it recursive to keep it cheap. */
1742 (match (with_possible_nonzero_bits2 @0)
1743 with_possible_nonzero_bits@0)
1744 (match (with_possible_nonzero_bits2 @0)
1745 (bit_and:c with_possible_nonzero_bits@0 @2))
1746
1747 /* Same for bits that are known to be set, but we do not have
1748 an equivalent to get_nonzero_bits yet. */
1749 (match (with_certain_nonzero_bits2 @0)
1750 INTEGER_CST@0)
1751 (match (with_certain_nonzero_bits2 @0)
1752 (bit_ior @1 INTEGER_CST@0))
1753
1754 /* X == C (or X & Z == Y | C) is impossible if ~nonzero(X) & C != 0. */
1755 (for cmp (eq ne)
1756 (simplify
1757 (cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1))
1758 (if (wi::bit_and_not (wi::to_wide (@1), get_nonzero_bits (@0)) != 0)
1759 { constant_boolean_node (cmp == NE_EXPR, type); })))
1760
1761 /* ((X inner_op C0) outer_op C1)
1762 With X being a tree where value_range has reasoned certain bits to always be
1763 zero throughout its computed value range,
1764 inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op,
1765 and with zero_mask having 1's for all bits that are sure to be 0 in X
1766 and 0's otherwise:
1767 if (inner_op == '^') C0 &= ~C1;
1768 if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1))
1769 if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1))
1770 */
1771 (for inner_op (bit_ior bit_xor)
1772 outer_op (bit_xor bit_ior)
1773 (simplify
1774 (outer_op
1775 (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1)
1776 (with
1777 {
1778 bool fail = false;
1779 wide_int zero_mask_not;
1780 wide_int C0;
1781 wide_int cst_emit;
1782
1783 if (TREE_CODE (@2) == SSA_NAME)
1784 zero_mask_not = get_nonzero_bits (@2);
1785 else
1786 fail = true;
1787
1788 if (inner_op == BIT_XOR_EXPR)
1789 {
1790 C0 = wi::bit_and_not (wi::to_wide (@0), wi::to_wide (@1));
1791 cst_emit = C0 | wi::to_wide (@1);
1792 }
1793 else
1794 {
1795 C0 = wi::to_wide (@0);
1796 cst_emit = C0 ^ wi::to_wide (@1);
1797 }
1798 }
1799 (if (!fail && (C0 & zero_mask_not) == 0)
1800 (outer_op @2 { wide_int_to_tree (type, cst_emit); })
1801 (if (!fail && (wi::to_wide (@1) & zero_mask_not) == 0)
1802 (inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))
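/* Illustrative case, assuming value_range has proven that only the low
8 bits of a 32-bit X may be nonzero (zero_mask covers the upper 24 bits):
then (X | 0xf00) ^ 0x1f00 has (C0 & ~zero_mask) == 0 and is rewritten as
X ^ (0xf00 ^ 0x1f00), i.e. X ^ 0x1000.  */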
1803
1804 /* Associate (p +p off1) +p off2 as (p +p (off1 + off2)). */
1805 (simplify
1806 (pointer_plus (pointer_plus:s @0 @1) @3)
1807 (pointer_plus @0 (plus @1 @3)))
1808
1809 /* Pattern match
1810 tem1 = (long) ptr1;
1811 tem2 = (long) ptr2;
1812 tem3 = tem2 - tem1;
1813 tem4 = (unsigned long) tem3;
1814 tem5 = ptr1 + tem4;
1815 and produce
1816 tem5 = ptr2; */
1817 (simplify
1818 (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
1819 /* Conditionally look through a sign-changing conversion. */
1820 (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
1821 && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
1822 || (GENERIC && type == TREE_TYPE (@1))))
1823 @1))
1824 (simplify
1825 (pointer_plus @0 (convert?@2 (pointer_diff@3 @1 @@0)))
1826 (if (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (TREE_TYPE (@3)))
1827 (convert @1)))
1828
1829 /* Pattern match
1830 tem = (sizetype) ptr;
1831 tem = tem & algn;
1832 tem = -tem;
1833 ... = ptr p+ tem;
1834 and produce the simpler form, which is also easier to analyze with
1835 respect to alignment:  ... = ptr & ~algn;  */
1836 (simplify
1837 (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
1838 (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), ~wi::to_wide (@1)); }
1839 (bit_and @0 { algn; })))
1840
1841 /* Try folding difference of addresses. */
1842 (simplify
1843 (minus (convert ADDR_EXPR@0) (convert @1))
1844 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1845 (with { poly_int64 diff; }
1846 (if (ptr_difference_const (@0, @1, &diff))
1847 { build_int_cst_type (type, diff); }))))
1848 (simplify
1849 (minus (convert @0) (convert ADDR_EXPR@1))
1850 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1851 (with { poly_int64 diff; }
1852 (if (ptr_difference_const (@0, @1, &diff))
1853 { build_int_cst_type (type, diff); }))))
1854 (simplify
1855 (pointer_diff (convert?@2 ADDR_EXPR@0) (convert1?@3 @1))
1856 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
1857 && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
1858 (with { poly_int64 diff; }
1859 (if (ptr_difference_const (@0, @1, &diff))
1860 { build_int_cst_type (type, diff); }))))
1861 (simplify
1862 (pointer_diff (convert?@2 @0) (convert1?@3 ADDR_EXPR@1))
1863 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
1864 && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
1865 (with { poly_int64 diff; }
1866 (if (ptr_difference_const (@0, @1, &diff))
1867 { build_int_cst_type (type, diff); }))))
1868
1869 /* If arg0 is derived from the address of an object or function, we may
1870 be able to fold this expression using the object or function's
1871 alignment. */
1872 (simplify
1873 (bit_and (convert? @0) INTEGER_CST@1)
1874 (if (POINTER_TYPE_P (TREE_TYPE (@0))
1875 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1876 (with
1877 {
1878 unsigned int align;
1879 unsigned HOST_WIDE_INT bitpos;
1880 get_pointer_alignment_1 (@0, &align, &bitpos);
1881 }
1882 (if (wi::ltu_p (wi::to_wide (@1), align / BITS_PER_UNIT))
1883 { wide_int_to_tree (type, (wi::to_wide (@1)
1884 & (bitpos / BITS_PER_UNIT))); }))))
1885
1886 (match min_value
1887 INTEGER_CST
1888 (if (INTEGRAL_TYPE_P (type)
1889 && wi::eq_p (wi::to_wide (t), wi::min_value (type)))))
1890
1891 (match max_value
1892 INTEGER_CST
1893 (if (INTEGRAL_TYPE_P (type)
1894 && wi::eq_p (wi::to_wide (t), wi::max_value (type)))))
1895
1896 /* x > y && x != XXX_MIN --> x > y
1897 x > y && x == XXX_MIN --> false.  */
1898 (for eqne (eq ne)
1899 (simplify
1900 (bit_and:c (gt:c@2 @0 @1) (eqne @0 min_value))
1901 (switch
1902 (if (eqne == EQ_EXPR)
1903 { constant_boolean_node (false, type); })
1904 (if (eqne == NE_EXPR)
1905 @2)
1906 )))
1907
1908 /* x < y && x != XXX_MAX --> x < y
1909 x < y && x == XXX_MAX --> false. */
1910 (for eqne (eq ne)
1911 (simplify
1912 (bit_and:c (lt:c@2 @0 @1) (eqne @0 max_value))
1913 (switch
1914 (if (eqne == EQ_EXPR)
1915 { constant_boolean_node (false, type); })
1916 (if (eqne == NE_EXPR)
1917 @2)
1918 )))
1919
1920 /* x <= y && x == XXX_MIN --> x == XXX_MIN. */
1921 (simplify
1922 (bit_and:c (le:c @0 @1) (eq@2 @0 min_value))
1923 @2)
1924
1925 /* x >= y && x == XXX_MAX --> x == XXX_MAX. */
1926 (simplify
1927 (bit_and:c (ge:c @0 @1) (eq@2 @0 max_value))
1928 @2)
1929
1930 /* x > y || x != XXX_MIN --> x != XXX_MIN. */
1931 (simplify
1932 (bit_ior:c (gt:c @0 @1) (ne@2 @0 min_value))
1933 @2)
1934
1935 /* x <= y || x != XXX_MIN --> true. */
1936 (simplify
1937 (bit_ior:c (le:c @0 @1) (ne @0 min_value))
1938 { constant_boolean_node (true, type); })
1939
1940 /* x <= y || x == XXX_MIN --> x <= y. */
1941 (simplify
1942 (bit_ior:c (le:c@2 @0 @1) (eq @0 min_value))
1943 @2)
1944
1945 /* x < y || x != XXX_MAX --> x != XXX_MAX. */
1946 (simplify
1947 (bit_ior:c (lt:c @0 @1) (ne@2 @0 max_value))
1948 @2)
1949
1950 /* x >= y || x != XXX_MAX --> true
1951 x >= y || x == XXX_MAX --> x >= y. */
1952 (for eqne (eq ne)
1953 (simplify
1954 (bit_ior:c (ge:c@2 @0 @1) (eqne @0 max_value))
1955 (switch
1956 (if (eqne == EQ_EXPR)
1957 @2)
1958 (if (eqne == NE_EXPR)
1959 { constant_boolean_node (true, type); }))))
1960
1961 /* We can't reassociate at all for saturating types. */
1962 (if (!TYPE_SATURATING (type))
1963
1964 /* Contract negates. */
1965 /* A + (-B) -> A - B */
1966 (simplify
1967 (plus:c @0 (convert? (negate @1)))
1968 /* Apply STRIP_NOPS on the negate. */
1969 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1970 && !TYPE_OVERFLOW_SANITIZED (type))
1971 (with
1972 {
1973 tree t1 = type;
1974 if (INTEGRAL_TYPE_P (type)
1975 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
1976 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
1977 }
1978 (convert (minus (convert:t1 @0) (convert:t1 @1))))))
1979 /* A - (-B) -> A + B */
1980 (simplify
1981 (minus @0 (convert? (negate @1)))
1982 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1983 && !TYPE_OVERFLOW_SANITIZED (type))
1984 (with
1985 {
1986 tree t1 = type;
1987 if (INTEGRAL_TYPE_P (type)
1988 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
1989 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
1990 }
1991 (convert (plus (convert:t1 @0) (convert:t1 @1))))))
1992 /* -(T)(-A) -> (T)A
1993 Sign-extension is ok except for INT_MIN, which thankfully cannot
1994 happen without overflow. */
1995 (simplify
1996 (negate (convert (negate @1)))
1997 (if (INTEGRAL_TYPE_P (type)
1998 && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1))
1999 || (!TYPE_UNSIGNED (TREE_TYPE (@1))
2000 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
2001 && !TYPE_OVERFLOW_SANITIZED (type)
2002 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
2003 (convert @1)))
2004 (simplify
2005 (negate (convert negate_expr_p@1))
2006 (if (SCALAR_FLOAT_TYPE_P (type)
2007 && ((DECIMAL_FLOAT_TYPE_P (type)
2008 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))
2009 && TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (@1)))
2010 || !HONOR_SIGN_DEPENDENT_ROUNDING (type)))
2011 (convert (negate @1))))
2012 (simplify
2013 (negate (nop_convert (negate @1)))
2014 (if (!TYPE_OVERFLOW_SANITIZED (type)
2015 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
2016 (view_convert @1)))
2017
2018 /* We can't reassociate floating-point unless -fassociative-math is given,
2019 nor fixed-point plus or minus, because of saturation to +-Inf.  */
2020 (if ((!FLOAT_TYPE_P (type) || flag_associative_math)
2021 && !FIXED_POINT_TYPE_P (type))
2022
2023 /* Match patterns that allow contracting a plus-minus pair
2024 irrespective of overflow issues. */
2025 /* (A +- B) - A -> +- B */
2026 /* (A +- B) -+ B -> A */
2027 /* A - (A +- B) -> -+ B */
2028 /* A +- (B -+ A) -> +- B */
2029 (simplify
2030 (minus (plus:c @0 @1) @0)
2031 @1)
2032 (simplify
2033 (minus (minus @0 @1) @0)
2034 (negate @1))
2035 (simplify
2036 (plus:c (minus @0 @1) @1)
2037 @0)
2038 (simplify
2039 (minus @0 (plus:c @0 @1))
2040 (negate @1))
2041 (simplify
2042 (minus @0 (minus @0 @1))
2043 @1)
2044 /* (A +- B) + (C - A) -> C +- B */
2045 /* (A + B) - (A - C) -> B + C */
2046 /* More cases are handled with comparisons. */
2047 (simplify
2048 (plus:c (plus:c @0 @1) (minus @2 @0))
2049 (plus @2 @1))
2050 (simplify
2051 (plus:c (minus @0 @1) (minus @2 @0))
2052 (minus @2 @1))
2053 (simplify
2054 (plus:c (pointer_diff @0 @1) (pointer_diff @2 @0))
2055 (if (TYPE_OVERFLOW_UNDEFINED (type)
2056 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)))
2057 (pointer_diff @2 @1)))
2058 (simplify
2059 (minus (plus:c @0 @1) (minus @0 @2))
2060 (plus @1 @2))
2061
2062 /* (A +- CST1) +- CST2 -> A + CST3
2063 Use view_convert because it is safe for vectors and equivalent for
2064 scalars. */
2065 (for outer_op (plus minus)
2066 (for inner_op (plus minus)
2067 neg_inner_op (minus plus)
2068 (simplify
2069 (outer_op (nop_convert (inner_op @0 CONSTANT_CLASS_P@1))
2070 CONSTANT_CLASS_P@2)
2071 /* If one of the types wraps, use that one. */
2072 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
2073 /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
2074 forever if something doesn't simplify into a constant. */
2075 (if (!CONSTANT_CLASS_P (@0))
2076 (if (outer_op == PLUS_EXPR)
2077 (plus (view_convert @0) (inner_op @2 (view_convert @1)))
2078 (minus (view_convert @0) (neg_inner_op @2 (view_convert @1)))))
2079 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2080 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
2081 (if (outer_op == PLUS_EXPR)
2082 (view_convert (plus @0 (inner_op (view_convert @2) @1)))
2083 (view_convert (minus @0 (neg_inner_op (view_convert @2) @1))))
2084 /* If the constant operation overflows we cannot do the transform
2085 directly as we would introduce undefined overflow, for example
2086 with (a - 1) + INT_MIN. */
2087 (if (types_match (type, @0))
2088 (with { tree cst = const_binop (outer_op == inner_op
2089 ? PLUS_EXPR : MINUS_EXPR,
2090 type, @1, @2); }
2091 (if (cst && !TREE_OVERFLOW (cst))
2092 (inner_op @0 { cst; } )
2093 /* X+INT_MAX+1 is X-INT_MIN. */
2094 (if (INTEGRAL_TYPE_P (type) && cst
2095 && wi::to_wide (cst) == wi::min_value (type))
2096 (neg_inner_op @0 { wide_int_to_tree (type, wi::to_wide (cst)); })
2097 /* Last resort, use some unsigned type. */
2098 (with { tree utype = unsigned_type_for (type); }
2099 (if (utype)
2100 (view_convert (inner_op
2101 (view_convert:utype @0)
2102 (view_convert:utype
2103 { drop_tree_overflow (cst); }))))))))))))))
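/* E.g. (X + 7) - 3 becomes X + 4 when 7 - 3 is computable without overflow;
when the folded constant does overflow, as in (X + INT_MAX) + 1, the
last-resort branches above emit X - INT_MIN via the negated inner op or
redo the arithmetic in the corresponding unsigned type.  */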
2104
2105 /* (CST1 - A) +- CST2 -> CST3 - A */
2106 (for outer_op (plus minus)
2107 (simplify
2108 (outer_op (minus CONSTANT_CLASS_P@1 @0) CONSTANT_CLASS_P@2)
2109 (with { tree cst = const_binop (outer_op, type, @1, @2); }
2110 (if (cst && !TREE_OVERFLOW (cst))
2111 (minus { cst; } @0)))))
2112
2113 /* CST1 - (CST2 - A) -> CST3 + A */
2114 (simplify
2115 (minus CONSTANT_CLASS_P@1 (minus CONSTANT_CLASS_P@2 @0))
2116 (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
2117 (if (cst && !TREE_OVERFLOW (cst))
2118 (plus { cst; } @0))))
2119
2120 /* ((T)(A)) + CST -> (T)(A + CST) */
2121 #if GIMPLE
2122 (simplify
2123 (plus (convert SSA_NAME@0) INTEGER_CST@1)
2124 (if (TREE_CODE (TREE_TYPE (@0)) == INTEGER_TYPE
2125 && TREE_CODE (type) == INTEGER_TYPE
2126 && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (@0))
2127 && int_fits_type_p (@1, TREE_TYPE (@0)))
2128 /* Perform binary operation inside the cast if the constant fits
2129 and (A + CST)'s range does not overflow. */
2130 (with
2131 {
2132 wi::overflow_type min_ovf = wi::OVF_OVERFLOW,
2133 max_ovf = wi::OVF_OVERFLOW;
2134 tree inner_type = TREE_TYPE (@0);
2135
2136 wide_int w1 = wide_int::from (wi::to_wide (@1), TYPE_PRECISION (inner_type),
2137 TYPE_SIGN (inner_type));
2138
2139 wide_int wmin0, wmax0;
2140 if (get_range_info (@0, &wmin0, &wmax0) == VR_RANGE)
2141 {
2142 wi::add (wmin0, w1, TYPE_SIGN (inner_type), &min_ovf);
2143 wi::add (wmax0, w1, TYPE_SIGN (inner_type), &max_ovf);
2144 }
2145 }
2146 (if (min_ovf == wi::OVF_NONE && max_ovf == wi::OVF_NONE)
2147 (convert (plus @0 { wide_int_to_tree (TREE_TYPE (@0), w1); } )))
2148 )))
2149 #endif
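/* E.g. if range info proves a 32-bit int X lies in [0, 10], then
(long) X + 1 can be rewritten as (long) (X + 1), because neither bound
of the range overflows when 1 is added in the inner type.  */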
2150
2151 /* ~A + A -> -1 */
2152 (simplify
2153 (plus:c (bit_not @0) @0)
2154 (if (!TYPE_OVERFLOW_TRAPS (type))
2155 { build_all_ones_cst (type); }))
2156
2157 /* ~A + 1 -> -A */
2158 (simplify
2159 (plus (convert? (bit_not @0)) integer_each_onep)
2160 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2161 (negate (convert @0))))
2162
2163 /* -A - 1 -> ~A */
2164 (simplify
2165 (minus (convert? (negate @0)) integer_each_onep)
2166 (if (!TYPE_OVERFLOW_TRAPS (type)
2167 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
2168 (bit_not (convert @0))))
2169
2170 /* -1 - A -> ~A */
2171 (simplify
2172 (minus integer_all_onesp @0)
2173 (bit_not @0))
2174
2175 /* (T)(P + A) - (T)P -> (T) A */
2176 (simplify
2177 (minus (convert (plus:c @@0 @1))
2178 (convert? @0))
2179 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2180 /* For integer types, if A has a smaller type
2181 than T the result depends on the possible
2182 overflow in P + A.
2183 E.g. T=size_t, A=(unsigned)4294967295, P>0.
2184 However, if an overflow in P + A would cause
2185 undefined behavior, we can assume that there
2186 is no overflow. */
2187 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2188 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
2189 (convert @1)))
2190 (simplify
2191 (minus (convert (pointer_plus @@0 @1))
2192 (convert @0))
2193 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2194 /* For pointer types, if the conversion of A to the
2195 final type requires a sign- or zero-extension,
2196 then we have to punt - it is not defined which
2197 one is correct. */
2198 || (POINTER_TYPE_P (TREE_TYPE (@0))
2199 && TREE_CODE (@1) == INTEGER_CST
2200 && tree_int_cst_sign_bit (@1) == 0))
2201 (convert @1)))
2202 (simplify
2203 (pointer_diff (pointer_plus @@0 @1) @0)
2204 /* The second argument of pointer_plus must be interpreted as signed, and
2205 thus sign-extended if necessary. */
2206 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
2207 /* Use view_convert instead of convert here: the second arg of
2208 POINTER_PLUS_EXPR is unsigned even when we need to consider it as
2209 signed, and we don't want to diagnose overflow here.  */
2210 (convert (view_convert:stype @1))))
2211
2212 /* (T)P - (T)(P + A) -> -(T) A */
2213 (simplify
2214 (minus (convert? @0)
2215 (convert (plus:c @@0 @1)))
2216 (if (INTEGRAL_TYPE_P (type)
2217 && TYPE_OVERFLOW_UNDEFINED (type)
2218 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2219 (with { tree utype = unsigned_type_for (type); }
2220 (convert (negate (convert:utype @1))))
2221 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2222 /* For integer types, if A has a smaller type
2223 than T the result depends on the possible
2224 overflow in P + A.
2225 E.g. T=size_t, A=(unsigned)4294967295, P>0.
2226 However, if an overflow in P + A would cause
2227 undefined behavior, we can assume that there
2228 is no overflow. */
2229 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2230 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
2231 (negate (convert @1)))))
2232 (simplify
2233 (minus (convert @0)
2234 (convert (pointer_plus @@0 @1)))
2235 (if (INTEGRAL_TYPE_P (type)
2236 && TYPE_OVERFLOW_UNDEFINED (type)
2237 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2238 (with { tree utype = unsigned_type_for (type); }
2239 (convert (negate (convert:utype @1))))
2240 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2241 /* For pointer types, if the conversion of A to the
2242 final type requires a sign- or zero-extension,
2243 then we have to punt - it is not defined which
2244 one is correct. */
2245 || (POINTER_TYPE_P (TREE_TYPE (@0))
2246 && TREE_CODE (@1) == INTEGER_CST
2247 && tree_int_cst_sign_bit (@1) == 0))
2248 (negate (convert @1)))))
2249 (simplify
2250 (pointer_diff @0 (pointer_plus @@0 @1))
2251 /* The second argument of pointer_plus must be interpreted as signed, and
2252 thus sign-extended if necessary. */
2253 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
2254 /* Use view_convert instead of convert here: the second arg of
2255 POINTER_PLUS_EXPR is unsigned even when we need to consider it as
2256 signed, and we don't want to diagnose overflow here.  */
2257 (negate (convert (view_convert:stype @1)))))
2258
2259 /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
2260 (simplify
2261 (minus (convert (plus:c @@0 @1))
2262 (convert (plus:c @0 @2)))
2263 (if (INTEGRAL_TYPE_P (type)
2264 && TYPE_OVERFLOW_UNDEFINED (type)
2265 && element_precision (type) <= element_precision (TREE_TYPE (@1))
2266 && element_precision (type) <= element_precision (TREE_TYPE (@2)))
2267 (with { tree utype = unsigned_type_for (type); }
2268 (convert (minus (convert:utype @1) (convert:utype @2))))
2269 (if (((element_precision (type) <= element_precision (TREE_TYPE (@1)))
2270 == (element_precision (type) <= element_precision (TREE_TYPE (@2))))
2271 && (element_precision (type) <= element_precision (TREE_TYPE (@1))
2272 /* For integer types, if A has a smaller type
2273 than T the result depends on the possible
2274 overflow in P + A.
2275 E.g. T=size_t, A=(unsigned)4294967295, P>0.
2276 However, if an overflow in P + A would cause
2277 undefined behavior, we can assume that there
2278 is no overflow. */
2279 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2280 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
2281 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))
2282 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@2)))))
2283 (minus (convert @1) (convert @2)))))
2284 (simplify
2285 (minus (convert (pointer_plus @@0 @1))
2286 (convert (pointer_plus @0 @2)))
2287 (if (INTEGRAL_TYPE_P (type)
2288 && TYPE_OVERFLOW_UNDEFINED (type)
2289 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2290 (with { tree utype = unsigned_type_for (type); }
2291 (convert (minus (convert:utype @1) (convert:utype @2))))
2292 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2293 /* For pointer types, if the conversion of A to the
2294 final type requires a sign- or zero-extension,
2295 then we have to punt - it is not defined which
2296 one is correct. */
2297 || (POINTER_TYPE_P (TREE_TYPE (@0))
2298 && TREE_CODE (@1) == INTEGER_CST
2299 && tree_int_cst_sign_bit (@1) == 0
2300 && TREE_CODE (@2) == INTEGER_CST
2301 && tree_int_cst_sign_bit (@2) == 0))
2302 (minus (convert @1) (convert @2)))))
2303 (simplify
2304 (pointer_diff (pointer_plus @@0 @1) (pointer_plus @0 @2))
2305 /* The second argument of pointer_plus must be interpreted as signed, and
2306 thus sign-extended if necessary. */
2307 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
2308 /* Use view_convert instead of convert here: the second arg of
2309 POINTER_PLUS_EXPR is unsigned even when we need to consider it as
2310 signed, and we don't want to diagnose overflow here.  */
2311 (minus (convert (view_convert:stype @1))
2312 (convert (view_convert:stype @2)))))))
2313
2314 /* (A * C) +- (B * C) -> (A+-B) * C and (A * C) +- A -> A * (C+-1).
2315 Modeled after fold_plusminus_mult_expr. */
2316 (if (!TYPE_SATURATING (type)
2317 && (!FLOAT_TYPE_P (type) || flag_associative_math))
2318 (for plusminus (plus minus)
2319 (simplify
2320 (plusminus (mult:cs@3 @0 @1) (mult:cs@4 @0 @2))
2321 (if ((!ANY_INTEGRAL_TYPE_P (type)
2322 || TYPE_OVERFLOW_WRAPS (type)
2323 || (INTEGRAL_TYPE_P (type)
2324 && tree_expr_nonzero_p (@0)
2325 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
2326 /* If @1 +- @2 is constant require a hard single-use on either
2327 original operand (but not on both). */
2328 && (single_use (@3) || single_use (@4)))
2329 (mult (plusminus @1 @2) @0)))
2330 /* We cannot generate constant 1 for fract. */
2331 (if (!ALL_FRACT_MODE_P (TYPE_MODE (type)))
2332 (simplify
2333 (plusminus @0 (mult:c@3 @0 @2))
2334 (if ((!ANY_INTEGRAL_TYPE_P (type)
2335 || TYPE_OVERFLOW_WRAPS (type)
2336 || (INTEGRAL_TYPE_P (type)
2337 && tree_expr_nonzero_p (@0)
2338 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
2339 && single_use (@3))
2340 (mult (plusminus { build_one_cst (type); } @2) @0)))
2341 (simplify
2342 (plusminus (mult:c@3 @0 @2) @0)
2343 (if ((!ANY_INTEGRAL_TYPE_P (type)
2344 || TYPE_OVERFLOW_WRAPS (type)
2345 || (INTEGRAL_TYPE_P (type)
2346 && tree_expr_nonzero_p (@0)
2347 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
2348 && single_use (@3))
2349 (mult (plusminus @2 { build_one_cst (type); }) @0))))))
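/* E.g. for unsigned or wrapping X, X * 3 + X * 4 becomes X * 7 and
X * 5 - X becomes X * 4; for signed types without wrapping overflow the
guards above additionally require X known nonzero and not equal to -1.  */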
2350
2351 /* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax(). */
2352
2353 (for minmax (min max FMIN_ALL FMAX_ALL)
2354 (simplify
2355 (minmax @0 @0)
2356 @0))
2357 /* min(max(x,y),y) -> y. */
2358 (simplify
2359 (min:c (max:c @0 @1) @1)
2360 @1)
2361 /* max(min(x,y),y) -> y. */
2362 (simplify
2363 (max:c (min:c @0 @1) @1)
2364 @1)
2365 /* max(a,-a) -> abs(a). */
2366 (simplify
2367 (max:c @0 (negate @0))
2368 (if (TREE_CODE (type) != COMPLEX_TYPE
2369 && (! ANY_INTEGRAL_TYPE_P (type)
2370 || TYPE_OVERFLOW_UNDEFINED (type)))
2371 (abs @0)))
2372 /* min(a,-a) -> -abs(a). */
2373 (simplify
2374 (min:c @0 (negate @0))
2375 (if (TREE_CODE (type) != COMPLEX_TYPE
2376 && (! ANY_INTEGRAL_TYPE_P (type)
2377 || TYPE_OVERFLOW_UNDEFINED (type)))
2378 (negate (abs @0))))
2379 (simplify
2380 (min @0 @1)
2381 (switch
2382 (if (INTEGRAL_TYPE_P (type)
2383 && TYPE_MIN_VALUE (type)
2384 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
2385 @1)
2386 (if (INTEGRAL_TYPE_P (type)
2387 && TYPE_MAX_VALUE (type)
2388 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
2389 @0)))
2390 (simplify
2391 (max @0 @1)
2392 (switch
2393 (if (INTEGRAL_TYPE_P (type)
2394 && TYPE_MAX_VALUE (type)
2395 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
2396 @1)
2397 (if (INTEGRAL_TYPE_P (type)
2398 && TYPE_MIN_VALUE (type)
2399 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
2400 @0)))
2401
2402 /* max (a, a + CST) -> a + CST where CST is positive. */
2403 /* max (a, a + CST) -> a where CST is negative. */
2404 (simplify
2405 (max:c @0 (plus@2 @0 INTEGER_CST@1))
2406 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2407 (if (tree_int_cst_sgn (@1) > 0)
2408 @2
2409 @0)))
2410
2411 /* min (a, a + CST) -> a where CST is positive. */
2412 /* min (a, a + CST) -> a + CST where CST is negative. */
2413 (simplify
2414 (min:c @0 (plus@2 @0 INTEGER_CST@1))
2415 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2416 (if (tree_int_cst_sgn (@1) > 0)
2417 @0
2418 @2)))
2419
2420 /* (convert (minmax (convert x) c)) -> (minmax x c) if x is promoted
2421 and the outer convert demotes the expression back to x's type.  */
2422 (for minmax (min max)
2423 (simplify
2424 (convert (minmax@0 (convert @1) INTEGER_CST@2))
2425 (if (INTEGRAL_TYPE_P (type)
2426 && types_match (@1, type) && int_fits_type_p (@2, type)
2427 && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type)
2428 && TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
2429 (minmax @1 (convert @2)))))
2430
2431 (for minmax (FMIN_ALL FMAX_ALL)
2432 /* If either argument is NaN, return the other one. Avoid the
2433 transformation if we get (and honor) a signalling NaN. */
2434 (simplify
2435 (minmax:c @0 REAL_CST@1)
2436 (if (real_isnan (TREE_REAL_CST_PTR (@1))
2437 && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling))
2438 @0)))
2439 /* Convert fmin/fmax to MIN_EXPR/MAX_EXPR. C99 requires these
2440 functions to return the numeric arg if the other one is NaN.
2441 MIN and MAX don't honor that, so only transform if -ffinite-math-only
2442 is set. C99 doesn't require -0.0 to be handled, so we don't have to
2443 worry about it either. */
2444 (if (flag_finite_math_only)
2445 (simplify
2446 (FMIN_ALL @0 @1)
2447 (min @0 @1))
2448 (simplify
2449 (FMAX_ALL @0 @1)
2450 (max @0 @1)))
2451 /* min (-A, -B) -> -max (A, B) */
2452 (for minmax (min max FMIN_ALL FMAX_ALL)
2453 maxmin (max min FMAX_ALL FMIN_ALL)
2454 (simplify
2455 (minmax (negate:s@2 @0) (negate:s@3 @1))
2456 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2457 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2458 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
2459 (negate (maxmin @0 @1)))))
2460 /* MIN (~X, ~Y) -> ~MAX (X, Y)
2461 MAX (~X, ~Y) -> ~MIN (X, Y) */
2462 (for minmax (min max)
2463 maxmin (max min)
2464 (simplify
2465 (minmax (bit_not:s@2 @0) (bit_not:s@3 @1))
2466 (bit_not (maxmin @0 @1))))
2467
2468 /* MIN (X, Y) == X -> X <= Y */
2469 (for minmax (min min max max)
2470 cmp (eq ne eq ne )
2471 out (le gt ge lt )
2472 (simplify
2473 (cmp:c (minmax:c @0 @1) @0)
2474 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)))
2475 (out @0 @1))))
2476 /* MIN (X, 5) == 0 -> X == 0
2477 MIN (X, 5) == 7 -> false */
2478 (for cmp (eq ne)
2479 (simplify
2480 (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
2481 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
2482 TYPE_SIGN (TREE_TYPE (@0))))
2483 { constant_boolean_node (cmp == NE_EXPR, type); }
2484 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
2485 TYPE_SIGN (TREE_TYPE (@0))))
2486 (cmp @0 @2)))))
2487 (for cmp (eq ne)
2488 (simplify
2489 (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
2490 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
2491 TYPE_SIGN (TREE_TYPE (@0))))
2492 { constant_boolean_node (cmp == NE_EXPR, type); }
2493 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
2494 TYPE_SIGN (TREE_TYPE (@0))))
2495 (cmp @0 @2)))))
2496 /* MIN (X, C1) < C2 -> X < C2 || C1 < C2 */
2497 (for minmax (min min max max min min max max )
2498 cmp (lt le gt ge gt ge lt le )
2499 comb (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and)
2500 (simplify
2501 (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2)
2502 (comb (cmp @0 @2) (cmp @1 @2))))
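/* E.g. MIN (X, 10) < 5 expands to X < 5 || 10 < 5, and the constant
comparison folds away, leaving just X < 5.  */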
2503
2504 /* Simplifications of shift and rotates. */
2505
2506 (for rotate (lrotate rrotate)
2507 (simplify
2508 (rotate integer_all_onesp@0 @1)
2509 @0))
2510
2511 /* Optimize -1 >> x for arithmetic right shifts. */
2512 (simplify
2513 (rshift integer_all_onesp@0 @1)
2514 (if (!TYPE_UNSIGNED (type)
2515 && tree_expr_nonnegative_p (@1))
2516 @0))
2517
2518 /* Optimize (x >> c) << c into x & (-1<<c). */
2519 (simplify
2520 (lshift (rshift @0 INTEGER_CST@1) @1)
2521 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type)))
2522 (bit_and @0 (lshift { build_minus_one_cst (type); } @1))))
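/* E.g. for a 32-bit X, (X >> 4) << 4 becomes X & 0xfffffff0, clearing
the low four bits with a single mask.  */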
2523
2524 /* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned
2525 types. */
2526 (simplify
2527 (rshift (lshift @0 INTEGER_CST@1) @1)
2528 (if (TYPE_UNSIGNED (type)
2529 && (wi::ltu_p (wi::to_wide (@1), element_precision (type))))
2530 (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))
2531
2532 (for shiftrotate (lrotate rrotate lshift rshift)
2533 (simplify
2534 (shiftrotate @0 integer_zerop)
2535 (non_lvalue @0))
2536 (simplify
2537 (shiftrotate integer_zerop@0 @1)
2538 @0)
2539 /* Prefer vector1 << scalar to vector1 << vector2
2540 if vector2 is uniform. */
2541 (for vec (VECTOR_CST CONSTRUCTOR)
2542 (simplify
2543 (shiftrotate @0 vec@1)
2544 (with { tree tem = uniform_vector_p (@1); }
2545 (if (tem)
2546 (shiftrotate @0 { tem; }))))))
2547
2548 /* Simplify X << Y to X when the low ceil_log2(precision) bits of Y are zero,
2549 as the only valid shift amount is then Y == 0.  Similarly for X >> Y.  */
2550 #if GIMPLE
2551 (for shift (lshift rshift)
2552 (simplify
2553 (shift @0 SSA_NAME@1)
2554 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
2555 (with {
2556 int width = ceil_log2 (element_precision (TREE_TYPE (@0)));
2557 int prec = TYPE_PRECISION (TREE_TYPE (@1));
2558 }
2559 (if ((get_nonzero_bits (@1) & wi::mask (width, false, prec)) == 0)
2560 @0)))))
2561 #endif
2562
2563 /* Rewrite an LROTATE_EXPR by a constant into an
2564 RROTATE_EXPR by a new constant. */
2565 (simplify
2566 (lrotate @0 INTEGER_CST@1)
2567 (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1),
2568 build_int_cst (TREE_TYPE (@1),
2569 element_precision (type)), @1); }))
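/* E.g. on a 32-bit type, a left rotate by 5 becomes a right rotate
by 32 - 5 = 27.  */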
2570
2571 /* Turn (a OP c1) OP c2 into a OP (c1+c2). */
2572 (for op (lrotate rrotate rshift lshift)
2573 (simplify
2574 (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
2575 (with { unsigned int prec = element_precision (type); }
2576 (if (wi::ge_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1)))
2577 && wi::lt_p (wi::to_wide (@1), prec, TYPE_SIGN (TREE_TYPE (@1)))
2578 && wi::ge_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))
2579 && wi::lt_p (wi::to_wide (@2), prec, TYPE_SIGN (TREE_TYPE (@2))))
2580 (with { unsigned int low = (tree_to_uhwi (@1)
2581 + tree_to_uhwi (@2)); }
2582 /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
2583 being well defined. */
2584 (if (low >= prec)
2585 (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
2586 (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
2587 (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
2588 { build_zero_cst (type); }
2589 (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
2590 (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))
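/* E.g. (X >> 2) >> 3 becomes X >> 5.  When the combined amount reaches the
precision, rotates reduce modulo the precision, logical shifts fold to
zero, and an arithmetic right shift is clamped to precision - 1.  */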
2591
2592
2593 /* ((1 << A) & 1) != 0 -> A == 0
2594 ((1 << A) & 1) == 0 -> A != 0 */
2595 (for cmp (ne eq)
2596 icmp (eq ne)
2597 (simplify
2598 (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop)
2599 (icmp @0 { build_zero_cst (TREE_TYPE (@0)); })))
2600
2601 /* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
2602 (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1)
2603 if CST2 != 0. */
2604 (for cmp (ne eq)
2605 (simplify
2606 (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
2607 (with { int cand = wi::ctz (wi::to_wide (@2)) - wi::ctz (wi::to_wide (@0)); }
2608 (if (cand < 0
2609 || (!integer_zerop (@2)
2610 && wi::lshift (wi::to_wide (@0), cand) != wi::to_wide (@2)))
2611 { constant_boolean_node (cmp == NE_EXPR, type); }
2612 (if (!integer_zerop (@2)
2613 && wi::lshift (wi::to_wide (@0), cand) == wi::to_wide (@2))
2614 (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
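/* E.g. (4 << A) == 32 becomes A == 3, since ctz (32) - ctz (4) = 3 and
4 << 3 == 32; (4 << A) == 33 folds to false as no shift amount works.  */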
2615
2616 /* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
2617 (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
2618 if the new mask might be further optimized. */
2619 (for shift (lshift rshift)
2620 (simplify
2621 (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
2622 INTEGER_CST@2)
2623 (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
2624 && TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
2625 && tree_fits_uhwi_p (@1)
2626 && tree_to_uhwi (@1) > 0
2627 && tree_to_uhwi (@1) < TYPE_PRECISION (type))
2628 (with
2629 {
2630 unsigned int shiftc = tree_to_uhwi (@1);
2631 unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
2632 unsigned HOST_WIDE_INT newmask, zerobits = 0;
2633 tree shift_type = TREE_TYPE (@3);
2634 unsigned int prec;
2635
2636 if (shift == LSHIFT_EXPR)
2637 zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
2638 else if (shift == RSHIFT_EXPR
2639 && type_has_mode_precision_p (shift_type))
2640 {
2641 prec = TYPE_PRECISION (TREE_TYPE (@3));
2642 tree arg00 = @0;
2643 /* See if more bits can be proven as zero because of
2644 zero extension. */
2645 if (@3 != @0
2646 && TYPE_UNSIGNED (TREE_TYPE (@0)))
2647 {
2648 tree inner_type = TREE_TYPE (@0);
2649 if (type_has_mode_precision_p (inner_type)
2650 && TYPE_PRECISION (inner_type) < prec)
2651 {
2652 prec = TYPE_PRECISION (inner_type);
2653 /* See if we can shorten the right shift. */
2654 if (shiftc < prec)
2655 shift_type = inner_type;
2656 /* Otherwise X >> C1 is all zeros, so we'll optimize
2657 it into (X, 0) later on by making sure zerobits
2658 is all ones. */
2659 }
2660 }
2661 zerobits = HOST_WIDE_INT_M1U;
2662 if (shiftc < prec)
2663 {
2664 zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
2665 zerobits <<= prec - shiftc;
2666 }
2667 /* For an arithmetic shift, if the sign bit could be set, zerobits
2668 can actually contain sign bits, so no transformation is
2669 possible, unless MASK masks them all away.  In that
2670 case the shift needs to be converted into a logical shift.  */
2671 if (!TYPE_UNSIGNED (TREE_TYPE (@3))
2672 && prec == TYPE_PRECISION (TREE_TYPE (@3)))
2673 {
2674 if ((mask & zerobits) == 0)
2675 shift_type = unsigned_type_for (TREE_TYPE (@3));
2676 else
2677 zerobits = 0;
2678 }
2679 }
2680 }
2681 /* ((X << 16) & 0xff00) is (X, 0). */
2682 (if ((mask & zerobits) == mask)
2683 { build_int_cst (type, 0); }
2684 (with { newmask = mask | zerobits; }
2685 (if (newmask != mask && (newmask & (newmask + 1)) == 0)
2686 (with
2687 {
2688 /* Only do the transformation if NEWMASK is some integer
2689 mode's mask. */
2690 for (prec = BITS_PER_UNIT;
2691 prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
2692 if (newmask == (HOST_WIDE_INT_1U << prec) - 1)
2693 break;
2694 }
2695 (if (prec < HOST_BITS_PER_WIDE_INT
2696 || newmask == HOST_WIDE_INT_M1U)
2697 (with
2698 { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
2699 (if (!tree_int_cst_equal (newmaskt, @2))
2700 (if (shift_type != TREE_TYPE (@3))
2701 (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
2702 (bit_and @4 { newmaskt; })))))))))))))
2703
2704 /* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
2705 (X {&,^,|} C2) >> C1 into (X >> C1) & (C2 >> C1). */
2706 (for shift (lshift rshift)
2707 (for bit_op (bit_and bit_xor bit_ior)
2708 (simplify
2709 (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
2710 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2711 (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
2712 (bit_op (shift (convert @0) @1) { mask; }))))))
2713
2714 /* ~(~X >> Y) -> X >> Y (for arithmetic shift). */
2715 (simplify
2716 (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
2717 (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
2718 && (element_precision (TREE_TYPE (@0))
2719 <= element_precision (TREE_TYPE (@1))
2720 || !TYPE_UNSIGNED (TREE_TYPE (@1))))
2721 (with
2722 { tree shift_type = TREE_TYPE (@0); }
2723 (convert (rshift (convert:shift_type @1) @2)))))
2724
2725 /* ~(~X >>r Y) -> X >>r Y
2726 ~(~X <<r Y) -> X <<r Y */
2727 (for rotate (lrotate rrotate)
2728 (simplify
2729 (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
2730 (if ((element_precision (TREE_TYPE (@0))
2731 <= element_precision (TREE_TYPE (@1))
2732 || !TYPE_UNSIGNED (TREE_TYPE (@1)))
2733 && (element_precision (type) <= element_precision (TREE_TYPE (@0))
2734 || !TYPE_UNSIGNED (TREE_TYPE (@0))))
2735 (with
2736 { tree rotate_type = TREE_TYPE (@0); }
2737 (convert (rotate (convert:rotate_type @1) @2))))))
2738
2739 /* Simplifications of conversions. */
2740
2741 /* Basic strip-useless-type-conversions / strip_nops. */
2742 (for cvt (convert view_convert float fix_trunc)
2743 (simplify
2744 (cvt @0)
2745 (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
2746 || (GENERIC && type == TREE_TYPE (@0)))
2747 @0)))
2748
2749 /* Contract view-conversions. */
2750 (simplify
2751 (view_convert (view_convert @0))
2752 (view_convert @0))
2753
2754 /* For integral conversions with the same precision or pointer
2755 conversions use a NOP_EXPR instead. */
2756 (simplify
2757 (view_convert @0)
2758 (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
2759 && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
2760 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
2761 (convert @0)))
2762
2763 /* Strip inner integral conversions that do not change precision or size, or
2764 zero-extend while keeping the same size (for bool-to-char). */
2765 (simplify
2766 (view_convert (convert@0 @1))
2767 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
2768 && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
2769 && TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))
2770 && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))
2771 || (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@1))
2772 && TYPE_UNSIGNED (TREE_TYPE (@1)))))
2773 (view_convert @1)))
2774
2775 /* Simplify a view-converted empty constructor. */
2776 (simplify
2777 (view_convert CONSTRUCTOR@0)
2778 (if (TREE_CODE (@0) != SSA_NAME
2779 && CONSTRUCTOR_NELTS (@0) == 0)
2780 { build_zero_cst (type); }))
2781
2782 /* Re-association barriers around constants and other re-association
2783 barriers can be removed. */
2784 (simplify
2785 (paren CONSTANT_CLASS_P@0)
2786 @0)
2787 (simplify
2788 (paren (paren@1 @0))
2789 @1)
2790
2791 /* Handle cases of two conversions in a row. */
2792 (for ocvt (convert float fix_trunc)
2793 (for icvt (convert float)
2794 (simplify
2795 (ocvt (icvt@1 @0))
2796 (with
2797 {
2798 tree inside_type = TREE_TYPE (@0);
2799 tree inter_type = TREE_TYPE (@1);
2800 int inside_int = INTEGRAL_TYPE_P (inside_type);
2801 int inside_ptr = POINTER_TYPE_P (inside_type);
2802 int inside_float = FLOAT_TYPE_P (inside_type);
2803 int inside_vec = VECTOR_TYPE_P (inside_type);
2804 unsigned int inside_prec = TYPE_PRECISION (inside_type);
2805 int inside_unsignedp = TYPE_UNSIGNED (inside_type);
2806 int inter_int = INTEGRAL_TYPE_P (inter_type);
2807 int inter_ptr = POINTER_TYPE_P (inter_type);
2808 int inter_float = FLOAT_TYPE_P (inter_type);
2809 int inter_vec = VECTOR_TYPE_P (inter_type);
2810 unsigned int inter_prec = TYPE_PRECISION (inter_type);
2811 int inter_unsignedp = TYPE_UNSIGNED (inter_type);
2812 int final_int = INTEGRAL_TYPE_P (type);
2813 int final_ptr = POINTER_TYPE_P (type);
2814 int final_float = FLOAT_TYPE_P (type);
2815 int final_vec = VECTOR_TYPE_P (type);
2816 unsigned int final_prec = TYPE_PRECISION (type);
2817 int final_unsignedp = TYPE_UNSIGNED (type);
2818 }
2819 (switch
2820 /* In addition to the cases of two conversions in a row
2821 handled below, if we are converting something to its own
2822 type via an object of identical or wider precision, neither
2823 conversion is needed. */
2824 (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
2825 || (GENERIC
2826 && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
2827 && (((inter_int || inter_ptr) && final_int)
2828 || (inter_float && final_float))
2829 && inter_prec >= final_prec)
2830 (ocvt @0))
2831
2832 /* Likewise, if the intermediate and initial types are either both
2833 float or both integer, we don't need the middle conversion if the
2834 former is wider than the latter and doesn't change the signedness
2835 (for integers). Avoid this if the final type is a pointer since
2836 then we sometimes need the middle conversion. */
2837 (if (((inter_int && inside_int) || (inter_float && inside_float))
2838 && (final_int || final_float)
2839 && inter_prec >= inside_prec
2840 && (inter_float || inter_unsignedp == inside_unsignedp))
2841 (ocvt @0))
2842
2843 /* If we have a sign-extension of a zero-extended value, we can
2844 replace that by a single zero-extension. Likewise if the
2845 final conversion does not change precision we can drop the
2846 intermediate conversion. */
2847 (if (inside_int && inter_int && final_int
2848 && ((inside_prec < inter_prec && inter_prec < final_prec
2849 && inside_unsignedp && !inter_unsignedp)
2850 || final_prec == inter_prec))
2851 (ocvt @0))
2852
2853 /* Two conversions in a row are not needed unless:
2854 - some conversion is floating-point (overstrict for now), or
2855 - some conversion is a vector (overstrict for now), or
2856 - the intermediate type is narrower than both initial and
2857 final, or
2858 - the intermediate type and innermost type differ in signedness,
2859 and the outermost type is wider than the intermediate, or
2860 - the initial type is a pointer type and the precisions of the
2861 intermediate and final types differ, or
2862 - the final type is a pointer type and the precisions of the
2863 initial and intermediate types differ. */
2864 (if (! inside_float && ! inter_float && ! final_float
2865 && ! inside_vec && ! inter_vec && ! final_vec
2866 && (inter_prec >= inside_prec || inter_prec >= final_prec)
2867 && ! (inside_int && inter_int
2868 && inter_unsignedp != inside_unsignedp
2869 && inter_prec < final_prec)
2870 && ((inter_unsignedp && inter_prec > inside_prec)
2871 == (final_unsignedp && final_prec > inter_prec))
2872 && ! (inside_ptr && inter_prec != final_prec)
2873 && ! (final_ptr && inside_prec != inter_prec))
2874 (ocvt @0))
2875
2876 /* A truncation to an unsigned type (a zero-extension) should be
2877 canonicalized as bitwise and of a mask. */
2878 (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion. */
2879 && final_int && inter_int && inside_int
2880 && final_prec == inside_prec
2881 && final_prec > inter_prec
2882 && inter_unsignedp)
2883 (convert (bit_and @0 { wide_int_to_tree
2884 (inside_type,
2885 wi::mask (inter_prec, false,
2886 TYPE_PRECISION (inside_type))); })))
2887
2888 /* If we are converting an integer to a floating-point type that can
2889 represent it exactly and back to an integer, we can skip the
2890 floating-point conversion.  */
2891 (if (GIMPLE /* PR66211 */
2892 && inside_int && inter_float && final_int &&
2893 (unsigned) significand_size (TYPE_MODE (inter_type))
2894 >= inside_prec - !inside_unsignedp)
2895 (convert @0)))))))
2896
2897 /* If we have a narrowing conversion to an integral type that is fed by a
2898 BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
2899 masks off bits outside the final type (and nothing else). */
2900 (simplify
2901 (convert (bit_and @0 INTEGER_CST@1))
2902 (if (INTEGRAL_TYPE_P (type)
2903 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
2904 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
2905 && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
2906 TYPE_PRECISION (type)), 0))
2907 (convert @0)))
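/* E.g. for a 32-bit unsigned int X, (unsigned short) (X & 0xffff) becomes
(unsigned short) X, since the mask only clears bits that the narrowing
conversion discards anyway.  */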
2908
2909
2910 /* (X /[ex] A) * A -> X. */
2911 (simplify
2912 (mult (convert1? (exact_div @0 @@1)) (convert2? @1))
2913 (convert @0))
2914
2915 /* Simplify (A / B) * B + (A % B) -> A. */
2916 (for div (trunc_div ceil_div floor_div round_div)
2917 mod (trunc_mod ceil_mod floor_mod round_mod)
2918 (simplify
2919 (plus:c (mult:c (div @0 @1) @1) (mod @0 @1))
2920 @0))
2921
2922 /* ((X /[ex] A) +- B) * A --> X +- A * B. */
2923 (for op (plus minus)
2924 (simplify
2925 (mult (convert1? (op (convert2? (exact_div @0 INTEGER_CST@@1)) INTEGER_CST@2)) @1)
2926 (if (tree_nop_conversion_p (type, TREE_TYPE (@2))
2927 && tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2)))
2928 (with
2929 {
2930 wi::overflow_type overflow;
2931 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
2932 TYPE_SIGN (type), &overflow);
2933 }
2934 (if (types_match (type, TREE_TYPE (@2))
2935 && types_match (TREE_TYPE (@0), TREE_TYPE (@2)) && !overflow)
2936 (op @0 { wide_int_to_tree (type, mul); })
2937 (with { tree utype = unsigned_type_for (type); }
2938 (convert (op (convert:utype @0)
2939 (mult (convert:utype @1) (convert:utype @2))))))))))
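/* E.g. if X is known to be an exact multiple of 4, ((X /[ex] 4) + 3) * 4
becomes X + 12, the product 4 * 3 having been folded without overflow.  */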
2940
2941 /* Canonicalization of binary operations. */
2942
2943 /* Convert X + -C into X - C. */
2944 (simplify
2945 (plus @0 REAL_CST@1)
2946 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
2947 (with { tree tem = const_unop (NEGATE_EXPR, type, @1); }
2948 (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
2949 (minus @0 { tem; })))))
2950
2951 /* Convert x+x into x*2. */
2952 (simplify
2953 (plus @0 @0)
2954 (if (SCALAR_FLOAT_TYPE_P (type))
2955 (mult @0 { build_real (type, dconst2); })
2956 (if (INTEGRAL_TYPE_P (type))
2957 (mult @0 { build_int_cst (type, 2); }))))
2958
2959 /* 0 - X -> -X. */
2960 (simplify
2961 (minus integer_zerop @1)
2962 (negate @1))
2963 (simplify
2964 (pointer_diff integer_zerop @1)
2965 (negate (convert @1)))
2966
2967 /* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether
2968 ARG0 is zero and X + ARG0 reduces to X, since that would mean
2969 (-ARG1 + ARG0) reduces to -ARG1. */
2970 (simplify
2971 (minus real_zerop@0 @1)
2972 (if (fold_real_zero_addition_p (type, @0, 0))
2973 (negate @1)))
2974
2975 /* Transform x * -1 into -x. */
2976 (simplify
2977 (mult @0 integer_minus_onep)
2978 (negate @0))
2979
2980 /* Reassociate (X * CST) * Y to (X * Y) * CST. This does not introduce
2981 signed overflow for CST != 0 && CST != -1. */
2982 (simplify
2983 (mult:c (mult:s@3 @0 INTEGER_CST@1) @2)
2984 (if (TREE_CODE (@2) != INTEGER_CST
2985 && single_use (@3)
2986 && !integer_zerop (@1) && !integer_minus_onep (@1))
2987 (mult (mult @0 @2) @1)))
2988
2989 /* True if we can easily extract the real and imaginary parts of a complex
2990 number. */
2991 (match compositional_complex
2992 (convert? (complex @0 @1)))
2993
2994 /* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */
2995 (simplify
2996 (complex (realpart @0) (imagpart @0))
2997 @0)
2998 (simplify
2999 (realpart (complex @0 @1))
3000 @0)
3001 (simplify
3002 (imagpart (complex @0 @1))
3003 @1)
3004
3005 /* Sometimes we only care about half of a complex expression. */
3006 (simplify
3007 (realpart (convert?:s (conj:s @0)))
3008 (convert (realpart @0)))
3009 (simplify
3010 (imagpart (convert?:s (conj:s @0)))
3011 (convert (negate (imagpart @0))))
3012 (for part (realpart imagpart)
3013 (for op (plus minus)
3014 (simplify
3015 (part (convert?:s@2 (op:s @0 @1)))
3016 (convert (op (part @0) (part @1))))))
3017 (simplify
3018 (realpart (convert?:s (CEXPI:s @0)))
3019 (convert (COS @0)))
3020 (simplify
3021 (imagpart (convert?:s (CEXPI:s @0)))
3022 (convert (SIN @0)))
3023
3024 /* conj(conj(x)) -> x */
3025 (simplify
3026 (conj (convert? (conj @0)))
3027 (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
3028 (convert @0)))
3029
3030 /* conj({x,y}) -> {x,-y} */
3031 (simplify
3032 (conj (convert?:s (complex:s @0 @1)))
3033 (with { tree itype = TREE_TYPE (type); }
3034 (complex (convert:itype @0) (negate (convert:itype @1)))))
3035
3036 /* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. */
3037 (for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
3038 (simplify
3039 (bswap (bswap @0))
3040 @0)
3041 (simplify
3042 (bswap (bit_not (bswap @0)))
3043 (bit_not @0))
3044 (for bitop (bit_xor bit_ior bit_and)
3045 (simplify
3046 (bswap (bitop:c (bswap @0) @1))
3047 (bitop @0 (bswap @1)))))
3048
3049
3050 /* Combine COND_EXPRs and VEC_COND_EXPRs. */
3051
3052 /* Simplify constant conditions.
3053 Only optimize constant conditions when the selected branch
3054 has the same type as the COND_EXPR. This avoids optimizing
3055 away "c ? x : throw", where the throw has a void type.
3056 Note that we cannot throw away the fold-const.c variant nor
3057 this one, as we depend on doing this transform before
3058 A ? B : B -> B possibly triggers, and the fold-const.c one can
3059 optimize 0 ? A : B to B even if A has side-effects, something
3060 genmatch cannot handle.  */
3061 (simplify
3062 (cond INTEGER_CST@0 @1 @2)
3063 (if (integer_zerop (@0))
3064 (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
3065 @2)
3066 (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
3067 @1)))
3068 (simplify
3069 (vec_cond VECTOR_CST@0 @1 @2)
3070 (if (integer_all_onesp (@0))
3071 @1
3072 (if (integer_zerop (@0))
3073 @2)))
3074
3075 /* Sink unary operations to constant branches, but only if we can fold
3076 them to constants.  */
3077 (for op (negate bit_not abs absu)
3078 (simplify
3079 (op (vec_cond @0 VECTOR_CST@1 VECTOR_CST@2))
3080 (with
3081 {
3082 tree cst1, cst2;
3083 cst1 = const_unop (op, type, @1);
3084 if (cst1)
3085 cst2 = const_unop (op, type, @2);
3086 }
3087 (if (cst1 && cst2)
3088 (vec_cond @0 { cst1; } { cst2; })))))
3089
3090 /* Simplification moved from fold_cond_expr_with_comparison. It may also
3091 be extended. */
3092 /* This pattern implements two kinds of simplification:
3093
3094 Case 1)
3095 (cond (cmp (convert1? x) c1) (convert2? x) c2) -> (minmax x c) if:
3096 1) The conversions are type-widening from a smaller type.
3097 2) Const c1 equals c2 after canonicalizing the comparison.
3098 3) The comparison has tree code LT, LE, GT or GE.
3099 This specific pattern is needed when (cmp (convert x) c) may not
3100 be simplified by comparison patterns because of multiple uses of
3101 x.  It also makes sense here because simplifying across multiply-
3102 referenced variables is always beneficial for complicated cases.
3103
3104 Case 2)
3105 (cond (eq (convert1? x) c1) (convert2? x) c2) -> (cond (eq x c1) c1 c2). */
3106 (for cmp (lt le gt ge eq)
3107 (simplify
3108 (cond (cmp (convert1? @1) INTEGER_CST@3) (convert2? @1) INTEGER_CST@2)
3109 (with
3110 {
3111 tree from_type = TREE_TYPE (@1);
3112 tree c1_type = TREE_TYPE (@3), c2_type = TREE_TYPE (@2);
3113 enum tree_code code = ERROR_MARK;
3114
3115 if (INTEGRAL_TYPE_P (from_type)
3116 && int_fits_type_p (@2, from_type)
3117 && (types_match (c1_type, from_type)
3118 || (TYPE_PRECISION (c1_type) > TYPE_PRECISION (from_type)
3119 && (TYPE_UNSIGNED (from_type)
3120 || TYPE_SIGN (c1_type) == TYPE_SIGN (from_type))))
3121 && (types_match (c2_type, from_type)
3122 || (TYPE_PRECISION (c2_type) > TYPE_PRECISION (from_type)
3123 && (TYPE_UNSIGNED (from_type)
3124 || TYPE_SIGN (c2_type) == TYPE_SIGN (from_type)))))
3125 {
3126 if (cmp != EQ_EXPR)
3127 {
3128 if (wi::to_widest (@3) == (wi::to_widest (@2) - 1))
3129 {
3130 /* X <= Y - 1 equals to X < Y. */
3131 if (cmp == LE_EXPR)
3132 code = LT_EXPR;
3133 /* X > Y - 1 equals to X >= Y. */
3134 if (cmp == GT_EXPR)
3135 code = GE_EXPR;
3136 }
3137 if (wi::to_widest (@3) == (wi::to_widest (@2) + 1))
3138 {
3139 /* X < Y + 1 equals to X <= Y. */
3140 if (cmp == LT_EXPR)
3141 code = LE_EXPR;
3142 /* X >= Y + 1 equals to X > Y. */
3143 if (cmp == GE_EXPR)
3144 code = GT_EXPR;
3145 }
3146 if (code != ERROR_MARK
3147 || wi::to_widest (@2) == wi::to_widest (@3))
3148 {
3149 if (cmp == LT_EXPR || cmp == LE_EXPR)
3150 code = MIN_EXPR;
3151 if (cmp == GT_EXPR || cmp == GE_EXPR)
3152 code = MAX_EXPR;
3153 }
3154 }
3155 /* Can do A == C1 ? A : C2 -> A == C1 ? C1 : C2? */
3156 else if (int_fits_type_p (@3, from_type))
3157 code = EQ_EXPR;
3158 }
3159 }
3160 (if (code == MAX_EXPR)
3161 (convert (max @1 (convert @2)))
3162 (if (code == MIN_EXPR)
3163 (convert (min @1 (convert @2)))
3164 (if (code == EQ_EXPR)
3165 (convert (cond (eq @1 (convert @3))
3166 (convert:from_type @3) (convert:from_type @2)))))))))
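/* E.g. with no conversions involved, X < 100 ? X : 100 becomes
MIN (X, 100), and X <= 99 ? X : 100 reaches the same form after the
boundary adjustment of LE to LT described above.  */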
3167
3168 /* (cond (cmp (convert? x) c1) (op x c2) c3) -> (op (minmax x c1) c2) if:
3169
3170 1) OP is PLUS or MINUS.
3171 2) CMP is LT, LE, GT or GE.
3172 3) C3 == (C1 op C2), and computation doesn't have undefined behavior.
3173
3174 This pattern also handles special cases like:
3175
3176 A) Operand x is an unsigned-to-signed type conversion and c1 is
3177 integer zero. In this case,
3178 (signed type)x < 0 <=> x > MAX_VAL(signed type)
3179 (signed type)x >= 0 <=> x <= MAX_VAL(signed type)
3180 B) Const c1 may not equal (C3 op' C2).  In this case we also
3181 check equality for (c1+1) and (c1-1) by adjusting the comparison
3182 code.
3183
3184 TODO: Though signed types are handled by this pattern, they cannot be
3185 simplified at the moment because the C standard requires additional
3186 type promotion.  In order to match & simplify them here, the IR needs
3187 to be cleaned up by other optimizers, i.e., VRP.  */
3188 (for op (plus minus)
3189 (for cmp (lt le gt ge)
3190 (simplify
3191 (cond (cmp (convert? @X) INTEGER_CST@1) (op @X INTEGER_CST@2) INTEGER_CST@3)
3192 (with { tree from_type = TREE_TYPE (@X), to_type = TREE_TYPE (@1); }
3193 (if (types_match (from_type, to_type)
3194 /* Check if it is special case A). */
3195 || (TYPE_UNSIGNED (from_type)
3196 && !TYPE_UNSIGNED (to_type)
3197 && TYPE_PRECISION (from_type) == TYPE_PRECISION (to_type)
3198 && integer_zerop (@1)
3199 && (cmp == LT_EXPR || cmp == GE_EXPR)))
3200 (with
3201 {
3202 wi::overflow_type overflow = wi::OVF_NONE;
3203 enum tree_code code, cmp_code = cmp;
3204 wide_int real_c1;
3205 wide_int c1 = wi::to_wide (@1);
3206 wide_int c2 = wi::to_wide (@2);
3207 wide_int c3 = wi::to_wide (@3);
3208 signop sgn = TYPE_SIGN (from_type);
3209
3210 /* Handle special case A), given x of unsigned type:
3211 ((signed type)x < 0) <=> (x > MAX_VAL(signed type))
3212 ((signed type)x >= 0) <=> (x <= MAX_VAL(signed type)) */
3213 if (!types_match (from_type, to_type))
3214 {
3215 if (cmp_code == LT_EXPR)
3216 cmp_code = GT_EXPR;
3217 if (cmp_code == GE_EXPR)
3218 cmp_code = LE_EXPR;
3219 c1 = wi::max_value (to_type);
3220 }
3221 /* To simplify this pattern, we require c3 = (c1 op c2). Here we
3222 compute (c3 op' c2) and check if it equals c1, with op' being
3223 the inverted operator of op. Make sure overflow doesn't happen
3224 if it is undefined. */
3225 if (op == PLUS_EXPR)
3226 real_c1 = wi::sub (c3, c2, sgn, &overflow);
3227 else
3228 real_c1 = wi::add (c3, c2, sgn, &overflow);
3229
3230 code = cmp_code;
3231 if (!overflow || !TYPE_OVERFLOW_UNDEFINED (from_type))
3232 {
3233 /* Check if c1 equals real_c1. The boundary condition is handled
3234 by adjusting the comparison operation if necessary. */
3235 if (!wi::cmp (wi::sub (real_c1, 1, sgn, &overflow), c1, sgn)
3236 && !overflow)
3237 {
3238 /* X <= Y - 1 is equivalent to X < Y. */
3239 if (cmp_code == LE_EXPR)
3240 code = LT_EXPR;
3241 /* X > Y - 1 is equivalent to X >= Y. */
3242 if (cmp_code == GT_EXPR)
3243 code = GE_EXPR;
3244 }
3245 if (!wi::cmp (wi::add (real_c1, 1, sgn, &overflow), c1, sgn)
3246 && !overflow)
3247 {
3248 /* X < Y + 1 is equivalent to X <= Y. */
3249 if (cmp_code == LT_EXPR)
3250 code = LE_EXPR;
3251 /* X >= Y + 1 is equivalent to X > Y. */
3252 if (cmp_code == GE_EXPR)
3253 code = GT_EXPR;
3254 }
3255 if (code != cmp_code || !wi::cmp (real_c1, c1, sgn))
3256 {
3257 if (cmp_code == LT_EXPR || cmp_code == LE_EXPR)
3258 code = MIN_EXPR;
3259 if (cmp_code == GT_EXPR || cmp_code == GE_EXPR)
3260 code = MAX_EXPR;
3261 }
3262 }
3263 }
3264 (if (code == MAX_EXPR)
3265 (op (max @X { wide_int_to_tree (from_type, real_c1); })
3266 { wide_int_to_tree (from_type, c2); })
3267 (if (code == MIN_EXPR)
3268 (op (min @X { wide_int_to_tree (from_type, real_c1); })
3269 { wide_int_to_tree (from_type, c2); })))))))))
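/* Concretely (a sketch, assuming a plain int x; the constants are
   small, so none of the constant arithmetic overflows):

     x < 3 ? x + 2 : 5   ->  MIN (x, 3) + 2

   which satisfies the requirement c3 == (c1 op c2), i.e. 5 == 3 + 2.  */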
3270
3271 (for cnd (cond vec_cond)
3272 /* A ? B : (A ? X : C) -> A ? B : C. */
3273 (simplify
3274 (cnd @0 (cnd @0 @1 @2) @3)
3275 (cnd @0 @1 @3))
3276 (simplify
3277 (cnd @0 @1 (cnd @0 @2 @3))
3278 (cnd @0 @1 @3))
3279 /* A ? B : (!A ? C : X) -> A ? B : C. */
3280 /* ??? This matches embedded conditions open-coded because genmatch
3281 would generate matching code for conditions in separate stmts only.
3282 The following is still important for merging the then and else arm
3283 cases coming from if-conversion. */
3284 (simplify
3285 (cnd @0 @1 (cnd @2 @3 @4))
3286 (if (inverse_conditions_p (@0, @2))
3287 (cnd @0 @1 @3)))
3288 (simplify
3289 (cnd @0 (cnd @1 @2 @3) @4)
3290 (if (inverse_conditions_p (@0, @1))
3291 (cnd @0 @3 @4)))
3292
3293 /* A ? B : B -> B. */
3294 (simplify
3295 (cnd @0 @1 @1)
3296 @1)
3297
3298 /* !A ? B : C -> A ? C : B. */
3299 (simplify
3300 (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
3301 (cnd @0 @2 @1)))
3302
3303 /* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons
3304 return all -1 or all 0 results. */
3305 /* ??? We could instead convert all instances of the vec_cond to negate,
3306 but that isn't necessarily a win on its own. */
3307 (simplify
3308 (plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
3309 (if (VECTOR_TYPE_P (type)
3310 && known_eq (TYPE_VECTOR_SUBPARTS (type),
3311 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
3312 && (TYPE_MODE (TREE_TYPE (type))
3313 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
3314 (minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
3315
3316 /* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0). */
3317 (simplify
3318 (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
3319 (if (VECTOR_TYPE_P (type)
3320 && known_eq (TYPE_VECTOR_SUBPARTS (type),
3321 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
3322 && (TYPE_MODE (TREE_TYPE (type))
3323 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
3324 (plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
3325
3326
3327 /* Simplifications of comparisons. */
3328
3329 /* See if we can reduce the magnitude of a constant involved in a
3330 comparison by changing the comparison code. This is a canonicalization
3331 formerly done by maybe_canonicalize_comparison_1. */
3332 (for cmp (le gt)
3333 acmp (lt ge)
3334 (simplify
3335 (cmp @0 uniform_integer_cst_p@1)
3336 (with { tree cst = uniform_integer_cst_p (@1); }
3337 (if (tree_int_cst_sgn (cst) == -1)
3338 (acmp @0 { build_uniform_cst (TREE_TYPE (@1),
3339 wide_int_to_tree (TREE_TYPE (cst),
3340 wi::to_wide (cst)
3341 + 1)); })))))
3342 (for cmp (ge lt)
3343 acmp (gt le)
3344 (simplify
3345 (cmp @0 uniform_integer_cst_p@1)
3346 (with { tree cst = uniform_integer_cst_p (@1); }
3347 (if (tree_int_cst_sgn (cst) == 1)
3348 (acmp @0 { build_uniform_cst (TREE_TYPE (@1),
3349 wide_int_to_tree (TREE_TYPE (cst),
3350 wi::to_wide (cst) - 1)); })))))
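/* E.g., for a plain int x these canonicalizations give

     x <= -5  ->  x < -4
     x >= 5   ->  x > 4

   reducing the magnitude of the constant by one in each case.  */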
3351
3352 /* We can simplify a logical negation of a comparison to the
3353 inverted comparison. As we cannot compute an expression
3354 operator using invert_tree_comparison we have to simulate
3355 that with expression code iteration. */
3356 (for cmp (tcc_comparison)
3357 icmp (inverted_tcc_comparison)
3358 ncmp (inverted_tcc_comparison_with_nans)
3359 /* Ideally we'd like to combine the following two patterns
3360 and handle some more cases by using
3361 (logical_inverted_value (cmp @0 @1))
3362 here but for that genmatch would need to "inline" that.
3363 For now implement what forward_propagate_comparison did. */
3364 (simplify
3365 (bit_not (cmp @0 @1))
3366 (if (VECTOR_TYPE_P (type)
3367 || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
3368 /* Comparison inversion may be impossible for trapping math,
3369 invert_tree_comparison will tell us. But we can't use
3370 a computed operator in the replacement tree thus we have
3371 to play the trick below. */
3372 (with { enum tree_code ic = invert_tree_comparison
3373 (cmp, HONOR_NANS (@0)); }
3374 (if (ic == icmp)
3375 (icmp @0 @1)
3376 (if (ic == ncmp)
3377 (ncmp @0 @1))))))
3378 (simplify
3379 (bit_xor (cmp @0 @1) integer_truep)
3380 (with { enum tree_code ic = invert_tree_comparison
3381 (cmp, HONOR_NANS (@0)); }
3382 (if (ic == icmp)
3383 (icmp @0 @1)
3384 (if (ic == ncmp)
3385 (ncmp @0 @1))))))
3386
3387 /* Transform comparisons of the form X - Y CMP 0 to X CMP Y.
3388 ??? The transformation is valid for the other operators if overflow
3389 is undefined for the type, but performing it here badly interacts
3390 with the transformation in fold_cond_expr_with_comparison which
3391 attempts to synthesize ABS_EXPR. */
3392 (for cmp (eq ne)
3393 (for sub (minus pointer_diff)
3394 (simplify
3395 (cmp (sub@2 @0 @1) integer_zerop)
3396 (if (single_use (@2))
3397 (cmp @0 @1)))))
3398
3399 /* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the
3400 signed arithmetic case. That form is created by the compiler
3401 often enough for folding it to be of value. One example is in
3402 computing loop trip counts after Operator Strength Reduction. */
3403 (for cmp (simple_comparison)
3404 scmp (swapped_simple_comparison)
3405 (simplify
3406 (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2)
3407 /* Handle unfolded multiplication by zero. */
3408 (if (integer_zerop (@1))
3409 (cmp @1 @2)
3410 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3411 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
3412 && single_use (@3))
3413 /* If @1 is negative we swap the sense of the comparison. */
3414 (if (tree_int_cst_sgn (@1) < 0)
3415 (scmp @0 @2)
3416 (cmp @0 @2))))))
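/* For instance, assuming int x with undefined signed overflow and a
   single use of the product (a sketch):

     x * 4 > 0    ->  x > 0
     x * -4 > 0   ->  x < 0

   A negative multiplier swaps the sense of the comparison.  */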
3417
3418 /* Simplify comparison of something with itself. For IEEE
3419 floating-point, we can only do some of these simplifications. */
3420 (for cmp (eq ge le)
3421 (simplify
3422 (cmp @0 @0)
3423 (if (! FLOAT_TYPE_P (TREE_TYPE (@0))
3424 || ! HONOR_NANS (@0))
3425 { constant_boolean_node (true, type); }
3426 (if (cmp != EQ_EXPR)
3427 (eq @0 @0)))))
3428 (for cmp (ne gt lt)
3429 (simplify
3430 (cmp @0 @0)
3431 (if (cmp != NE_EXPR
3432 || ! FLOAT_TYPE_P (TREE_TYPE (@0))
3433 || ! HONOR_NANS (@0))
3434 { constant_boolean_node (false, type); })))
3435 (for cmp (unle unge uneq)
3436 (simplify
3437 (cmp @0 @0)
3438 { constant_boolean_node (true, type); }))
3439 (for cmp (unlt ungt)
3440 (simplify
3441 (cmp @0 @0)
3442 (unordered @0 @0)))
3443 (simplify
3444 (ltgt @0 @0)
3445 (if (!flag_trapping_math)
3446 { constant_boolean_node (false, type); }))
3447
3448 /* Fold ~X op ~Y as Y op X. */
3449 (for cmp (simple_comparison)
3450 (simplify
3451 (cmp (bit_not@2 @0) (bit_not@3 @1))
3452 (if (single_use (@2) && single_use (@3))
3453 (cmp @1 @0))))
3454
3455 /* Fold ~X op C as X op' ~C, where op' is the swapped comparison. */
3456 (for cmp (simple_comparison)
3457 scmp (swapped_simple_comparison)
3458 (simplify
3459 (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1)
3460 (if (single_use (@2)
3461 && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST))
3462 (scmp @0 (bit_not @1)))))
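/* E.g., for plain int x and y (two's complement, so ~5 == -6),
   assuming the BIT_NOTs have no other uses:

     ~x < ~y  ->  y < x
     ~x < 5   ->  x > -6
*/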
3463
3464 (for cmp (simple_comparison)
3465 /* Fold (double)float1 CMP (double)float2 into float1 CMP float2. */
3466 (simplify
3467 (cmp (convert@2 @0) (convert? @1))
3468 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3469 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
3470 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
3471 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
3472 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))))
3473 (with
3474 {
3475 tree type1 = TREE_TYPE (@1);
3476 if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
3477 {
3478 REAL_VALUE_TYPE orig = TREE_REAL_CST (@1);
3479 if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
3480 && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
3481 type1 = float_type_node;
3482 if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
3483 && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
3484 type1 = double_type_node;
3485 }
3486 tree newtype
3487 = (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1)
3488 ? TREE_TYPE (@0) : type1);
3489 }
3490 (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype))
3491 (cmp (convert:newtype @0) (convert:newtype @1))))))
3492
3493 (simplify
3494 (cmp @0 REAL_CST@1)
3495 /* IEEE doesn't distinguish +0 and -0 in comparisons. */
3496 (switch
3497 /* a CMP (-0) -> a CMP 0 */
3498 (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
3499 (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
3500 /* x != NaN is always true, other ops are always false. */
3501 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
3502 && ! HONOR_SNANS (@1))
3503 { constant_boolean_node (cmp == NE_EXPR, type); })
3504 /* Fold comparisons against infinity. */
3505 (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
3506 && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
3507 (with
3508 {
3509 REAL_VALUE_TYPE max;
3510 enum tree_code code = cmp;
3511 bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
3512 if (neg)
3513 code = swap_tree_comparison (code);
3514 }
3515 (switch
3516 /* x > +Inf is always false, if we ignore NaNs or exceptions. */
3517 (if (code == GT_EXPR
3518 && !(HONOR_NANS (@0) && flag_trapping_math))
3519 { constant_boolean_node (false, type); })
3520 (if (code == LE_EXPR)
3521 /* x <= +Inf is always true, if we don't care about NaNs. */
3522 (if (! HONOR_NANS (@0))
3523 { constant_boolean_node (true, type); }
3524 /* x <= +Inf is the same as x == x, i.e. !isnan(x), but this loses
3525 an "invalid" exception. */
3526 (if (!flag_trapping_math)
3527 (eq @0 @0))))
3528 /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX, but
3529 for == this introduces an exception for x a NaN. */
3530 (if ((code == EQ_EXPR && !(HONOR_NANS (@0) && flag_trapping_math))
3531 || code == GE_EXPR)
3532 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3533 (if (neg)
3534 (lt @0 { build_real (TREE_TYPE (@0), max); })
3535 (gt @0 { build_real (TREE_TYPE (@0), max); }))))
3536 /* x < +Inf is always equal to x <= DBL_MAX. */
3537 (if (code == LT_EXPR)
3538 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3539 (if (neg)
3540 (ge @0 { build_real (TREE_TYPE (@0), max); })
3541 (le @0 { build_real (TREE_TYPE (@0), max); }))))
3542 /* x != +Inf is always equal to !(x > DBL_MAX), but this introduces
3543 an exception for x a NaN so use an unordered comparison. */
3544 (if (code == NE_EXPR)
3545 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3546 (if (! HONOR_NANS (@0))
3547 (if (neg)
3548 (ge @0 { build_real (TREE_TYPE (@0), max); })
3549 (le @0 { build_real (TREE_TYPE (@0), max); }))
3550 (if (neg)
3551 (unge @0 { build_real (TREE_TYPE (@0), max); })
3552 (unle @0 { build_real (TREE_TYPE (@0), max); }))))))))))
3553
3554 /* If this is a comparison of a real constant with a PLUS_EXPR
3555 or a MINUS_EXPR of a real constant, we can convert it into a
3556 comparison with a revised real constant as long as no overflow
3557 occurs when unsafe_math_optimizations are enabled. */
3558 (if (flag_unsafe_math_optimizations)
3559 (for op (plus minus)
3560 (simplify
3561 (cmp (op @0 REAL_CST@1) REAL_CST@2)
3562 (with
3563 {
3564 tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
3565 TREE_TYPE (@1), @2, @1);
3566 }
3567 (if (tem && !TREE_OVERFLOW (tem))
3568 (cmp @0 { tem; }))))))
3569
3570 /* Likewise, we can simplify a comparison of a real constant with
3571 a MINUS_EXPR whose first operand is also a real constant, i.e.
3572 (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
3573 floating-point types only if -fassociative-math is set. */
3574 (if (flag_associative_math)
3575 (simplify
3576 (cmp (minus REAL_CST@0 @1) REAL_CST@2)
3577 (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
3578 (if (tem && !TREE_OVERFLOW (tem))
3579 (cmp { tem; } @1)))))
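/* As an illustration (a sketch; the first rewrite needs
   -funsafe-math-optimizations, the second -fassociative-math, and
   both need the adjusted constant not to overflow):

     x + 1.0 > 3.0    ->  x > 2.0
     10.0 - x < 4.0   ->  6.0 < x
*/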
3580
3581 /* Fold comparisons against built-in math functions. */
3582 (if (flag_unsafe_math_optimizations
3583 && ! flag_errno_math)
3584 (for sq (SQRT)
3585 (simplify
3586 (cmp (sq @0) REAL_CST@1)
3587 (switch
3588 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
3589 (switch
3590 /* sqrt(x) < y is always false, if y is negative. */
3591 (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
3592 { constant_boolean_node (false, type); })
3593 /* sqrt(x) > y is always true, if y is negative and we
3594 don't care about NaNs, i.e. negative values of x. */
3595 (if (cmp == NE_EXPR || !HONOR_NANS (@0))
3596 { constant_boolean_node (true, type); })
3597 /* sqrt(x) > y is the same as x >= 0, if y is negative. */
3598 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
3599 (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0))
3600 (switch
3601 /* sqrt(x) < 0 is always false. */
3602 (if (cmp == LT_EXPR)
3603 { constant_boolean_node (false, type); })
3604 /* sqrt(x) >= 0 is always true if we don't care about NaNs. */
3605 (if (cmp == GE_EXPR && !HONOR_NANS (@0))
3606 { constant_boolean_node (true, type); })
3607 /* sqrt(x) <= 0 -> x == 0. */
3608 (if (cmp == LE_EXPR)
3609 (eq @0 @1))
3610 /* Otherwise sqrt(x) cmp 0 -> x cmp 0. Here cmp can be >=, >,
3611 == or !=. In the last case:
3612
3613 (sqrt(x) != 0) == (NaN != 0) == true == (x != 0)
3614
3615 if x is negative or NaN. Due to -funsafe-math-optimizations,
3616 the results for other x follow from natural arithmetic. */
3617 (cmp @0 @1)))
3618 (if (cmp == GT_EXPR || cmp == GE_EXPR)
3619 (with
3620 {
3621 REAL_VALUE_TYPE c2;
3622 real_arithmetic (&c2, MULT_EXPR,
3623 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
3624 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
3625 }
3626 (if (REAL_VALUE_ISINF (c2))
3627 /* sqrt(x) > y is x == +Inf, when y is very large. */
3628 (if (HONOR_INFINITIES (@0))
3629 (eq @0 { build_real (TREE_TYPE (@0), c2); })
3630 { constant_boolean_node (false, type); })
3631 /* sqrt(x) > c is the same as x > c*c. */
3632 (cmp @0 { build_real (TREE_TYPE (@0), c2); }))))
3633 (if (cmp == LT_EXPR || cmp == LE_EXPR)
3634 (with
3635 {
3636 REAL_VALUE_TYPE c2;
3637 real_arithmetic (&c2, MULT_EXPR,
3638 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
3639 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
3640 }
3641 (if (REAL_VALUE_ISINF (c2))
3642 (switch
3643 /* sqrt(x) < y is always true, when y is a very large
3644 value and we don't care about NaNs or Infinities. */
3645 (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
3646 { constant_boolean_node (true, type); })
3647 /* sqrt(x) < y is x != +Inf when y is very large and we
3648 don't care about NaNs. */
3649 (if (! HONOR_NANS (@0))
3650 (ne @0 { build_real (TREE_TYPE (@0), c2); }))
3651 /* sqrt(x) < y is x >= 0 when y is very large and we
3652 don't care about Infinities. */
3653 (if (! HONOR_INFINITIES (@0))
3654 (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
3655 /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */
3656 (if (GENERIC)
3657 (truth_andif
3658 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
3659 (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
3660 /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */
3661 (if (! HONOR_NANS (@0))
3662 (cmp @0 { build_real (TREE_TYPE (@0), c2); })
3663 /* sqrt(x) < c is the same as x >= 0 && x < c*c. */
3664 (if (GENERIC)
3665 (truth_andif
3666 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
3667 (cmp @0 { build_real (TREE_TYPE (@0), c2); })))))))))
3668 /* Transform sqrt(x) cmp sqrt(y) -> x cmp y. */
3669 (simplify
3670 (cmp (sq @0) (sq @1))
3671 (if (! HONOR_NANS (@0))
3672 (cmp @0 @1))))))
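/* For example, given double x under -funsafe-math-optimizations
   with -fno-math-errno (a sketch of the common cases above):

     sqrt (x) > 3.0       ->  x > 9.0
     sqrt (x) < -1.0      ->  false
     sqrt (x) > sqrt (y)  ->  x > y   (if NaNs need not be honored)
*/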
3673
3674 /* Optimize various special cases of (FTYPE) N CMP (FTYPE) M. */
3675 (for cmp (lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
3676 icmp (lt le eq ne ge gt unordered ordered lt le gt ge eq ne)
3677 (simplify
3678 (cmp (float@0 @1) (float @2))
3679 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@0))
3680 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
3681 (with
3682 {
3683 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@0))));
3684 tree type1 = TREE_TYPE (@1);
3685 bool type1_signed_p = TYPE_SIGN (type1) == SIGNED;
3686 tree type2 = TREE_TYPE (@2);
3687 bool type2_signed_p = TYPE_SIGN (type2) == SIGNED;
3688 }
3689 (if (fmt.can_represent_integral_type_p (type1)
3690 && fmt.can_represent_integral_type_p (type2))
3691 (if (cmp == ORDERED_EXPR || cmp == UNORDERED_EXPR)
3692 { constant_boolean_node (cmp == ORDERED_EXPR, type); }
3693 (if (TYPE_PRECISION (type1) > TYPE_PRECISION (type2)
3694 && type1_signed_p >= type2_signed_p)
3695 (icmp @1 (convert @2))
3696 (if (TYPE_PRECISION (type1) < TYPE_PRECISION (type2)
3697 && type1_signed_p <= type2_signed_p)
3698 (icmp (convert:type2 @1) @2)
3699 (if (TYPE_PRECISION (type1) == TYPE_PRECISION (type2)
3700 && type1_signed_p == type2_signed_p)
3701 (icmp @1 @2))))))))))
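/* For instance, with 32-bit int i and j compared via IEEE double,
   which represents every 32-bit integer exactly:

     (double) i < (double) j   ->  i < j
     (double) i == (double) j  ->  i == j

   No rounding can occur in the conversions, so the integer compare
   is exact.  */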
3702
3703 /* Optimize various special cases of (FTYPE) N CMP CST. */
3704 (for cmp (lt le eq ne ge gt)
3705 icmp (le le eq ne ge ge)
3706 (simplify
3707 (cmp (float @0) REAL_CST@1)
3708 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@1))
3709 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1)))
3710 (with
3711 {
3712 tree itype = TREE_TYPE (@0);
3713 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@1))));
3714 const REAL_VALUE_TYPE *cst = TREE_REAL_CST_PTR (@1);
3715 /* Be careful to preserve any potential exceptions due to
3716 NaNs. qNaNs are ok in == or != context.
3717 TODO: relax under -fno-trapping-math or
3718 -fno-signaling-nans. */
3719 bool exception_p
3720 = real_isnan (cst) && (cst->signalling
3721 || (cmp != EQ_EXPR && cmp != NE_EXPR));
3722 }
3723 /* TODO: allow non-fitting itype and SNaNs when
3724 -fno-trapping-math. */
3725 (if (fmt.can_represent_integral_type_p (itype) && ! exception_p)
3726 (with
3727 {
3728 signop isign = TYPE_SIGN (itype);
3729 REAL_VALUE_TYPE imin, imax;
3730 real_from_integer (&imin, fmt, wi::min_value (itype), isign);
3731 real_from_integer (&imax, fmt, wi::max_value (itype), isign);
3732
3733 REAL_VALUE_TYPE icst;
3734 if (cmp == GT_EXPR || cmp == GE_EXPR)
3735 real_ceil (&icst, fmt, cst);
3736 else if (cmp == LT_EXPR || cmp == LE_EXPR)
3737 real_floor (&icst, fmt, cst);
3738 else
3739 real_trunc (&icst, fmt, cst);
3740
3741 bool cst_int_p = !real_isnan (cst) && real_identical (&icst, cst);
3742
3743 bool overflow_p = false;
3744 wide_int icst_val
3745 = real_to_integer (&icst, &overflow_p, TYPE_PRECISION (itype));
3746 }
3747 (switch
3748 /* Optimize cases when CST is outside of ITYPE's range. */
3749 (if (real_compare (LT_EXPR, cst, &imin))
3750 { constant_boolean_node (cmp == GT_EXPR || cmp == GE_EXPR || cmp == NE_EXPR,
3751 type); })
3752 (if (real_compare (GT_EXPR, cst, &imax))
3753 { constant_boolean_node (cmp == LT_EXPR || cmp == LE_EXPR || cmp == NE_EXPR,
3754 type); })
3755 /* Remove cast if CST is an integer representable by ITYPE. */
3756 (if (cst_int_p)
3757 (cmp @0 { gcc_assert (!overflow_p);
3758 wide_int_to_tree (itype, icst_val); })
3759 )
3760 /* When CST is fractional, optimize
3761 (FTYPE) N == CST -> 0
3762 (FTYPE) N != CST -> 1. */
3763 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
3764 { constant_boolean_node (cmp == NE_EXPR, type); })
3765 /* Otherwise replace with sensible integer constant. */
3766 (with
3767 {
3768 gcc_checking_assert (!overflow_p);
3769 }
3770 (icmp @0 { wide_int_to_tree (itype, icst_val); })))))))))
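/* Some concrete instances, assuming 32-bit int i converted to IEEE
   double (a sketch of the cases above):

     (double) i > 2.5    ->  i >= 3
     (double) i == 2.5   ->  0
     (double) i < 5e9    ->  1   (the constant is above INT_MAX)
*/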
3771
3772 /* Fold A /[ex] B CMP C to A CMP B * C. */
3773 (for cmp (eq ne)
3774 (simplify
3775 (cmp (exact_div @0 @1) INTEGER_CST@2)
3776 (if (!integer_zerop (@1))
3777 (if (wi::to_wide (@2) == 0)
3778 (cmp @0 @2)
3779 (if (TREE_CODE (@1) == INTEGER_CST)
3780 (with
3781 {
3782 wi::overflow_type ovf;
3783 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
3784 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
3785 }
3786 (if (ovf)
3787 { constant_boolean_node (cmp == NE_EXPR, type); }
3788 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))))
3789 (for cmp (lt le gt ge)
3790 (simplify
3791 (cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2)
3792 (if (wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
3793 (with
3794 {
3795 wi::overflow_type ovf;
3796 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
3797 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
3798 }
3799 (if (ovf)
3800 { constant_boolean_node (wi::lt_p (wi::to_wide (@2), 0,
3801 TYPE_SIGN (TREE_TYPE (@2)))
3802 != (cmp == LT_EXPR || cmp == LE_EXPR), type); }
3803 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))
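/* Exact divisions arise e.g. from pointer subtraction, where the
   byte difference is known to be a multiple of the element size.
   Writing A /[ex] 4 for such a division, illustrative folds are:

     A /[ex] 4 == 3   ->  A == 12
     A /[ex] 4 < 3    ->  A < 12

   When the product C1 * C2 would overflow, the comparison folds to
   a constant instead.  */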
3804
3805 /* Fold (size_t)(A /[ex] B) CMP C to (size_t)A CMP (size_t)B * C or A CMP' 0.
3806
3807 For small C (less than max/B), this is (size_t)A CMP (size_t)B * C.
3808 For large C (more than min/B+2^size), this is also true, with the
3809 multiplication computed modulo 2^size.
3810 For intermediate C, this just tests the sign of A. */
3811 (for cmp (lt le gt ge)
3812 cmp2 (ge ge lt lt)
3813 (simplify
3814 (cmp (convert (exact_div @0 INTEGER_CST@1)) INTEGER_CST@2)
3815 (if (tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2))
3816 && TYPE_UNSIGNED (TREE_TYPE (@2)) && !TYPE_UNSIGNED (TREE_TYPE (@0))
3817 && wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
3818 (with
3819 {
3820 tree utype = TREE_TYPE (@2);
3821 wide_int denom = wi::to_wide (@1);
3822 wide_int right = wi::to_wide (@2);
3823 wide_int smax = wi::sdiv_trunc (wi::max_value (TREE_TYPE (@0)), denom);
3824 wide_int smin = wi::sdiv_trunc (wi::min_value (TREE_TYPE (@0)), denom);
3825 bool small = wi::leu_p (right, smax);
3826 bool large = wi::geu_p (right, smin);
3827 }
3828 (if (small || large)
3829 (cmp (convert:utype @0) (mult @2 (convert @1)))
3830 (cmp2 @0 { build_zero_cst (TREE_TYPE (@0)); }))))))
3831
3832 /* Unordered tests if either argument is a NaN. */
3833 (simplify
3834 (bit_ior (unordered @0 @0) (unordered @1 @1))
3835 (if (types_match (@0, @1))
3836 (unordered @0 @1)))
3837 (simplify
3838 (bit_and (ordered @0 @0) (ordered @1 @1))
3839 (if (types_match (@0, @1))
3840 (ordered @0 @1)))
3841 (simplify
3842 (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1))
3843 @2)
3844 (simplify
3845 (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1))
3846 @2)
3847
3848 /* Simple range test simplifications. */
3849 /* A < B || A >= B -> true. */
3850 (for test1 (lt le le le ne ge)
3851 test2 (ge gt ge ne eq ne)
3852 (simplify
3853 (bit_ior:c (test1 @0 @1) (test2 @0 @1))
3854 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3855 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
3856 { constant_boolean_node (true, type); })))
3857 /* A < B && A >= B -> false. */
3858 (for test1 (lt lt lt le ne eq)
3859 test2 (ge gt eq gt eq gt)
3860 (simplify
3861 (bit_and:c (test1 @0 @1) (test2 @0 @1))
3862 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3863 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
3864 { constant_boolean_node (false, type); })))
3865
3866 /* A & (2**N - 1) <= 2**K - 1 -> A & (2**N - 2**K) == 0
3867 A & (2**N - 1) > 2**K - 1 -> A & (2**N - 2**K) != 0
3868
3869 Note that comparisons
3870 A & (2**N - 1) < 2**K -> A & (2**N - 2**K) == 0
3871 A & (2**N - 1) >= 2**K -> A & (2**N - 2**K) != 0
3872 will be canonicalized to the forms above, so there's no need to
3873 consider them here.
3874 */
3875
3876 (for cmp (le gt)
3877 eqcmp (eq ne)
3878 (simplify
3879 (cmp (bit_and@0 @1 INTEGER_CST@2) INTEGER_CST@3)
3880 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
3881 (with
3882 {
3883 tree ty = TREE_TYPE (@0);
3884 unsigned prec = TYPE_PRECISION (ty);
3885 wide_int mask = wi::to_wide (@2, prec);
3886 wide_int rhs = wi::to_wide (@3, prec);
3887 signop sgn = TYPE_SIGN (ty);
3888 }
3889 (if ((mask & (mask + 1)) == 0 && wi::gt_p (rhs, 0, sgn)
3890 && (rhs & (rhs + 1)) == 0 && wi::ge_p (mask, rhs, sgn))
3891 (eqcmp (bit_and @1 { wide_int_to_tree (ty, mask - rhs); })
3892 { build_zero_cst (ty); }))))))
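/* For instance, with N == 4 and K == 2 on a plain unsigned int a:

     (a & 15) <= 3   ->  (a & 12) == 0
     (a & 15) > 3    ->  (a & 12) != 0

   The low K bits cannot push the masked value past 2**K - 1, so
   only the bits in (2**N - 2**K) matter.  */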
3893
3894 /* -A CMP -B -> B CMP A. */
3895 (for cmp (tcc_comparison)
3896 scmp (swapped_tcc_comparison)
3897 (simplify
3898 (cmp (negate @0) (negate @1))
3899 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3900 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3901 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
3902 (scmp @0 @1)))
3903 (simplify
3904 (cmp (negate @0) CONSTANT_CLASS_P@1)
3905 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3906 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3907 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
3908 (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); }
3909 (if (tem && !TREE_OVERFLOW (tem))
3910 (scmp @0 { tem; }))))))
3911
3912 /* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0. */
3913 (for op (eq ne)
3914 (simplify
3915 (op (abs @0) zerop@1)
3916 (op @0 @1)))
3917
3918 /* From fold_sign_changed_comparison and fold_widened_comparison.
3919 FIXME: the lack of symmetry is disturbing. */
3920 (for cmp (simple_comparison)
3921 (simplify
3922 (cmp (convert@0 @00) (convert?@1 @10))
3923 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3924 /* Disable this optimization if we're casting a function pointer
3925 type on targets that require function pointer canonicalization. */
3926 && !(targetm.have_canonicalize_funcptr_for_compare ()
3927 && ((POINTER_TYPE_P (TREE_TYPE (@00))
3928 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@00))))
3929 || (POINTER_TYPE_P (TREE_TYPE (@10))
3930 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@10))))))
3931 && single_use (@0))
3932 (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
3933 && (TREE_CODE (@10) == INTEGER_CST
3934 || @1 != @10)
3935 && (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
3936 || cmp == NE_EXPR
3937 || cmp == EQ_EXPR)
3938 && !POINTER_TYPE_P (TREE_TYPE (@00)))
3939 /* ??? The special-casing of INTEGER_CST conversion was in the original
3940 code and is kept here to avoid a spurious overflow flag on the
3941 resulting constant which fold_convert produces. */
3942 (if (TREE_CODE (@1) == INTEGER_CST)
3943 (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
3944 TREE_OVERFLOW (@1)); })
3945 (cmp @00 (convert @1)))
3946
3947 (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
3948 /* If possible, express the comparison in the shorter mode. */
3949 (if ((cmp == EQ_EXPR || cmp == NE_EXPR
3950 || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00))
3951 || (!TYPE_UNSIGNED (TREE_TYPE (@0))
3952 && TYPE_UNSIGNED (TREE_TYPE (@00))))
3953 && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
3954 || ((TYPE_PRECISION (TREE_TYPE (@00))
3955 >= TYPE_PRECISION (TREE_TYPE (@10)))
3956 && (TYPE_UNSIGNED (TREE_TYPE (@00))
3957 == TYPE_UNSIGNED (TREE_TYPE (@10))))
3958 || (TREE_CODE (@10) == INTEGER_CST
3959 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
3960 && int_fits_type_p (@10, TREE_TYPE (@00)))))
3961 (cmp @00 (convert @10))
3962 (if (TREE_CODE (@10) == INTEGER_CST
3963 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
3964 && !int_fits_type_p (@10, TREE_TYPE (@00)))
3965 (with
3966 {
3967 tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
3968 tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
3969 bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
3970 bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
3971 }
3972 (if (above || below)
3973 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
3974 { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
3975 (if (cmp == LT_EXPR || cmp == LE_EXPR)
3976 { constant_boolean_node (above ? true : false, type); }
3977 (if (cmp == GT_EXPR || cmp == GE_EXPR)
3978 { constant_boolean_node (above ? false : true, type); }))))))))))))
3979
3980 (for cmp (eq ne)
3981 /* A local variable can never be pointed to by
3982 the default SSA name of an incoming parameter.
3983 SSA names are canonicalized to 2nd place. */
3984 (simplify
3985 (cmp addr@0 SSA_NAME@1)
3986 (if (SSA_NAME_IS_DEFAULT_DEF (@1)
3987 && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL)
3988 (with { tree base = get_base_address (TREE_OPERAND (@0, 0)); }
3989 (if (TREE_CODE (base) == VAR_DECL
3990 && auto_var_in_fn_p (base, current_function_decl))
3991 (if (cmp == NE_EXPR)
3992 { constant_boolean_node (true, type); }
3993 { constant_boolean_node (false, type); }))))))
3994
3995 /* Equality compare simplifications from fold_binary */
3996 (for cmp (eq ne)
3997
3998 /* If we have (A | C) == D where C & ~D != 0, convert this into 0.
3999 Similarly for NE_EXPR. */
4000 (simplify
4001 (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
4002 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
4003 && wi::bit_and_not (wi::to_wide (@1), wi::to_wide (@2)) != 0)
4004 { constant_boolean_node (cmp == NE_EXPR, type); }))
4005
4006 /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y. */
4007 (simplify
4008 (cmp (bit_xor @0 @1) integer_zerop)
4009 (cmp @0 @1))
4010
4011 /* (X ^ Y) == Y becomes X == 0.
4012 Likewise (X ^ Y) == X becomes Y == 0. */
4013 (simplify
4014 (cmp:c (bit_xor:c @0 @1) @0)
4015 (cmp @1 { build_zero_cst (TREE_TYPE (@1)); }))
4016
4017 /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2). */
4018 (simplify
4019 (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
4020 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
4021 (cmp @0 (bit_xor @1 (convert @2)))))
4022
4023 (simplify
4024 (cmp (convert? addr@0) integer_zerop)
4025 (if (tree_single_nonzero_warnv_p (@0, NULL))
4026 { constant_boolean_node (cmp == NE_EXPR, type); })))
4027
4028 /* If we have (A & C) == C where C is a power of 2, convert this into
4029 (A & C) != 0. Similarly for NE_EXPR. */
4030 (for cmp (eq ne)
4031 icmp (ne eq)
4032 (simplify
4033 (cmp (bit_and@2 @0 integer_pow2p@1) @1)
4034 (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))
4035
4036 /* If we have (A & C) != 0 ? D : 0 where C and D are powers of 2,
4037 convert this into a shift followed by ANDing with D. */
4038 (simplify
4039 (cond
4040 (ne (bit_and @0 integer_pow2p@1) integer_zerop)
4041 INTEGER_CST@2 integer_zerop)
4042 (if (integer_pow2p (@2))
4043 (with {
4044 int shift = (wi::exact_log2 (wi::to_wide (@2))
4045 - wi::exact_log2 (wi::to_wide (@1)));
4046 }
4047 (if (shift > 0)
4048 (bit_and
4049 (lshift (convert @0) { build_int_cst (integer_type_node, shift); }) @2)
4050 (bit_and
4051 (convert (rshift @0 { build_int_cst (integer_type_node, -shift); }))
4052 @2)))))
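/* E.g., for an unsigned int a (a sketch, ignoring the intermediate
   conversions):

     (a & 4) != 0 ? 16 : 0    ->  (a << 2) & 16
     (a & 16) != 0 ? 4 : 0    ->  (a >> 2) & 4

   The shift moves the tested bit to the position of D and the AND
   isolates it.  */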
4053
4054 /* If we have (A & C) != 0 where C is the sign bit of A, convert
4055 this into A < 0. Similarly for (A & C) == 0 into A >= 0. */
4056 (for cmp (eq ne)
4057 ncmp (ge lt)
4058 (simplify
4059 (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
4060 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4061 && type_has_mode_precision_p (TREE_TYPE (@0))
4062 && element_precision (@2) >= element_precision (@0)
4063 && wi::only_sign_bit_p (wi::to_wide (@1), element_precision (@0)))
4064 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
4065 (ncmp (convert:stype @0) { build_zero_cst (stype); })))))
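/* For instance, for a 32-bit unsigned int a:

     (a & 0x80000000) != 0   ->  (int) a < 0
     (a & 0x80000000) == 0   ->  (int) a >= 0

   since the masked bit is exactly the sign bit of the signed view.  */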
4066
4067 /* If we have A < 0 ? C : 0 where C is a power of 2, convert
4068 this into a right shift or sign extension followed by ANDing with C. */
4069 (simplify
4070 (cond
4071 (lt @0 integer_zerop)
4072 INTEGER_CST@1 integer_zerop)
4073 (if (integer_pow2p (@1)
4074 && !TYPE_UNSIGNED (TREE_TYPE (@0)))
4075 (with {
4076 int shift = element_precision (@0) - wi::exact_log2 (wi::to_wide (@1)) - 1;
4077 }
4078 (if (shift >= 0)
4079 (bit_and
4080 (convert (rshift @0 { build_int_cst (integer_type_node, shift); }))
4081 @1)
4082 /* Otherwise the result type must be wider than TREE_TYPE (@0) and a
4083 pure sign extension followed by ANDing with C will achieve the effect. */
4084 (bit_and (convert @0) @1)))))
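/* Concretely, for a 32-bit signed int a:

     a < 0 ? 16 : 0   ->  (a >> 27) & 16

   The arithmetic shift smears the sign bit down to bit 4 and the
   AND isolates it.  */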
4085
4086 /* When the addresses are not directly of decls, compare base and offset.
4087 This implements some remaining parts of fold_comparison address
4088 comparisons but is still not complete. Still, it is good enough
4089 to keep fold_stmt from regressing when not dispatching to fold_binary. */
4090 (for cmp (simple_comparison)
4091 (simplify
4092 (cmp (convert1?@2 addr@0) (convert2? addr@1))
4093 (with
4094 {
4095 poly_int64 off0, off1;
4096 tree base0 = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off0);
4097 tree base1 = get_addr_base_and_unit_offset (TREE_OPERAND (@1, 0), &off1);
4098 if (base0 && TREE_CODE (base0) == MEM_REF)
4099 {
4100 off0 += mem_ref_offset (base0).force_shwi ();
4101 base0 = TREE_OPERAND (base0, 0);
4102 }
4103 if (base1 && TREE_CODE (base1) == MEM_REF)
4104 {
4105 off1 += mem_ref_offset (base1).force_shwi ();
4106 base1 = TREE_OPERAND (base1, 0);
4107 }
4108 }
4109 (if (base0 && base1)
4110 (with
4111 {
4112 int equal = 2;
4113 /* Punt in GENERIC on variables with value expressions;
4114 the value expressions might point to fields/elements
4115 of other vars etc. */
4116 if (GENERIC
4117 && ((VAR_P (base0) && DECL_HAS_VALUE_EXPR_P (base0))
4118 || (VAR_P (base1) && DECL_HAS_VALUE_EXPR_P (base1))))
4119 ;
4120 else if (decl_in_symtab_p (base0)
4121 && decl_in_symtab_p (base1))
4122 equal = symtab_node::get_create (base0)
4123 ->equal_address_to (symtab_node::get_create (base1));
4124 else if ((DECL_P (base0)
4125 || TREE_CODE (base0) == SSA_NAME
4126 || TREE_CODE (base0) == STRING_CST)
4127 && (DECL_P (base1)
4128 || TREE_CODE (base1) == SSA_NAME
4129 || TREE_CODE (base1) == STRING_CST))
4130 equal = (base0 == base1);
4131 if (equal == 0)
4132 {
4133 HOST_WIDE_INT ioff0 = -1, ioff1 = -1;
4134 off0.is_constant (&ioff0);
4135 off1.is_constant (&ioff1);
4136 if ((DECL_P (base0) && TREE_CODE (base1) == STRING_CST)
4137 || (TREE_CODE (base0) == STRING_CST && DECL_P (base1))
4138 || (TREE_CODE (base0) == STRING_CST
4139 && TREE_CODE (base1) == STRING_CST
4140 && ioff0 >= 0 && ioff1 >= 0
4141 && ioff0 < TREE_STRING_LENGTH (base0)
4142 && ioff1 < TREE_STRING_LENGTH (base1)
4143 /* This is an overly conservative test that the STRING_CSTs
4144 will not end up being string-merged. */
4145 && strncmp (TREE_STRING_POINTER (base0) + ioff0,
4146 TREE_STRING_POINTER (base1) + ioff1,
4147 MIN (TREE_STRING_LENGTH (base0) - ioff0,
4148 TREE_STRING_LENGTH (base1) - ioff1)) != 0))
4149 ;
4150 else if (!DECL_P (base0) || !DECL_P (base1))
4151 equal = 2;
4152 else if (cmp != EQ_EXPR && cmp != NE_EXPR)
4153 equal = 2;
4154 /* If this is a pointer comparison, ignore for now even
4155 valid equalities where one pointer is the offset zero
4156 of one object and the other points one past the end of another. */
4157 else if (!INTEGRAL_TYPE_P (TREE_TYPE (@2)))
4158 ;
4159 /* Assume that automatic variables can't be adjacent to global
4160 variables. */
4161 else if (is_global_var (base0) != is_global_var (base1))
4162 ;
4163 else
4164 {
4165 tree sz0 = DECL_SIZE_UNIT (base0);
4166 tree sz1 = DECL_SIZE_UNIT (base1);
4167 /* If sizes are unknown, e.g. VLA or not representable,
4168 punt. */
4169 if (!tree_fits_poly_int64_p (sz0)
4170 || !tree_fits_poly_int64_p (sz1))
4171 equal = 2;
4172 else
4173 {
4174 poly_int64 size0 = tree_to_poly_int64 (sz0);
4175 poly_int64 size1 = tree_to_poly_int64 (sz1);
4176 /* If one offset is pointing (or could be) to the beginning
4177 of one object and the other is pointing to one past the
4178 last byte of the other object, punt. */
4179 if (maybe_eq (off0, 0) && maybe_eq (off1, size1))
4180 equal = 2;
4181 else if (maybe_eq (off1, 0) && maybe_eq (off0, size0))
4182 equal = 2;
4183 /* If both offsets are the same, there are some cases
4184 we know are OK: either we know the offsets aren't
4185 zero, or we know both sizes are nonzero. */
4186 if (equal == 2
4187 && known_eq (off0, off1)
4188 && (known_ne (off0, 0)
4189 || (known_ne (size0, 0) && known_ne (size1, 0))))
4190 equal = 0;
4191 }
4192 }
4193 }
4194 }
4195 (if (equal == 1
4196 && (cmp == EQ_EXPR || cmp == NE_EXPR
4197 /* If the offsets are equal we can ignore overflow. */
4198 || known_eq (off0, off1)
4199 || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
4200 /* Or if we compare using pointers to decls or strings. */
4201 || (POINTER_TYPE_P (TREE_TYPE (@2))
4202 && (DECL_P (base0) || TREE_CODE (base0) == STRING_CST))))
4203 (switch
4204 (if (cmp == EQ_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
4205 { constant_boolean_node (known_eq (off0, off1), type); })
4206 (if (cmp == NE_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
4207 { constant_boolean_node (known_ne (off0, off1), type); })
4208 (if (cmp == LT_EXPR && (known_lt (off0, off1) || known_ge (off0, off1)))
4209 { constant_boolean_node (known_lt (off0, off1), type); })
4210 (if (cmp == LE_EXPR && (known_le (off0, off1) || known_gt (off0, off1)))
4211 { constant_boolean_node (known_le (off0, off1), type); })
4212 (if (cmp == GE_EXPR && (known_ge (off0, off1) || known_lt (off0, off1)))
4213 { constant_boolean_node (known_ge (off0, off1), type); })
4214 (if (cmp == GT_EXPR && (known_gt (off0, off1) || known_le (off0, off1)))
4215 { constant_boolean_node (known_gt (off0, off1), type); }))
4216 (if (equal == 0)
4217 (switch
4218 (if (cmp == EQ_EXPR)
4219 { constant_boolean_node (false, type); })
4220 (if (cmp == NE_EXPR)
4221 { constant_boolean_node (true, type); })))))))))
4222
4223 /* Simplify pointer equality compares using PTA. */
4224 (for neeq (ne eq)
4225 (simplify
4226 (neeq @0 @1)
4227 (if (POINTER_TYPE_P (TREE_TYPE (@0))
4228 && ptrs_compare_unequal (@0, @1))
4229 { constant_boolean_node (neeq != EQ_EXPR, type); })))
4230
4231 /* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST,
4232 and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST.
4233 Disable the transform if either operand is a pointer to a function.
4234 This broke pr22051-2.c for arm, where function pointer
4235 canonicalization is not wanted. */
4236
4237 (for cmp (ne eq)
4238 (simplify
4239 (cmp (convert @0) INTEGER_CST@1)
4240 (if (((POINTER_TYPE_P (TREE_TYPE (@0))
4241 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0)))
4242 && INTEGRAL_TYPE_P (TREE_TYPE (@1)))
4243 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4244 && POINTER_TYPE_P (TREE_TYPE (@1))
4245 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1)))))
4246 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
4247 (cmp @0 (convert @1)))))
4248
4249 /* Non-equality compare simplifications from fold_binary */
4250 (for cmp (lt gt le ge)
4251 /* Comparisons with the highest or lowest possible integer of
4252 the specified precision will have known values. */
4253 (simplify
4254 (cmp (convert?@2 @0) uniform_integer_cst_p@1)
4255 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1))
4256 || POINTER_TYPE_P (TREE_TYPE (@1))
4257 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@1)))
4258 && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)))
4259 (with
4260 {
4261 tree cst = uniform_integer_cst_p (@1);
4262 tree arg1_type = TREE_TYPE (cst);
4263 unsigned int prec = TYPE_PRECISION (arg1_type);
4264 wide_int max = wi::max_value (arg1_type);
4265 wide_int signed_max = wi::max_value (prec, SIGNED);
4266 wide_int min = wi::min_value (arg1_type);
4267 }
4268 (switch
4269 (if (wi::to_wide (cst) == max)
4270 (switch
4271 (if (cmp == GT_EXPR)
4272 { constant_boolean_node (false, type); })
4273 (if (cmp == GE_EXPR)
4274 (eq @2 @1))
4275 (if (cmp == LE_EXPR)
4276 { constant_boolean_node (true, type); })
4277 (if (cmp == LT_EXPR)
4278 (ne @2 @1))))
4279 (if (wi::to_wide (cst) == min)
4280 (switch
4281 (if (cmp == LT_EXPR)
4282 { constant_boolean_node (false, type); })
4283 (if (cmp == LE_EXPR)
4284 (eq @2 @1))
4285 (if (cmp == GE_EXPR)
4286 { constant_boolean_node (true, type); })
4287 (if (cmp == GT_EXPR)
4288 (ne @2 @1))))
4289 (if (wi::to_wide (cst) == max - 1)
4290 (switch
4291 (if (cmp == GT_EXPR)
4292 (eq @2 { build_uniform_cst (TREE_TYPE (@1),
4293 wide_int_to_tree (TREE_TYPE (cst),
4294 wi::to_wide (cst)
4295 + 1)); }))
4296 (if (cmp == LE_EXPR)
4297 (ne @2 { build_uniform_cst (TREE_TYPE (@1),
4298 wide_int_to_tree (TREE_TYPE (cst),
4299 wi::to_wide (cst)
4300 + 1)); }))))
4301 (if (wi::to_wide (cst) == min + 1)
4302 (switch
4303 (if (cmp == GE_EXPR)
4304 (ne @2 { build_uniform_cst (TREE_TYPE (@1),
4305 wide_int_to_tree (TREE_TYPE (cst),
4306 wi::to_wide (cst)
4307 - 1)); }))
4308 (if (cmp == LT_EXPR)
4309 (eq @2 { build_uniform_cst (TREE_TYPE (@1),
4310 wide_int_to_tree (TREE_TYPE (cst),
4311 wi::to_wide (cst)
4312 - 1)); }))))
4313 (if (wi::to_wide (cst) == signed_max
4314 && TYPE_UNSIGNED (arg1_type)
4315 /* We will flip the signedness of the comparison operator
4316 associated with the mode of @1, so the sign bit is
4317 specified by this mode. Check that @1 is the signed
4318 max associated with this sign bit. */
4319 && prec == GET_MODE_PRECISION (SCALAR_INT_TYPE_MODE (arg1_type))
4320 /* signed_type does not work on pointer types. */
4321 && INTEGRAL_TYPE_P (arg1_type))
4322 /* The following case also applies to X < signed_max+1
4323 and X >= signed_max+1 because of previous transformations. */
4324 (if (cmp == LE_EXPR || cmp == GT_EXPR)
4325 (with { tree st = signed_type_for (TREE_TYPE (@1)); }
4326 (switch
4327 (if (cst == @1 && cmp == LE_EXPR)
4328 (ge (convert:st @0) { build_zero_cst (st); }))
4329 (if (cst == @1 && cmp == GT_EXPR)
4330 (lt (convert:st @0) { build_zero_cst (st); }))
4331 (if (cmp == LE_EXPR)
4332 (ge (view_convert:st @0) { build_zero_cst (st); }))
4333 (if (cmp == GT_EXPR)
4334 (lt (view_convert:st @0) { build_zero_cst (st); })))))))))))
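/* A few concrete instances, assuming 32-bit int x and unsigned u
   (a sketch of the switch above):

     u > 4294967295u   ->  false
     x >= 2147483647   ->  x == 2147483647
     u <= 2147483647u  ->  (int) u >= 0
*/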
4335
4336 (for cmp (unordered ordered unlt unle ungt unge uneq ltgt)
4337 /* If the second operand is NaN, the result is constant. */
4338 (simplify
4339 (cmp @0 REAL_CST@1)
4340 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
4341 && (cmp != LTGT_EXPR || ! flag_trapping_math))
4342 { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR
4343 ? false : true, type); })))
4344
4345 /* bool_var != 0 becomes bool_var. */
4346 (simplify
4347 (ne @0 integer_zerop)
4348 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
4349 && types_match (type, TREE_TYPE (@0)))
4350 (non_lvalue @0)))
4351 /* bool_var == 1 becomes bool_var. */
4352 (simplify
4353 (eq @0 integer_onep)
4354 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
4355 && types_match (type, TREE_TYPE (@0)))
4356 (non_lvalue @0)))
4357 /* Do not handle
4358 bool_var == 0 becomes !bool_var or
4359 bool_var != 1 becomes !bool_var
4360 here because that is only good in assignment context as long
4361 as we require a tcc_comparison in GIMPLE_CONDs where we'd
4362 replace if (x == 0) with tem = ~x; if (tem != 0) which is
4363 clearly less optimal and which we'll transform again in forwprop. */
4364
4365 /* When one argument is a constant, overflow detection can be simplified.
4366 Currently restricted to single use so as not to interfere too much with
4367 ADD_OVERFLOW detection in tree-ssa-math-opts.c.
4368 A + CST CMP A -> A CMP' CST' */
4369 (for cmp (lt le ge gt)
4370 out (gt gt le le)
4371 (simplify
4372 (cmp:c (plus@2 @0 INTEGER_CST@1) @0)
4373 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4374 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
4375 && wi::to_wide (@1) != 0
4376 && single_use (@2))
4377 (with { unsigned int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
4378 (out @0 { wide_int_to_tree (TREE_TYPE (@0),
4379 wi::max_value (prec, UNSIGNED)
4380 - wi::to_wide (@1)); })))))
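/* For example, with 32-bit unsigned x and a single use of the sum:

     x + 10 < x   ->  x > 4294967285   (i.e. x > UINT_MAX - 10)

   turning the wrap-around check into a direct range test on x.  */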
4381
4382 /* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
4383 However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.c
4384 expects the long form, so we restrict the transformation for now. */
4385 (for cmp (gt le)
4386 (simplify
4387 (cmp:c (minus@2 @0 @1) @0)
4388 (if (single_use (@2)
4389 && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
4390 && TYPE_UNSIGNED (TREE_TYPE (@0))
4391 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
4392 (cmp @1 @0))))
4393
4394 /* Testing for overflow is unnecessary if we already know the result. */
4395 /* A - B > A */
4396 (for cmp (gt le)
4397 out (ne eq)
4398 (simplify
4399 (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0)
4400 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4401 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
4402 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
4403 /* A + B < A */
4404 (for cmp (lt ge)
4405 out (ne eq)
4406 (simplify
4407 (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0)
4408 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4409 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
4410 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
4411
4412 /* For unsigned operands, -1 / B < A checks whether A * B would overflow.
4413 Simplify it to __builtin_mul_overflow (A, B, <unused>). */
4414 (for cmp (lt ge)
4415 out (ne eq)
4416 (simplify
4417 (cmp:c (trunc_div:s integer_all_onesp @1) @0)
4418 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0)))
4419 (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
4420 (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
4421
4422 /* Simplification of math builtins. These rules must all be optimizations
4423 as well as IL simplifications. If there is a possibility that the new
4424 form could be a pessimization, the rule should go in the canonicalization
4425 section that follows this one.
4426
4427 Rules can generally go in this section if they satisfy one of
4428 the following:
4429
4430 - the rule describes an identity
4431
4432 - the rule replaces calls with something as simple as addition or
4433 multiplication
4434
4435 - the rule contains unary calls only and simplifies the surrounding
4436 arithmetic. (The idea here is to exclude non-unary calls in which
4437 one operand is constant and in which the call is known to be cheap
4438 when the operand has that value.) */
4439
4440 (if (flag_unsafe_math_optimizations)
4441 /* Simplify sqrt(x) * sqrt(x) -> x. */
4442 (simplify
4443 (mult (SQRT_ALL@1 @0) @1)
4444 (if (!HONOR_SNANS (type))
4445 @0))
4446
4447 (for op (plus minus)
4448 /* Simplify (A / C) +- (B / C) -> (A +- B) / C. */
4449 (simplify
4450 (op (rdiv @0 @1)
4451 (rdiv @2 @1))
4452 (rdiv (op @0 @2) @1)))
4453
4454 (for cmp (lt le gt ge)
4455 neg_cmp (gt ge lt le)
4456 /* Simplify (x * C1) cmp C2 -> x cmp (C2 / C1), where C1 != 0. */
4457 (simplify
4458 (cmp (mult @0 REAL_CST@1) REAL_CST@2)
4459 (with
4460 { tree tem = const_binop (RDIV_EXPR, type, @2, @1); }
4461 (if (tem
4462 && !(REAL_VALUE_ISINF (TREE_REAL_CST (tem))
4463 || (real_zerop (tem) && !real_zerop (@1))))
4464 (switch
4465 (if (real_less (&dconst0, TREE_REAL_CST_PTR (@1)))
4466 (cmp @0 { tem; }))
4467 (if (real_less (TREE_REAL_CST_PTR (@1), &dconst0))
4468 (neg_cmp @0 { tem; })))))))
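/* E.g., for double x under -funsafe-math-optimizations, provided
   the divided constant stays finite and nonzero:

     x * 2.0 < 10.0    ->  x < 5.0
     x * -2.0 < 10.0   ->  x > -5.0
*/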
4469
4470 /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y). */
4471 (for root (SQRT CBRT)
4472 (simplify
4473 (mult (root:s @0) (root:s @1))
4474 (root (mult @0 @1))))
4475
4476 /* Simplify expN(x) * expN(y) -> expN(x+y). */
4477 (for exps (EXP EXP2 EXP10 POW10)
4478 (simplify
4479 (mult (exps:s @0) (exps:s @1))
4480 (exps (plus @0 @1))))
4481
4482 /* Simplify a/root(b/c) into a*root(c/b). */
4483 (for root (SQRT CBRT)
4484 (simplify
4485 (rdiv @0 (root:s (rdiv:s @1 @2)))
4486 (mult @0 (root (rdiv @2 @1)))))
4487
4488 /* Simplify x/expN(y) into x*expN(-y). */
4489 (for exps (EXP EXP2 EXP10 POW10)
4490 (simplify
4491 (rdiv @0 (exps:s @1))
4492 (mult @0 (exps (negate @1)))))
4493
4494 (for logs (LOG LOG2 LOG10 LOG10)
4495 exps (EXP EXP2 EXP10 POW10)
4496 /* logN(expN(x)) -> x. */
4497 (simplify
4498 (logs (exps @0))
4499 @0)
4500 /* expN(logN(x)) -> x. */
4501 (simplify
4502 (exps (logs @0))
4503 @0))
4504
4505 /* Optimize logN(func()) for various exponential functions. We
4506 want to determine the value "x" and the power "exponent" in
4507 order to transform logN(x**exponent) into exponent*logN(x). */
4508 (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10)
4509 exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2)
4510 (simplify
4511 (logs (exps @0))
4512 (if (SCALAR_FLOAT_TYPE_P (type))
4513 (with {
4514 tree x;
4515 switch (exps)
4516 {
4517 CASE_CFN_EXP:
4518 /* Prepare to do logN(exp(exponent)) -> exponent*logN(e). */
4519 x = build_real_truncate (type, dconst_e ());
4520 break;
4521 CASE_CFN_EXP2:
4522 /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2). */
4523 x = build_real (type, dconst2);
4524 break;
4525 CASE_CFN_EXP10:
4526 CASE_CFN_POW10:
4527 /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10). */
4528 {
4529 REAL_VALUE_TYPE dconst10;
4530 real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
4531 x = build_real (type, dconst10);
4532 }
4533 break;
4534 default:
4535 gcc_unreachable ();
4536 }
4537 }
4538 (mult (logs { x; }) @0)))))
4539
4540 (for logs (LOG LOG
4541 LOG2 LOG2
4542 LOG10 LOG10)
4543 exps (SQRT CBRT)
4544 (simplify
4545 (logs (exps @0))
4546 (if (SCALAR_FLOAT_TYPE_P (type))
4547 (with {
4548 tree x;
4549 switch (exps)
4550 {
4551 CASE_CFN_SQRT:
4552 /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x). */
4553 x = build_real (type, dconsthalf);
4554 break;
4555 CASE_CFN_CBRT:
4556 /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x). */
4557 x = build_real_truncate (type, dconst_third ());
4558 break;
4559 default:
4560 gcc_unreachable ();
4561 }
4562 }
4563 (mult { x; } (logs @0))))))
4564
4565 /* logN(pow(x,exponent)) -> exponent*logN(x). */
4566 (for logs (LOG LOG2 LOG10)
4567 pows (POW)
4568 (simplify
4569 (logs (pows @0 @1))
4570 (mult @1 (logs @0))))
4571
4572 /* pow(C,x) -> exp(log(C)*x) if C > 0,
4573 or if C is a positive power of 2,
4574 pow(C,x) -> exp2(log2(C)*x). */
4575 #if GIMPLE
4576 (for pows (POW)
4577 exps (EXP)
4578 logs (LOG)
4579 exp2s (EXP2)
4580 log2s (LOG2)
4581 (simplify
4582 (pows REAL_CST@0 @1)
4583 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
4584 && real_isfinite (TREE_REAL_CST_PTR (@0))
4585 /* As libmvec doesn't have a vectorized exp2, defer optimizing
4586 the use_exp2 case until after vectorization. It seems actually
4587 beneficial for all constants to postpone this until later,
4588 because exp(log(C)*x), while faster, will have worse precision
4589 and if x folds into a constant too, that is unnecessary
4590 pessimization. */
4591 && canonicalize_math_after_vectorization_p ())
4592 (with {
4593 const REAL_VALUE_TYPE *const value = TREE_REAL_CST_PTR (@0);
4594 bool use_exp2 = false;
4595 if (targetm.libc_has_function (function_c99_misc)
4596 && value->cl == rvc_normal)
4597 {
4598 REAL_VALUE_TYPE frac_rvt = *value;
4599 SET_REAL_EXP (&frac_rvt, 1);
4600 if (real_equal (&frac_rvt, &dconst1))
4601 use_exp2 = true;
4602 }
4603 }
4604 (if (!use_exp2)
4605 (if (optimize_pow_to_exp (@0, @1))
4606 (exps (mult (logs @0) @1)))
4607 (exp2s (mult (log2s @0) @1)))))))
4608 #endif
4609
4610 /* pow(C,x)*expN(y) -> expN(logN(C)*x+y) if C > 0. */
4611 (for pows (POW)
4612 exps (EXP EXP2 EXP10 POW10)
4613 logs (LOG LOG2 LOG10 LOG10)
4614 (simplify
4615 (mult:c (pows:s REAL_CST@0 @1) (exps:s @2))
4616 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
4617 && real_isfinite (TREE_REAL_CST_PTR (@0)))
4618 (exps (plus (mult (logs @0) @1) @2)))))
4619
4620 (for sqrts (SQRT)
4621 cbrts (CBRT)
4622 pows (POW)
4623 exps (EXP EXP2 EXP10 POW10)
4624 /* sqrt(expN(x)) -> expN(x*0.5). */
4625 (simplify
4626 (sqrts (exps @0))
4627 (exps (mult @0 { build_real (type, dconsthalf); })))
4628 /* cbrt(expN(x)) -> expN(x/3). */
4629 (simplify
4630 (cbrts (exps @0))
4631 (exps (mult @0 { build_real_truncate (type, dconst_third ()); })))
4632 /* pow(expN(x), y) -> expN(x*y). */
4633 (simplify
4634 (pows (exps @0) @1)
4635 (exps (mult @0 @1))))
4636
4637 /* tan(atan(x)) -> x. */
4638 (for tans (TAN)
4639 atans (ATAN)
4640 (simplify
4641 (tans (atans @0))
4642 @0)))
4643
4644 /* Simplify sin(atan(x)) -> x / sqrt(x*x + 1). */
4645 (for sins (SIN)
4646 atans (ATAN)
4647 sqrts (SQRT)
4648 copysigns (COPYSIGN)
4649 (simplify
4650 (sins (atans:s @0))
4651 (with
4652 {
4653 REAL_VALUE_TYPE r_cst;
4654 build_sinatan_real (&r_cst, type);
4655 tree t_cst = build_real (type, r_cst);
4656 tree t_one = build_one_cst (type);
4657 }
4658 (if (SCALAR_FLOAT_TYPE_P (type))
4659 (cond (lt (abs @0) { t_cst; })
4660 (rdiv @0 (sqrts (plus (mult @0 @0) { t_one; })))
4661 (copysigns { t_one; } @0))))))
4662
4663 /* Simplify cos(atan(x)) -> 1 / sqrt(x*x + 1). */
4664 (for coss (COS)
4665 atans (ATAN)
4666 sqrts (SQRT)
4667 copysigns (COPYSIGN)
4668 (simplify
4669 (coss (atans:s @0))
4670 (with
4671 {
4672 REAL_VALUE_TYPE r_cst;
4673 build_sinatan_real (&r_cst, type);
4674 tree t_cst = build_real (type, r_cst);
4675 tree t_one = build_one_cst (type);
4676 tree t_zero = build_zero_cst (type);
4677 }
4678 (if (SCALAR_FLOAT_TYPE_P (type))
4679 (cond (lt (abs @0) { t_cst; })
4680 (rdiv { t_one; } (sqrts (plus (mult @0 @0) { t_one; })))
4681 (copysigns { t_zero; } @0))))))
4682
4683 (if (!flag_errno_math)
4684 /* Simplify sinh(atanh(x)) -> x / sqrt((1 - x)*(1 + x)). */
4685 (for sinhs (SINH)
4686 atanhs (ATANH)
4687 sqrts (SQRT)
4688 (simplify
4689 (sinhs (atanhs:s @0))
4690 (with { tree t_one = build_one_cst (type); }
4691 (rdiv @0 (sqrts (mult (minus { t_one; } @0) (plus { t_one; } @0)))))))
4692
4693 /* Simplify cosh(atanh(x)) -> 1 / sqrt((1 - x)*(1 + x)) */
4694 (for coshs (COSH)
4695 atanhs (ATANH)
4696 sqrts (SQRT)
4697 (simplify
4698 (coshs (atanhs:s @0))
4699 (with { tree t_one = build_one_cst (type); }
4700 (rdiv { t_one; } (sqrts (mult (minus { t_one; } @0) (plus { t_one; } @0))))))))
4701
4702 /* cabs(x+0i) or cabs(0+xi) -> abs(x). */
4703 (simplify
4704 (CABS (complex:C @0 real_zerop@1))
4705 (abs @0))
4706
4707 /* trunc(trunc(x)) -> trunc(x), etc. */
4708 (for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
4709 (simplify
4710 (fns (fns @0))
4711 (fns @0)))
4712 /* f(x) -> x if x is integer valued and f does nothing for such values. */
4713 (for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
4714 (simplify
4715 (fns integer_valued_real_p@0)
4716 @0))
4717
4718 /* hypot(x,0) and hypot(0,x) -> abs(x). */
4719 (simplify
4720 (HYPOT:c @0 real_zerop@1)
4721 (abs @0))
4722
4723 /* pow(1,x) -> 1. */
4724 (simplify
4725 (POW real_onep@0 @1)
4726 @0)
4727
4728 (simplify
4729 /* copysign(x,x) -> x. */
4730 (COPYSIGN_ALL @0 @0)
4731 @0)
4732
4733 (simplify
4734 /* copysign(x,y) -> fabs(x) if y is nonnegative. */
4735 (COPYSIGN_ALL @0 tree_expr_nonnegative_p@1)
4736 (abs @0))
4737
4738 (for scale (LDEXP SCALBN SCALBLN)
4739 /* ldexp(0, x) -> 0. */
4740 (simplify
4741 (scale real_zerop@0 @1)
4742 @0)
4743 /* ldexp(x, 0) -> x. */
4744 (simplify
4745 (scale @0 integer_zerop@1)
4746 @0)
4747 /* ldexp(x, y) -> x if x is +-Inf or NaN. */
4748 (simplify
4749 (scale REAL_CST@0 @1)
4750 (if (!real_isfinite (TREE_REAL_CST_PTR (@0)))
4751 @0)))
4752
4753 /* Canonicalization of sequences of math builtins. These rules represent
4754 IL simplifications but are not necessarily optimizations.
4755
4756 The sincos pass is responsible for picking "optimal" implementations
4757 of math builtins, which may be more complicated and can sometimes go
4758 the other way, e.g. converting pow into a sequence of sqrts.
4759 We only want to do these canonicalizations before the pass has run. */
4760
4761 (if (flag_unsafe_math_optimizations && canonicalize_math_p ())
4762 /* Simplify tan(x) * cos(x) -> sin(x). */
4763 (simplify
4764 (mult:c (TAN:s @0) (COS:s @0))
4765 (SIN @0))
4766
4767 /* Simplify x * pow(x,c) -> pow(x,c+1). */
4768 (simplify
4769 (mult:c @0 (POW:s @0 REAL_CST@1))
4770 (if (!TREE_OVERFLOW (@1))
4771 (POW @0 (plus @1 { build_one_cst (type); }))))
4772
4773 /* Simplify sin(x) / cos(x) -> tan(x). */
4774 (simplify
4775 (rdiv (SIN:s @0) (COS:s @0))
4776 (TAN @0))
4777
4778 /* Simplify cos(x) / sin(x) -> 1 / tan(x). */
4779 (simplify
4780 (rdiv (COS:s @0) (SIN:s @0))
4781 (rdiv { build_one_cst (type); } (TAN @0)))
4782
4783 /* Simplify sin(x) / tan(x) -> cos(x). */
4784 (simplify
4785 (rdiv (SIN:s @0) (TAN:s @0))
4786 (if (! HONOR_NANS (@0)
4787 && ! HONOR_INFINITIES (@0))
4788 (COS @0)))
4789
4790 /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */
4791 (simplify
4792 (rdiv (TAN:s @0) (SIN:s @0))
4793 (if (! HONOR_NANS (@0)
4794 && ! HONOR_INFINITIES (@0))
4795 (rdiv { build_one_cst (type); } (COS @0))))
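
/* The guards on the last two folds matter because the two sides can
   disagree at special points: at x == 0, sin(x)/tan(x) evaluates to
   0.0/0.0 == NaN while cos(x) == 1.0, and near the poles of tan(x)
   one side can see an infinity the other does not.  When NaNs and
   infinities are not honored (e.g. under -ffinite-math-only) the
   differences are ignorable.  */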
4796
4797 /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */
4798 (simplify
4799 (mult (POW:s @0 @1) (POW:s @0 @2))
4800 (POW @0 (plus @1 @2)))
4801
4802 /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */
4803 (simplify
4804 (mult (POW:s @0 @1) (POW:s @2 @1))
4805 (POW (mult @0 @2) @1))
4806
4807 /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y). */
4808 (simplify
4809 (mult (POWI:s @0 @1) (POWI:s @2 @1))
4810 (POWI (mult @0 @2) @1))
4811
4812 /* Simplify pow(x,c) / x -> pow(x,c-1). */
4813 (simplify
4814 (rdiv (POW:s @0 REAL_CST@1) @0)
4815 (if (!TREE_OVERFLOW (@1))
4816 (POW @0 (minus @1 { build_one_cst (type); }))))
4817
4818 /* Simplify x / pow (y,z) -> x * pow(y,-z). */
4819 (simplify
4820 (rdiv @0 (POW:s @1 @2))
4821 (mult @0 (POW @1 (negate @2))))
4822
4823 (for sqrts (SQRT)
4824 cbrts (CBRT)
4825 pows (POW)
4826 /* sqrt(sqrt(x)) -> pow(x,1/4). */
4827 (simplify
4828 (sqrts (sqrts @0))
4829 (pows @0 { build_real (type, dconst_quarter ()); }))
4830 /* sqrt(cbrt(x)) -> pow(x,1/6). */
4831 (simplify
4832 (sqrts (cbrts @0))
4833 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
4834 /* cbrt(sqrt(x)) -> pow(x,1/6). */
4835 (simplify
4836 (cbrts (sqrts @0))
4837 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
4838 /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative. */
4839 (simplify
4840 (cbrts (cbrts tree_expr_nonnegative_p@0))
4841 (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
4842 /* sqrt(pow(x,y)) -> pow(|x|,y*0.5). */
4843 (simplify
4844 (sqrts (pows @0 @1))
4845 (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
4846 /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative. */
4847 (simplify
4848 (cbrts (pows tree_expr_nonnegative_p@0 @1))
4849 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
4850 /* pow(sqrt(x),y) -> pow(x,y*0.5). */
4851 (simplify
4852 (pows (sqrts @0) @1)
4853 (pows @0 (mult @1 { build_real (type, dconsthalf); })))
4854 /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative. */
4855 (simplify
4856 (pows (cbrts tree_expr_nonnegative_p@0) @1)
4857 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
4858 /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative. */
4859 (simplify
4860 (pows (pows tree_expr_nonnegative_p@0 @1) @2)
4861 (pows @0 (mult @1 @2))))
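
/* All of the above are instances of pow (pow (x, a), b) == pow (x, a*b)
   with sqrt == pow (., 1/2) and cbrt == pow (., 1/3), e.g.
   sqrt (cbrt (x)) == x**(1/2 * 1/3) == x**(1/6).  The nonnegativity
   restrictions come from the domains: cbrt accepts negative
   arguments while pow with a fractional exponent yields NaN for a
   negative base, so e.g. cbrt (cbrt (x)) -> pow (x, 1/9) is valid
   only for x >= 0.  sqrt (pow (x, y)) instead substitutes fabs (x)
   as the base to stay within pow's domain; any remaining sign
   difference is tolerated under -funsafe-math-optimizations.  */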
4862
4863 /* cabs(x+xi) -> fabs(x)*sqrt(2). */
4864 (simplify
4865 (CABS (complex @0 @0))
4866 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
4867
4868 /* hypot(x,x) -> fabs(x)*sqrt(2). */
4869 (simplify
4870 (HYPOT @0 @0)
4871 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
4872
4873 /* cexp(x+yi) -> exp(x)*cexpi(y). */
4874 (for cexps (CEXP)
4875 exps (EXP)
4876 cexpis (CEXPI)
4877 (simplify
4878 (cexps compositional_complex@0)
4879 (if (targetm.libc_has_function (function_c99_math_complex))
4880 (complex
4881 (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0))))
4882 (mult @1 (imagpart @2)))))))
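
/* The cexp rule is Euler's formula: for z = x + y*I,

     cexp (z) == exp (x) * (cos (y) + sin (y) * I)

   where CEXPI (y) computes cos (y) + sin (y) * I in one call (GCC
   typically expands it via sincos), so one complex exponential
   becomes a real exp plus a combined sin/cos.  The libc check keeps
   the fold from introducing C99 complex-math support calls that the
   runtime may lack.  */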
4883
4884 (if (canonicalize_math_p ())
4885 /* floor(x) -> trunc(x) if x is nonnegative. */
4886 (for floors (FLOOR_ALL)
4887 truncs (TRUNC_ALL)
4888 (simplify
4889 (floors tree_expr_nonnegative_p@0)
4890 (truncs @0))))
4891
4892 (match double_value_p
4893 @0
4894 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
4895 (for froms (BUILT_IN_TRUNCL
4896 BUILT_IN_FLOORL
4897 BUILT_IN_CEILL
4898 BUILT_IN_ROUNDL
4899 BUILT_IN_NEARBYINTL
4900 BUILT_IN_RINTL)
4901 tos (BUILT_IN_TRUNC
4902 BUILT_IN_FLOOR
4903 BUILT_IN_CEIL
4904 BUILT_IN_ROUND
4905 BUILT_IN_NEARBYINT
4906 BUILT_IN_RINT)
4907 /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double. */
4908 (if (optimize && canonicalize_math_p ())
4909 (simplify
4910 (froms (convert double_value_p@0))
4911 (convert (tos @0)))))
4912
4913 (match float_value_p
4914 @0
4915 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
4916 (for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
4917 BUILT_IN_FLOORL BUILT_IN_FLOOR
4918 BUILT_IN_CEILL BUILT_IN_CEIL
4919 BUILT_IN_ROUNDL BUILT_IN_ROUND
4920 BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
4921 BUILT_IN_RINTL BUILT_IN_RINT)
4922 tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
4923 BUILT_IN_FLOORF BUILT_IN_FLOORF
4924 BUILT_IN_CEILF BUILT_IN_CEILF
4925 BUILT_IN_ROUNDF BUILT_IN_ROUNDF
4926 BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
4927 BUILT_IN_RINTF BUILT_IN_RINTF)
4928 /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc.,
4929 if x is a float. */
4930 (if (optimize && canonicalize_math_p ()
4931 && targetm.libc_has_function (function_c99_misc))
4932 (simplify
4933 (froms (convert float_value_p@0))
4934 (convert (tos @0)))))
4935
4936 (for froms (XFLOORL XCEILL XROUNDL XRINTL)
4937 tos (XFLOOR XCEIL XROUND XRINT)
4938 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double. */
4939 (if (optimize && canonicalize_math_p ())
4940 (simplify
4941 (froms (convert double_value_p@0))
4942 (tos @0))))
4943
4944 (for froms (XFLOORL XCEILL XROUNDL XRINTL
4945 XFLOOR XCEIL XROUND XRINT)
4946 tos (XFLOORF XCEILF XROUNDF XRINTF)
4947 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc.,
4948 if x is a float. */
4949 (if (optimize && canonicalize_math_p ())
4950 (simplify
4951 (froms (convert float_value_p@0))
4952 (tos @0))))
4953
4954 (if (canonicalize_math_p ())
4955 /* xfloor(x) -> fix_trunc(x) if x is nonnegative. */
4956 (for floors (IFLOOR LFLOOR LLFLOOR)
4957 (simplify
4958 (floors tree_expr_nonnegative_p@0)
4959 (fix_trunc @0))))
4960
4961 (if (canonicalize_math_p ())
4962 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued. */
4963 (for fns (IFLOOR LFLOOR LLFLOOR
4964 ICEIL LCEIL LLCEIL
4965 IROUND LROUND LLROUND)
4966 (simplify
4967 (fns integer_valued_real_p@0)
4968 (fix_trunc @0)))
4969 (if (!flag_errno_math)
4970 /* xrint(x) -> fix_trunc(x), etc., if x is integer valued. */
4971 (for rints (IRINT LRINT LLRINT)
4972 (simplify
4973 (rints integer_valued_real_p@0)
4974 (fix_trunc @0)))))
4975
4976 (if (canonicalize_math_p ())
4977 (for ifn (IFLOOR ICEIL IROUND IRINT)
4978 lfn (LFLOOR LCEIL LROUND LRINT)
4979 llfn (LLFLOOR LLCEIL LLROUND LLRINT)
4980 /* Canonicalize iround (x) to lround (x) on ILP32 targets where
4981 sizeof (int) == sizeof (long). */
4982 (if (TYPE_PRECISION (integer_type_node)
4983 == TYPE_PRECISION (long_integer_type_node))
4984 (simplify
4985 (ifn @0)
4986 (lfn:long_integer_type_node @0)))
4987 /* Canonicalize llround (x) to lround (x) on LP64 targets where
4988 sizeof (long long) == sizeof (long). */
4989 (if (TYPE_PRECISION (long_long_integer_type_node)
4990 == TYPE_PRECISION (long_integer_type_node))
4991 (simplify
4992 (llfn @0)
4993 (lfn:long_integer_type_node @0)))))
4994
4995 /* cproj(x) -> x if we're ignoring infinities. */
4996 (simplify
4997 (CPROJ @0)
4998 (if (!HONOR_INFINITIES (type))
4999 @0))
5000
5001 /* If the real part is inf and the imag part is known to be
5002 nonnegative, return (inf + 0i). */
5003 (simplify
5004 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
5005 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
5006 { build_complex_inf (type, false); }))
5007
5008 /* If the imag part is inf, return (inf+I*copysign(0,imag)). */
5009 (simplify
5010 (CPROJ (complex @0 REAL_CST@1))
5011 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
5012 { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))
5013
5014 (for pows (POW)
5015 sqrts (SQRT)
5016 cbrts (CBRT)
5017 (simplify
5018 (pows @0 REAL_CST@1)
5019 (with {
5020 const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
5021 REAL_VALUE_TYPE tmp;
5022 }
5023 (switch
5024 /* pow(x,0) -> 1. */
5025 (if (real_equal (value, &dconst0))
5026 { build_real (type, dconst1); })
5027 /* pow(x,1) -> x. */
5028 (if (real_equal (value, &dconst1))
5029 @0)
5030 /* pow(x,-1) -> 1/x. */
5031 (if (real_equal (value, &dconstm1))
5032 (rdiv { build_real (type, dconst1); } @0))
5033 /* pow(x,0.5) -> sqrt(x). */
5034 (if (flag_unsafe_math_optimizations
5035 && canonicalize_math_p ()
5036 && real_equal (value, &dconsthalf))
5037 (sqrts @0))
5038 /* pow(x,1/3) -> cbrt(x). */
5039 (if (flag_unsafe_math_optimizations
5040 && canonicalize_math_p ()
5041 && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
5042 real_equal (value, &tmp)))
5043 (cbrts @0))))))
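
/* The constant exponents above, in C terms:

     pow (x, 0.0)  -> 1.0        (C99: 1.0 even for x == NaN)
     pow (x, 1.0)  -> x
     pow (x, -1.0) -> 1.0 / x
     pow (x, 0.5)  -> sqrt (x)   unsafe only: pow (-0.0, 0.5) is +0.0
                                 while sqrt (-0.0) is -0.0, and
                                 pow (-Inf, 0.5) is +Inf, not NaN
     pow (x, c)    -> cbrt (x)   unsafe only: cbrt (-8.0) is -2.0
                                 while pow of a negative base is NaN

   For the cbrt case, c is compared against 1/3 rounded to the type
   (real_value_truncate) because 1/3 has no exact binary form.  */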
5044
5045 /* powi(1,x) -> 1. */
5046 (simplify
5047 (POWI real_onep@0 @1)
5048 @0)
5049
5050 (simplify
5051 (POWI @0 INTEGER_CST@1)
5052 (switch
5053 /* powi(x,0) -> 1. */
5054 (if (wi::to_wide (@1) == 0)
5055 { build_real (type, dconst1); })
5056 /* powi(x,1) -> x. */
5057 (if (wi::to_wide (@1) == 1)
5058 @0)
5059 /* powi(x,-1) -> 1/x. */
5060 (if (wi::to_wide (@1) == -1)
5061 (rdiv { build_real (type, dconst1); } @0))))
5062
5063 /* Narrowing of arithmetic and logical operations.
5064
5065 These are conceptually similar to the transformations performed for
5066 the C/C++ front-ends by shorten_binary_op and shorten_compare. Long
5067 term we want to move all that code out of the front-ends into here. */
5068
5069 /* Convert (outertype)((innertype0)a+(innertype1)b)
5070 into ((newtype)a+(newtype)b) where newtype
5071 is the widest mode from all of these. */
5072 (for op (plus minus mult rdiv)
5073 (simplify
5074 (convert (op:s@0 (convert1?@3 @1) (convert2?@4 @2)))
5075 /* If we have a narrowing conversion of an arithmetic operation where
5076 both operands are widening conversions from the same type as the outer
5077 narrowing conversion, convert the innermost operands to a
5078 suitable unsigned type (to avoid introducing undefined behavior),
5079 perform the operation and convert the result to the desired type. */
5080 (if (INTEGRAL_TYPE_P (type)
5081 && op != MULT_EXPR
5082 && op != RDIV_EXPR
5083 /* We check for type compatibility between @0 and @1 below,
5084 so there's no need to check that @2/@4 are integral types. */
5085 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
5086 && INTEGRAL_TYPE_P (TREE_TYPE (@3))
5087 /* The precision of the type of each operand must match the
5088 precision of the mode of each operand, similarly for the
5089 result. */
5090 && type_has_mode_precision_p (TREE_TYPE (@1))
5091 && type_has_mode_precision_p (TREE_TYPE (@2))
5092 && type_has_mode_precision_p (type)
5093 /* The inner conversion must be a widening conversion. */
5094 && TYPE_PRECISION (TREE_TYPE (@3)) > TYPE_PRECISION (TREE_TYPE (@1))
5095 && types_match (@1, type)
5096 && (types_match (@1, @2)
5097 /* Or the second operand is a constant integer, possibly a
5098 converted constant produced by valueization. */
5099 || TREE_CODE (@2) == INTEGER_CST))
5100 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
5101 (op @1 (convert @2))
5102 (with { tree utype = unsigned_type_for (TREE_TYPE (@1)); }
5103 (convert (op (convert:utype @1)
5104 (convert:utype @2)))))
5105 (if (FLOAT_TYPE_P (type)
5106 && DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0))
5107 == DECIMAL_FLOAT_TYPE_P (type))
5108 (with { tree arg0 = strip_float_extensions (@1);
5109 tree arg1 = strip_float_extensions (@2);
5110 tree itype = TREE_TYPE (@0);
5111 tree ty1 = TREE_TYPE (arg0);
5112 tree ty2 = TREE_TYPE (arg1);
5113 enum tree_code code = TREE_CODE (itype); }
5114 (if (FLOAT_TYPE_P (ty1)
5115 && FLOAT_TYPE_P (ty2))
5116 (with { tree newtype = type;
5117 if (TYPE_MODE (ty1) == SDmode
5118 || TYPE_MODE (ty2) == SDmode
5119 || TYPE_MODE (type) == SDmode)
5120 newtype = dfloat32_type_node;
5121 if (TYPE_MODE (ty1) == DDmode
5122 || TYPE_MODE (ty2) == DDmode
5123 || TYPE_MODE (type) == DDmode)
5124 newtype = dfloat64_type_node;
5125 if (TYPE_MODE (ty1) == TDmode
5126 || TYPE_MODE (ty2) == TDmode
5127 || TYPE_MODE (type) == TDmode)
5128 newtype = dfloat128_type_node; }
5129 (if ((newtype == dfloat32_type_node
5130 || newtype == dfloat64_type_node
5131 || newtype == dfloat128_type_node)
5132 && newtype == type
5133 && types_match (newtype, type))
5134 (op (convert:newtype @1) (convert:newtype @2))
5135 (with { if (TYPE_PRECISION (ty1) > TYPE_PRECISION (newtype))
5136 newtype = ty1;
5137 if (TYPE_PRECISION (ty2) > TYPE_PRECISION (newtype))
5138 newtype = ty2; }
5139 /* Sometimes this transformation is safe (cannot
5140 change results through affecting double rounding
5141 cases) and sometimes it is not. If NEWTYPE is
5142 wider than TYPE, e.g. (float)((long double)double
5143 + (long double)double) converted to
5144 (float)(double + double), the transformation is
5145 unsafe regardless of the details of the types
5146 involved; double rounding can arise if the result
5147 of NEWTYPE arithmetic is a NEWTYPE value half way
5148 between two representable TYPE values but the
5149 exact value is sufficiently different (in the
5150 right direction) for this difference to be
5151 visible in ITYPE arithmetic. If NEWTYPE is the
5152 same as TYPE, however, the transformation may be
5153 safe depending on the types involved: it is safe
5154 if the ITYPE has strictly more than twice as many
5155 mantissa bits as TYPE, can represent infinities
5156 and NaNs if the TYPE can, and has sufficient
5157 exponent range for the product or ratio of two
5158 values representable in the TYPE to be within the
5159 range of normal values of ITYPE. */
5160 (if (TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
5161 && (flag_unsafe_math_optimizations
5162 || (TYPE_PRECISION (newtype) == TYPE_PRECISION (type)
5163 && real_can_shorten_arithmetic (TYPE_MODE (itype),
5164 TYPE_MODE (type))
5165 && !excess_precision_type (newtype)))
5166 && !types_match (itype, newtype))
5167 (convert:type (op (convert:newtype @1)
5168 (convert:newtype @2)))
5169 )))) )
5170 ))
5171 )))
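
/* A sketch of the integral case, at GIMPLE level (C's own integer
   promotions are ignored here):

     short f (short a, short b)
     {
       return (short) ((int) a + (int) b);
     }

   has both operands widened from the result type, so it narrows to

     return (short) ((unsigned short) a + (unsigned short) b);

   The detour through unsigned short exists because the narrow
   addition can overflow, which would be undefined in the signed
   type but wraps harmlessly in the unsigned one.  The FLOAT_TYPE_P
   branch plays the same game with floating-point widths, demoting
   e.g. (float)((double)f1 + (double)f2) to a float addition when
   double rounding cannot change the result.  */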
5172
5173 /* This is another case of narrowing, specifically when there's an outer
5174 BIT_AND_EXPR which masks off bits outside the type of the innermost
5175 operands. Like the previous case we have to convert the operands
5176 to unsigned types to avoid introducing undefined behavior for the
5177 arithmetic operation. */
5178 (for op (minus plus)
5179 (simplify
5180 (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
5181 (if (INTEGRAL_TYPE_P (type)
5182 /* We check for type compatibility between @0 and @1 below,
5183 so there's no need to check that @1/@3 are integral types. */
5184 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
5185 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
5186 /* The precision of the type of each operand must match the
5187 precision of the mode of each operand, similarly for the
5188 result. */
5189 && type_has_mode_precision_p (TREE_TYPE (@0))
5190 && type_has_mode_precision_p (TREE_TYPE (@1))
5191 && type_has_mode_precision_p (type)
5192 /* The inner conversion must be a widening conversion. */
5193 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
5194 && types_match (@0, @1)
5195 && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
5196 <= TYPE_PRECISION (TREE_TYPE (@0)))
5197 && (wi::to_wide (@4)
5198 & wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
5199 true, TYPE_PRECISION (type))) == 0)
5200 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
5201 (with { tree ntype = TREE_TYPE (@0); }
5202 (convert (bit_and (op @0 @1) (convert:ntype @4))))
5203 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
5204 (convert (bit_and (op (convert:utype @0) (convert:utype @1))
5205 (convert:utype @4))))))))
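
/* A sketch, again at GIMPLE level: with unsigned char a and b,

     ((int) a + (int) b) & 0x7f

   masks away every bit the widened sum could carry beyond unsigned
   char, so it becomes

     (int) ((unsigned char) (a + b) & 0x7f)

   with the addition done in the narrow, wrapping unsigned type.  */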
5206
5207 /* Transform (@0 < @1 and @0 < @2) to use min,
5208 (@0 > @1 and @0 > @2) to use max; the "or" forms swap min and max. */
5209 (for logic (bit_and bit_and bit_and bit_and bit_ior bit_ior bit_ior bit_ior)
5210 op (lt le gt ge lt le gt ge )
5211 ext (min min max max max max min min )
5212 (simplify
5213 (logic (op:cs @0 @1) (op:cs @0 @2))
5214 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5215 && TREE_CODE (@0) != INTEGER_CST)
5216 (op @0 (ext @1 @2)))))
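
/* E.g. (x < y && x < z) -> x < MIN (y, z) and, dually,
   (x < y || x < z) -> x < MAX (y, z); likewise for <=, > and >= with
   MIN and MAX exchanged.  One comparison plus one min/max replaces
   two comparisons and the logical operator.  */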
5217
5218 (simplify
5219 /* signbit(x) -> 0 if x is nonnegative. */
5220 (SIGNBIT tree_expr_nonnegative_p@0)
5221 { integer_zero_node; })
5222
5223 (simplify
5224 /* signbit(x) -> x<0 if x doesn't have signed zeros. */
5225 (SIGNBIT @0)
5226 (if (!HONOR_SIGNED_ZEROS (@0))
5227 (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); }))))
5228
5229 /* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1. */
5230 (for cmp (eq ne)
5231 (for op (plus minus)
5232 rop (minus plus)
5233 (simplify
5234 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
5235 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
5236 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
5237 && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0))
5238 && !TYPE_SATURATING (TREE_TYPE (@0)))
5239 (with { tree res = int_const_binop (rop, @2, @1); }
5240 (if (TREE_OVERFLOW (res)
5241 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
5242 { constant_boolean_node (cmp == NE_EXPR, type); }
5243 (if (single_use (@3))
5244 (cmp @0 { TREE_OVERFLOW (res)
5245 ? drop_tree_overflow (res) : res; }))))))))
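
/* E.g. x + 10 == 30 folds to x == 20.  When C2 -+ C1 overflows,
   say x - 10 == INT_MAX for int x, no x can satisfy the comparison
   without the lhs subtraction itself overflowing, so with undefined
   signed overflow the test folds directly to false (to true for !=).  */
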
5246 (for cmp (lt le gt ge)
5247 (for op (plus minus)
5248 rop (minus plus)
5249 (simplify
5250 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
5251 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
5252 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
5253 (with { tree res = int_const_binop (rop, @2, @1); }
5254 (if (TREE_OVERFLOW (res))
5255 {
5256 fold_overflow_warning (("assuming signed overflow does not occur "
5257 "when simplifying conditional to constant"),
5258 WARN_STRICT_OVERFLOW_CONDITIONAL);
5259 bool less = cmp == LE_EXPR || cmp == LT_EXPR;
5260 /* wi::ges_p (@2, 0) should be sufficient for a signed type. */
5261 bool ovf_high = wi::lt_p (wi::to_wide (@1), 0,
5262 TYPE_SIGN (TREE_TYPE (@1)))
5263 != (op == MINUS_EXPR);
5264 constant_boolean_node (less == ovf_high, type);
5265 }
5266 (if (single_use (@3))
5267 (with
5268 {
5269 fold_overflow_warning (("assuming signed overflow does not occur "
5270 "when changing X +- C1 cmp C2 to "
5271 "X cmp C2 -+ C1"),
5272 WARN_STRICT_OVERFLOW_COMPARISON);
5273 }
5274 (cmp @0 { res; })))))))))
5275
5276 /* Canonicalizations of BIT_FIELD_REFs. */
5277
5278 (simplify
5279 (BIT_FIELD_REF (BIT_FIELD_REF @0 @1 @2) @3 @4)
5280 (BIT_FIELD_REF @0 @3 { const_binop (PLUS_EXPR, bitsizetype, @2, @4); }))
5281
5282 (simplify
5283 (BIT_FIELD_REF (view_convert @0) @1 @2)
5284 (BIT_FIELD_REF @0 @1 @2))
5285
5286 (simplify
5287 (BIT_FIELD_REF @0 @1 integer_zerop)
5288 (if (tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (@0))))
5289 (view_convert @0)))
5290
5291 (simplify
5292 (BIT_FIELD_REF @0 @1 @2)
5293 (switch
5294 (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE
5295 && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
5296 (switch
5297 (if (integer_zerop (@2))
5298 (view_convert (realpart @0)))
5299 (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
5300 (view_convert (imagpart @0)))))
5301 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5302 && INTEGRAL_TYPE_P (type)
5303 /* On GIMPLE this should only apply to register arguments. */
5304 && (! GIMPLE || is_gimple_reg (@0))
5305 /* A bit-field-ref that references the full argument can be stripped. */
5306 && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0
5307 && integer_zerop (@2))
5308 /* Low-parts can be reduced to integral conversions.
5309 ??? The following doesn't work for PDP endian. */
5310 || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
5311 /* Don't even think about BITS_BIG_ENDIAN. */
5312 && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0
5313 && TYPE_PRECISION (type) % BITS_PER_UNIT == 0
5314 && compare_tree_int (@2, (BYTES_BIG_ENDIAN
5315 ? (TYPE_PRECISION (TREE_TYPE (@0))
5316 - TYPE_PRECISION (type))
5317 : 0)) == 0)))
5318 (convert @0))))
5319
5320 /* Simplify vector extracts. */
5321
5322 (simplify
5323 (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2)
5324 (if (VECTOR_TYPE_P (TREE_TYPE (@0))
5325 && (types_match (type, TREE_TYPE (TREE_TYPE (@0)))
5326 || (VECTOR_TYPE_P (type)
5327 && types_match (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
5328 (with
5329 {
5330 tree ctor = (TREE_CODE (@0) == SSA_NAME
5331 ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
5332 tree eltype = TREE_TYPE (TREE_TYPE (ctor));
5333 unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype));
5334 unsigned HOST_WIDE_INT n = tree_to_uhwi (@1);
5335 unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2);
5336 }
5337 (if (n != 0
5338 && (idx % width) == 0
5339 && (n % width) == 0
5340 && known_le ((idx + n) / width,
5341 TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor))))
5342 (with
5343 {
5344 idx = idx / width;
5345 n = n / width;
5346 /* Constructor elements can be subvectors. */
5347 poly_uint64 k = 1;
5348 if (CONSTRUCTOR_NELTS (ctor) != 0)
5349 {
5350 tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value);
5351 if (TREE_CODE (cons_elem) == VECTOR_TYPE)
5352 k = TYPE_VECTOR_SUBPARTS (cons_elem);
5353 }
5354 unsigned HOST_WIDE_INT elt, count, const_k;
5355 }
5356 (switch
5357 /* We keep an exact subset of the constructor elements. */
5358 (if (multiple_p (idx, k, &elt) && multiple_p (n, k, &count))
5359 (if (CONSTRUCTOR_NELTS (ctor) == 0)
5360 { build_constructor (type, NULL); }
5361 (if (count == 1)
5362 (if (elt < CONSTRUCTOR_NELTS (ctor))
5363 (view_convert { CONSTRUCTOR_ELT (ctor, elt)->value; })
5364 { build_zero_cst (type); })
5365 {
5366 vec<constructor_elt, va_gc> *vals;
5367 vec_alloc (vals, count);
5368 for (unsigned i = 0;
5369 i < count && elt + i < CONSTRUCTOR_NELTS (ctor); ++i)
5370 CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE,
5371 CONSTRUCTOR_ELT (ctor, elt + i)->value);
5372 build_constructor (type, vals);
5373 })))
5374 /* The bitfield references a single constructor element. */
5375 (if (k.is_constant (&const_k)
5376 && idx + n <= (idx / const_k + 1) * const_k)
5377 (switch
5378 (if (CONSTRUCTOR_NELTS (ctor) <= idx / const_k)
5379 { build_zero_cst (type); })
5380 (if (n == const_k)
5381 (view_convert { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }))
5382 (BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }
5383 @1 { bitsize_int ((idx % const_k) * width); })))))))))
5384
5385 /* Simplify a bit extraction from a bit insertion for the cases with
5386 the inserted element fully covering the extraction or the insertion
5387 not touching the extraction. */
5388 (simplify
5389 (BIT_FIELD_REF (bit_insert @0 @1 @ipos) @rsize @rpos)
5390 (with
5391 {
5392 unsigned HOST_WIDE_INT isize;
5393 if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
5394 isize = TYPE_PRECISION (TREE_TYPE (@1));
5395 else
5396 isize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (@1)));
5397 }
5398 (switch
5399 (if (wi::leu_p (wi::to_wide (@ipos), wi::to_wide (@rpos))
5400 && wi::leu_p (wi::to_wide (@rpos) + wi::to_wide (@rsize),
5401 wi::to_wide (@ipos) + isize))
5402 (BIT_FIELD_REF @1 @rsize { wide_int_to_tree (bitsizetype,
5403 wi::to_wide (@rpos)
5404 - wi::to_wide (@ipos)); }))
5405 (if (wi::geu_p (wi::to_wide (@ipos),
5406 wi::to_wide (@rpos) + wi::to_wide (@rsize))
5407 || wi::geu_p (wi::to_wide (@rpos),
5408 wi::to_wide (@ipos) + isize))
5409 (BIT_FIELD_REF @0 @rsize @rpos)))))
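
/* E.g. for a four-element vector v and a 32-bit element e:

     BIT_FIELD_REF <BIT_INSERT_EXPR <v, e, 32>, 32, 32>
       -> BIT_FIELD_REF <e, 32, 0>    (reads back exactly e)
     BIT_FIELD_REF <BIT_INSERT_EXPR <v, e, 32>, 32, 0>
       -> BIT_FIELD_REF <v, 32, 0>    (insertion not touched)

   Extractions that partially overlap the inserted range are left
   alone.  */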
5410
5411 (if (canonicalize_math_after_vectorization_p ())
5412 (for fmas (FMA)
5413 (simplify
5414 (fmas:c (negate @0) @1 @2)
5415 (IFN_FNMA @0 @1 @2))
5416 (simplify
5417 (fmas @0 @1 (negate @2))
5418 (IFN_FMS @0 @1 @2))
5419 (simplify
5420 (fmas:c (negate @0) @1 (negate @2))
5421 (IFN_FNMS @0 @1 @2))
5422 (simplify
5423 (negate (fmas@3 @0 @1 @2))
5424 (if (single_use (@3))
5425 (IFN_FNMS @0 @1 @2))))
5426
5427 (simplify
5428 (IFN_FMS:c (negate @0) @1 @2)
5429 (IFN_FNMS @0 @1 @2))
5430 (simplify
5431 (IFN_FMS @0 @1 (negate @2))
5432 (IFN_FMA @0 @1 @2))
5433 (simplify
5434 (IFN_FMS:c (negate @0) @1 (negate @2))
5435 (IFN_FNMA @0 @1 @2))
5436 (simplify
5437 (negate (IFN_FMS@3 @0 @1 @2))
5438 (if (single_use (@3))
5439 (IFN_FNMA @0 @1 @2)))
5440
5441 (simplify
5442 (IFN_FNMA:c (negate @0) @1 @2)
5443 (IFN_FMA @0 @1 @2))
5444 (simplify
5445 (IFN_FNMA @0 @1 (negate @2))
5446 (IFN_FNMS @0 @1 @2))
5447 (simplify
5448 (IFN_FNMA:c (negate @0) @1 (negate @2))
5449 (IFN_FMS @0 @1 @2))
5450 (simplify
5451 (negate (IFN_FNMA@3 @0 @1 @2))
5452 (if (single_use (@3))
5453 (IFN_FMS @0 @1 @2)))
5454
5455 (simplify
5456 (IFN_FNMS:c (negate @0) @1 @2)
5457 (IFN_FMS @0 @1 @2))
5458 (simplify
5459 (IFN_FNMS @0 @1 (negate @2))
5460 (IFN_FNMA @0 @1 @2))
5461 (simplify
5462 (IFN_FNMS:c (negate @0) @1 (negate @2))
5463 (IFN_FMA @0 @1 @2))
5464 (simplify
5465 (negate (IFN_FNMS@3 @0 @1 @2))
5466 (if (single_use (@3))
5467 (IFN_FMA @0 @1 @2))))
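
/* All of the above distribute negation through the fused multiply-add
   family, where FMA (a, b, c) = a*b + c, FMS = a*b - c,
   FNMA = -a*b + c and FNMS = -a*b - c; for instance

     FMA (-a, b, c)  ==  FNMA (a, b, c)
     -FMA (a, b, c)  ==  FNMS (a, b, c)

   Negation is exact and the expression stays fused, so rounding does
   not change; the single_use tests merely avoid duplicating a result
   that is also needed un-negated elsewhere.  */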
5468
5469 /* POPCOUNT simplifications. */
5470 (for popcount (BUILT_IN_POPCOUNT BUILT_IN_POPCOUNTL BUILT_IN_POPCOUNTLL
5471 BUILT_IN_POPCOUNTIMAX)
5472 /* popcount(X&1) is nop_expr(X&1). */
5473 (simplify
5474 (popcount @0)
5475 (if (tree_nonzero_bits (@0) == 1)
5476 (convert @0)))
5477 /* popcount(X) + popcount(Y) is popcount(X|Y) when X&Y must be zero. */
5478 (simplify
5479 (plus (popcount:s @0) (popcount:s @1))
5480 (if (wi::bit_and (tree_nonzero_bits (@0), tree_nonzero_bits (@1)) == 0)
5481 (popcount (bit_ior @0 @1))))
5482 /* popcount(X) == 0 is X == 0, and related (in)equalities. */
5483 (for cmp (le eq ne gt)
5484 rep (eq eq ne ne)
5485 (simplify
5486 (cmp (popcount @0) integer_zerop)
5487 (rep @0 { build_zero_cst (TREE_TYPE (@0)); }))))
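
/* In C terms:

     __builtin_popcount (x & 1)   ->  (int) (x & 1)
     __builtin_popcount (x) + __builtin_popcount (y)
                                  ->  __builtin_popcount (x | y)
                                      if x & y is known to be zero
     __builtin_popcount (x) == 0  ->  x == 0    (and <= 0, != 0, > 0)

   The first holds because a value with at most one set bit equals its
   own population count; the second because bits of disjoint operands
   contribute to x | y exactly once.  */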
5488
5489 /* Simplify:
5490
5491 a = a1 op a2
5492 r = c ? a : b;
5493
5494 to:
5495
5496 r = c ? a1 op a2 : b;
5497
5498 if the target can do it in one go. This makes the operation conditional
5499 on c, so it could drop potentially-trapping arithmetic, but that's a valid
5500 simplification if the result of the operation isn't needed.
5501
5502 Avoid speculatively generating a stand-alone vector comparison
5503 on targets that might not support them. Any target implementing
5504 conditional internal functions must support the same comparisons
5505 inside and outside a VEC_COND_EXPR. */
5506
5507 #if GIMPLE
5508 (for uncond_op (UNCOND_BINARY)
5509 cond_op (COND_BINARY)
5510 (simplify
5511 (vec_cond @0 (view_convert? (uncond_op@4 @1 @2)) @3)
5512 (with { tree op_type = TREE_TYPE (@4); }
5513 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
5514 && element_precision (type) == element_precision (op_type))
5515 (view_convert (cond_op @0 @1 @2 (view_convert:op_type @3))))))
5516 (simplify
5517 (vec_cond @0 @1 (view_convert? (uncond_op@4 @2 @3)))
5518 (with { tree op_type = TREE_TYPE (@4); }
5519 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
5520 && element_precision (type) == element_precision (op_type))
5521 (view_convert (cond_op (bit_not @0) @2 @3 (view_convert:op_type @1)))))))
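
/* For example, a vector addition selected by a VEC_COND_EXPR,

     r = c ? a + b : d;

   becomes the conditional internal function

     r = .COND_ADD (c, a, b, d);

   with d as the "else" value; when the operation sits in the else
   arm instead, the condition is inverted (bit_not) first.  */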
5522
5523 /* Same for ternary operations. */
5524 (for uncond_op (UNCOND_TERNARY)
5525 cond_op (COND_TERNARY)
5526 (simplify
5527 (vec_cond @0 (view_convert? (uncond_op@5 @1 @2 @3)) @4)
5528 (with { tree op_type = TREE_TYPE (@5); }
5529 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
5530 && element_precision (type) == element_precision (op_type))
5531 (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @4))))))
5532 (simplify
5533 (vec_cond @0 @1 (view_convert? (uncond_op@5 @2 @3 @4)))
5534 (with { tree op_type = TREE_TYPE (@5); }
5535 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
5536 && element_precision (type) == element_precision (op_type))
5537 (view_convert (cond_op (bit_not @0) @2 @3 @4
5538 (view_convert:op_type @1)))))))
5539 #endif
5540
5541 /* Detect cases in which a VEC_COND_EXPR effectively replaces the
5542 "else" value of an IFN_COND_*. */
5543 (for cond_op (COND_BINARY)
5544 (simplify
5545 (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3)) @4)
5546 (with { tree op_type = TREE_TYPE (@3); }
5547 (if (element_precision (type) == element_precision (op_type))
5548 (view_convert (cond_op @0 @1 @2 (view_convert:op_type @4))))))
5549 (simplify
5550 (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5)))
5551 (with { tree op_type = TREE_TYPE (@5); }
5552 (if (inverse_conditions_p (@0, @2)
5553 && element_precision (type) == element_precision (op_type))
5554 (view_convert (cond_op @2 @3 @4 (view_convert:op_type @1)))))))
5555
5556 /* Same for ternary operations. */
5557 (for cond_op (COND_TERNARY)
5558 (simplify
5559 (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3 @4)) @5)
5560 (with { tree op_type = TREE_TYPE (@4); }
5561 (if (element_precision (type) == element_precision (op_type))
5562 (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @5))))))
5563 (simplify
5564 (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5 @6)))
5565 (with { tree op_type = TREE_TYPE (@6); }
5566 (if (inverse_conditions_p (@0, @2)
5567 && element_precision (type) == element_precision (op_type))
5568 (view_convert (cond_op @2 @3 @4 @5 (view_convert:op_type @1)))))))
5569
5570 /* For pointers @0 and @2 and nonnegative constant offset @1, look for
5571 expressions like:
5572
5573 A: (@0 + @1 < @2) | (@2 + @1 < @0)
5574 B: (@0 + @1 <= @2) | (@2 + @1 <= @0)
5575
5576 If pointers are known not to wrap, B checks whether @1 bytes starting
5577 at @0 and @2 do not overlap, while A tests the same thing for @1 + 1
5578 bytes. A is more efficiently tested as:
5579
5580 A: (sizetype) (@0 + @1 - @2) > @1 * 2
5581
5582 The equivalent expression for B is given by replacing @1 with @1 - 1:
5583
5584 B: (sizetype) (@0 + (@1 - 1) - @2) > (@1 - 1) * 2
5585
5586 @0 and @2 can be swapped in both expressions without changing the result.
5587
5588 The folds rely on sizetype's being unsigned (which is always true)
5589 and on its being the same width as the pointer (which we have to check).
5590
5591 The fold replaces two pointer_plus expressions, two comparisons and
5592 an IOR with a pointer_plus, a pointer_diff, and a comparison, so in
5593 the best case it's a saving of two operations. The A fold retains one
5594 of the original pointer_pluses, so is a win even if both pointer_pluses
5595 are used elsewhere. The B fold is a wash if both pointer_pluses are
5596 used elsewhere, since all we end up doing is replacing a comparison with
5597 a pointer_plus. We do still apply the fold under those circumstances
5598 though, in case applying it to other conditions eventually makes one of the
5599 pointer_pluses dead. */
5600 (for ior (truth_orif truth_or bit_ior)
5601 (for cmp (le lt)
5602 (simplify
5603 (ior (cmp:cs (pointer_plus@3 @0 INTEGER_CST@1) @2)
5604 (cmp:cs (pointer_plus@4 @2 @1) @0))
5605 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
5606 && TYPE_OVERFLOW_WRAPS (sizetype)
5607 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (sizetype))
5608 /* Calculate the rhs constant. */
5609 (with { offset_int off = wi::to_offset (@1) - (cmp == LE_EXPR ? 1 : 0);
5610 offset_int rhs = off * 2; }
5611 /* The precision test below always fails for negative values. */
5612 (if (wi::min_precision (rhs, UNSIGNED) <= TYPE_PRECISION (sizetype))
5613 /* Since the order of @0 and @2 doesn't matter, let tree_swap_operands_p
5614 pick a canonical order. This increases the chances of using the
5615 same pointer_plus in multiple checks. */
5616 (with { bool swap_p = tree_swap_operands_p (@0, @2);
5617 tree rhs_tree = wide_int_to_tree (sizetype, rhs); }
5618 (if (cmp == LT_EXPR)
5619 (gt (convert:sizetype
5620 (pointer_diff:ssizetype { swap_p ? @4 : @3; }
5621 { swap_p ? @0 : @2; }))
5622 { rhs_tree; })
5623 (gt (convert:sizetype
5624 (pointer_diff:ssizetype
5625 (pointer_plus { swap_p ? @2 : @0; }
5626 { wide_int_to_tree (sizetype, off); })
5627 { swap_p ? @0 : @2; }))
5628 { rhs_tree; })))))))))
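
/* Worked instance of form A with offset @1 == n: assuming no pointer
   wrapping, (p + n < q) || (q + n < p) says the (n + 1)-byte blocks
   at p and q are disjoint.  Let d = (sizetype) (p + n - q).  If the
   blocks overlap then |p - q| <= n and d falls in [0, 2n]; if they
   are disjoint, d exceeds 2n either directly (p well above q) or by
   wrapping around zero (q well above p).  Hence the single test

     (sizetype) (p + n - q) > n * 2

   replaces both comparisons and the IOR.  */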
5629
5630 /* Fold REDUC (@0 & @1) -> @0[I] & @1[I] if element I is the only nonzero
5631 element of @1. */
5632 (for reduc (IFN_REDUC_PLUS IFN_REDUC_IOR IFN_REDUC_XOR)
5633 (simplify (reduc (view_convert? (bit_and @0 VECTOR_CST@1)))
5634 (with { int i = single_nonzero_element (@1); }
5635 (if (i >= 0)
5636 (with { tree elt = vector_cst_elt (@1, i);
5637 tree elt_type = TREE_TYPE (elt);
5638 unsigned int elt_bits = tree_to_uhwi (TYPE_SIZE (elt_type));
5639 tree size = bitsize_int (elt_bits);
5640 tree pos = bitsize_int (elt_bits * i); }
5641 (view_convert
5642 (bit_and:elt_type
5643 (BIT_FIELD_REF:elt_type @0 { size; } { pos; })
5644 { elt; })))))))
5645
5646 (simplify
5647 (vec_perm @0 @1 VECTOR_CST@2)
5648 (with
5649 {
5650 tree op0 = @0, op1 = @1, op2 = @2;
5651
5652 /* Build a vector of integers from the tree mask. */
5653 vec_perm_builder builder;
5654 if (!tree_to_vec_perm_builder (&builder, op2))
5655 return NULL_TREE;
5656
5657 /* Create a vec_perm_indices for the integer vector. */
5658 poly_uint64 nelts = TYPE_VECTOR_SUBPARTS (type);
5659 bool single_arg = (op0 == op1);
5660 vec_perm_indices sel (builder, single_arg ? 1 : 2, nelts);
5661 }
5662 (if (sel.series_p (0, 1, 0, 1))
5663 { op0; }
5664 (if (sel.series_p (0, 1, nelts, 1))
5665 { op1; }
5666 (with
5667 {
5668 if (!single_arg)
5669 {
5670 if (sel.all_from_input_p (0))
5671 op1 = op0;
5672 else if (sel.all_from_input_p (1))
5673 {
5674 op0 = op1;
5675 sel.rotate_inputs (1);
5676 }
5677 else if (known_ge (poly_uint64 (sel[0]), nelts))
5678 {
5679 std::swap (op0, op1);
5680 sel.rotate_inputs (1);
5681 }
5682 }
5683 gassign *def;
5684 tree cop0 = op0, cop1 = op1;
5685 if (TREE_CODE (op0) == SSA_NAME
5686 && (def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (op0)))
5687 && gimple_assign_rhs_code (def) == CONSTRUCTOR)
5688 cop0 = gimple_assign_rhs1 (def);
5689 if (TREE_CODE (op1) == SSA_NAME
5690 && (def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (op1)))
5691 && gimple_assign_rhs_code (def) == CONSTRUCTOR)
5692 cop1 = gimple_assign_rhs1 (def);
5693
5694 tree t;
5695 }
5696 (if ((TREE_CODE (cop0) == VECTOR_CST
5697 || TREE_CODE (cop0) == CONSTRUCTOR)
5698 && (TREE_CODE (cop1) == VECTOR_CST
5699 || TREE_CODE (cop1) == CONSTRUCTOR)
5700 && (t = fold_vec_perm (type, cop0, cop1, sel)))
5701 { t; }
5702 (with
5703 {
5704 bool changed = (op0 == op1 && !single_arg);
5705 tree ins = NULL_TREE;
5706 unsigned at = 0;
5707
5708 /* See if the permutation is performing a single element
5709 insert from a CONSTRUCTOR or constant and use a BIT_INSERT_EXPR
5710 in that case. But only if the vector mode is supported,
5711 otherwise this is invalid GIMPLE. */
5712 if (TYPE_MODE (type) != BLKmode
5713 && (TREE_CODE (cop0) == VECTOR_CST
5714 || TREE_CODE (cop0) == CONSTRUCTOR
5715 || TREE_CODE (cop1) == VECTOR_CST
5716 || TREE_CODE (cop1) == CONSTRUCTOR))
5717 {
5718 if (sel.series_p (1, 1, nelts + 1, 1))
5719 {
5720 /* After canonicalizing the first elt to come from the
5721 first vector we only can insert the first elt from
5722 the first vector. */
5723 at = 0;
5724 if ((ins = fold_read_from_vector (cop0, sel[0])))
5725 op0 = op1;
5726 }
5727 else
5728 {
5729 unsigned int encoded_nelts = sel.encoding ().encoded_nelts ();
5730 for (at = 0; at < encoded_nelts; ++at)
5731 if (maybe_ne (sel[at], at))
5732 break;
5733 if (at < encoded_nelts && sel.series_p (at + 1, 1, at + 1, 1))
5734 {
5735 if (known_lt (at, nelts))
5736 ins = fold_read_from_vector (cop0, sel[at]);
5737 else
5738 ins = fold_read_from_vector (cop1, sel[at] - nelts);
5739 }
5740 }
5741 }
5742
5743 /* Generate a canonical form of the selector. */
5744 if (!ins && sel.encoding () != builder)
5745 {
5746 /* Some targets are deficient and fail to expand a single
5747 argument permutation while still allowing an equivalent
5748 2-argument version. */
5749 tree oldop2 = op2;
5750 if (sel.ninputs () == 2
5751 || can_vec_perm_const_p (TYPE_MODE (type), sel, false))
5752 op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel);
5753 else
5754 {
5755 vec_perm_indices sel2 (builder, 2, nelts);
5756 if (can_vec_perm_const_p (TYPE_MODE (type), sel2, false))
5757 op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel2);
5758 else
5759 /* Not directly supported with either encoding,
5760 so use the preferred form. */
5761 op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel);
5762 }
5763 if (!operand_equal_p (op2, oldop2, 0))
5764 changed = true;
5765 }
5766 }
5767 (if (ins)
5768 (bit_insert { op0; } { ins; }
5769 { bitsize_int (at * tree_to_uhwi (TYPE_SIZE (TREE_TYPE (type)))); })
5770 (if (changed)
5771 (vec_perm { op0; } { op1; } { op2; }))))))))))
5772
5773 /* VEC_PERM_EXPR (v, v, mask) -> v where v contains same element. */
5774
5775 (match vec_same_elem_p
5776 @0
5777 (if (uniform_vector_p (@0))))
5778
5779 (match vec_same_elem_p
5780 (vec_duplicate @0))
5781
5782 (simplify
5783 (vec_perm vec_same_elem_p@0 @0 @1)
5784 @0)