/* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
   This file is consumed by genmatch which produces gimple-match.c
   and generic-match.c from it.

   Copyright (C) 2014-2017 Free Software Foundation, Inc.
   Contributed by Richard Biener <rguenther@suse.de>
   and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


/* Generic tree predicates we inherit.  */
(define_predicates
   integer_onep integer_zerop integer_all_onesp integer_minus_onep
   integer_each_onep integer_truep integer_nonzerop
   real_zerop real_onep real_minus_onep
   zerop
   CONSTANT_CLASS_P
   tree_expr_nonnegative_p
   tree_expr_nonzero_p
   integer_valued_real_p
   integer_pow2p
   HONOR_NANS)

/* Operator lists.  */
(define_operator_list tcc_comparison
  lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
(define_operator_list inverted_tcc_comparison
  ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
(define_operator_list inverted_tcc_comparison_with_nans
  unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
(define_operator_list swapped_tcc_comparison
  gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
(define_operator_list simple_comparison lt le eq ne ge gt)
(define_operator_list swapped_simple_comparison gt ge eq ne le lt)

#include "cfn-operators.pd"

/* Define operand lists for math rounding functions {,i,l,ll}FN,
   where the versions prefixed with "i" return an int, those prefixed with
   "l" return a long and those prefixed with "ll" return a long long.

   Also define operand lists:

     X<FN>F for all float functions, in the order i, l, ll
     X<FN> for all double functions, in the same order
     X<FN>L for all long double functions, in the same order.  */
#define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
  (define_operator_list X##FN##F BUILT_IN_I##FN##F \
                                 BUILT_IN_L##FN##F \
                                 BUILT_IN_LL##FN##F) \
  (define_operator_list X##FN BUILT_IN_I##FN \
                              BUILT_IN_L##FN \
                              BUILT_IN_LL##FN) \
  (define_operator_list X##FN##L BUILT_IN_I##FN##L \
                                 BUILT_IN_L##FN##L \
                                 BUILT_IN_LL##FN##L)

DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
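
/* As an illustration, DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR) above creates
   the operator lists XFLOORF (BUILT_IN_IFLOORF, BUILT_IN_LFLOORF,
   BUILT_IN_LLFLOORF), XFLOOR and XFLOORL, following the i, l, ll order
   documented above.  */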

/* Simplifications of operations with one constant operand and
   simplifications to constants or single values.  */

(for op (plus pointer_plus minus bit_ior bit_xor)
 (simplify
  (op @0 integer_zerop)
  (non_lvalue @0)))

/* 0 +p index -> (type)index */
(simplify
 (pointer_plus integer_zerop @1)
 (non_lvalue (convert @1)))

/* See if ARG1 is zero and X + ARG1 reduces to X.
   Likewise if the operands are reversed.  */
(simplify
 (plus:c @0 real_zerop@1)
 (if (fold_real_zero_addition_p (type, @1, 0))
  (non_lvalue @0)))

/* See if ARG1 is zero and X - ARG1 reduces to X.  */
(simplify
 (minus @0 real_zerop@1)
 (if (fold_real_zero_addition_p (type, @1, 1))
  (non_lvalue @0)))

/* Simplify x - x.
   This is unsafe for certain floats even in non-IEEE formats.
   In IEEE, it is unsafe because it gives the wrong result for NaNs.
   Also note that operand_equal_p is always false if an operand
   is volatile.  */
(simplify
 (minus @0 @0)
 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
  { build_zero_cst (type); }))

(simplify
 (mult @0 integer_zerop@1)
 @1)

/* Maybe fold x * 0 to 0.  The expressions aren't the same
   when x is NaN, since x * 0 is also NaN.  Nor are they the
   same in modes with signed zeros, since multiplying a
   negative value by 0 gives -0, not +0.  */
(simplify
 (mult @0 real_zerop@1)
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
  @1))

/* In IEEE floating point, x*1 is not equivalent to x for snans.
   Likewise for complex arithmetic with signed zeros.  */
(simplify
 (mult @0 real_onep)
 (if (!HONOR_SNANS (type)
      && (!HONOR_SIGNED_ZEROS (type)
          || !COMPLEX_FLOAT_TYPE_P (type)))
  (non_lvalue @0)))

/* Transform x * -1.0 into -x.  */
(simplify
 (mult @0 real_minus_onep)
 (if (!HONOR_SNANS (type)
      && (!HONOR_SIGNED_ZEROS (type)
          || !COMPLEX_FLOAT_TYPE_P (type)))
  (negate @0)))

/* X * 1, X / 1 -> X.  */
(for op (mult trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (op @0 integer_onep)
  (non_lvalue @0)))

/* Preserve explicit divisions by 0: the C++ front-end wants to detect
   undefined behavior in constexpr evaluation, and assuming that the division
   traps enables better optimizations than these anyway.  */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 /* 0 / X is always zero.  */
 (simplify
  (div integer_zerop@0 @1)
  /* But not for 0 / 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@1))
   @0))
 /* X / -1 is -X.  */
 (simplify
  (div @0 integer_minus_onep@1)
  (if (!TYPE_UNSIGNED (type))
   (negate @0)))
 /* X / X is one.  */
 (simplify
  (div @0 @0)
  /* But not for 0 / 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@0))
   { build_one_cst (type); }))
 /* X / abs (X) is X < 0 ? -1 : 1.  */
 (simplify
  (div:C @0 (abs @0))
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type))
   (cond (lt @0 { build_zero_cst (type); })
         { build_minus_one_cst (type); } { build_one_cst (type); })))
 /* X / -X is -1.  */
 (simplify
  (div:C @0 (negate @0))
  (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
       && TYPE_OVERFLOW_UNDEFINED (type))
   { build_minus_one_cst (type); })))

/* For unsigned integral types, FLOOR_DIV_EXPR is the same as
   TRUNC_DIV_EXPR.  Rewrite into the latter in this case.  */
(simplify
 (floor_div @0 @1)
 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
      && TYPE_UNSIGNED (type))
  (trunc_div @0 @1)))

/* Combine two successive divisions.  Note that combining ceil_div
   and floor_div is trickier and combining round_div even more so.  */
(for div (trunc_div exact_div)
 (simplify
  (div (div @0 INTEGER_CST@1) INTEGER_CST@2)
  (with {
    bool overflow_p;
    wide_int mul = wi::mul (@1, @2, TYPE_SIGN (type), &overflow_p);
   }
   (if (!overflow_p)
    (div @0 { wide_int_to_tree (type, mul); })
    (if (TYPE_UNSIGNED (type)
         || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
     { build_zero_cst (type); })))))
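
/* For example, (X / 3) / 5 becomes X / 15 here, while for a 32-bit
   unsigned X the second arm turns (X / 65536) / 65536 into 0, since
   65536 * 65536 overflows the type and the overall quotient can never
   be nonzero.  */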

/* Optimize A / A to 1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv @0 @0)
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  { build_one_cst (type); }))

/* Optimize -A / A to -1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv:C @0 (negate @0))
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  { build_minus_one_cst (type); }))

/* PR71078: x / abs(x) -> copysign (1.0, x) */
(simplify
 (rdiv:C (convert? @0) (convert? (abs @0)))
 (if (SCALAR_FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (type))
  (switch
   (if (types_match (type, float_type_node))
    (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
   (if (types_match (type, double_type_node))
    (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
   (if (types_match (type, long_double_type_node))
    (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))

/* In IEEE floating point, x/1 is not equivalent to x for snans.  */
(simplify
 (rdiv @0 real_onep)
 (if (!HONOR_SNANS (type))
  (non_lvalue @0)))

/* In IEEE floating point, x/-1 is not equivalent to -x for snans.  */
(simplify
 (rdiv @0 real_minus_onep)
 (if (!HONOR_SNANS (type))
  (negate @0)))

(if (flag_reciprocal_math)
 /* Convert (A/B)/C to A/(B*C)  */
 (simplify
  (rdiv (rdiv:s @0 @1) @2)
  (rdiv @0 (mult @1 @2)))

 /* Convert A/(B/C) to (A/B)*C  */
 (simplify
  (rdiv @0 (rdiv:s @1 @2))
  (mult (rdiv @0 @1) @2)))

/* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
  (if (integer_pow2p (@2)
       && tree_int_cst_sgn (@2) > 0
       && wi::add (@2, @1) == 0
       && tree_nop_conversion_p (type, TREE_TYPE (@0)))
   (rshift (convert @0) { build_int_cst (integer_type_node,
                                         wi::exact_log2 (@2)); }))))
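
/* E.g. with @1 == -8 and @2 == 8 this rewrites (X & -8) / 8 as X >> 3;
   the wi::add check above ensures the mask constant is exactly -@2.  */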

/* If ARG1 is a constant, we can convert this to a multiply by the
   reciprocal.  This does not have the same rounding properties,
   so only do this if -freciprocal-math.  We can actually
   always safely do it if ARG1 is a power of two, but it's hard to
   tell if it is or not in a portable manner.  */
(for cst (REAL_CST COMPLEX_CST VECTOR_CST)
 (simplify
  (rdiv @0 cst@1)
  (if (optimize)
   (if (flag_reciprocal_math
        && !real_zerop (@1))
    (with
     { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
     (if (tem)
      (mult @0 { tem; } )))
    (if (cst != COMPLEX_CST)
     (with { tree inverse = exact_inverse (type, @1); }
      (if (inverse)
       (mult @0 { inverse; } ))))))))

(for mod (ceil_mod floor_mod round_mod trunc_mod)
 /* 0 % X is always zero.  */
 (simplify
  (mod integer_zerop@0 @1)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@1))
   @0))
 /* X % 1 is always zero.  */
 (simplify
  (mod @0 integer_onep)
  { build_zero_cst (type); })
 /* X % -1 is zero.  */
 (simplify
  (mod @0 integer_minus_onep@1)
  (if (!TYPE_UNSIGNED (type))
   { build_zero_cst (type); }))
 /* X % X is zero.  */
 (simplify
  (mod @0 @0)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@0))
   { build_zero_cst (type); }))
 /* (X % Y) % Y is just X % Y.  */
 (simplify
  (mod (mod@2 @0 @1) @1)
  @2)
 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2.  */
 (simplify
  (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (ANY_INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type)
       && wi::multiple_of_p (@1, @2, TYPE_SIGN (type)))
   { build_zero_cst (type); })))

/* X % -C is the same as X % C.  */
(simplify
 (trunc_mod @0 INTEGER_CST@1)
 (if (TYPE_SIGN (type) == SIGNED
      && !TREE_OVERFLOW (@1)
      && wi::neg_p (@1)
      && !TYPE_OVERFLOW_TRAPS (type)
      /* Avoid this transformation if C is INT_MIN, i.e. C == -C.  */
      && !sign_bit_p (@1, @1))
  (trunc_mod @0 (negate @1))))

/* X % -Y is the same as X % Y.  */
(simplify
 (trunc_mod @0 (convert? (negate @1)))
 (if (INTEGRAL_TYPE_P (type)
      && !TYPE_UNSIGNED (type)
      && !TYPE_OVERFLOW_TRAPS (type)
      && tree_nop_conversion_p (type, TREE_TYPE (@1))
      /* Avoid this transformation if X might be INT_MIN or
         Y might be -1, because we would then change valid
         INT_MIN % -(-1) into invalid INT_MIN % -1.  */
      && (expr_not_equal_to (@0, TYPE_MIN_VALUE (type))
          || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
                                                   (TREE_TYPE (@1))))))
  (trunc_mod @0 (convert @1))))

/* X - (X / Y) * Y is the same as X % Y.  */
(simplify
 (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1)))
 (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
  (convert (trunc_mod @0 @1))))

/* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
   i.e. "X % C" into "X & (C - 1)", if X and C are positive.
   Also optimize A % (C << N) where C is a power of 2,
   to A & ((C << N) - 1).  */
(match (power_of_two_cand @1)
 INTEGER_CST@1)
(match (power_of_two_cand @1)
 (lshift INTEGER_CST@1 @2))
(for mod (trunc_mod floor_mod)
 (simplify
  (mod @0 (convert?@3 (power_of_two_cand@1 @2)))
  (if ((TYPE_UNSIGNED (type)
        || tree_expr_nonnegative_p (@0))
       && tree_nop_conversion_p (type, TREE_TYPE (@3))
       && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
   (bit_and @0 (convert (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))))
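
/* For instance, for unsigned X this rewrites X % 16 as X & 15, and
   X % (4 << N) as X & ((4 << N) - 1), 4 being the power of two that
   power_of_two_cand extracts.  */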

/* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF.  */
(simplify
 (trunc_div (mult @0 integer_pow2p@1) @1)
 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
  (bit_and @0 { wide_int_to_tree
                (type, wi::mask (TYPE_PRECISION (type) - wi::exact_log2 (@1),
                                 false, TYPE_PRECISION (type))); })))

/* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1.  */
(simplify
 (mult (trunc_div @0 integer_pow2p@1) @1)
 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
  (bit_and @0 (negate @1))))

/* Simplify (t * 2) / 2 -> t.  */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (div (mult @0 @1) @1)
  (if (ANY_INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_UNDEFINED (type))
   @0)))

(for op (negate abs)
 /* Simplify cos(-x) and cos(|x|) -> cos(x).  Similarly for cosh.  */
 (for coss (COS COSH)
  (simplify
   (coss (op @0))
   (coss @0)))
 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer.  */
 (for pows (POW)
  (simplify
   (pows (op @0) REAL_CST@1)
   (with { HOST_WIDE_INT n; }
    (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
     (pows @0 @1)))))
 /* Likewise for powi.  */
 (for pows (POWI)
  (simplify
   (pows (op @0) INTEGER_CST@1)
   (if (wi::bit_and (@1, 1) == 0)
    (pows @0 @1))))
 /* Strip negate and abs from both operands of hypot.  */
 (for hypots (HYPOT)
  (simplify
   (hypots (op @0) @1)
   (hypots @0 @1))
  (simplify
   (hypots @0 (op @1))
   (hypots @0 @1)))
 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y).  */
 (for copysigns (COPYSIGN)
  (simplify
   (copysigns (op @0) @1)
   (copysigns @0 @1))))

/* abs(x)*abs(x) -> x*x.  Should be valid for all types.  */
(simplify
 (mult (abs@1 @0) @1)
 (mult @0 @0))

/* cos(copysign(x, y)) -> cos(x).  Similarly for cosh.  */
(for coss (COS COSH)
     copysigns (COPYSIGN)
 (simplify
  (coss (copysigns @0 @1))
  (coss @0)))

/* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer.  */
(for pows (POW)
     copysigns (COPYSIGN)
 (simplify
  (pows (copysigns @0 @2) REAL_CST@1)
  (with { HOST_WIDE_INT n; }
   (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
    (pows @0 @1)))))
/* Likewise for powi.  */
(for pows (POWI)
     copysigns (COPYSIGN)
 (simplify
  (pows (copysigns @0 @2) INTEGER_CST@1)
  (if (wi::bit_and (@1, 1) == 0)
   (pows @0 @1))))

(for hypots (HYPOT)
     copysigns (COPYSIGN)
 /* hypot(copysign(x, y), z) -> hypot(x, z).  */
 (simplify
  (hypots (copysigns @0 @1) @2)
  (hypots @0 @2))
 /* hypot(x, copysign(y, z)) -> hypot(x, y).  */
 (simplify
  (hypots @0 (copysigns @1 @2))
  (hypots @0 @1)))

/* copysign(x, CST) -> [-]abs (x).  */
(for copysigns (COPYSIGN)
 (simplify
  (copysigns @0 REAL_CST@1)
  (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
   (negate (abs @0))
   (abs @0))))

/* copysign(copysign(x, y), z) -> copysign(x, z).  */
(for copysigns (COPYSIGN)
 (simplify
  (copysigns (copysigns @0 @1) @2)
  (copysigns @0 @2)))

/* copysign(x,y)*copysign(x,y) -> x*x.  */
(for copysigns (COPYSIGN)
 (simplify
  (mult (copysigns@2 @0 @1) @2)
  (mult @0 @0)))

/* ccos(-x) -> ccos(x).  Similarly for ccosh.  */
(for ccoss (CCOS CCOSH)
 (simplify
  (ccoss (negate @0))
  (ccoss @0)))

/* cabs(-x) and cabs(conj(x)) -> cabs(x).  */
(for ops (conj negate)
 (for cabss (CABS)
  (simplify
   (cabss (ops @0))
   (cabss @0))))

/* Fold (a * (1 << b)) into (a << b)  */
(simplify
 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
 (if (! FLOAT_TYPE_P (type)
      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
  (lshift @0 @2)))

/* Fold (C1/X)*C2 into (C1*C2)/X.  */
(simplify
 (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
 (if (flag_associative_math
      && single_use (@3))
  (with
   { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
   (if (tem)
    (rdiv { tem; } @1)))))

/* Convert C1/(X*C2) into (C1/C2)/X  */
(simplify
 (rdiv REAL_CST@0 (mult @1 REAL_CST@2))
 (if (flag_reciprocal_math)
  (with
   { tree tem = const_binop (RDIV_EXPR, type, @0, @2); }
   (if (tem)
    (rdiv { tem; } @1)))))

/* Simplify ~X & X as zero.  */
(simplify
 (bit_and:c (convert? @0) (convert? (bit_not @0)))
 { build_zero_cst (type); })

/* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b);  */
(simplify
 (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep))
 (if (TYPE_UNSIGNED (type))
  (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1)))))

/* PR35691: Transform
   (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0.
   (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0.  */
(for bitop (bit_and bit_ior)
     cmp (eq ne)
 (simplify
  (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
   (cmp (bit_ior @0 (convert @1)) @2))))

/* Fold (A & ~B) - (A & B) into (A ^ B) - B.  */
(simplify
 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
 (minus (bit_xor @0 @1) @1))
(simplify
 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
 (if (wi::bit_not (@2) == @1)
  (minus (bit_xor @0 @1) @1)))

/* Fold (A & B) - (A & ~B) into B - (A ^ B).  */
(simplify
 (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1)))
 (minus @1 (bit_xor @0 @1)))

/* Simplify (X & ~Y) | (~X & Y) -> X ^ Y.  */
(simplify
 (bit_ior (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1))
 (bit_xor @0 @1))
(simplify
 (bit_ior:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
 (if (wi::bit_not (@2) == @1)
  (bit_xor @0 @1)))

/* PR53979: Transform ((a ^ b) | a) -> (a | b) */
(simplify
 (bit_ior:c (bit_xor:c @0 @1) @0)
 (bit_ior @0 @1))

/* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0.  */
#if GIMPLE
(simplify
 (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && (get_nonzero_bits (@0) & wi::bit_not (@1)) == 0)
  (bit_xor @0 @1)))
#endif

/* X % Y is smaller than Y.  */
(for cmp (lt ge)
 (simplify
  (cmp (trunc_mod @0 @1) @1)
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == LT_EXPR, type); })))
(for cmp (gt le)
 (simplify
  (cmp @1 (trunc_mod @0 @1))
  (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
   { constant_boolean_node (cmp == GT_EXPR, type); })))

/* x | ~0 -> ~0  */
(simplify
 (bit_ior @0 integer_all_onesp@1)
 @1)

/* x | 0 -> x  */
(simplify
 (bit_ior @0 integer_zerop)
 @0)

/* x & 0 -> 0  */
(simplify
 (bit_and @0 integer_zerop@1)
 @1)

/* ~x | x -> -1 */
/* ~x ^ x -> -1 */
/* ~x + x -> -1 */
(for op (bit_ior bit_xor plus)
 (simplify
  (op:c (convert? @0) (convert? (bit_not @0)))
  (convert { build_all_ones_cst (TREE_TYPE (@0)); })))

/* x ^ x -> 0 */
(simplify
 (bit_xor @0 @0)
 { build_zero_cst (type); })

/* Canonicalize X ^ ~0 to ~X.  */
(simplify
 (bit_xor @0 integer_all_onesp@1)
 (bit_not @0))

/* x & ~0 -> x  */
(simplify
 (bit_and @0 integer_all_onesp)
 (non_lvalue @0))

/* x & x -> x,  x | x -> x  */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop @0 @0)
  (non_lvalue @0)))

/* x & C -> x if we know that x & ~C == 0.  */
#if GIMPLE
(simplify
 (bit_and SSA_NAME@0 INTEGER_CST@1)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && (get_nonzero_bits (@0) & wi::bit_not (@1)) == 0)
  @0))
#endif

/* x + (x & 1) -> (x + 1) & ~1 */
(simplify
 (plus:c @0 (bit_and:s @0 integer_onep@1))
 (bit_and (plus @0 @1) (bit_not @1)))

/* x & ~(x & y) -> x & ~y */
/* x | ~(x | y) -> x | ~y  */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop:c @0 (bit_not (bitop:cs @0 @1)))
  (bitop @0 (bit_not @1))))

/* (x | y) & ~x -> y & ~x */
/* (x & y) | ~x -> y | ~x */
(for bitop (bit_and bit_ior)
     rbitop (bit_ior bit_and)
 (simplify
  (bitop:c (rbitop:c @0 @1) (bit_not@2 @0))
  (bitop @1 @2)))

/* (x & y) ^ (x | y) -> x ^ y */
(simplify
 (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1))
 (bit_xor @0 @1))

/* (x ^ y) ^ (x | y) -> x & y */
(simplify
 (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1))
 (bit_and @0 @1))

/* (x & y) + (x ^ y) -> x | y */
/* (x & y) | (x ^ y) -> x | y */
/* (x & y) ^ (x ^ y) -> x | y */
(for op (plus bit_ior bit_xor)
 (simplify
  (op:c (bit_and @0 @1) (bit_xor @0 @1))
  (bit_ior @0 @1)))

/* (x & y) + (x | y) -> x + y */
(simplify
 (plus:c (bit_and @0 @1) (bit_ior @0 @1))
 (plus @0 @1))

/* (x + y) - (x | y) -> x & y */
(simplify
 (minus (plus @0 @1) (bit_ior @0 @1))
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type))
  (bit_and @0 @1)))

/* (x + y) - (x & y) -> x | y */
(simplify
 (minus (plus @0 @1) (bit_and @0 @1))
 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
      && !TYPE_SATURATING (type))
  (bit_ior @0 @1)))

/* (x | y) - (x ^ y) -> x & y */
(simplify
 (minus (bit_ior @0 @1) (bit_xor @0 @1))
 (bit_and @0 @1))

/* (x | y) - (x & y) -> x ^ y */
(simplify
 (minus (bit_ior @0 @1) (bit_and @0 @1))
 (bit_xor @0 @1))

/* (x | y) & ~(x & y) -> x ^ y */
(simplify
 (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1)))
 (bit_xor @0 @1))

/* (x | y) & (~x ^ y) -> x & y */
(simplify
 (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0)))
 (bit_and @0 @1))

/* ~x & ~y -> ~(x | y)
   ~x | ~y -> ~(x & y) */
(for op (bit_and bit_ior)
     rop (bit_ior bit_and)
 (simplify
  (op (convert1? (bit_not @0)) (convert2? (bit_not @1)))
  (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
       && element_precision (type) <= element_precision (TREE_TYPE (@1)))
   (bit_not (rop (convert @0) (convert @1))))))

/* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing
   with a constant, and the two constants have no bits in common,
   we should treat this as a BIT_IOR_EXPR since this may produce more
   simplifications.  */
(for op (bit_xor plus)
 (simplify
  (op (convert1? (bit_and@4 @0 INTEGER_CST@1))
      (convert2? (bit_and@5 @2 INTEGER_CST@3)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
       && tree_nop_conversion_p (type, TREE_TYPE (@2))
       && wi::bit_and (@1, @3) == 0)
   (bit_ior (convert @4) (convert @5)))))

/* (X | Y) ^ X -> Y & ~X.  */
(simplify
 (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_and @1 (bit_not @0)))))

/* Convert ~X ^ ~Y to X ^ Y.  */
(simplify
 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      && element_precision (type) <= element_precision (TREE_TYPE (@1)))
  (bit_xor (convert @0) (convert @1))))

/* Convert ~X ^ C to X ^ ~C.  */
(simplify
 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1)
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (bit_xor (convert @0) (bit_not @1))))

/* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y.  */
(for opo (bit_and bit_xor)
     opi (bit_xor bit_and)
 (simplify
  (opo:c (opi:c @0 @1) @1)
  (bit_and (bit_not @0) @1)))

/* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
   operands are another bit-wise operation with a common input.  If so,
   distribute the bit operations to save an operation and possibly two if
   constants are involved.  For example, convert
     (A | B) & (A | C) into A | (B & C)
   Further simplification will occur if B and C are constants.  */
(for op (bit_and bit_ior bit_xor)
     rop (bit_ior bit_and bit_and)
 (simplify
  (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && tree_nop_conversion_p (type, TREE_TYPE (@2)))
   (rop (convert @0) (op (convert @1) (convert @2))))))

/* Some simple reassociation for bit operations, also handled in reassoc.  */
/* (X & Y) & Y -> X & Y
   (X | Y) | Y -> X | Y  */
(for op (bit_and bit_ior)
 (simplify
  (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1))
  @2))
/* (X ^ Y) ^ Y -> X  */
(simplify
 (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1))
 (convert @0))
/* (X & Y) & (X & Z) -> (X & Y) & Z
   (X | Y) | (X | Z) -> (X | Y) | Z  */
(for op (bit_and bit_ior)
 (simplify
  (op:c (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && tree_nop_conversion_p (type, TREE_TYPE (@2)))
   (if (single_use (@5) && single_use (@6))
    (op @3 (convert @2))
    (if (single_use (@3) && single_use (@4))
     (op (convert @1) @5))))))
/* (X ^ Y) ^ (X ^ Z) -> Y ^ Z  */
(simplify
 (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
      && tree_nop_conversion_p (type, TREE_TYPE (@2)))
  (bit_xor (convert @1) (convert @2))))

(simplify
 (abs (abs@1 @0))
 @1)
(simplify
 (abs (negate @0))
 (abs @0))
(simplify
 (abs tree_expr_nonnegative_p@0)
 @0)

/* A few cases of fold-const.c negate_expr_p predicate.  */
(match negate_expr_p
 INTEGER_CST
 (if ((INTEGRAL_TYPE_P (type)
       && TYPE_OVERFLOW_WRAPS (type))
      || (!TYPE_OVERFLOW_SANITIZED (type)
          && may_negate_without_overflow_p (t)))))
(match negate_expr_p
 FIXED_CST)
(match negate_expr_p
 (negate @0)
 (if (!TYPE_OVERFLOW_SANITIZED (type))))
(match negate_expr_p
 REAL_CST
 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)))))
/* VECTOR_CST handling of non-wrapping types would recurse in unsupported
   ways.  */
(match negate_expr_p
 VECTOR_CST
 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))

/* (-A) * (-B) -> A * B  */
(simplify
 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
  (mult (convert @0) (convert (negate @1)))))

/* -(A + B) -> (-B) - A.  */
(simplify
 (negate (plus:c @0 negate_expr_p@1))
 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
      && !HONOR_SIGNED_ZEROS (element_mode (type)))
  (minus (negate @1) @0)))

/* A - B -> A + (-B) if B is easily negatable.  */
(simplify
 (minus @0 negate_expr_p@1)
 (if (!FIXED_POINT_TYPE_P (type))
  (plus @0 (negate @1))))

/* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
   when profitable.
   For bitwise binary operations apply operand conversions to the
   binary operation result instead of to the operands.  This allows
   to combine successive conversions and bitwise binary operations.
   We combine the above two cases by using a conditional convert.  */
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bitop (convert @0) (convert? @1))
  (if (((TREE_CODE (@1) == INTEGER_CST
         && INTEGRAL_TYPE_P (TREE_TYPE (@0))
         && int_fits_type_p (@1, TREE_TYPE (@0)))
        || types_match (@0, @1))
       /* ???  This transform conflicts with fold-const.c doing
          Convert (T)(x & c) into (T)x & (T)c, if c is an integer
          constants (if x has signed type, the sign bit cannot be set
          in c).  This folds extension into the BIT_AND_EXPR.
          Restrict it to GIMPLE to avoid endless recursions.  */
       && (bitop != BIT_AND_EXPR || GIMPLE)
       && (/* That's a good idea if the conversion widens the operand, thus
              after hoisting the conversion the operation will be narrower.  */
           TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
           /* It's also a good idea if the conversion is to a non-integer
              mode.  */
           || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
           /* Or if the precision of TO is not the same as the precision
              of its mode.  */
           || TYPE_PRECISION (type) != GET_MODE_PRECISION (TYPE_MODE (type))))
   (convert (bitop @0 (convert @1))))))

(for bitop (bit_and bit_ior)
     rbitop (bit_ior bit_and)
 /* (x | y) & x -> x */
 /* (x & y) | x -> x */
 (simplify
  (bitop:c (rbitop:c @0 @1) @0)
  @0)
 /* (~x | y) & x -> x & y */
 /* (~x & y) | x -> x | y */
 (simplify
  (bitop:c (rbitop:c (bit_not @0) @1) @0)
  (bitop @0 @1)))

/* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
(simplify
 (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
 (bit_ior (bit_and @0 @2) (bit_and @1 @2)))

/* Combine successive equal operations with constants.  */
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
  (bitop @0 (bitop @1 @2))))

/* Try simple folding for X op !X, and X op X with the help
   of the truth_valued_p and logical_inverted_value predicates.  */
(match truth_valued_p
 @0
 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
(for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
 (match truth_valued_p
  (op @0 @1)))
(match truth_valued_p
 (truth_not @0))

(match (logical_inverted_value @0)
 (truth_not @0))
(match (logical_inverted_value @0)
 (bit_not truth_valued_p@0))
(match (logical_inverted_value @0)
 (eq @0 integer_zerop))
(match (logical_inverted_value @0)
 (ne truth_valued_p@0 integer_truep))
(match (logical_inverted_value @0)
 (bit_xor truth_valued_p@0 integer_truep))

/* X & !X -> 0.  */
(simplify
 (bit_and:c @0 (logical_inverted_value @0))
 { build_zero_cst (type); })
/* X | !X and X ^ !X -> 1, if X is truth-valued.  */
(for op (bit_ior bit_xor)
 (simplify
  (op:c truth_valued_p@0 (logical_inverted_value @0))
  { constant_boolean_node (true, type); }))
/* X ==/!= !X is false/true.  */
(for op (eq ne)
 (simplify
  (op:c truth_valued_p@0 (logical_inverted_value @0))
  { constant_boolean_node (op == NE_EXPR ? true : false, type); }))

/* ~~x -> x */
(simplify
 (bit_not (bit_not @0))
 @0)

/* Convert ~ (-A) to A - 1.  */
(simplify
 (bit_not (convert? (negate @0)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      || !TYPE_UNSIGNED (TREE_TYPE (@0)))
  (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); }))))

/* Convert ~ (A - 1) or ~ (A + -1) to -A.  */
(simplify
 (bit_not (convert? (minus @0 integer_each_onep)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      || !TYPE_UNSIGNED (TREE_TYPE (@0)))
  (convert (negate @0))))
(simplify
 (bit_not (convert? (plus @0 integer_all_onesp)))
 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
      || !TYPE_UNSIGNED (TREE_TYPE (@0)))
  (convert (negate @0))))

/* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify.  */
(simplify
 (bit_not (convert? (bit_xor @0 INTEGER_CST@1)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_xor @0 (bit_not @1)))))
(simplify
 (bit_not (convert? (bit_xor:c (bit_not @0) @1)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (convert (bit_xor @0 @1))))

/* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
(simplify
 (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2))
 (bit_xor (bit_and (bit_xor @0 @1) @2) @0))

/* Fold A - (A & B) into ~B & A.  */
(simplify
 (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1)))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
      && tree_nop_conversion_p (type, TREE_TYPE (@1)))
  (convert (bit_and (bit_not @1) @0))))

/* For integral types with undefined overflow and C != 0 fold
   x * C EQ/NE y * C into x EQ/NE y.  */
(for cmp (eq ne)
 (simplify
  (cmp (mult:c @0 @1) (mult:c @2 @1))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
       && tree_expr_nonzero_p (@1))
   (cmp @0 @2))))

/* For integral types with undefined overflow and C != 0 fold
   x * C RELOP y * C into:

   x RELOP y for nonnegative C
   y RELOP x for negative C  */
(for cmp (lt gt le ge)
 (simplify
  (cmp (mult:c @0 @1) (mult:c @2 @1))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
       && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
   (if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1))
    (cmp @0 @2)
    (if (TREE_CODE (@1) == INTEGER_CST
         && wi::neg_p (@1, TYPE_SIGN (TREE_TYPE (@1))))
     (cmp @2 @0))))))

/* ((X inner_op C0) outer_op C1)
   With X being a tree where value_range has reasoned certain bits to always be
   zero throughout its computed value range,
   inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op,
   and zero_mask has 1's for all bits that are sure to be 0 in X
   and 0's otherwise.
   if (inner_op == '^') C0 &= ~C1;
   if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1))
   if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1))
*/
(for inner_op (bit_ior bit_xor)
     outer_op (bit_xor bit_ior)
(simplify
 (outer_op
  (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1)
 (with
  {
    bool fail = false;
    wide_int zero_mask_not;
    wide_int C0;
    wide_int cst_emit;

    if (TREE_CODE (@2) == SSA_NAME)
      zero_mask_not = get_nonzero_bits (@2);
    else
      fail = true;

    if (inner_op == BIT_XOR_EXPR)
      {
        C0 = wi::bit_and_not (@0, @1);
        cst_emit = wi::bit_or (C0, @1);
      }
    else
      {
        C0 = @0;
        cst_emit = wi::bit_xor (@0, @1);
      }
  }
  (if (!fail && wi::bit_and (C0, zero_mask_not) == 0)
   (outer_op @2 { wide_int_to_tree (type, cst_emit); })
   (if (!fail && wi::bit_and (@1, zero_mask_not) == 0)
    (inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))
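
/* Worked example: if X's value range proves it fits in two bits
   (zero_mask_not == 3), then (X | 8) ^ 1 passes the first test
   (8 & 3 == 0) and is emitted as X ^ 9, saving one operation.  */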

/* Associate (p +p off1) +p off2 as (p +p (off1 + off2)).  */
(simplify
 (pointer_plus (pointer_plus:s @0 @1) @3)
 (pointer_plus @0 (plus @1 @3)))

/* Pattern match
     tem1 = (long) ptr1;
     tem2 = (long) ptr2;
     tem3 = tem2 - tem1;
     tem4 = (unsigned long) tem3;
     tem5 = ptr1 + tem4;
   and produce
     tem5 = ptr2;  */
(simplify
 (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
 /* Conditionally look through a sign-changing conversion.  */
 (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
      && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
          || (GENERIC && type == TREE_TYPE (@1))))
  @1))

/* Pattern match
     tem = (sizetype) ptr;
     tem = tem & algn;
     tem = -tem;
     ... = ptr p+ tem;
   and produce the simpler and easier to analyze with respect to alignment
     ... = ptr & ~algn;  */
(simplify
 (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
 (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), wi::bit_not (@1)); }
  (bit_and @0 { algn; })))

/* Try folding difference of addresses.  */
(simplify
 (minus (convert ADDR_EXPR@0) (convert @1))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (with { HOST_WIDE_INT diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))
(simplify
 (minus (convert @0) (convert ADDR_EXPR@1))
 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (with { HOST_WIDE_INT diff; }
   (if (ptr_difference_const (@0, @1, &diff))
    { build_int_cst_type (type, diff); }))))

/* If arg0 is derived from the address of an object or function, we may
   be able to fold this expression using the object or function's
   alignment.  */
(simplify
 (bit_and (convert? @0) INTEGER_CST@1)
 (if (POINTER_TYPE_P (TREE_TYPE (@0))
      && tree_nop_conversion_p (type, TREE_TYPE (@0)))
  (with
   {
     unsigned int align;
     unsigned HOST_WIDE_INT bitpos;
     get_pointer_alignment_1 (@0, &align, &bitpos);
   }
   (if (wi::ltu_p (@1, align / BITS_PER_UNIT))
    { wide_int_to_tree (type, wi::bit_and (@1, bitpos / BITS_PER_UNIT)); }))))
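
/* E.g. for a pointer known to be 16-byte aligned (align == 128 bits,
   bitpos == 0), ((uintptr_t) p) & 7 folds to 7 & 0, i.e. zero.  */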


/* We can't reassociate at all for saturating types.  */
(if (!TYPE_SATURATING (type))

 /* Contract negates.  */
 /* A + (-B) -> A - B */
 (simplify
  (plus:c (convert1? @0) (convert2? (negate @1)))
  /* Apply STRIP_NOPS on @0 and the negate.  */
  (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
       && tree_nop_conversion_p (type, TREE_TYPE (@1))
       && !TYPE_OVERFLOW_SANITIZED (type))
   (minus (convert @0) (convert @1))))
 /* A - (-B) -> A + B */
 (simplify
  (minus (convert1? @0) (convert2? (negate @1)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
       && tree_nop_conversion_p (type, TREE_TYPE (@1))
       && !TYPE_OVERFLOW_SANITIZED (type))
   (plus (convert @0) (convert @1))))
 /* -(-A) -> A */
 (simplify
  (negate (convert? (negate @1)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && !TYPE_OVERFLOW_SANITIZED (type))
   (convert @1)))

 /* We can't reassociate floating-point unless -fassociative-math
    or fixed-point plus or minus because of saturation to +-Inf.  */
 (if ((!FLOAT_TYPE_P (type) || flag_associative_math)
      && !FIXED_POINT_TYPE_P (type))

  /* Match patterns that allow contracting a plus-minus pair
     irrespective of overflow issues.  */
  /* (A +- B) - A       ->  +- B */
  /* (A +- B) -+ B      ->  A */
  /* A - (A +- B)       -> -+ B */
  /* A +- (B -+ A)      ->  +- B */
  (simplify
   (minus (plus:c @0 @1) @0)
   @1)
  (simplify
   (minus (minus @0 @1) @0)
   (negate @1))
  (simplify
   (plus:c (minus @0 @1) @1)
   @0)
  (simplify
   (minus @0 (plus:c @0 @1))
   (negate @1))
  (simplify
   (minus @0 (minus @0 @1))
   @1)

  /* (A +- CST1) +- CST2 -> A + CST3  */
  (for outer_op (plus minus)
   (for inner_op (plus minus)
    (simplify
     (outer_op (inner_op @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
     /* If the constant operation overflows we cannot do the transform
        as we would introduce undefined overflow, for example
        with (a - 1) + INT_MIN.  */
     (with { tree cst = const_binop (outer_op == inner_op
                                     ? PLUS_EXPR : MINUS_EXPR, type, @1, @2); }
      (if (cst && !TREE_OVERFLOW (cst))
       (inner_op @0 { cst; } ))))))

  /* (CST1 - A) +- CST2 -> CST3 - A  */
  (for outer_op (plus minus)
   (simplify
    (outer_op (minus CONSTANT_CLASS_P@1 @0) CONSTANT_CLASS_P@2)
    (with { tree cst = const_binop (outer_op, type, @1, @2); }
     (if (cst && !TREE_OVERFLOW (cst))
      (minus { cst; } @0)))))

  /* CST1 - (CST2 - A) -> CST3 + A  */
  (simplify
   (minus CONSTANT_CLASS_P@1 (minus CONSTANT_CLASS_P@2 @0))
   (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
    (if (cst && !TREE_OVERFLOW (cst))
     (plus { cst; } @0))))

  /* ~A + A -> -1 */
  (simplify
   (plus:c (bit_not @0) @0)
   (if (!TYPE_OVERFLOW_TRAPS (type))
    { build_all_ones_cst (type); }))

  /* ~A + 1 -> -A */
  (simplify
   (plus (convert? (bit_not @0)) integer_each_onep)
   (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
    (negate (convert @0))))

  /* -A - 1 -> ~A */
  (simplify
   (minus (convert? (negate @0)) integer_each_onep)
   (if (!TYPE_OVERFLOW_TRAPS (type)
        && tree_nop_conversion_p (type, TREE_TYPE (@0)))
    (bit_not (convert @0))))

  /* -1 - A -> ~A */
  (simplify
   (minus integer_all_onesp @0)
   (bit_not @0))

  /* (T)(P + A) - (T)P -> (T) A */
  (for add (plus pointer_plus)
   (simplify
    (minus (convert (add @@0 @1))
           (convert @0))
    (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
         /* For integer types, if A has a smaller type
            than T the result depends on the possible
            overflow in P + A.
            E.g. T=size_t, A=(unsigned)429497295, P>0.
            However, if an overflow in P + A would cause
            undefined behavior, we can assume that there
            is no overflow.  */
         || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
             && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
         /* For pointer types, if the conversion of A to the
            final type requires a sign- or zero-extension,
            then we have to punt - it is not defined which
            one is correct.  */
         || (POINTER_TYPE_P (TREE_TYPE (@0))
             && TREE_CODE (@1) == INTEGER_CST
             && tree_int_cst_sign_bit (@1) == 0))
     (convert @1))))

  /* (T)P - (T)(P + A) -> -(T) A */
  (for add (plus pointer_plus)
   (simplify
    (minus (convert @0)
           (convert (add @@0 @1)))
    (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
         /* For integer types, if A has a smaller type
            than T the result depends on the possible
            overflow in P + A.
            E.g. T=size_t, A=(unsigned)429497295, P>0.
            However, if an overflow in P + A would cause
            undefined behavior, we can assume that there
            is no overflow.  */
         || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
             && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
         /* For pointer types, if the conversion of A to the
            final type requires a sign- or zero-extension,
            then we have to punt - it is not defined which
            one is correct.  */
         || (POINTER_TYPE_P (TREE_TYPE (@0))
             && TREE_CODE (@1) == INTEGER_CST
             && tree_int_cst_sign_bit (@1) == 0))
     (negate (convert @1)))))

  /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
  (for add (plus pointer_plus)
   (simplify
    (minus (convert (add @@0 @1))
           (convert (add @0 @2)))
    (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
         /* For integer types, if A has a smaller type
            than T the result depends on the possible
            overflow in P + A.
            E.g. T=size_t, A=(unsigned)429497295, P>0.
            However, if an overflow in P + A would cause
            undefined behavior, we can assume that there
            is no overflow.  */
         || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
             && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
         /* For pointer types, if the conversion of A to the
            final type requires a sign- or zero-extension,
            then we have to punt - it is not defined which
            one is correct.  */
         || (POINTER_TYPE_P (TREE_TYPE (@0))
             && TREE_CODE (@1) == INTEGER_CST
             && tree_int_cst_sign_bit (@1) == 0
             && TREE_CODE (@2) == INTEGER_CST
             && tree_int_cst_sign_bit (@2) == 0))
     (minus (convert @1) (convert @2)))))))


/* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax().  */

(for minmax (min max FMIN FMAX)
 (simplify
  (minmax @0 @0)
  @0))
/* min(max(x,y),y) -> y.  */
(simplify
 (min:c (max:c @0 @1) @1)
 @1)
/* max(min(x,y),y) -> y.  */
(simplify
 (max:c (min:c @0 @1) @1)
 @1)
/* max(a,-a) -> abs(a).  */
(simplify
 (max:c @0 (negate @0))
 (if (TREE_CODE (type) != COMPLEX_TYPE
      && (! ANY_INTEGRAL_TYPE_P (type)
          || TYPE_OVERFLOW_UNDEFINED (type)))
  (abs @0)))
/* min(a,-a) -> -abs(a).  */
(simplify
 (min:c @0 (negate @0))
 (if (TREE_CODE (type) != COMPLEX_TYPE
      && (! ANY_INTEGRAL_TYPE_P (type)
          || TYPE_OVERFLOW_UNDEFINED (type)))
  (negate (abs @0))))
(simplify
 (min @0 @1)
 (switch
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_MIN_VALUE (type)
       && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
   @1)
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_MAX_VALUE (type)
       && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
   @0)))
(simplify
 (max @0 @1)
 (switch
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_MAX_VALUE (type)
       && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
   @1)
  (if (INTEGRAL_TYPE_P (type)
       && TYPE_MIN_VALUE (type)
       && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
   @0)))

/* max (a, a + CST) -> a + CST where CST is positive.  */
/* max (a, a + CST) -> a where CST is negative.  */
(simplify
 (max:c @0 (plus@2 @0 INTEGER_CST@1))
 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
  (if (tree_int_cst_sgn (@1) > 0)
   @2
   @0)))

/* min (a, a + CST) -> a where CST is positive.  */
/* min (a, a + CST) -> a + CST where CST is negative.  */
(simplify
 (min:c @0 (plus@2 @0 INTEGER_CST@1))
 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
  (if (tree_int_cst_sgn (@1) > 0)
   @0
   @2)))

/* (convert (minmax ((convert (x) c)))) -> minmax (x c) if x is promoted
   and the outer convert demotes the expression back to x's type.  */
(for minmax (min max)
 (simplify
  (convert (minmax@0 (convert @1) INTEGER_CST@2))
  (if (INTEGRAL_TYPE_P (type)
       && types_match (@1, type) && int_fits_type_p (@2, type)
       && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type)
       && TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
   (minmax @1 (convert @2)))))
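
/* E.g. with short s, (short) MIN ((int) s, 100) becomes MIN (s, 100):
   the narrowing conversion can be dropped because 100 fits in short
   and both types have the same sign.  */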

(for minmax (FMIN FMAX)
 /* If either argument is NaN, return the other one.  Avoid the
    transformation if we get (and honor) a signalling NaN.  */
 (simplify
  (minmax:c @0 REAL_CST@1)
  (if (real_isnan (TREE_REAL_CST_PTR (@1))
       && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling))
   @0)))
/* Convert fmin/fmax to MIN_EXPR/MAX_EXPR.  C99 requires these
   functions to return the numeric arg if the other one is NaN.
   MIN and MAX don't honor that, so only transform if -ffinite-math-only
   is set.  C99 doesn't require -0.0 to be handled, so we don't have to
   worry about it either.  */
(if (flag_finite_math_only)
 (simplify
  (FMIN @0 @1)
  (min @0 @1))
 (simplify
  (FMAX @0 @1)
  (max @0 @1)))
/* min (-A, -B) -> -max (A, B)  */
(for minmax (min max FMIN FMAX)
     maxmin (max min FMAX FMIN)
 (simplify
  (minmax (negate:s@2 @0) (negate:s@3 @1))
  (if (FLOAT_TYPE_P (TREE_TYPE (@0))
       || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
           && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
   (negate (maxmin @0 @1)))))
/* MIN (~X, ~Y) -> ~MAX (X, Y)
   MAX (~X, ~Y) -> ~MIN (X, Y)  */
(for minmax (min max)
     maxmin (max min)
 (simplify
  (minmax (bit_not:s@2 @0) (bit_not:s@3 @1))
  (bit_not (maxmin @0 @1))))

/* MIN (X, Y) == X -> X <= Y  */
(for minmax (min min max max)
     cmp    (eq  ne  eq  ne )
     out    (le  gt  ge  lt )
 (simplify
  (cmp:c (minmax:c @0 @1) @0)
  (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)))
   (out @0 @1))))
/* MIN (X, 5) == 0 -> X == 0
   MIN (X, 5) == 7 -> false  */
(for cmp (eq ne)
 (simplify
  (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (wi::lt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
   { constant_boolean_node (cmp == NE_EXPR, type); }
   (if (wi::gt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
    (cmp @0 @2)))))
(for cmp (eq ne)
 (simplify
  (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
  (if (wi::gt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
   { constant_boolean_node (cmp == NE_EXPR, type); }
   (if (wi::lt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
    (cmp @0 @2)))))
/* MIN (X, C1) < C2 -> X < C2 || C1 < C2  */
(for minmax (min     min     max     max     min     min     max     max    )
     cmp    (lt      le      gt      ge      gt      ge      lt      le     )
     comb   (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and)
 (simplify
  (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2)
  (comb (cmp @0 @2) (cmp @1 @2))))
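
/* E.g. MIN (X, 9) < 7 becomes X < 7 || 9 < 7, which folds to X < 7,
   while MAX (X, 9) < 7 becomes X < 7 && 9 < 7, which folds to false.  */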

/* Simplifications of shift and rotates.  */

(for rotate (lrotate rrotate)
 (simplify
  (rotate integer_all_onesp@0 @1)
  @0))

/* Optimize -1 >> x for arithmetic right shifts.  */
(simplify
 (rshift integer_all_onesp@0 @1)
 (if (!TYPE_UNSIGNED (type)
      && tree_expr_nonnegative_p (@1))
  @0))

/* Optimize (x >> c) << c into x & (-1<<c).  */
(simplify
 (lshift (rshift @0 INTEGER_CST@1) @1)
 (if (wi::ltu_p (@1, element_precision (type)))
  (bit_and @0 (lshift { build_minus_one_cst (type); } @1))))

/* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned
   types.  */
(simplify
 (rshift (lshift @0 INTEGER_CST@1) @1)
 (if (TYPE_UNSIGNED (type)
      && (wi::ltu_p (@1, element_precision (type))))
  (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))

(for shiftrotate (lrotate rrotate lshift rshift)
 (simplify
  (shiftrotate @0 integer_zerop)
  (non_lvalue @0))
 (simplify
  (shiftrotate integer_zerop@0 @1)
  @0)
 /* Prefer vector1 << scalar to vector1 << vector2
    if vector2 is uniform.  */
 (for vec (VECTOR_CST CONSTRUCTOR)
  (simplify
   (shiftrotate @0 vec@1)
   (with { tree tem = uniform_vector_p (@1); }
    (if (tem)
     (shiftrotate @0 { tem; }))))))

/* Simplify X << Y to X when Y's low "width" bits are known to be zero,
   since the only valid shift count is then 0.  Similarly for X >> Y.  */
#if GIMPLE
(for shift (lshift rshift)
 (simplify
  (shift @0 SSA_NAME@1)
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
   (with {
     int width = ceil_log2 (element_precision (TREE_TYPE (@0)));
     int prec = TYPE_PRECISION (TREE_TYPE (@1));
    }
    (if ((get_nonzero_bits (@1) & wi::mask (width, false, prec)) == 0)
     @0)))))
#endif

/* Rewrite an LROTATE_EXPR by a constant into an
   RROTATE_EXPR by a new constant.  */
(simplify
 (lrotate @0 INTEGER_CST@1)
 (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1),
                            build_int_cst (TREE_TYPE (@1),
                                           element_precision (type)), @1); }))
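
/* E.g. for a 32-bit type this turns X r<< 5 into X r>> 27.  */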

/* Turn (a OP c1) OP c2 into a OP (c1+c2).  */
(for op (lrotate rrotate rshift lshift)
 (simplify
  (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
  (with { unsigned int prec = element_precision (type); }
   (if (wi::ge_p (@1, 0, TYPE_SIGN (TREE_TYPE (@1)))
        && wi::lt_p (@1, prec, TYPE_SIGN (TREE_TYPE (@1)))
        && wi::ge_p (@2, 0, TYPE_SIGN (TREE_TYPE (@2)))
        && wi::lt_p (@2, prec, TYPE_SIGN (TREE_TYPE (@2))))
    (with { unsigned int low = wi::add (@1, @2).to_uhwi (); }
     /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
        being well defined.  */
     (if (low >= prec)
      (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
       (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
       (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
        { build_zero_cst (type); }
        (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
      (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))
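
/* E.g. (X << 3) << 5 becomes X << 8, and for a 32-bit type the rotate
   pair (X r>> 30) r>> 5 wraps around to X r>> 3 via low % prec.  */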


/* ((1 << A) & 1) != 0 -> A == 0
   ((1 << A) & 1) == 0 -> A != 0 */
(for cmp (ne eq)
     icmp (eq ne)
 (simplify
  (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop)
  (icmp @0 { build_zero_cst (TREE_TYPE (@0)); })))

/* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
   (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1)
   if CST2 != 0.  */
(for cmp (ne eq)
 (simplify
  (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
  (with { int cand = wi::ctz (@2) - wi::ctz (@0); }
   (if (cand < 0
        || (!integer_zerop (@2)
            && wi::ne_p (wi::lshift (@0, cand), @2)))
    { constant_boolean_node (cmp == NE_EXPR, type); }
    (if (!integer_zerop (@2)
         && wi::eq_p (wi::lshift (@0, cand), @2))
     (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
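
/* E.g. (1 << A) == 8 becomes A == 3, while (4 << A) == 2 folds to
   false: cand is negative, so no shift count can match.  */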

/* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
        (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
   if the new mask might be further optimized.  */
(for shift (lshift rshift)
 (simplify
  (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
           INTEGER_CST@2)
  (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
       && TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
       && tree_fits_uhwi_p (@1)
       && tree_to_uhwi (@1) > 0
       && tree_to_uhwi (@1) < TYPE_PRECISION (type))
   (with
    {
      unsigned int shiftc = tree_to_uhwi (@1);
      unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
      unsigned HOST_WIDE_INT newmask, zerobits = 0;
      tree shift_type = TREE_TYPE (@3);
      unsigned int prec;

      if (shift == LSHIFT_EXPR)
        zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
      else if (shift == RSHIFT_EXPR
               && (TYPE_PRECISION (shift_type)
                   == GET_MODE_PRECISION (TYPE_MODE (shift_type))))
        {
          prec = TYPE_PRECISION (TREE_TYPE (@3));
          tree arg00 = @0;
          /* See if more bits can be proven as zero because of
             zero extension.  */
          if (@3 != @0
              && TYPE_UNSIGNED (TREE_TYPE (@0)))
            {
              tree inner_type = TREE_TYPE (@0);
              if ((TYPE_PRECISION (inner_type)
                   == GET_MODE_PRECISION (TYPE_MODE (inner_type)))
                  && TYPE_PRECISION (inner_type) < prec)
                {
                  prec = TYPE_PRECISION (inner_type);
                  /* See if we can shorten the right shift.  */
                  if (shiftc < prec)
                    shift_type = inner_type;
                  /* Otherwise X >> C1 is all zeros, so we'll optimize
                     it into (X, 0) later on by making sure zerobits
                     is all ones.  */
                }
            }
          zerobits = HOST_WIDE_INT_M1U;
          if (shiftc < prec)
            {
              zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
              zerobits <<= prec - shiftc;
            }
          /* For arithmetic shift if sign bit could be set, zerobits
             can contain actually sign bits, so no transformation is
             possible, unless MASK masks them all away.  In that
             case the shift needs to be converted into logical shift.  */
          if (!TYPE_UNSIGNED (TREE_TYPE (@3))
              && prec == TYPE_PRECISION (TREE_TYPE (@3)))
            {
              if ((mask & zerobits) == 0)
                shift_type = unsigned_type_for (TREE_TYPE (@3));
              else
                zerobits = 0;
            }
        }
    }
    /* ((X << 16) & 0xff00) is (X, 0).  */
    (if ((mask & zerobits) == mask)
     { build_int_cst (type, 0); }
     (with { newmask = mask | zerobits; }
      (if (newmask != mask && (newmask & (newmask + 1)) == 0)
       (with
        {
          /* Only do the transformation if NEWMASK is some integer
             mode's mask.  */
          for (prec = BITS_PER_UNIT;
               prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
            if (newmask == (HOST_WIDE_INT_1U << prec) - 1)
              break;
        }
        (if (prec < HOST_BITS_PER_WIDE_INT
             || newmask == HOST_WIDE_INT_M1U)
         (with
          { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
          (if (!tree_int_cst_equal (newmaskt, @2))
           (if (shift_type != TREE_TYPE (@3))
            (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
            (bit_and @4 { newmaskt; })))))))))))))

/* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
        (X {&,^,|} C2) >> C1 into (X >> C1) & (C2 >> C1).  */
(for shift (lshift rshift)
 (for bit_op (bit_and bit_xor bit_ior)
  (simplify
   (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
   (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
    (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
     (bit_op (shift (convert @0) @1) { mask; }))))))

/* ~(~X >> Y) -> X >> Y (for arithmetic shift).  */
(simplify
 (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
 (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
      && (element_precision (TREE_TYPE (@0))
          <= element_precision (TREE_TYPE (@1))
          || !TYPE_UNSIGNED (TREE_TYPE (@1))))
  (with
   { tree shift_type = TREE_TYPE (@0); }
   (convert (rshift (convert:shift_type @1) @2)))))

/* ~(~X >>r Y) -> X >>r Y
   ~(~X <<r Y) -> X <<r Y */
(for rotate (lrotate rrotate)
 (simplify
  (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
  (if ((element_precision (TREE_TYPE (@0))
        <= element_precision (TREE_TYPE (@1))
        || !TYPE_UNSIGNED (TREE_TYPE (@1)))
       && (element_precision (type) <= element_precision (TREE_TYPE (@0))
           || !TYPE_UNSIGNED (TREE_TYPE (@0))))
   (with
    { tree rotate_type = TREE_TYPE (@0); }
    (convert (rotate (convert:rotate_type @1) @2))))))

/* Simplifications of conversions.  */

/* Basic strip-useless-type-conversions / strip_nops.  */
(for cvt (convert view_convert float fix_trunc)
 (simplify
  (cvt @0)
  (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
       || (GENERIC && type == TREE_TYPE (@0)))
   @0)))

/* Contract view-conversions.  */
(simplify
 (view_convert (view_convert @0))
 (view_convert @0))

/* For integral conversions with the same precision or pointer
   conversions use a NOP_EXPR instead.  */
(simplify
 (view_convert @0)
 (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
      && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
      && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
  (convert @0)))

/* Strip inner integral conversions that do not change precision or size.  */
(simplify
 (view_convert (convert@0 @1))
 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
      && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
      && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
      && (TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))))
  (view_convert @1)))

/* Re-association barriers around constants and other re-association
   barriers can be removed.  */
(simplify
 (paren CONSTANT_CLASS_P@0)
 @0)
(simplify
 (paren (paren@1 @0))
 @1)

/* Handle cases of two conversions in a row.  */
(for ocvt (convert float fix_trunc)
 (for icvt (convert float)
  (simplify
   (ocvt (icvt@1 @0))
   (with
    {
      tree inside_type = TREE_TYPE (@0);
      tree inter_type = TREE_TYPE (@1);
      int inside_int = INTEGRAL_TYPE_P (inside_type);
      int inside_ptr = POINTER_TYPE_P (inside_type);
      int inside_float = FLOAT_TYPE_P (inside_type);
      int inside_vec = VECTOR_TYPE_P (inside_type);
      unsigned int inside_prec = TYPE_PRECISION (inside_type);
      int inside_unsignedp = TYPE_UNSIGNED (inside_type);
      int inter_int = INTEGRAL_TYPE_P (inter_type);
      int inter_ptr = POINTER_TYPE_P (inter_type);
      int inter_float = FLOAT_TYPE_P (inter_type);
      int inter_vec = VECTOR_TYPE_P (inter_type);
      unsigned int inter_prec = TYPE_PRECISION (inter_type);
      int inter_unsignedp = TYPE_UNSIGNED (inter_type);
      int final_int = INTEGRAL_TYPE_P (type);
      int final_ptr = POINTER_TYPE_P (type);
      int final_float = FLOAT_TYPE_P (type);
      int final_vec = VECTOR_TYPE_P (type);
      unsigned int final_prec = TYPE_PRECISION (type);
      int final_unsignedp = TYPE_UNSIGNED (type);
    }
   (switch
    /* In addition to the cases of two conversions in a row
       handled below, if we are converting something to its own
1783 type via an object of identical or wider precision, neither
1784 conversion is needed. */
1785 (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
1786 || (GENERIC
1787 && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
1788 && (((inter_int || inter_ptr) && final_int)
1789 || (inter_float && final_float))
1790 && inter_prec >= final_prec)
1791 (ocvt @0))
1792
1793 /* Likewise, if the intermediate and initial types are either both
1794 float or both integer, we don't need the middle conversion if the
1795 former is wider than the latter and doesn't change the signedness
1796 (for integers). Avoid this if the final type is a pointer since
1797 then we sometimes need the middle conversion. */
1798 (if (((inter_int && inside_int) || (inter_float && inside_float))
1799 && (final_int || final_float)
1800 && inter_prec >= inside_prec
1801 && (inter_float || inter_unsignedp == inside_unsignedp))
1802 (ocvt @0))
1803
1804 /* If we have a sign-extension of a zero-extended value, we can
1805 replace that by a single zero-extension. Likewise if the
1806 final conversion does not change precision we can drop the
1807 intermediate conversion. */
1808 (if (inside_int && inter_int && final_int
1809 && ((inside_prec < inter_prec && inter_prec < final_prec
1810 && inside_unsignedp && !inter_unsignedp)
1811 || final_prec == inter_prec))
1812 (ocvt @0))
1813
1814 /* Two conversions in a row are not needed unless:
1815 - some conversion is floating-point (overstrict for now), or
1816 - some conversion is a vector (overstrict for now), or
1817 - the intermediate type is narrower than both initial and
1818 final, or
1819 - the intermediate type and innermost type differ in signedness,
1820 and the outermost type is wider than the intermediate, or
1821 - the initial type is a pointer type and the precisions of the
1822 intermediate and final types differ, or
1823 - the final type is a pointer type and the precisions of the
1824 initial and intermediate types differ. */
1825 (if (! inside_float && ! inter_float && ! final_float
1826 && ! inside_vec && ! inter_vec && ! final_vec
1827 && (inter_prec >= inside_prec || inter_prec >= final_prec)
1828 && ! (inside_int && inter_int
1829 && inter_unsignedp != inside_unsignedp
1830 && inter_prec < final_prec)
1831 && ((inter_unsignedp && inter_prec > inside_prec)
1832 == (final_unsignedp && final_prec > inter_prec))
1833 && ! (inside_ptr && inter_prec != final_prec)
1834 && ! (final_ptr && inside_prec != inter_prec))
1835 (ocvt @0))
1836
1837 /* A truncation to an unsigned type (a zero-extension) should be
1838 canonicalized as bitwise and of a mask. */
1839 (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion. */
1840 && final_int && inter_int && inside_int
1841 && final_prec == inside_prec
1842 && final_prec > inter_prec
1843 && inter_unsignedp)
1844 (convert (bit_and @0 { wide_int_to_tree
1845 (inside_type,
1846 wi::mask (inter_prec, false,
1847 TYPE_PRECISION (inside_type))); })))
1848
1849 /* If we are converting an integer to a floating-point type that can
1850 represent it exactly, and back to an integer, we can skip the
1851 floating-point conversion. */
1852 (if (GIMPLE /* PR66211 */
1853 && inside_int && inter_float && final_int &&
1854 (unsigned) significand_size (TYPE_MODE (inter_type))
1855 >= inside_prec - !inside_unsignedp)
1856 (convert @0)))))))
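/* A sketch of that last case: for a 32-bit int i, (int)(double)i
   folds to i because double's 53-bit significand can represent every
   32-bit int exactly, whereas (int)(float)i is left alone because
   float's 24-bit significand cannot. */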
1857
1858 /* If we have a narrowing conversion to an integral type that is fed by a
1859 BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
1860 masks off bits outside the final type (and nothing else). */
1861 (simplify
1862 (convert (bit_and @0 INTEGER_CST@1))
1863 (if (INTEGRAL_TYPE_P (type)
1864 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
1865 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
1866 && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
1867 TYPE_PRECISION (type)), 0))
1868 (convert @0)))
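/* E.g. for a 32-bit unsigned int X, (unsigned char)(X & 0xff)
   becomes (unsigned char) X: the mask only clears bits that the
   narrowing conversion discards anyway. */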
1869
1870
1871 /* (X /[ex] A) * A -> X. */
1872 (simplify
1873 (mult (convert1? (exact_div @0 @@1)) (convert2? @1))
1874 (convert @0))
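/* EXACT_DIV_EXPR asserts that the division leaves no remainder, so
   the multiplication can be cancelled outright. Pointer subtraction
   is a typical source: the byte difference of two int pointers is
   divided /[ex] 4 to obtain an element count. */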
1875
1876 /* Canonicalization of binary operations. */
1877
1878 /* Convert X + -C into X - C. */
1879 (simplify
1880 (plus @0 REAL_CST@1)
1881 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
1882 (with { tree tem = const_unop (NEGATE_EXPR, type, @1); }
1883 (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
1884 (minus @0 { tem; })))))
1885
1886 /* Convert x+x into x*2. */
1887 (simplify
1888 (plus @0 @0)
1889 (if (SCALAR_FLOAT_TYPE_P (type))
1890 (mult @0 { build_real (type, dconst2); })
1891 (if (INTEGRAL_TYPE_P (type))
1892 (mult @0 { build_int_cst (type, 2); }))))
1893
1894 (simplify
1895 (minus integer_zerop @1)
1896 (negate @1))
1897
1898 /* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether
1899 ARG0 is zero and X + ARG0 reduces to X, since that would mean
1900 (-ARG1 + ARG0) reduces to -ARG1. */
1901 (simplify
1902 (minus real_zerop@0 @1)
1903 (if (fold_real_zero_addition_p (type, @0, 0))
1904 (negate @1)))
1905
1906 /* Transform x * -1 into -x. */
1907 (simplify
1908 (mult @0 integer_minus_onep)
1909 (negate @0))
1910
1911 /* True if we can easily extract the real and imaginary parts of a complex
1912 number. */
1913 (match compositional_complex
1914 (convert? (complex @0 @1)))
1915
1916 /* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */
1917 (simplify
1918 (complex (realpart @0) (imagpart @0))
1919 @0)
1920 (simplify
1921 (realpart (complex @0 @1))
1922 @0)
1923 (simplify
1924 (imagpart (complex @0 @1))
1925 @1)
1926
1927 /* Sometimes we only care about half of a complex expression. */
1928 (simplify
1929 (realpart (convert?:s (conj:s @0)))
1930 (convert (realpart @0)))
1931 (simplify
1932 (imagpart (convert?:s (conj:s @0)))
1933 (convert (negate (imagpart @0))))
1934 (for part (realpart imagpart)
1935 (for op (plus minus)
1936 (simplify
1937 (part (convert?:s@2 (op:s @0 @1)))
1938 (convert (op (part @0) (part @1))))))
1939 (simplify
1940 (realpart (convert?:s (CEXPI:s @0)))
1941 (convert (COS @0)))
1942 (simplify
1943 (imagpart (convert?:s (CEXPI:s @0)))
1944 (convert (SIN @0)))
1945
1946 /* conj(conj(x)) -> x */
1947 (simplify
1948 (conj (convert? (conj @0)))
1949 (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
1950 (convert @0)))
1951
1952 /* conj({x,y}) -> {x,-y} */
1953 (simplify
1954 (conj (convert?:s (complex:s @0 @1)))
1955 (with { tree itype = TREE_TYPE (type); }
1956 (complex (convert:itype @0) (negate (convert:itype @1)))))
1957
1958 /* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. */
1959 (for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
1960 (simplify
1961 (bswap (bswap @0))
1962 @0)
1963 (simplify
1964 (bswap (bit_not (bswap @0)))
1965 (bit_not @0))
1966 (for bitop (bit_xor bit_ior bit_and)
1967 (simplify
1968 (bswap (bitop:c (bswap @0) @1))
1969 (bitop @0 (bswap @1)))))
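/* bswap is an involution and permutes bits uniformly, so it commutes
   with bitwise operations; e.g.
   __builtin_bswap32 (__builtin_bswap32 (x) & 0x0000ff00) becomes
   x & 0x00ff0000. */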
1970
1971
1972 /* Combine COND_EXPRs and VEC_COND_EXPRs. */
1973
1974 /* Simplify constant conditions.
1975 Only optimize constant conditions when the selected branch
1976 has the same type as the COND_EXPR. This avoids optimizing
1977 away "c ? x : throw", where the throw has a void type.
1978 Note that we cannot throw away the fold-const.c variant nor
1979 this one, as we depend on doing this transform before possibly
1980 A ? B : B -> B triggers, and the fold-const.c one can optimize
1981 0 ? A : B to B even if A has side-effects, something
1982 genmatch cannot handle. */
1983 (simplify
1984 (cond INTEGER_CST@0 @1 @2)
1985 (if (integer_zerop (@0))
1986 (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
1987 @2)
1988 (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
1989 @1)))
1990 (simplify
1991 (vec_cond VECTOR_CST@0 @1 @2)
1992 (if (integer_all_onesp (@0))
1993 @1
1994 (if (integer_zerop (@0))
1995 @2)))
1996
1997 /* Simplification moved from fold_cond_expr_with_comparison. It may also
1998 be extended. */
1999 /* This pattern implements two kinds of simplification:
2000
2001 Case 1)
2002 (cond (cmp (convert1? x) c1) (convert2? x) c2) -> (minmax (x c)) if:
2003 1) Conversions are type widening from a smaller type.
2004 2) Const c1 equals c2 after canonicalizing the comparison.
2005 3) Comparison has tree code LT, LE, GT or GE.
2006 This specific pattern is needed when (cmp (convert x) c) may not
2007 be simplified by comparison patterns because of multiple uses of
2008 x. It also makes sense here because simplifying across multiple
2009 referenced vars is always beneficial for complicated cases.
2010
2011 Case 2)
2012 (cond (eq (convert1? x) c1) (convert2? x) c2) -> (cond (eq x c1) c1 c2). */
2013 (for cmp (lt le gt ge eq)
2014 (simplify
2015 (cond (cmp (convert1? @1) INTEGER_CST@3) (convert2? @1) INTEGER_CST@2)
2016 (with
2017 {
2018 tree from_type = TREE_TYPE (@1);
2019 tree c1_type = TREE_TYPE (@3), c2_type = TREE_TYPE (@2);
2020 enum tree_code code = ERROR_MARK;
2021
2022 if (INTEGRAL_TYPE_P (from_type)
2023 && int_fits_type_p (@2, from_type)
2024 && (types_match (c1_type, from_type)
2025 || (TYPE_PRECISION (c1_type) > TYPE_PRECISION (from_type)
2026 && (TYPE_UNSIGNED (from_type)
2027 || TYPE_SIGN (c1_type) == TYPE_SIGN (from_type))))
2028 && (types_match (c2_type, from_type)
2029 || (TYPE_PRECISION (c2_type) > TYPE_PRECISION (from_type)
2030 && (TYPE_UNSIGNED (from_type)
2031 || TYPE_SIGN (c2_type) == TYPE_SIGN (from_type)))))
2032 {
2033 if (cmp != EQ_EXPR)
2034 {
2035 if (wi::to_widest (@3) == (wi::to_widest (@2) - 1))
2036 {
2037 /* X <= Y - 1 is equivalent to X < Y. */
2038 if (cmp == LE_EXPR)
2039 code = LT_EXPR;
2040 /* X > Y - 1 is equivalent to X >= Y. */
2041 if (cmp == GT_EXPR)
2042 code = GE_EXPR;
2043 }
2044 if (wi::to_widest (@3) == (wi::to_widest (@2) + 1))
2045 {
2046 /* X < Y + 1 is equivalent to X <= Y. */
2047 if (cmp == LT_EXPR)
2048 code = LE_EXPR;
2049 /* X >= Y + 1 is equivalent to X > Y. */
2050 if (cmp == GE_EXPR)
2051 code = GT_EXPR;
2052 }
2053 if (code != ERROR_MARK
2054 || wi::to_widest (@2) == wi::to_widest (@3))
2055 {
2056 if (cmp == LT_EXPR || cmp == LE_EXPR)
2057 code = MIN_EXPR;
2058 if (cmp == GT_EXPR || cmp == GE_EXPR)
2059 code = MAX_EXPR;
2060 }
2061 }
2062 /* Can do A == C1 ? A : C2 -> A == C1 ? C1 : C2? */
2063 else if (int_fits_type_p (@3, from_type))
2064 code = EQ_EXPR;
2065 }
2066 }
2067 (if (code == MAX_EXPR)
2068 (convert (max @1 (convert @2)))
2069 (if (code == MIN_EXPR)
2070 (convert (min @1 (convert @2)))
2071 (if (code == EQ_EXPR)
2072 (convert (cond (eq @1 (convert @3))
2073 (convert:from_type @3) (convert:from_type @2)))))))))
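/* A worked instance of case 1), assuming unsigned char x:
   (int) x < 200 ? (int) x : 200 is recognized as
   (int) MIN (x, 200), because the widening conversions cannot
   change the outcome of the comparison. */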
2074
2075 /* (cond (cmp (convert? x) c1) (op x c2) c3) -> (op (minmax x c1) c2) if:
2076
2077 1) OP is PLUS or MINUS.
2078 2) CMP is LT, LE, GT or GE.
2079 3) C3 == (C1 op C2), and computation doesn't have undefined behavior.
2080
2081 This pattern also handles special cases like:
2082
2083 A) Operand x is an unsigned to signed type conversion and c1 is
2084 integer zero. In this case,
2085 (signed type)x < 0 <=> x > MAX_VAL(signed type)
2086 (signed type)x >= 0 <=> x <= MAX_VAL(signed type)
2087 B) Const c1 may not equal (C3 op' C2). In this case we also
2088 check equality for (c1+1) and (c1-1) by adjusting the comparison
2089 code.
2090
2091 TODO: Though the signed type case is handled by this pattern, it
2092 cannot be simplified at the moment because the C standard requires
2093 additional type promotion. In order to match&simplify it here, the
2094 IR needs to be cleaned up by other optimizers, i.e. VRP. */
2095 (for op (plus minus)
2096 (for cmp (lt le gt ge)
2097 (simplify
2098 (cond (cmp (convert? @X) INTEGER_CST@1) (op @X INTEGER_CST@2) INTEGER_CST@3)
2099 (with { tree from_type = TREE_TYPE (@X), to_type = TREE_TYPE (@1); }
2100 (if (types_match (from_type, to_type)
2101 /* Check if it is special case A). */
2102 || (TYPE_UNSIGNED (from_type)
2103 && !TYPE_UNSIGNED (to_type)
2104 && TYPE_PRECISION (from_type) == TYPE_PRECISION (to_type)
2105 && integer_zerop (@1)
2106 && (cmp == LT_EXPR || cmp == GE_EXPR)))
2107 (with
2108 {
2109 bool overflow = false;
2110 enum tree_code code, cmp_code = cmp;
2111 wide_int real_c1, c1 = @1, c2 = @2, c3 = @3;
2112 signop sgn = TYPE_SIGN (from_type);
2113
2114 /* Handle special case A), given x of unsigned type:
2115 ((signed type)x < 0) <=> (x > MAX_VAL(signed type))
2116 ((signed type)x >= 0) <=> (x <= MAX_VAL(signed type)) */
2117 if (!types_match (from_type, to_type))
2118 {
2119 if (cmp_code == LT_EXPR)
2120 cmp_code = GT_EXPR;
2121 if (cmp_code == GE_EXPR)
2122 cmp_code = LE_EXPR;
2123 c1 = wi::max_value (to_type);
2124 }
2125 /* To simplify this pattern, we require c3 = (c1 op c2). Here we
2126 compute (c3 op' c2) and check if it equals c1, with op' being
2127 the inverted operator of op. Make sure overflow doesn't happen
2128 if it is undefined. */
2129 if (op == PLUS_EXPR)
2130 real_c1 = wi::sub (c3, c2, sgn, &overflow);
2131 else
2132 real_c1 = wi::add (c3, c2, sgn, &overflow);
2133
2134 code = cmp_code;
2135 if (!overflow || !TYPE_OVERFLOW_UNDEFINED (from_type))
2136 {
2137 /* Check if c1 equals real_c1. The boundary condition is handled
2138 by adjusting the comparison operation if necessary. */
2139 if (!wi::cmp (wi::sub (real_c1, 1, sgn, &overflow), c1, sgn)
2140 && !overflow)
2141 {
2142 /* X <= Y - 1 is equivalent to X < Y. */
2143 if (cmp_code == LE_EXPR)
2144 code = LT_EXPR;
2145 /* X > Y - 1 is equivalent to X >= Y. */
2146 if (cmp_code == GT_EXPR)
2147 code = GE_EXPR;
2148 }
2149 if (!wi::cmp (wi::add (real_c1, 1, sgn, &overflow), c1, sgn)
2150 && !overflow)
2151 {
2152 /* X < Y + 1 is equivalent to X <= Y. */
2153 if (cmp_code == LT_EXPR)
2154 code = LE_EXPR;
2155 /* X >= Y + 1 is equivalent to X > Y. */
2156 if (cmp_code == GE_EXPR)
2157 code = GT_EXPR;
2158 }
2159 if (code != cmp_code || !wi::cmp (real_c1, c1, sgn))
2160 {
2161 if (cmp_code == LT_EXPR || cmp_code == LE_EXPR)
2162 code = MIN_EXPR;
2163 if (cmp_code == GT_EXPR || cmp_code == GE_EXPR)
2164 code = MAX_EXPR;
2165 }
2166 }
2167 }
2168 (if (code == MAX_EXPR)
2169 (op (max @X { wide_int_to_tree (from_type, real_c1); })
2170 { wide_int_to_tree (from_type, c2); })
2171 (if (code == MIN_EXPR)
2172 (op (min @X { wide_int_to_tree (from_type, real_c1); })
2173 { wide_int_to_tree (from_type, c2); })))))))))
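/* A worked instance, for signed int x:
   x > 100 ? x + 10 : 110 becomes MAX (x, 100) + 10, since the
   constant arm 110 equals the bound 100 plus the addend 10. */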
2174
2175 (for cnd (cond vec_cond)
2176 /* A ? B : (A ? X : C) -> A ? B : C. */
2177 (simplify
2178 (cnd @0 (cnd @0 @1 @2) @3)
2179 (cnd @0 @1 @3))
2180 (simplify
2181 (cnd @0 @1 (cnd @0 @2 @3))
2182 (cnd @0 @1 @3))
2183 /* A ? B : (!A ? C : X) -> A ? B : C. */
2184 /* ??? This matches embedded conditions open-coded because genmatch
2185 would generate matching code for conditions in separate stmts only.
2186 The following is still important to merge then and else arm cases
2187 from if-conversion. */
2188 (simplify
2189 (cnd @0 @1 (cnd @2 @3 @4))
2190 (if (COMPARISON_CLASS_P (@0)
2191 && COMPARISON_CLASS_P (@2)
2192 && invert_tree_comparison
2193 (TREE_CODE (@0), HONOR_NANS (TREE_OPERAND (@0, 0))) == TREE_CODE (@2)
2194 && operand_equal_p (TREE_OPERAND (@0, 0), TREE_OPERAND (@2, 0), 0)
2195 && operand_equal_p (TREE_OPERAND (@0, 1), TREE_OPERAND (@2, 1), 0))
2196 (cnd @0 @1 @3)))
2197 (simplify
2198 (cnd @0 (cnd @1 @2 @3) @4)
2199 (if (COMPARISON_CLASS_P (@0)
2200 && COMPARISON_CLASS_P (@1)
2201 && invert_tree_comparison
2202 (TREE_CODE (@0), HONOR_NANS (TREE_OPERAND (@0, 0))) == TREE_CODE (@1)
2203 && operand_equal_p (TREE_OPERAND (@0, 0), TREE_OPERAND (@1, 0), 0)
2204 && operand_equal_p (TREE_OPERAND (@0, 1), TREE_OPERAND (@1, 1), 0))
2205 (cnd @0 @3 @4)))
2206
2207 /* A ? B : B -> B. */
2208 (simplify
2209 (cnd @0 @1 @1)
2210 @1)
2211
2212 /* !A ? B : C -> A ? C : B. */
2213 (simplify
2214 (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
2215 (cnd @0 @2 @1)))
2216
2217 /* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons
2218 return all -1 or all 0 results. */
2219 /* ??? We could instead convert all instances of the vec_cond to negate,
2220 but that isn't necessarily a win on its own. */
2221 (simplify
2222 (plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
2223 (if (VECTOR_TYPE_P (type)
2224 && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1))
2225 && (TYPE_MODE (TREE_TYPE (type))
2226 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
2227 (minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
2228
2229 /* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0). */
2230 (simplify
2231 (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
2232 (if (VECTOR_TYPE_P (type)
2233 && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1))
2234 && (TYPE_MODE (TREE_TYPE (type))
2235 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
2236 (plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
2237
2238
2239 /* Simplifications of comparisons. */
2240
2241 /* See if we can reduce the magnitude of a constant involved in a
2242 comparison by changing the comparison code. This is a canonicalization
2243 formerly done by maybe_canonicalize_comparison_1. */
2244 (for cmp (le gt)
2245 acmp (lt ge)
2246 (simplify
2247 (cmp @0 INTEGER_CST@1)
2248 (if (tree_int_cst_sgn (@1) == -1)
2249 (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))))
2250 (for cmp (ge lt)
2251 acmp (gt le)
2252 (simplify
2253 (cmp @0 INTEGER_CST@1)
2254 (if (tree_int_cst_sgn (@1) == 1)
2255 (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))))
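/* For example, X <= -2 becomes X < -1 and X >= 3 becomes X > 2,
   nudging the constant toward zero. */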
2256
2257
2258 /* We can simplify a logical negation of a comparison to the
2259 inverted comparison. As we cannot compute an expression
2260 operator using invert_tree_comparison we have to simulate
2261 that with expression code iteration. */
2262 (for cmp (tcc_comparison)
2263 icmp (inverted_tcc_comparison)
2264 ncmp (inverted_tcc_comparison_with_nans)
2265 /* Ideally we'd like to combine the following two patterns
2266 and handle some more cases by using
2267 (logical_inverted_value (cmp @0 @1))
2268 here but for that genmatch would need to "inline" that.
2269 For now implement what forward_propagate_comparison did. */
2270 (simplify
2271 (bit_not (cmp @0 @1))
2272 (if (VECTOR_TYPE_P (type)
2273 || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
2274 /* Comparison inversion may be impossible for trapping math;
2275 invert_tree_comparison will tell us. But we can't use
2276 a computed operator in the replacement tree, thus we have
2277 to play the trick below. */
2278 (with { enum tree_code ic = invert_tree_comparison
2279 (cmp, HONOR_NANS (@0)); }
2280 (if (ic == icmp)
2281 (icmp @0 @1)
2282 (if (ic == ncmp)
2283 (ncmp @0 @1))))))
2284 (simplify
2285 (bit_xor (cmp @0 @1) integer_truep)
2286 (with { enum tree_code ic = invert_tree_comparison
2287 (cmp, HONOR_NANS (@0)); }
2288 (if (ic == icmp)
2289 (icmp @0 @1)
2290 (if (ic == ncmp)
2291 (ncmp @0 @1))))))
2292
2293 /* Transform comparisons of the form X - Y CMP 0 to X CMP Y.
2294 ??? The transformation is valid for the other operators if overflow
2295 is undefined for the type, but performing it here badly interacts
2296 with the transformation in fold_cond_expr_with_comparison which
2297 attempts to synthesize ABS_EXPR. */
2298 (for cmp (eq ne)
2299 (simplify
2300 (cmp (minus@2 @0 @1) integer_zerop)
2301 (if (single_use (@2))
2302 (cmp @0 @1))))
2303
2304 /* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the
2305 signed arithmetic case. That form is created by the compiler
2306 often enough for folding it to be of value. One example is in
2307 computing loop trip counts after Operator Strength Reduction. */
2308 (for cmp (simple_comparison)
2309 scmp (swapped_simple_comparison)
2310 (simplify
2311 (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2)
2312 /* Handle unfolded multiplication by zero. */
2313 (if (integer_zerop (@1))
2314 (cmp @1 @2)
2315 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2316 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2317 && single_use (@3))
2318 /* If @1 is negative we swap the sense of the comparison. */
2319 (if (tree_int_cst_sgn (@1) < 0)
2320 (scmp @0 @2)
2321 (cmp @0 @2))))))
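/* E.g. when signed overflow is undefined, x * 4 > 0 becomes x > 0,
   while x * -4 > 0 flips the sense and becomes x < 0. */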
2322
2323 /* Simplify comparison of something with itself. For IEEE
2324 floating-point, we can only do some of these simplifications. */
2325 (for cmp (eq ge le)
2326 (simplify
2327 (cmp @0 @0)
2328 (if (! FLOAT_TYPE_P (TREE_TYPE (@0))
2329 || ! HONOR_NANS (@0))
2330 { constant_boolean_node (true, type); }
2331 (if (cmp != EQ_EXPR)
2332 (eq @0 @0)))))
2333 (for cmp (ne gt lt)
2334 (simplify
2335 (cmp @0 @0)
2336 (if (cmp != NE_EXPR
2337 || ! FLOAT_TYPE_P (TREE_TYPE (@0))
2338 || ! HONOR_NANS (@0))
2339 { constant_boolean_node (false, type); })))
2340 (for cmp (unle unge uneq)
2341 (simplify
2342 (cmp @0 @0)
2343 { constant_boolean_node (true, type); }))
2344 (for cmp (unlt ungt)
2345 (simplify
2346 (cmp @0 @0)
2347 (unordered @0 @0)))
2348 (simplify
2349 (ltgt @0 @0)
2350 (if (!flag_trapping_math)
2351 { constant_boolean_node (false, type); }))
2352
2353 /* Fold ~X op ~Y as Y op X. */
2354 (for cmp (simple_comparison)
2355 (simplify
2356 (cmp (bit_not@2 @0) (bit_not@3 @1))
2357 (if (single_use (@2) && single_use (@3))
2358 (cmp @1 @0))))
2359
2360 /* Fold ~X op C as X op' ~C, where op' is the swapped comparison. */
2361 (for cmp (simple_comparison)
2362 scmp (swapped_simple_comparison)
2363 (simplify
2364 (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1)
2365 (if (single_use (@2)
2366 && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST))
2367 (scmp @0 (bit_not @1)))))
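/* E.g. ~x == 5 becomes x == -6 and ~x < 5 becomes x > -6,
   since ~5 is -6 in two's complement. */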
2368
2369 (for cmp (simple_comparison)
2370 /* Fold (double)float1 CMP (double)float2 into float1 CMP float2. */
2371 (simplify
2372 (cmp (convert@2 @0) (convert? @1))
2373 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2374 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
2375 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
2376 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
2377 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))))
2378 (with
2379 {
2380 tree type1 = TREE_TYPE (@1);
2381 if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
2382 {
2383 REAL_VALUE_TYPE orig = TREE_REAL_CST (@1);
2384 if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
2385 && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
2386 type1 = float_type_node;
2387 if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
2388 && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
2389 type1 = double_type_node;
2390 }
2391 tree newtype
2392 = (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1)
2393 ? TREE_TYPE (@0) : type1);
2394 }
2395 (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype))
2396 (cmp (convert:newtype @0) (convert:newtype @1))))))
2397
2398 (simplify
2399 (cmp @0 REAL_CST@1)
2400 /* IEEE doesn't distinguish +0 and -0 in comparisons. */
2401 (switch
2402 /* a CMP (-0) -> a CMP 0 */
2403 (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
2404 (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
2405 /* x != NaN is always true, other ops are always false. */
2406 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
2407 && ! HONOR_SNANS (@1))
2408 { constant_boolean_node (cmp == NE_EXPR, type); })
2409 /* Fold comparisons against infinity. */
2410 (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
2411 && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
2412 (with
2413 {
2414 REAL_VALUE_TYPE max;
2415 enum tree_code code = cmp;
2416 bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
2417 if (neg)
2418 code = swap_tree_comparison (code);
2419 }
2420 (switch
2421 /* x > +Inf is always false, if we ignore sNaNs. */
2422 (if (code == GT_EXPR
2423 && ! HONOR_SNANS (@0))
2424 { constant_boolean_node (false, type); })
2425 (if (code == LE_EXPR)
2426 /* x <= +Inf is always true, if we don't care about NaNs. */
2427 (if (! HONOR_NANS (@0))
2428 { constant_boolean_node (true, type); }
2429 /* x <= +Inf is the same as x == x, i.e. !isnan(x). */
2430 (eq @0 @0)))
2431 /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX. */
2432 (if (code == EQ_EXPR || code == GE_EXPR)
2433 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
2434 (if (neg)
2435 (lt @0 { build_real (TREE_TYPE (@0), max); })
2436 (gt @0 { build_real (TREE_TYPE (@0), max); }))))
2437 /* x < +Inf is always equal to x <= DBL_MAX. */
2438 (if (code == LT_EXPR)
2439 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
2440 (if (neg)
2441 (ge @0 { build_real (TREE_TYPE (@0), max); })
2442 (le @0 { build_real (TREE_TYPE (@0), max); }))))
2443 /* x != +Inf is always equal to !(x > DBL_MAX). */
2444 (if (code == NE_EXPR)
2445 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
2446 (if (! HONOR_NANS (@0))
2447 (if (neg)
2448 (ge @0 { build_real (TREE_TYPE (@0), max); })
2449 (le @0 { build_real (TREE_TYPE (@0), max); }))
2450 (if (neg)
2451 (bit_xor (lt @0 { build_real (TREE_TYPE (@0), max); })
2452 { build_one_cst (type); })
2453 (bit_xor (gt @0 { build_real (TREE_TYPE (@0), max); })
2454 { build_one_cst (type); }))))))))))
2455
2456 /* If this is a comparison of a real constant with a PLUS_EXPR
2457 or a MINUS_EXPR of a real constant, we can convert it into a
2458 comparison with a revised real constant as long as no overflow
2459 occurs when unsafe_math_optimizations are enabled. */
2460 (if (flag_unsafe_math_optimizations)
2461 (for op (plus minus)
2462 (simplify
2463 (cmp (op @0 REAL_CST@1) REAL_CST@2)
2464 (with
2465 {
2466 tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
2467 TREE_TYPE (@1), @2, @1);
2468 }
2469 (if (tem && !TREE_OVERFLOW (tem))
2470 (cmp @0 { tem; }))))))
2471
2472 /* Likewise, we can simplify a comparison of a real constant with
2473 a MINUS_EXPR whose first operand is also a real constant, i.e.
2474 (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
2475 floating-point types only if -fassociative-math is set. */
2476 (if (flag_associative_math)
2477 (simplify
2478 (cmp (minus REAL_CST@0 @1) REAL_CST@2)
2479 (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
2480 (if (tem && !TREE_OVERFLOW (tem))
2481 (cmp { tem; } @1)))))
2482
2483 /* Fold comparisons against built-in math functions. */
2484 (if (flag_unsafe_math_optimizations
2485 && ! flag_errno_math)
2486 (for sq (SQRT)
2487 (simplify
2488 (cmp (sq @0) REAL_CST@1)
2489 (switch
2490 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
2491 (switch
2492 /* sqrt(x) ==, < or <= y is always false, if y is negative. */
2493 (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
2494 { constant_boolean_node (false, type); })
2495 /* sqrt(x) !=, > or >= y is always true, if y is negative and we
2496 don't care about NaNs, i.e. negative values of x. */
2497 (if (cmp == NE_EXPR || !HONOR_NANS (@0))
2498 { constant_boolean_node (true, type); })
2499 /* sqrt(x) > or >= y is the same as x >= 0, if y is negative. */
2500 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
2501 (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0))
2502 (switch
2503 /* sqrt(x) < 0 is always false. */
2504 (if (cmp == LT_EXPR)
2505 { constant_boolean_node (false, type); })
2506 /* sqrt(x) >= 0 is always true if we don't care about NaNs. */
2507 (if (cmp == GE_EXPR && !HONOR_NANS (@0))
2508 { constant_boolean_node (true, type); })
2509 /* sqrt(x) <= 0 -> x == 0. */
2510 (if (cmp == LE_EXPR)
2511 (eq @0 @1))
2512 /* Otherwise sqrt(x) cmp 0 -> x cmp 0. Here cmp can be >=, >,
2513 == or !=. In the last case:
2514
2515 (sqrt(x) != 0) == (NaN != 0) == true == (x != 0)
2516
2517 if x is negative or NaN. Due to -funsafe-math-optimizations,
2518 the results for other x follow from natural arithmetic. */
2519 (cmp @0 @1)))
2520 (if (cmp == GT_EXPR || cmp == GE_EXPR)
2521 (with
2522 {
2523 REAL_VALUE_TYPE c2;
2524 real_arithmetic (&c2, MULT_EXPR,
2525 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
2526 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
2527 }
2528 (if (REAL_VALUE_ISINF (c2))
2529 /* sqrt(x) > y is x == +Inf, when y is very large. */
2530 (if (HONOR_INFINITIES (@0))
2531 (eq @0 { build_real (TREE_TYPE (@0), c2); })
2532 { constant_boolean_node (false, type); })
2533 /* sqrt(x) > c is the same as x > c*c. */
2534 (cmp @0 { build_real (TREE_TYPE (@0), c2); }))))
2535 (if (cmp == LT_EXPR || cmp == LE_EXPR)
2536 (with
2537 {
2538 REAL_VALUE_TYPE c2;
2539 real_arithmetic (&c2, MULT_EXPR,
2540 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
2541 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
2542 }
2543 (if (REAL_VALUE_ISINF (c2))
2544 (switch
2545 /* sqrt(x) < y is always true, when y is a very large
2546 value and we don't care about NaNs or Infinities. */
2547 (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
2548 { constant_boolean_node (true, type); })
2549 /* sqrt(x) < y is x != +Inf when y is very large and we
2550 don't care about NaNs. */
2551 (if (! HONOR_NANS (@0))
2552 (ne @0 { build_real (TREE_TYPE (@0), c2); }))
2553 /* sqrt(x) < y is x >= 0 when y is very large and we
2554 don't care about Infinities. */
2555 (if (! HONOR_INFINITIES (@0))
2556 (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
2557 /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */
2558 (if (GENERIC)
2559 (truth_andif
2560 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
2561 (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
2562 /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */
2563 (if (! HONOR_NANS (@0))
2564 (cmp @0 { build_real (TREE_TYPE (@0), c2); })
2565 /* sqrt(x) < c is the same as x >= 0 && x < c*c. */
2566 (if (GENERIC)
2567 (truth_andif
2568 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
2569 (cmp @0 { build_real (TREE_TYPE (@0), c2); }))))))))))))
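/* So, for example, under these flags sqrt (x) > 3.0 becomes
   x > 9.0, and sqrt (x) < 2.0 becomes x < 4.0 once NaNs are known
   not to matter. */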
2570
2571 /* Fold A /[ex] B CMP C to A CMP B * C. */
2572 (for cmp (eq ne)
2573 (simplify
2574 (cmp (exact_div @0 @1) INTEGER_CST@2)
2575 (if (!integer_zerop (@1))
2576 (if (wi::eq_p (@2, 0))
2577 (cmp @0 @2)
2578 (if (TREE_CODE (@1) == INTEGER_CST)
2579 (with
2580 {
2581 bool ovf;
2582 wide_int prod = wi::mul (@2, @1, TYPE_SIGN (TREE_TYPE (@1)), &ovf);
2583 }
2584 (if (ovf)
2585 { constant_boolean_node (cmp == NE_EXPR, type); }
2586 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))))
2587 (for cmp (lt le gt ge)
2588 (simplify
2589 (cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2)
2590 (if (wi::gt_p (@1, 0, TYPE_SIGN (TREE_TYPE (@1))))
2591 (with
2592 {
2593 bool ovf;
2594 wide_int prod = wi::mul (@2, @1, TYPE_SIGN (TREE_TYPE (@1)), &ovf);
2595 }
2596 (if (ovf)
2597 { constant_boolean_node (wi::lt_p (@2, 0, TYPE_SIGN (TREE_TYPE (@2)))
2598 != (cmp == LT_EXPR || cmp == LE_EXPR), type); }
2599 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))
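/* E.g. (a /[ex] 4) == 10 becomes a == 40; if scaling the constant
   back up would overflow, no value of a can satisfy the equality and
   the comparison folds to a constant. */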
2600
2601 /* Unordered tests if either argument is a NaN. */
2602 (simplify
2603 (bit_ior (unordered @0 @0) (unordered @1 @1))
2604 (if (types_match (@0, @1))
2605 (unordered @0 @1)))
2606 (simplify
2607 (bit_and (ordered @0 @0) (ordered @1 @1))
2608 (if (types_match (@0, @1))
2609 (ordered @0 @1)))
2610 (simplify
2611 (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1))
2612 @2)
2613 (simplify
2614 (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1))
2615 @2)
2616
2617 /* Simple range test simplifications. */
2618 /* A < B || A >= B -> true. */
2619 (for test1 (lt le le le ne ge)
2620 test2 (ge gt ge ne eq ne)
2621 (simplify
2622 (bit_ior:c (test1 @0 @1) (test2 @0 @1))
2623 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2624 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
2625 { constant_boolean_node (true, type); })))
2626 /* A < B && A >= B -> false. */
2627 (for test1 (lt lt lt le ne eq)
2628 test2 (ge gt eq gt eq gt)
2629 (simplify
2630 (bit_and:c (test1 @0 @1) (test2 @0 @1))
2631 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2632 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
2633 { constant_boolean_node (false, type); })))
2634
2635 /* -A CMP -B -> B CMP A. */
2636 (for cmp (tcc_comparison)
2637 scmp (swapped_tcc_comparison)
2638 (simplify
2639 (cmp (negate @0) (negate @1))
2640 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2641 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2642 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
2643 (scmp @0 @1)))
2644 (simplify
2645 (cmp (negate @0) CONSTANT_CLASS_P@1)
2646 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2647 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2648 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
2649 (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); }
2650 (if (tem && !TREE_OVERFLOW (tem))
2651 (scmp @0 { tem; }))))))
2652
2653 /* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0. */
2654 (for op (eq ne)
2655 (simplify
2656 (op (abs @0) zerop@1)
2657 (op @0 @1)))
2658
2659 /* From fold_sign_changed_comparison and fold_widened_comparison. */
2660 (for cmp (simple_comparison)
2661 (simplify
2662 (cmp (convert@0 @00) (convert?@1 @10))
2663 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2664 /* Disable this optimization if we're casting a function pointer
2665 type on targets that require function pointer canonicalization. */
2666 && !(targetm.have_canonicalize_funcptr_for_compare ()
2667 && TREE_CODE (TREE_TYPE (@00)) == POINTER_TYPE
2668 && TREE_CODE (TREE_TYPE (TREE_TYPE (@00))) == FUNCTION_TYPE)
2669 && single_use (@0))
2670 (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
2671 && (TREE_CODE (@10) == INTEGER_CST
2672 || (@1 != @10 && types_match (TREE_TYPE (@10), TREE_TYPE (@00))))
2673 && (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
2674 || cmp == NE_EXPR
2675 || cmp == EQ_EXPR)
2676 && (POINTER_TYPE_P (TREE_TYPE (@00)) == POINTER_TYPE_P (TREE_TYPE (@0))))
2677 /* ??? The special-casing of INTEGER_CST conversion was in the original
2678 code and here to avoid a spurious overflow flag on the resulting
2679 constant which fold_convert produces. */
2680 (if (TREE_CODE (@1) == INTEGER_CST)
2681 (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
2682 TREE_OVERFLOW (@1)); })
2683 (cmp @00 (convert @1)))
2684
2685 (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
2686 /* If possible, express the comparison in the shorter mode. */
2687 (if ((cmp == EQ_EXPR || cmp == NE_EXPR
2688 || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00))
2689 || (!TYPE_UNSIGNED (TREE_TYPE (@0))
2690 && TYPE_UNSIGNED (TREE_TYPE (@00))))
2691 && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
2692 || ((TYPE_PRECISION (TREE_TYPE (@00))
2693 >= TYPE_PRECISION (TREE_TYPE (@10)))
2694 && (TYPE_UNSIGNED (TREE_TYPE (@00))
2695 == TYPE_UNSIGNED (TREE_TYPE (@10))))
2696 || (TREE_CODE (@10) == INTEGER_CST
2697 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
2698 && int_fits_type_p (@10, TREE_TYPE (@00)))))
2699 (cmp @00 (convert @10))
2700 (if (TREE_CODE (@10) == INTEGER_CST
2701 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
2702 && !int_fits_type_p (@10, TREE_TYPE (@00)))
2703 (with
2704 {
2705 tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
2706 tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
2707 bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
2708 bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
2709 }
2710 (if (above || below)
2711 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
2712 { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
2713 (if (cmp == LT_EXPR || cmp == LE_EXPR)
2714 { constant_boolean_node (above ? true : false, type); }
2715 (if (cmp == GT_EXPR || cmp == GE_EXPR)
2716 { constant_boolean_node (above ? false : true, type); }))))))))))))
2717
2718 (for cmp (eq ne)
2719 /* A local variable can never be pointed to by
2720 the default SSA name of an incoming parameter.
2721 SSA names are canonicalized to the second operand position. */
2722 (simplify
2723 (cmp addr@0 SSA_NAME@1)
2724 (if (SSA_NAME_IS_DEFAULT_DEF (@1)
2725 && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL)
2726 (with { tree base = get_base_address (TREE_OPERAND (@0, 0)); }
2727 (if (TREE_CODE (base) == VAR_DECL
2728 && auto_var_in_fn_p (base, current_function_decl))
2729 (if (cmp == NE_EXPR)
2730 { constant_boolean_node (true, type); }
2731 { constant_boolean_node (false, type); }))))))
2732
2733 /* Equality compare simplifications from fold_binary */
2734 (for cmp (eq ne)
2735
2736 /* If we have (A | C) == D where C & ~D != 0, convert this into 0.
2737 Similarly for NE_EXPR. */
2738 (simplify
2739 (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
2740 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
2741 && wi::bit_and_not (@1, @2) != 0)
2742 { constant_boolean_node (cmp == NE_EXPR, type); }))
2743
2744 /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y. */
2745 (simplify
2746 (cmp (bit_xor @0 @1) integer_zerop)
2747 (cmp @0 @1))
2748
2749 /* (X ^ Y) == Y becomes X == 0.
2750 Likewise (X ^ Y) == X becomes Y == 0. */
2751 (simplify
2752 (cmp:c (bit_xor:c @0 @1) @0)
2753 (cmp @1 { build_zero_cst (TREE_TYPE (@1)); }))
2754
2755 /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2). */
2756 (simplify
2757 (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
2758 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
2759 (cmp @0 (bit_xor @1 (convert @2)))))
2760
2761 (simplify
2762 (cmp (convert? addr@0) integer_zerop)
2763 (if (tree_single_nonzero_warnv_p (@0, NULL))
2764 { constant_boolean_node (cmp == NE_EXPR, type); })))
2765
2766 /* If we have (A & C) == C where C is a power of 2, convert this into
2767 (A & C) != 0. Similarly for NE_EXPR. */
2768 (for cmp (eq ne)
2769 icmp (ne eq)
2770 (simplify
2771 (cmp (bit_and@2 @0 integer_pow2p@1) @1)
2772 (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))
2773
2774 /* If we have (A & C) != 0 ? D : 0 where C and D are powers of 2,
2775 convert this into a shift followed by ANDing with D. */
2776 (simplify
2777 (cond
2778 (ne (bit_and @0 integer_pow2p@1) integer_zerop)
2779 integer_pow2p@2 integer_zerop)
2780 (with {
2781 int shift = wi::exact_log2 (@2) - wi::exact_log2 (@1);
2782 }
2783 (if (shift > 0)
2784 (bit_and
2785 (lshift (convert @0) { build_int_cst (integer_type_node, shift); }) @2)
2786 (bit_and
2787 (convert (rshift @0 { build_int_cst (integer_type_node, -shift); })) @2))))
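/* E.g. (x & 4) != 0 ? 16 : 0 becomes (x << 2) & 16, while
   (x & 16) != 0 ? 4 : 0 becomes (x >> 2) & 4. */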
2788
2789 /* If we have (A & C) != 0 where C is the sign bit of A, convert
2790 this into A < 0. Similarly for (A & C) == 0 into A >= 0. */
2791 (for cmp (eq ne)
2792 ncmp (ge lt)
2793 (simplify
2794 (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
2795 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
2796 && (TYPE_PRECISION (TREE_TYPE (@0))
2797 == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@0))))
2798 && element_precision (@2) >= element_precision (@0)
2799 && wi::only_sign_bit_p (@1, element_precision (@0)))
2800 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
2801 (ncmp (convert:stype @0) { build_zero_cst (stype); })))))
2802
2803 /* If we have A < 0 ? C : 0 where C is a power of 2, convert
2804 this into a right shift or sign extension followed by ANDing with C. */
2805 (simplify
2806 (cond
2807 (lt @0 integer_zerop)
2808 integer_pow2p@1 integer_zerop)
2809 (if (!TYPE_UNSIGNED (TREE_TYPE (@0)))
2810 (with {
2811 int shift = element_precision (@0) - wi::exact_log2 (@1) - 1;
2812 }
2813 (if (shift >= 0)
2814 (bit_and
2815 (convert (rshift @0 { build_int_cst (integer_type_node, shift); }))
2816 @1)
2817 /* Otherwise the conversion type must be wider than TREE_TYPE (@0), and
2818 pure sign extension followed by AND with C will achieve the effect. */
2819 (bit_and (convert @0) @1)))))
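/* E.g. for a 32-bit signed x, x < 0 ? 8 : 0 becomes (x >> 28) & 8:
   the arithmetic shift smears the sign bit down to bit 3 and the
   AND keeps only that bit. */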
2820
2821 /* When the addresses are not directly of decls compare base and offset.
2822 This implements some remaining parts of fold_comparison address
2823 comparisons but is still not a complete replacement for it. Still it is
2824 good enough to make fold_stmt not regress when not dispatching to fold_binary. */
2825 (for cmp (simple_comparison)
2826 (simplify
2827 (cmp (convert1?@2 addr@0) (convert2? addr@1))
2828 (with
2829 {
2830 HOST_WIDE_INT off0, off1;
2831 tree base0 = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off0);
2832 tree base1 = get_addr_base_and_unit_offset (TREE_OPERAND (@1, 0), &off1);
2833 if (base0 && TREE_CODE (base0) == MEM_REF)
2834 {
2835 off0 += mem_ref_offset (base0).to_short_addr ();
2836 base0 = TREE_OPERAND (base0, 0);
2837 }
2838 if (base1 && TREE_CODE (base1) == MEM_REF)
2839 {
2840 off1 += mem_ref_offset (base1).to_short_addr ();
2841 base1 = TREE_OPERAND (base1, 0);
2842 }
2843 }
2844 (if (base0 && base1)
2845 (with
2846 {
2847 int equal = 2;
2848 /* Punt in GENERIC on variables with value expressions;
2849 the value expressions might point to fields/elements
2850 of other vars etc. */
2851 if (GENERIC
2852 && ((VAR_P (base0) && DECL_HAS_VALUE_EXPR_P (base0))
2853 || (VAR_P (base1) && DECL_HAS_VALUE_EXPR_P (base1))))
2854 ;
2855 else if (decl_in_symtab_p (base0)
2856 && decl_in_symtab_p (base1))
2857 equal = symtab_node::get_create (base0)
2858 ->equal_address_to (symtab_node::get_create (base1));
2859 else if ((DECL_P (base0)
2860 || TREE_CODE (base0) == SSA_NAME
2861 || TREE_CODE (base0) == STRING_CST)
2862 && (DECL_P (base1)
2863 || TREE_CODE (base1) == SSA_NAME
2864 || TREE_CODE (base1) == STRING_CST))
2865 equal = (base0 == base1);
2866 }
2867 (if (equal == 1
2868 && (cmp == EQ_EXPR || cmp == NE_EXPR
2869 /* If the offsets are equal we can ignore overflow. */
2870 || off0 == off1
2871 || POINTER_TYPE_OVERFLOW_UNDEFINED
2872 /* Or if we compare using pointers to decls or strings. */
2873 || (POINTER_TYPE_P (TREE_TYPE (@2))
2874 && (DECL_P (base0) || TREE_CODE (base0) == STRING_CST))))
2875 (switch
2876 (if (cmp == EQ_EXPR)
2877 { constant_boolean_node (off0 == off1, type); })
2878 (if (cmp == NE_EXPR)
2879 { constant_boolean_node (off0 != off1, type); })
2880 (if (cmp == LT_EXPR)
2881 { constant_boolean_node (off0 < off1, type); })
2882 (if (cmp == LE_EXPR)
2883 { constant_boolean_node (off0 <= off1, type); })
2884 (if (cmp == GE_EXPR)
2885 { constant_boolean_node (off0 >= off1, type); })
2886 (if (cmp == GT_EXPR)
2887 { constant_boolean_node (off0 > off1, type); }))
2888 (if (equal == 0
2889 && DECL_P (base0) && DECL_P (base1)
2890 /* If we compare this as integers, require equal offsets. */
2891 && (!INTEGRAL_TYPE_P (TREE_TYPE (@2))
2892 || off0 == off1))
2893 (switch
2894 (if (cmp == EQ_EXPR)
2895 { constant_boolean_node (false, type); })
2896 (if (cmp == NE_EXPR)
2897 { constant_boolean_node (true, type); })))))))))
2898
2899 /* Simplify pointer equality compares using PTA. */
2900 (for neeq (ne eq)
2901 (simplify
2902 (neeq @0 @1)
2903 (if (POINTER_TYPE_P (TREE_TYPE (@0))
2904 && ptrs_compare_unequal (@0, @1))
2905 { neeq == EQ_EXPR ? boolean_false_node : boolean_true_node; })))
2906
2907 /* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST,
2908 and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST.
2909 Disable the transform if either operand is a pointer to a function.
2910 This broke pr22051-2.c for arm, where function pointer
2911 canonicalization is not wanted. */
2912
2913 (for cmp (ne eq)
2914 (simplify
2915 (cmp (convert @0) INTEGER_CST@1)
2916 (if ((POINTER_TYPE_P (TREE_TYPE (@0)) && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0)))
2917 && INTEGRAL_TYPE_P (TREE_TYPE (@1)))
2918 || (INTEGRAL_TYPE_P (TREE_TYPE (@0)) && POINTER_TYPE_P (TREE_TYPE (@1))
2919 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1)))))
2920 (cmp @0 (convert @1)))))
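/* E.g. for an object pointer p, (intptr_t) p == 0 becomes
   p == (typeof p) 0, keeping the comparison on the pointer side. */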
2921
2922 /* Non-equality compare simplifications from fold_binary */
2923 (for cmp (lt gt le ge)
2924 /* Comparisons with the highest or lowest possible integer of
2925 the specified precision will have known values. */
2926 (simplify
2927 (cmp (convert?@2 @0) INTEGER_CST@1)
2928 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
2929 && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)))
2930 (with
2931 {
2932 tree arg1_type = TREE_TYPE (@1);
2933 unsigned int prec = TYPE_PRECISION (arg1_type);
2934 wide_int max = wi::max_value (arg1_type);
2935 wide_int signed_max = wi::max_value (prec, SIGNED);
2936 wide_int min = wi::min_value (arg1_type);
2937 }
2938 (switch
2939 (if (wi::eq_p (@1, max))
2940 (switch
2941 (if (cmp == GT_EXPR)
2942 { constant_boolean_node (false, type); })
2943 (if (cmp == GE_EXPR)
2944 (eq @2 @1))
2945 (if (cmp == LE_EXPR)
2946 { constant_boolean_node (true, type); })
2947 (if (cmp == LT_EXPR)
2948 (ne @2 @1))))
2949 (if (wi::eq_p (@1, min))
2950 (switch
2951 (if (cmp == LT_EXPR)
2952 { constant_boolean_node (false, type); })
2953 (if (cmp == LE_EXPR)
2954 (eq @2 @1))
2955 (if (cmp == GE_EXPR)
2956 { constant_boolean_node (true, type); })
2957 (if (cmp == GT_EXPR)
2958 (ne @2 @1))))
2959 (if (wi::eq_p (@1, max - 1))
2960 (switch
2961 (if (cmp == GT_EXPR)
2962 (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))
2963 (if (cmp == LE_EXPR)
2964 (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))))
2965 (if (wi::eq_p (@1, min + 1))
2966 (switch
2967 (if (cmp == GE_EXPR)
2968 (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))
2969 (if (cmp == LT_EXPR)
2970 (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))))
2971 (if (wi::eq_p (@1, signed_max)
2972 && TYPE_UNSIGNED (arg1_type)
2973 /* We will flip the signedness of the comparison operator
2974 associated with the mode of @1, so the sign bit is
2975 specified by this mode. Check that @1 is the signed
2976 max associated with this sign bit. */
2977 && prec == GET_MODE_PRECISION (TYPE_MODE (arg1_type))
2978 /* signed_type does not work on pointer types. */
2979 && INTEGRAL_TYPE_P (arg1_type))
2980 /* The following case also applies to X < signed_max+1
2981 and X >= signed_max+1 because of previous transformations. */
2982 (if (cmp == LE_EXPR || cmp == GT_EXPR)
2983 (with { tree st = signed_type_for (arg1_type); }
2984 (if (cmp == LE_EXPR)
2985 (ge (convert:st @0) { build_zero_cst (st); })
2986 (lt (convert:st @0) { build_zero_cst (st); }))))))))))
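/* E.g. for unsigned char x: x <= 254 becomes x != 255, x >= 1
   becomes x != 0, and x > 127 becomes (signed char) x < 0, reusing
   the sign bit of the signed view. */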
2987
2988 (for cmp (unordered ordered unlt unle ungt unge uneq ltgt)
2989 /* If the second operand is NaN, the result is constant. */
2990 (simplify
2991 (cmp @0 REAL_CST@1)
2992 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
2993 && (cmp != LTGT_EXPR || ! flag_trapping_math))
2994 { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR
2995 ? false : true, type); })))
2996
2997 /* bool_var != 0 becomes bool_var. */
2998 (simplify
2999 (ne @0 integer_zerop)
3000 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
3001 && types_match (type, TREE_TYPE (@0)))
3002 (non_lvalue @0)))
3003 /* bool_var == 1 becomes bool_var. */
3004 (simplify
3005 (eq @0 integer_onep)
3006 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
3007 && types_match (type, TREE_TYPE (@0)))
3008 (non_lvalue @0)))
3009 /* Do not handle
3010 bool_var == 0 becomes !bool_var or
3011 bool_var != 1 becomes !bool_var
3012 here, because that is only good in an assignment context; as long
3013 as we require a tcc_comparison in GIMPLE_CONDs we'd
3014 replace if (x == 0) with tem = ~x; if (tem != 0), which is
3015 clearly less optimal and which we'll transform again in forwprop. */
3016
3017 /* When one argument is a constant, overflow detection can be simplified.
3018 Currently restricted to single use so as not to interfere too much with
3019 ADD_OVERFLOW detection in tree-ssa-math-opts.c.
3020 A + CST CMP A -> A CMP' CST' */
3021 (for cmp (lt le ge gt)
3022 out (gt gt le le)
3023 (simplify
3024 (cmp:c (plus@2 @0 INTEGER_CST@1) @0)
3025 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
3026 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
3027 && wi::ne_p (@1, 0)
3028 && single_use (@2))
3029 (out @0 { wide_int_to_tree (TREE_TYPE (@0), wi::max_value
3030 (TYPE_PRECISION (TREE_TYPE (@0)), UNSIGNED) - @1); }))))
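/* E.g. for unsigned x, the overflow check x + 3 < x becomes
   x > UINT_MAX - 3. */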
3031
3032 /* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
3033 However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.c
3034 expects the long form, so we restrict the transformation for now. */
3035 (for cmp (gt le)
3036 (simplify
3037 (cmp:c (minus@2 @0 @1) @0)
3038 (if (single_use (@2)
3039 && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3040 && TYPE_UNSIGNED (TREE_TYPE (@0))
3041 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
3042 (cmp @1 @0))))
3043
3044 /* Testing for overflow is unnecessary if we already know the result. */
3045 /* A - B > A */
3046 (for cmp (gt le)
3047 out (ne eq)
3048 (simplify
3049 (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0)
3050 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
3051 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
3052 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
3053 /* A + B < A */
3054 (for cmp (lt ge)
3055 out (ne eq)
3056 (simplify
3057 (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0)
3058 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
3059 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
3060 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
3061
3062 /* For unsigned operands, -1 / B < A checks whether A * B would overflow.
3063 Simplify it to __builtin_mul_overflow (A, B, <unused>). */
3064 (for cmp (lt ge)
3065 out (ne eq)
3066 (simplify
3067 (cmp:c (trunc_div:s integer_all_onesp @1) @0)
3068 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0)))
3069 (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
3070 (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
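/* So for unsigned a and b, the classic guard UINT_MAX / b < a is
   rewritten to test the overflow flag of IFN_MUL_OVERFLOW (a, b)
   directly. */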
3071
3072 /* Simplification of math builtins. These rules must all be optimizations
3073 as well as IL simplifications. If there is a possibility that the new
3074 form could be a pessimization, the rule should go in the canonicalization
3075 section that follows this one.
3076
3077 Rules can generally go in this section if they satisfy one of
3078 the following:
3079
3080 - the rule describes an identity
3081
3082 - the rule replaces calls with something as simple as addition or
3083 multiplication
3084
3085 - the rule contains unary calls only and simplifies the surrounding
3086 arithmetic. (The idea here is to exclude non-unary calls in which
3087 one operand is constant and in which the call is known to be cheap
3088 when the operand has that value.) */
3089
3090 (if (flag_unsafe_math_optimizations)
3091 /* Simplify sqrt(x) * sqrt(x) -> x. */
3092 (simplify
3093 (mult (SQRT@1 @0) @1)
3094 (if (!HONOR_SNANS (type))
3095 @0))
3096
3097 /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y). */
3098 (for root (SQRT CBRT)
3099 (simplify
3100 (mult (root:s @0) (root:s @1))
3101 (root (mult @0 @1))))
3102
3103 /* Simplify expN(x) * expN(y) -> expN(x+y). */
3104 (for exps (EXP EXP2 EXP10 POW10)
3105 (simplify
3106 (mult (exps:s @0) (exps:s @1))
3107 (exps (plus @0 @1))))
3108
3109 /* Simplify a/root(b/c) into a*root(c/b). */
3110 (for root (SQRT CBRT)
3111 (simplify
3112 (rdiv @0 (root:s (rdiv:s @1 @2)))
3113 (mult @0 (root (rdiv @2 @1)))))
3114
3115 /* Simplify x/expN(y) into x*expN(-y). */
3116 (for exps (EXP EXP2 EXP10 POW10)
3117 (simplify
3118 (rdiv @0 (exps:s @1))
3119 (mult @0 (exps (negate @1)))))
3120
3121 (for logs (LOG LOG2 LOG10 LOG10)
3122 exps (EXP EXP2 EXP10 POW10)
3123 /* logN(expN(x)) -> x. */
3124 (simplify
3125 (logs (exps @0))
3126 @0)
3127 /* expN(logN(x)) -> x. */
3128 (simplify
3129 (exps (logs @0))
3130 @0))
3131
3132 /* Optimize logN(func()) for various exponential functions. We
3133 want to determine the value "x" and the power "exponent" in
3134 order to transform logN(x**exponent) into exponent*logN(x). */
3135 (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10)
3136 exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2)
3137 (simplify
3138 (logs (exps @0))
3139 (if (SCALAR_FLOAT_TYPE_P (type))
3140 (with {
3141 tree x;
3142 switch (exps)
3143 {
3144 CASE_CFN_EXP:
3145 /* Prepare to do logN(exp(exponent)) -> exponent*logN(e). */
3146 x = build_real_truncate (type, dconst_e ());
3147 break;
3148 CASE_CFN_EXP2:
3149 /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2). */
3150 x = build_real (type, dconst2);
3151 break;
3152 CASE_CFN_EXP10:
3153 CASE_CFN_POW10:
3154 /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10). */
3155 {
3156 REAL_VALUE_TYPE dconst10;
3157 real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
3158 x = build_real (type, dconst10);
3159 }
3160 break;
3161 default:
3162 gcc_unreachable ();
3163 }
3164 }
3165 (mult (logs { x; }) @0)))))
3166
3167 (for logs (LOG LOG
3168 LOG2 LOG2
3169 LOG10 LOG10)
3170 exps (SQRT CBRT)
3171 (simplify
3172 (logs (exps @0))
3173 (if (SCALAR_FLOAT_TYPE_P (type))
3174 (with {
3175 tree x;
3176 switch (exps)
3177 {
3178 CASE_CFN_SQRT:
3179 /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x). */
3180 x = build_real (type, dconsthalf);
3181 break;
3182 CASE_CFN_CBRT:
3183 /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x). */
3184 x = build_real_truncate (type, dconst_third ());
3185 break;
3186 default:
3187 gcc_unreachable ();
3188 }
3189 }
3190 (mult { x; } (logs @0))))))
3191
3192 /* logN(pow(x,exponent)) -> exponent*logN(x). */
3193 (for logs (LOG LOG2 LOG10)
3194 pows (POW)
3195 (simplify
3196 (logs (pows @0 @1))
3197 (mult @1 (logs @0))))
3198
3199 (for sqrts (SQRT)
3200 cbrts (CBRT)
3201 pows (POW)
3202 exps (EXP EXP2 EXP10 POW10)
3203 /* sqrt(expN(x)) -> expN(x*0.5). */
3204 (simplify
3205 (sqrts (exps @0))
3206 (exps (mult @0 { build_real (type, dconsthalf); })))
3207 /* cbrt(expN(x)) -> expN(x/3). */
3208 (simplify
3209 (cbrts (exps @0))
3210 (exps (mult @0 { build_real_truncate (type, dconst_third ()); })))
3211 /* pow(expN(x), y) -> expN(x*y). */
3212 (simplify
3213 (pows (exps @0) @1)
3214 (exps (mult @0 @1))))

/* tan(atan(x)) -> x. */
(for tans (TAN)
     atans (ATAN)
 (simplify
  (tans (atans @0))
  @0)))

/* cabs(x+0i) or cabs(0+xi) -> abs(x). */
(simplify
 (CABS (complex:C @0 real_zerop@1))
 (abs @0))

/* trunc(trunc(x)) -> trunc(x), etc. */
(for fns (TRUNC FLOOR CEIL ROUND NEARBYINT RINT)
 (simplify
  (fns (fns @0))
  (fns @0)))
/* f(x) -> x if x is integer valued and f does nothing for such values. */
(for fns (TRUNC FLOOR CEIL ROUND NEARBYINT RINT)
 (simplify
  (fns integer_valued_real_p@0)
  @0))

/* hypot(x,0) and hypot(0,x) -> abs(x). */
(simplify
 (HYPOT:c @0 real_zerop@1)
 (abs @0))

/* pow(1,x) -> 1. */
(simplify
 (POW real_onep@0 @1)
 @0)

(simplify
 /* copysign(x,x) -> x. */
 (COPYSIGN @0 @0)
 @0)

(simplify
 /* copysign(x,y) -> fabs(x) if y is nonnegative. */
 (COPYSIGN @0 tree_expr_nonnegative_p@1)
 (abs @0))

(for scale (LDEXP SCALBN SCALBLN)
 /* ldexp(0, x) -> 0. */
 (simplify
  (scale real_zerop@0 @1)
  @0)
 /* ldexp(x, 0) -> x. */
 (simplify
  (scale @0 integer_zerop@1)
  @0)
 /* ldexp(x, y) -> x if x is +-Inf or NaN. */
 (simplify
  (scale REAL_CST@0 @1)
  (if (!real_isfinite (TREE_REAL_CST_PTR (@0)))
   @0)))
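/* These ldexp identities are exact, so no -ffast-math flag is involved;
   for example (hypothetical source, for illustration only)

     double f (double x) { return __builtin_ldexp (x, 0); }

   folds to x, and __builtin_ldexp (0.0, n) folds to 0.0.  */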

/* Canonicalization of sequences of math builtins.  These rules represent
   IL simplifications but are not necessarily optimizations.

   The sincos pass is responsible for picking "optimal" implementations
   of math builtins, which may be more complicated and can sometimes go
   the other way, e.g. converting pow into a sequence of sqrts.
   We only want to do these canonicalizations before the pass has run. */

(if (flag_unsafe_math_optimizations && canonicalize_math_p ())
 /* Simplify tan(x) * cos(x) -> sin(x). */
 (simplify
  (mult:c (TAN:s @0) (COS:s @0))
  (SIN @0))

 /* Simplify x * pow(x,c) -> pow(x,c+1). */
 (simplify
  (mult:c @0 (POW:s @0 REAL_CST@1))
  (if (!TREE_OVERFLOW (@1))
   (POW @0 (plus @1 { build_one_cst (type); }))))

 /* Simplify sin(x) / cos(x) -> tan(x). */
 (simplify
  (rdiv (SIN:s @0) (COS:s @0))
  (TAN @0))

 /* Simplify cos(x) / sin(x) -> 1 / tan(x). */
 (simplify
  (rdiv (COS:s @0) (SIN:s @0))
  (rdiv { build_one_cst (type); } (TAN @0)))

 /* Simplify sin(x) / tan(x) -> cos(x). */
 (simplify
  (rdiv (SIN:s @0) (TAN:s @0))
  (if (! HONOR_NANS (@0)
       && ! HONOR_INFINITIES (@0))
   (COS @0)))

 /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */
 (simplify
  (rdiv (TAN:s @0) (SIN:s @0))
  (if (! HONOR_NANS (@0)
       && ! HONOR_INFINITIES (@0))
   (rdiv { build_one_cst (type); } (COS @0))))
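 /* A hypothetical example of the quotient rules above: with
    -funsafe-math-optimizations, before the sincos pass has run,

      double f (double x) { return __builtin_sin (x) / __builtin_cos (x); }

    is canonicalized to __builtin_tan (x).  */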

 /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */
 (simplify
  (mult (POW:s @0 @1) (POW:s @0 @2))
  (POW @0 (plus @1 @2)))

 /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */
 (simplify
  (mult (POW:s @0 @1) (POW:s @2 @1))
  (POW (mult @0 @2) @1))

 /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y). */
 (simplify
  (mult (POWI:s @0 @1) (POWI:s @2 @1))
  (POWI (mult @0 @2) @1))

 /* Simplify pow(x,c) / x -> pow(x,c-1). */
 (simplify
  (rdiv (POW:s @0 REAL_CST@1) @0)
  (if (!TREE_OVERFLOW (@1))
   (POW @0 (minus @1 { build_one_cst (type); }))))

 /* Simplify x / pow (y,z) -> x * pow(y,-z). */
 (simplify
  (rdiv @0 (POW:s @1 @2))
  (mult @0 (POW @1 (negate @2))))

 (for sqrts (SQRT)
      cbrts (CBRT)
      pows (POW)
  /* sqrt(sqrt(x)) -> pow(x,1/4). */
  (simplify
   (sqrts (sqrts @0))
   (pows @0 { build_real (type, dconst_quarter ()); }))
  /* sqrt(cbrt(x)) -> pow(x,1/6). */
  (simplify
   (sqrts (cbrts @0))
   (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
  /* cbrt(sqrt(x)) -> pow(x,1/6). */
  (simplify
   (cbrts (sqrts @0))
   (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
  /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative. */
  (simplify
   (cbrts (cbrts tree_expr_nonnegative_p@0))
   (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
  /* sqrt(pow(x,y)) -> pow(|x|,y*0.5). */
  (simplify
   (sqrts (pows @0 @1))
   (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
  /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative. */
  (simplify
   (cbrts (pows tree_expr_nonnegative_p@0 @1))
   (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
  /* pow(sqrt(x),y) -> pow(x,y*0.5). */
  (simplify
   (pows (sqrts @0) @1)
   (pows @0 (mult @1 { build_real (type, dconsthalf); })))
  /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative. */
  (simplify
   (pows (cbrts tree_expr_nonnegative_p@0) @1)
   (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
  /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative. */
  (simplify
   (pows (pows tree_expr_nonnegative_p@0 @1) @2)
   (pows @0 (mult @1 @2))))
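 /* A hypothetical example of the root/pow compositions above: under
    -funsafe-math-optimizations,

      double f (double x) { return __builtin_sqrt (__builtin_sqrt (x)); }

    becomes __builtin_pow (x, 0.25); the sincos pass may later expand such
    pows back into cheaper sqrt sequences.  */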

 /* cabs(x+xi) -> fabs(x)*sqrt(2). */
 (simplify
  (CABS (complex @0 @0))
  (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))

 /* hypot(x,x) -> fabs(x)*sqrt(2). */
 (simplify
  (HYPOT @0 @0)
  (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))

 /* cexp(x+yi) -> exp(x)*cexpi(y). */
 (for cexps (CEXP)
      exps (EXP)
      cexpis (CEXPI)
  (simplify
   (cexps compositional_complex@0)
   (if (targetm.libc_has_function (function_c99_math_complex))
    (complex
     (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0))))
     (mult @1 (imagpart @2)))))))
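/* A hypothetical example of the cexp split above, assuming a libc that
   provides the C99 complex math functions:

     _Complex double f (double x, double y)
     { return __builtin_cexp (__builtin_complex (x, y)); }

   becomes __builtin_exp (x) scaling the real and imaginary parts of
   __builtin_cexpi (y).  */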

(if (canonicalize_math_p ())
 /* floor(x) -> trunc(x) if x is nonnegative. */
 (for floors (FLOOR)
      truncs (TRUNC)
  (simplify
   (floors tree_expr_nonnegative_p@0)
   (truncs @0))))

(match double_value_p
 @0
 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
(for froms (BUILT_IN_TRUNCL
            BUILT_IN_FLOORL
            BUILT_IN_CEILL
            BUILT_IN_ROUNDL
            BUILT_IN_NEARBYINTL
            BUILT_IN_RINTL)
     tos (BUILT_IN_TRUNC
          BUILT_IN_FLOOR
          BUILT_IN_CEIL
          BUILT_IN_ROUND
          BUILT_IN_NEARBYINT
          BUILT_IN_RINT)
 /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double. */
 (if (optimize && canonicalize_math_p ())
  (simplify
   (froms (convert double_value_p@0))
   (convert (tos @0)))))

(match float_value_p
 @0
 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
(for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
            BUILT_IN_FLOORL BUILT_IN_FLOOR
            BUILT_IN_CEILL BUILT_IN_CEIL
            BUILT_IN_ROUNDL BUILT_IN_ROUND
            BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
            BUILT_IN_RINTL BUILT_IN_RINT)
     tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
          BUILT_IN_FLOORF BUILT_IN_FLOORF
          BUILT_IN_CEILF BUILT_IN_CEILF
          BUILT_IN_ROUNDF BUILT_IN_ROUNDF
          BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
          BUILT_IN_RINTF BUILT_IN_RINTF)
 /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc.,
    if x is a float. */
 (if (optimize && canonicalize_math_p ()
      && targetm.libc_has_function (function_c99_misc))
  (simplify
   (froms (convert float_value_p@0))
   (convert (tos @0)))))
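/* A hypothetical example of the narrowing above (assuming a libc with the
   C99 functions): since truncation is exact at every width,

     long double f (float x) { return __builtin_truncl ((long double) x); }

   is rewritten as (long double) __builtin_truncf (x).  */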

(for froms (XFLOORL XCEILL XROUNDL XRINTL)
     tos (XFLOOR XCEIL XROUND XRINT)
 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double. */
 (if (optimize && canonicalize_math_p ())
  (simplify
   (froms (convert double_value_p@0))
   (tos @0))))

(for froms (XFLOORL XCEILL XROUNDL XRINTL
            XFLOOR XCEIL XROUND XRINT)
     tos (XFLOORF XCEILF XROUNDF XRINTF)
 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc.,
    if x is a float. */
 (if (optimize && canonicalize_math_p ())
  (simplify
   (froms (convert float_value_p@0))
   (tos @0))))

(if (canonicalize_math_p ())
 /* xfloor(x) -> fix_trunc(x) if x is nonnegative. */
 (for floors (IFLOOR LFLOOR LLFLOOR)
  (simplify
   (floors tree_expr_nonnegative_p@0)
   (fix_trunc @0))))

(if (canonicalize_math_p ())
 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued. */
 (for fns (IFLOOR LFLOOR LLFLOOR
           ICEIL LCEIL LLCEIL
           IROUND LROUND LLROUND)
  (simplify
   (fns integer_valued_real_p@0)
   (fix_trunc @0)))
 (if (!flag_errno_math)
  /* xrint(x) -> fix_trunc(x), etc., if x is integer valued. */
  (for rints (IRINT LRINT LLRINT)
   (simplify
    (rints integer_valued_real_p@0)
    (fix_trunc @0)))))

(if (canonicalize_math_p ())
 (for ifn (IFLOOR ICEIL IROUND IRINT)
      lfn (LFLOOR LCEIL LROUND LRINT)
      llfn (LLFLOOR LLCEIL LLROUND LLRINT)
  /* Canonicalize iround (x) to lround (x) on ILP32 targets where
     sizeof (int) == sizeof (long). */
  (if (TYPE_PRECISION (integer_type_node)
       == TYPE_PRECISION (long_integer_type_node))
   (simplify
    (ifn @0)
    (lfn:long_integer_type_node @0)))
  /* Canonicalize llround (x) to lround (x) on LP64 targets where
     sizeof (long long) == sizeof (long). */
  (if (TYPE_PRECISION (long_long_integer_type_node)
       == TYPE_PRECISION (long_integer_type_node))
   (simplify
    (llfn @0)
    (lfn:long_integer_type_node @0)))))
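/* A hypothetical example: on an LP64 target the second rule above
   canonicalizes

     long long f (double x) { return __builtin_llround (x); }

   to __builtin_lround (x), so later passes see a single spelling of the
   operation whichever variant was written.  */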

/* cproj(x) -> x if we're ignoring infinities. */
(simplify
 (CPROJ @0)
 (if (!HONOR_INFINITIES (type))
  @0))

/* If the real part is inf and the imag part is known to be
   nonnegative, return (inf + 0i). */
(simplify
 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
  { build_complex_inf (type, false); }))

/* If the imag part is inf, return (inf+I*copysign(0,imag)). */
(simplify
 (CPROJ (complex @0 REAL_CST@1))
 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
  { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))

(for pows (POW)
     sqrts (SQRT)
     cbrts (CBRT)
 (simplify
  (pows @0 REAL_CST@1)
  (with {
    const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
    REAL_VALUE_TYPE tmp;
   }
   (switch
    /* pow(x,0) -> 1. */
    (if (real_equal (value, &dconst0))
     { build_real (type, dconst1); })
    /* pow(x,1) -> x. */
    (if (real_equal (value, &dconst1))
     @0)
    /* pow(x,-1) -> 1/x. */
    (if (real_equal (value, &dconstm1))
     (rdiv { build_real (type, dconst1); } @0))
    /* pow(x,0.5) -> sqrt(x). */
    (if (flag_unsafe_math_optimizations
         && canonicalize_math_p ()
         && real_equal (value, &dconsthalf))
     (sqrts @0))
    /* pow(x,1/3) -> cbrt(x). */
    (if (flag_unsafe_math_optimizations
         && canonicalize_math_p ()
         && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
             real_equal (value, &tmp)))
     (cbrts @0))))))
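/* The exact exponents above need no flags; for example (hypothetical
   source, for illustration)

     double f (double x) { return __builtin_pow (x, 1.0); }

   always folds to x, whereas pow(x,0.5) becomes __builtin_sqrt (x) only
   under -funsafe-math-optimizations.  */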

/* powi(1,x) -> 1. */
(simplify
 (POWI real_onep@0 @1)
 @0)

(simplify
 (POWI @0 INTEGER_CST@1)
 (switch
  /* powi(x,0) -> 1. */
  (if (wi::eq_p (@1, 0))
   { build_real (type, dconst1); })
  /* powi(x,1) -> x. */
  (if (wi::eq_p (@1, 1))
   @0)
  /* powi(x,-1) -> 1/x. */
  (if (wi::eq_p (@1, -1))
   (rdiv { build_real (type, dconst1); } @0))))
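/* A hypothetical example: __builtin_powi takes an int exponent, and

     double f (double x) { return __builtin_powi (x, -1); }

   folds via the switch above to 1.0 / x.  */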

/* Narrowing of arithmetic and logical operations.

   These are conceptually similar to the transformations performed for
   the C/C++ front-ends by shorten_binary_op and shorten_compare.  Long
   term we want to move all that code out of the front-ends into here. */

/* If we have a narrowing conversion of an arithmetic operation where
   both operands are widening conversions from the same type as the outer
   narrowing conversion, convert the innermost operands to a suitable
   unsigned type (to avoid introducing undefined behavior), perform the
   operation in that type, and convert the result to the desired type. */
(for op (plus minus)
 (simplify
  (convert (op:s (convert@2 @0) (convert?@3 @1)))
  (if (INTEGRAL_TYPE_P (type)
       /* We check for type compatibility between @0 and @1 below,
          so there's no need to check that @1/@3 are integral types. */
       && INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (TREE_TYPE (@2))
       /* The precision of the type of each operand must match the
          precision of the mode of each operand, similarly for the
          result. */
       && (TYPE_PRECISION (TREE_TYPE (@0))
           == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@0))))
       && (TYPE_PRECISION (TREE_TYPE (@1))
           == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@1))))
       && TYPE_PRECISION (type) == GET_MODE_PRECISION (TYPE_MODE (type))
       /* The inner conversion must be a widening conversion. */
       && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
       && types_match (@0, type)
       && (types_match (@0, @1)
           /* Or the second operand is a constant integer, possibly one
              produced by valueization of a converted constant. */
           || TREE_CODE (@1) == INTEGER_CST))
   (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
    (op @0 (convert @1))
    (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
     (convert (op (convert:utype @0)
                  (convert:utype @1))))))))
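/* A hypothetical example of the narrowing above, assuming 16-bit short
   and 32-bit int:

     short f (short a, short b) { return (short) ((int) a + (int) b); }

   is rewritten to add in unsigned short, where wrap-around is well
   defined, and convert only the result back to short.  */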

/* This is another case of narrowing, specifically when there's an outer
   BIT_AND_EXPR which masks off bits outside the type of the innermost
   operands.  Like the previous case we have to convert the operands
   to unsigned types to avoid introducing undefined behavior for the
   arithmetic operation. */
(for op (minus plus)
 (simplify
  (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
  (if (INTEGRAL_TYPE_P (type)
       /* We check for type compatibility between @0 and @1 below,
          so there's no need to check that @1/@3 are integral types. */
       && INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (TREE_TYPE (@2))
       /* The precision of the type of each operand must match the
          precision of the mode of each operand, similarly for the
          result. */
       && (TYPE_PRECISION (TREE_TYPE (@0))
           == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@0))))
       && (TYPE_PRECISION (TREE_TYPE (@1))
           == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@1))))
       && TYPE_PRECISION (type) == GET_MODE_PRECISION (TYPE_MODE (type))
       /* The inner conversion must be a widening conversion. */
       && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
       && types_match (@0, @1)
       && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
           <= TYPE_PRECISION (TREE_TYPE (@0)))
       && (wi::bit_and (@4, wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
                                      true, TYPE_PRECISION (type))) == 0))
   (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
    (with { tree ntype = TREE_TYPE (@0); }
     (convert (bit_and (op @0 @1) (convert:ntype @4))))
    (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
     (convert (bit_and (op (convert:utype @0) (convert:utype @1))
                       (convert:utype @4))))))))
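/* A hypothetical example: in

     int f (unsigned char a, unsigned char b)
     { return ((int) a - (int) b) & 0xff; }

   the mask discards all bits beyond unsigned char, so the subtraction can
   be performed directly in unsigned char and masked afterwards.  */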

/* Transform (@0 < @1 and @0 < @2) to use min,
   (@0 > @1 and @0 > @2) to use max. */
(for op (lt le gt ge)
     ext (min min max max)
 (simplify
  (bit_and (op:cs @0 @1) (op:cs @0 @2))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && TREE_CODE (@0) != INTEGER_CST)
   (op @0 (ext @1 @2)))))
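/* A hypothetical example, assuming earlier folding has already turned the
   short-circuit && into a BIT_AND_EXPR of the two comparisons:

     int f (int x, int a, int b) { return x < a && x < b; }

   becomes x < MIN_EXPR <a, b>, saving one comparison.  */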

(simplify
 /* signbit(x) -> 0 if x is nonnegative. */
 (SIGNBIT tree_expr_nonnegative_p@0)
 { integer_zero_node; })

(simplify
 /* signbit(x) -> x<0 if x doesn't have signed zeros. */
 (SIGNBIT @0)
 (if (!HONOR_SIGNED_ZEROS (@0))
  (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); }))))

/* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1. */
(for cmp (eq ne)
 (for op (plus minus)
      rop (minus plus)
  (simplify
   (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
   (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
        && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
        && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0))
        && !TYPE_SATURATING (TREE_TYPE (@0)))
    (with { tree res = int_const_binop (rop, @2, @1); }
     (if (TREE_OVERFLOW (res))
      { constant_boolean_node (cmp == NE_EXPR, type); }
      (if (single_use (@3))
       (cmp @0 { res; }))))))))
(for cmp (lt le gt ge)
 (for op (plus minus)
      rop (minus plus)
  (simplify
   (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
   (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
        && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
    (with { tree res = int_const_binop (rop, @2, @1); }
     (if (TREE_OVERFLOW (res))
      {
        fold_overflow_warning (("assuming signed overflow does not occur "
                                "when simplifying conditional to constant"),
                               WARN_STRICT_OVERFLOW_CONDITIONAL);
        bool less = cmp == LE_EXPR || cmp == LT_EXPR;
        /* wi::ges_p (@2, 0) should be sufficient for a signed type. */
        bool ovf_high = wi::lt_p (@1, 0, TYPE_SIGN (TREE_TYPE (@1)))
                        != (op == MINUS_EXPR);
        constant_boolean_node (less == ovf_high, type);
      }
      (if (single_use (@3))
       (with
        {
          fold_overflow_warning (("assuming signed overflow does not occur "
                                  "when changing X +- C1 cmp C2 to "
                                  "X cmp C2 -+ C1"),
                                 WARN_STRICT_OVERFLOW_COMPARISON);
        }
        (cmp @0 { res; })))))))))
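/* A hypothetical example: for signed int, where overflow is undefined,

     int f (int x) { return x + 5 < 10; }

   becomes x < 5 by the rules above; if the adjusted constant itself
   overflows, the comparison folds to a constant instead.  */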

/* Canonicalizations of BIT_FIELD_REFs. */

(simplify
 (BIT_FIELD_REF @0 @1 @2)
 (switch
  (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE
       && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
   (switch
    (if (integer_zerop (@2))
     (view_convert (realpart @0)))
    (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
     (view_convert (imagpart @0)))))
  (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
       && INTEGRAL_TYPE_P (type)
       /* On GIMPLE this should only apply to register arguments. */
       && (! GIMPLE || is_gimple_reg (@0))
       /* A bit-field-ref that references the full argument can be stripped. */
       && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0
            && integer_zerop (@2))
           /* Low-parts can be reduced to integral conversions.
              ???  The following doesn't work for PDP endian. */
           || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
               /* Don't even think about BITS_BIG_ENDIAN. */
               && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0
               && TYPE_PRECISION (type) % BITS_PER_UNIT == 0
               && compare_tree_int (@2, (BYTES_BIG_ENDIAN
                                         ? (TYPE_PRECISION (TREE_TYPE (@0))
                                            - TYPE_PRECISION (type))
                                         : 0)) == 0)))
   (convert @0))))
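/* For illustration (GIMPLE-level, hypothetical): for _Complex float z,
   BIT_FIELD_REF <z, 32, 0> is canonicalized above to the realpart of z
   and BIT_FIELD_REF <z, 32, 32> to its imagpart.  */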

/* Simplify vector extracts. */

(simplify
 (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2)
 (if (VECTOR_TYPE_P (TREE_TYPE (@0))
      && (types_match (type, TREE_TYPE (TREE_TYPE (@0)))
          || (VECTOR_TYPE_P (type)
              && types_match (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
  (with
   {
     tree ctor = (TREE_CODE (@0) == SSA_NAME
                  ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
     tree eltype = TREE_TYPE (TREE_TYPE (ctor));
     unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype));
     unsigned HOST_WIDE_INT n = tree_to_uhwi (@1);
     unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2);
   }
   (if (n != 0
        && (idx % width) == 0
        && (n % width) == 0
        && ((idx + n) / width) <= TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor)))
    (with
     {
       idx = idx / width;
       n = n / width;
       /* Constructor elements can be subvectors. */
       unsigned HOST_WIDE_INT k = 1;
       if (CONSTRUCTOR_NELTS (ctor) != 0)
         {
           tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value);
           if (TREE_CODE (cons_elem) == VECTOR_TYPE)
             k = TYPE_VECTOR_SUBPARTS (cons_elem);
         }
     }
     (switch
      /* We keep an exact subset of the constructor elements. */
      (if ((idx % k) == 0 && (n % k) == 0)
       (if (CONSTRUCTOR_NELTS (ctor) == 0)
        { build_constructor (type, NULL); }
        (with
         {
           idx /= k;
           n /= k;
         }
         (if (n == 1)
          (if (idx < CONSTRUCTOR_NELTS (ctor))
           { CONSTRUCTOR_ELT (ctor, idx)->value; }
           { build_zero_cst (type); })
          {
            vec<constructor_elt, va_gc> *vals;
            vec_alloc (vals, n);
            for (unsigned i = 0;
                 i < n && idx + i < CONSTRUCTOR_NELTS (ctor); ++i)
              CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE,
                                      CONSTRUCTOR_ELT (ctor, idx + i)->value);
            build_constructor (type, vals);
          }))))
      /* The bitfield references a single constructor element. */
      (if (idx + n <= (idx / k + 1) * k)
       (switch
        (if (CONSTRUCTOR_NELTS (ctor) <= idx / k)
         { build_zero_cst (type); })
        (if (n == k)
         { CONSTRUCTOR_ELT (ctor, idx / k)->value; })
        (BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / k)->value; }
         @1 { bitsize_int ((idx % k) * width); })))))))))
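/* For illustration (GIMPLE-level, hypothetical): if v is a four-element
   vector of 32-bit ints built by a CONSTRUCTOR {a, b, c, d}, then
   BIT_FIELD_REF <v, 32, 64> folds above to the element c, and a 64-bit
   vector-typed extract at offset 0 folds to the subvector {a, b}.  */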