1 /* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
2 This file is consumed by genmatch which produces gimple-match.c
3 and generic-match.c from it.
4
5 Copyright (C) 2014-2017 Free Software Foundation, Inc.
6 Contributed by Richard Biener <rguenther@suse.de>
7 and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
15
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
24
25
26 /* Generic tree predicates we inherit. */
27 (define_predicates
28 integer_onep integer_zerop integer_all_onesp integer_minus_onep
29 integer_each_onep integer_truep integer_nonzerop
30 real_zerop real_onep real_minus_onep
31 zerop
32 CONSTANT_CLASS_P
33 tree_expr_nonnegative_p
34 tree_expr_nonzero_p
35 integer_valued_real_p
36 integer_pow2p
37 HONOR_NANS)
38
39 /* Operator lists. */
40 (define_operator_list tcc_comparison
41 lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
42 (define_operator_list inverted_tcc_comparison
43 ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
44 (define_operator_list inverted_tcc_comparison_with_nans
45 unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
46 (define_operator_list swapped_tcc_comparison
47 gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
48 (define_operator_list simple_comparison lt le eq ne ge gt)
49 (define_operator_list swapped_simple_comparison gt ge eq ne le lt)
50
51 #include "cfn-operators.pd"
52
53 /* Define operand lists for math rounding functions {,i,l,ll}FN,
54 where the versions prefixed with "i" return an int, those prefixed with
55 "l" return a long and those prefixed with "ll" return a long long.
56
57 Also define operand lists:
58
59 X<FN>F for all float functions, in the order i, l, ll
60 X<FN> for all double functions, in the same order
61 X<FN>L for all long double functions, in the same order. */
62 #define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
63 (define_operator_list X##FN##F BUILT_IN_I##FN##F \
64 BUILT_IN_L##FN##F \
65 BUILT_IN_LL##FN##F) \
66 (define_operator_list X##FN BUILT_IN_I##FN \
67 BUILT_IN_L##FN \
68 BUILT_IN_LL##FN) \
69 (define_operator_list X##FN##L BUILT_IN_I##FN##L \
70 BUILT_IN_L##FN##L \
71 BUILT_IN_LL##FN##L)
72
73 DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
74 DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
75 DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
76 DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
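/* As an illustration, DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR) above expands to
   the three operator lists XFLOORF, XFLOOR and XFLOORL, covering
   BUILT_IN_{I,L,LL}FLOORF, BUILT_IN_{I,L,LL}FLOOR and BUILT_IN_{I,L,LL}FLOORL
   respectively.  */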
77
78 /* As opposed to convert?, this still creates a single pattern, so
79 it is not a suitable replacement for convert? in all cases. */
80 (match (nop_convert @0)
81 (convert @0)
82 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))))
83 (match (nop_convert @0)
84 (view_convert @0)
85 (if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0))
86 && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0))
87 && tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
88 /* This one has to be last, or it shadows the others. */
89 (match (nop_convert @0)
90 @0)
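/* Informally: nop_convert matches either no conversion at all, a scalar
   conversion that preserves the bit pattern (e.g. between int and unsigned
   int of the same precision), or a vector view_convert between vector types
   with the same number of equally sized lanes.  */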
91
92 /* Simplifications of operations with one constant operand and
93 simplifications to constants or single values. */
94
95 (for op (plus pointer_plus minus bit_ior bit_xor)
96 (simplify
97 (op @0 integer_zerop)
98 (non_lvalue @0)))
99
100 /* 0 +p index -> (type)index */
101 (simplify
102 (pointer_plus integer_zerop @1)
103 (non_lvalue (convert @1)))
104
105 /* See if ARG1 is zero and X + ARG1 reduces to X.
106 Likewise if the operands are reversed. */
107 (simplify
108 (plus:c @0 real_zerop@1)
109 (if (fold_real_zero_addition_p (type, @1, 0))
110 (non_lvalue @0)))
111
112 /* See if ARG1 is zero and X - ARG1 reduces to X. */
113 (simplify
114 (minus @0 real_zerop@1)
115 (if (fold_real_zero_addition_p (type, @1, 1))
116 (non_lvalue @0)))
117
118 /* Simplify x - x.
119 This is unsafe for certain floats even in non-IEEE formats.
120    In IEEE, it is unsafe because it gives the wrong result for NaNs
       (NaN minus NaN is NaN, not zero).
121 Also note that operand_equal_p is always false if an operand
122 is volatile. */
123 (simplify
124 (minus @0 @0)
125 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
126 { build_zero_cst (type); }))
127
128 (simplify
129 (mult @0 integer_zerop@1)
130 @1)
131
132 /* Maybe fold x * 0 to 0. The expressions aren't the same
133 when x is NaN, since x * 0 is also NaN. Nor are they the
134 same in modes with signed zeros, since multiplying a
135 negative value by 0 gives -0, not +0. */
136 (simplify
137 (mult @0 real_zerop@1)
138 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
139 @1))
140
141 /* In IEEE floating point, x*1 is not equivalent to x for snans.
142 Likewise for complex arithmetic with signed zeros. */
143 (simplify
144 (mult @0 real_onep)
145 (if (!HONOR_SNANS (type)
146 && (!HONOR_SIGNED_ZEROS (type)
147 || !COMPLEX_FLOAT_TYPE_P (type)))
148 (non_lvalue @0)))
149
150 /* Transform x * -1.0 into -x. */
151 (simplify
152 (mult @0 real_minus_onep)
153 (if (!HONOR_SNANS (type)
154 && (!HONOR_SIGNED_ZEROS (type)
155 || !COMPLEX_FLOAT_TYPE_P (type)))
156 (negate @0)))
157
158 (for cmp (gt ge lt le)
159 outp (convert convert negate negate)
160 outn (negate negate convert convert)
161 /* Transform (X > 0.0 ? 1.0 : -1.0) into copysign(1, X). */
162 /* Transform (X >= 0.0 ? 1.0 : -1.0) into copysign(1, X). */
163 /* Transform (X < 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
164 /* Transform (X <= 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
165 (simplify
166 (cond (cmp @0 real_zerop) real_onep@1 real_minus_onep)
167 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
168 && types_match (type, TREE_TYPE (@0)))
169 (switch
170 (if (types_match (type, float_type_node))
171 (BUILT_IN_COPYSIGNF @1 (outp @0)))
172 (if (types_match (type, double_type_node))
173 (BUILT_IN_COPYSIGN @1 (outp @0)))
174 (if (types_match (type, long_double_type_node))
175 (BUILT_IN_COPYSIGNL @1 (outp @0))))))
176 /* Transform (X > 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
177 /* Transform (X >= 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
178 /* Transform (X < 0.0 ? -1.0 : 1.0) into copysign(1,X). */
179 /* Transform (X <= 0.0 ? -1.0 : 1.0) into copysign(1,X). */
180 (simplify
181 (cond (cmp @0 real_zerop) real_minus_onep real_onep@1)
182 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
183 && types_match (type, TREE_TYPE (@0)))
184 (switch
185 (if (types_match (type, float_type_node))
186 (BUILT_IN_COPYSIGNF @1 (outn @0)))
187 (if (types_match (type, double_type_node))
188 (BUILT_IN_COPYSIGN @1 (outn @0)))
189 (if (types_match (type, long_double_type_node))
190 (BUILT_IN_COPYSIGNL @1 (outn @0)))))))
191
192 /* Transform X * copysign (1.0, X) into abs(X). */
193 (simplify
194 (mult:c @0 (COPYSIGN real_onep @0))
195 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
196 (abs @0)))
197
198 /* Transform X * copysign (1.0, -X) into -abs(X). */
199 (simplify
200 (mult:c @0 (COPYSIGN real_onep (negate @0)))
201 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
202 (negate (abs @0))))
203
204 /* Transform copysign (CST, X) into copysign (ABS(CST), X). */
205 (simplify
206 (COPYSIGN REAL_CST@0 @1)
207 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@0)))
208 (COPYSIGN (negate @0) @1)))
209
210 /* X * 1, X / 1 -> X. */
211 (for op (mult trunc_div ceil_div floor_div round_div exact_div)
212 (simplify
213 (op @0 integer_onep)
214 (non_lvalue @0)))
215
216 /* (A / (1 << B)) -> (A >> B).
217    Only for unsigned (or provably nonnegative) A.  For negative A, this
218    would not preserve rounding toward zero.
219 For example: (-1 / ( 1 << B)) != -1 >> B. */
220 (simplify
221 (trunc_div @0 (lshift integer_onep@1 @2))
222 (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
223 && (!VECTOR_TYPE_P (type)
224 || target_supports_op_p (type, RSHIFT_EXPR, optab_vector)
225 || target_supports_op_p (type, RSHIFT_EXPR, optab_scalar)))
226 (rshift @0 @2)))
227
228 /* Preserve explicit divisions by 0: the C++ front-end wants to detect
229 undefined behavior in constexpr evaluation, and assuming that the division
230 traps enables better optimizations than these anyway. */
231 (for div (trunc_div ceil_div floor_div round_div exact_div)
232 /* 0 / X is always zero. */
233 (simplify
234 (div integer_zerop@0 @1)
235 /* But not for 0 / 0 so that we can get the proper warnings and errors. */
236 (if (!integer_zerop (@1))
237 @0))
238 /* X / -1 is -X. */
239 (simplify
240 (div @0 integer_minus_onep@1)
241 (if (!TYPE_UNSIGNED (type))
242 (negate @0)))
243 /* X / X is one. */
244 (simplify
245 (div @0 @0)
246 /* But not for 0 / 0 so that we can get the proper warnings and errors.
247 And not for _Fract types where we can't build 1. */
248 (if (!integer_zerop (@0) && !ALL_FRACT_MODE_P (TYPE_MODE (type)))
249 { build_one_cst (type); }))
250 /* X / abs (X) is X < 0 ? -1 : 1. */
251 (simplify
252 (div:C @0 (abs @0))
253 (if (INTEGRAL_TYPE_P (type)
254 && TYPE_OVERFLOW_UNDEFINED (type))
255 (cond (lt @0 { build_zero_cst (type); })
256 { build_minus_one_cst (type); } { build_one_cst (type); })))
257 /* X / -X is -1. */
258 (simplify
259 (div:C @0 (negate @0))
260 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
261 && TYPE_OVERFLOW_UNDEFINED (type))
262 { build_minus_one_cst (type); })))
263
264 /* For unsigned integral types, FLOOR_DIV_EXPR is the same as
265 TRUNC_DIV_EXPR. Rewrite into the latter in this case. */
266 (simplify
267 (floor_div @0 @1)
268 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
269 && TYPE_UNSIGNED (type))
270 (trunc_div @0 @1)))
271
272 /* Combine two successive divisions. Note that combining ceil_div
273 and floor_div is trickier and combining round_div even more so. */
274 (for div (trunc_div exact_div)
275 (simplify
276 (div (div @0 INTEGER_CST@1) INTEGER_CST@2)
277 (with {
278 bool overflow_p;
279 wide_int mul = wi::mul (@1, @2, TYPE_SIGN (type), &overflow_p);
280 }
281 (if (!overflow_p)
282 (div @0 { wide_int_to_tree (type, mul); })
283 (if (TYPE_UNSIGNED (type)
284 || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
285 { build_zero_cst (type); })))))
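/* Illustration (a sketch, assuming 32-bit unsigned arithmetic):
   (x / 6u) / 7u becomes x / 42u since 6 * 7 does not overflow, whereas for
   (x / 65536u) / 65536u the constant product overflows the type and, the
   type being unsigned, the whole expression folds to 0.  */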
286
287 /* Combine successive multiplications. Similar to above, but handling
288 overflow is different. */
289 (simplify
290 (mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
291 (with {
292 bool overflow_p;
293 wide_int mul = wi::mul (@1, @2, TYPE_SIGN (type), &overflow_p);
294 }
295 /* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
296 otherwise undefined overflow implies that @0 must be zero. */
297 (if (!overflow_p || TYPE_OVERFLOW_WRAPS (type))
298 (mult @0 { wide_int_to_tree (type, mul); }))))
299
300 /* Optimize A / A to 1.0 if we don't care about
301 NaNs or Infinities. */
302 (simplify
303 (rdiv @0 @0)
304 (if (FLOAT_TYPE_P (type)
305 && ! HONOR_NANS (type)
306 && ! HONOR_INFINITIES (type))
307 { build_one_cst (type); }))
308
309 /* Optimize -A / A to -1.0 if we don't care about
310 NaNs or Infinities. */
311 (simplify
312 (rdiv:C @0 (negate @0))
313 (if (FLOAT_TYPE_P (type)
314 && ! HONOR_NANS (type)
315 && ! HONOR_INFINITIES (type))
316 { build_minus_one_cst (type); }))
317
318 /* PR71078: x / abs(x) -> copysign (1.0, x) */
319 (simplify
320 (rdiv:C (convert? @0) (convert? (abs @0)))
321 (if (SCALAR_FLOAT_TYPE_P (type)
322 && ! HONOR_NANS (type)
323 && ! HONOR_INFINITIES (type))
324 (switch
325 (if (types_match (type, float_type_node))
326 (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
327 (if (types_match (type, double_type_node))
328 (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
329 (if (types_match (type, long_double_type_node))
330 (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))
331
332 /* In IEEE floating point, x/1 is not equivalent to x for snans. */
333 (simplify
334 (rdiv @0 real_onep)
335 (if (!HONOR_SNANS (type))
336 (non_lvalue @0)))
337
338 /* In IEEE floating point, x/-1 is not equivalent to -x for snans. */
339 (simplify
340 (rdiv @0 real_minus_onep)
341 (if (!HONOR_SNANS (type))
342 (negate @0)))
343
344 (if (flag_reciprocal_math)
345 /* Convert (A/B)/C to A/(B*C) */
346 (simplify
347 (rdiv (rdiv:s @0 @1) @2)
348 (rdiv @0 (mult @1 @2)))
349
350 /* Convert A/(B/C) to (A/B)*C */
351 (simplify
352 (rdiv @0 (rdiv:s @1 @2))
353 (mult (rdiv @0 @1) @2)))
354
355 /* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
356 (for div (trunc_div ceil_div floor_div round_div exact_div)
357 (simplify
358 (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
359 (if (integer_pow2p (@2)
360 && tree_int_cst_sgn (@2) > 0
361 && wi::add (@2, @1) == 0
362 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
363 (rshift (convert @0) { build_int_cst (integer_type_node,
364 wi::exact_log2 (@2)); }))))
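/* E.g. for unsigned x, (x & -16) / 16 becomes x >> 4: -16 is the two's
   complement mask ~15, so the AND merely clears the low bits that the shift
   would discard anyway.  */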
365
366 /* If ARG1 is a constant, we can convert this to a multiply by the
367 reciprocal. This does not have the same rounding properties,
368 so only do this if -freciprocal-math. We can actually
369 always safely do it if ARG1 is a power of two, but it's hard to
370 tell if it is or not in a portable manner. */
371 (for cst (REAL_CST COMPLEX_CST VECTOR_CST)
372 (simplify
373 (rdiv @0 cst@1)
374 (if (optimize)
375 (if (flag_reciprocal_math
376 && !real_zerop (@1))
377 (with
378 { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
379 (if (tem)
380 (mult @0 { tem; } )))
381 (if (cst != COMPLEX_CST)
382 (with { tree inverse = exact_inverse (type, @1); }
383 (if (inverse)
384 (mult @0 { inverse; } ))))))))
385
386 (for mod (ceil_mod floor_mod round_mod trunc_mod)
387 /* 0 % X is always zero. */
388 (simplify
389 (mod integer_zerop@0 @1)
390 /* But not for 0 % 0 so that we can get the proper warnings and errors. */
391 (if (!integer_zerop (@1))
392 @0))
393 /* X % 1 is always zero. */
394 (simplify
395 (mod @0 integer_onep)
396 { build_zero_cst (type); })
397 /* X % -1 is zero. */
398 (simplify
399 (mod @0 integer_minus_onep@1)
400 (if (!TYPE_UNSIGNED (type))
401 { build_zero_cst (type); }))
402 /* X % X is zero. */
403 (simplify
404 (mod @0 @0)
405 /* But not for 0 % 0 so that we can get the proper warnings and errors. */
406 (if (!integer_zerop (@0))
407 { build_zero_cst (type); }))
408 /* (X % Y) % Y is just X % Y. */
409 (simplify
410 (mod (mod@2 @0 @1) @1)
411 @2)
412 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2. */
413 (simplify
414 (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
415 (if (ANY_INTEGRAL_TYPE_P (type)
416 && TYPE_OVERFLOW_UNDEFINED (type)
417 && wi::multiple_of_p (@1, @2, TYPE_SIGN (type)))
418 { build_zero_cst (type); })))
419
420 /* X % -C is the same as X % C. */
421 (simplify
422 (trunc_mod @0 INTEGER_CST@1)
423 (if (TYPE_SIGN (type) == SIGNED
424 && !TREE_OVERFLOW (@1)
425 && wi::neg_p (@1)
426 && !TYPE_OVERFLOW_TRAPS (type)
427 /* Avoid this transformation if C is INT_MIN, i.e. C == -C. */
428 && !sign_bit_p (@1, @1))
429 (trunc_mod @0 (negate @1))))
430
431 /* X % -Y is the same as X % Y. */
432 (simplify
433 (trunc_mod @0 (convert? (negate @1)))
434 (if (INTEGRAL_TYPE_P (type)
435 && !TYPE_UNSIGNED (type)
436 && !TYPE_OVERFLOW_TRAPS (type)
437 && tree_nop_conversion_p (type, TREE_TYPE (@1))
438 /* Avoid this transformation if X might be INT_MIN or
439 Y might be -1, because we would then change valid
440 INT_MIN % -(-1) into invalid INT_MIN % -1. */
441 && (expr_not_equal_to (@0, TYPE_MIN_VALUE (type))
442 || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
443 (TREE_TYPE (@1))))))
444 (trunc_mod @0 (convert @1))))
445
446 /* X - (X / Y) * Y is the same as X % Y. */
447 (simplify
448 (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1)))
449 (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
450 (convert (trunc_mod @0 @1))))
451
452 /* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
453 i.e. "X % C" into "X & (C - 1)", if X and C are positive.
454 Also optimize A % (C << N) where C is a power of 2,
455 to A & ((C << N) - 1). */
456 (match (power_of_two_cand @1)
457 INTEGER_CST@1)
458 (match (power_of_two_cand @1)
459 (lshift INTEGER_CST@1 @2))
460 (for mod (trunc_mod floor_mod)
461 (simplify
462 (mod @0 (convert?@3 (power_of_two_cand@1 @2)))
463 (if ((TYPE_UNSIGNED (type)
464 || tree_expr_nonnegative_p (@0))
465 && tree_nop_conversion_p (type, TREE_TYPE (@3))
466 && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
467 (bit_and @0 (convert (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))))
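/* The nonnegativity requirement matters: with 32-bit int, (-3) % 16 is -3
   while (-3) & 15 is 13, so the bit_and form is only used when the left
   operand is unsigned or known nonnegative.  */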
468
469 /* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF. */
470 (simplify
471 (trunc_div (mult @0 integer_pow2p@1) @1)
472 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
473 (bit_and @0 { wide_int_to_tree
474 (type, wi::mask (TYPE_PRECISION (type) - wi::exact_log2 (@1),
475 false, TYPE_PRECISION (type))); })))
476
477 /* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1. */
478 (simplify
479 (mult (trunc_div @0 integer_pow2p@1) @1)
480 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
481 (bit_and @0 (negate @1))))
482
483 /* Simplify (t * 2) / 2 -> t.  */
484 (for div (trunc_div ceil_div floor_div round_div exact_div)
485 (simplify
486 (div (mult @0 @1) @1)
487 (if (ANY_INTEGRAL_TYPE_P (type)
488 && TYPE_OVERFLOW_UNDEFINED (type))
489 @0)))
490
491 (for op (negate abs)
492 /* Simplify cos(-x) and cos(|x|) -> cos(x). Similarly for cosh. */
493 (for coss (COS COSH)
494 (simplify
495 (coss (op @0))
496 (coss @0)))
497 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer. */
498 (for pows (POW)
499 (simplify
500 (pows (op @0) REAL_CST@1)
501 (with { HOST_WIDE_INT n; }
502 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
503 (pows @0 @1)))))
504 /* Likewise for powi. */
505 (for pows (POWI)
506 (simplify
507 (pows (op @0) INTEGER_CST@1)
508 (if (wi::bit_and (@1, 1) == 0)
509 (pows @0 @1))))
510 /* Strip negate and abs from both operands of hypot. */
511 (for hypots (HYPOT)
512 (simplify
513 (hypots (op @0) @1)
514 (hypots @0 @1))
515 (simplify
516 (hypots @0 (op @1))
517 (hypots @0 @1)))
518 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y). */
519 (for copysigns (COPYSIGN)
520 (simplify
521 (copysigns (op @0) @1)
522 (copysigns @0 @1))))
523
524 /* abs(x)*abs(x) -> x*x. Should be valid for all types. */
525 (simplify
526 (mult (abs@1 @0) @1)
527 (mult @0 @0))
528
529 /* cos(copysign(x, y)) -> cos(x). Similarly for cosh. */
530 (for coss (COS COSH)
531 copysigns (COPYSIGN)
532 (simplify
533 (coss (copysigns @0 @1))
534 (coss @0)))
535
536 /* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer. */
537 (for pows (POW)
538 copysigns (COPYSIGN)
539 (simplify
540 (pows (copysigns @0 @2) REAL_CST@1)
541 (with { HOST_WIDE_INT n; }
542 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
543 (pows @0 @1)))))
544 /* Likewise for powi. */
545 (for pows (POWI)
546 copysigns (COPYSIGN)
547 (simplify
548 (pows (copysigns @0 @2) INTEGER_CST@1)
549 (if (wi::bit_and (@1, 1) == 0)
550 (pows @0 @1))))
551
552 (for hypots (HYPOT)
553 copysigns (COPYSIGN)
554 /* hypot(copysign(x, y), z) -> hypot(x, z). */
555 (simplify
556 (hypots (copysigns @0 @1) @2)
557 (hypots @0 @2))
558 /* hypot(x, copysign(y, z)) -> hypot(x, y). */
559 (simplify
560 (hypots @0 (copysigns @1 @2))
561 (hypots @0 @1)))
562
563 /* copysign(x, CST) -> [-]abs (x). */
564 (for copysigns (COPYSIGN)
565 (simplify
566 (copysigns @0 REAL_CST@1)
567 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
568 (negate (abs @0))
569 (abs @0))))
570
571 /* copysign(copysign(x, y), z) -> copysign(x, z). */
572 (for copysigns (COPYSIGN)
573 (simplify
574 (copysigns (copysigns @0 @1) @2)
575 (copysigns @0 @2)))
576
577 /* copysign(x,y)*copysign(x,y) -> x*x. */
578 (for copysigns (COPYSIGN)
579 (simplify
580 (mult (copysigns@2 @0 @1) @2)
581 (mult @0 @0)))
582
583 /* ccos(-x) -> ccos(x). Similarly for ccosh. */
584 (for ccoss (CCOS CCOSH)
585 (simplify
586 (ccoss (negate @0))
587 (ccoss @0)))
588
589 /* cabs(-x) and cabs(conj(x)) -> cabs(x).  */
590 (for ops (conj negate)
591 (for cabss (CABS)
592 (simplify
593 (cabss (ops @0))
594 (cabss @0))))
595
596 /* Fold (a * (1 << b)) into (a << b) */
597 (simplify
598 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
599 (if (! FLOAT_TYPE_P (type)
600 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
601 (lshift @0 @2)))
602
603 /* Fold (C1/X)*C2 into (C1*C2)/X. */
604 (simplify
605 (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
606 (if (flag_associative_math
607 && single_use (@3))
608 (with
609 { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
610 (if (tem)
611 (rdiv { tem; } @1)))))
612
613 /* Convert C1/(X*C2) into (C1/C2)/X */
614 (simplify
615 (rdiv REAL_CST@0 (mult @1 REAL_CST@2))
616 (if (flag_reciprocal_math)
617 (with
618 { tree tem = const_binop (RDIV_EXPR, type, @0, @2); }
619 (if (tem)
620 (rdiv { tem; } @1)))))
621
622 /* Simplify ~X & X as zero. */
623 (simplify
624 (bit_and:c (convert? @0) (convert? (bit_not @0)))
625 { build_zero_cst (type); })
626
627 /* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b); */
628 (simplify
629 (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep))
630 (if (TYPE_UNSIGNED (type))
631 (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1)))))
632
633 /* PR35691: Transform
634 (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0.
635 (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0. */
636 (for bitop (bit_and bit_ior)
637 cmp (eq ne)
638 (simplify
639 (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop))
640 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
641 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
642 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
643 (cmp (bit_ior @0 (convert @1)) @2))))
644
645 /* Fold (A & ~B) - (A & B) into (A ^ B) - B. */
646 (simplify
647 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
648 (minus (bit_xor @0 @1) @1))
649 (simplify
650 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
651 (if (wi::bit_not (@2) == @1)
652 (minus (bit_xor @0 @1) @1)))
653
654 /* Fold (A & B) - (A & ~B) into B - (A ^ B). */
655 (simplify
656 (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1)))
657 (minus @1 (bit_xor @0 @1)))
658
659 /* Simplify (X & ~Y) |^+ (~X & Y) -> X ^ Y. */
660 (for op (bit_ior bit_xor plus)
661 (simplify
662 (op (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1))
663 (bit_xor @0 @1))
664 (simplify
665 (op:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
666 (if (wi::bit_not (@2) == @1)
667 (bit_xor @0 @1))))
668
669 /* PR53979: Transform ((a ^ b) | a) -> (a | b) */
670 (simplify
671 (bit_ior:c (bit_xor:c @0 @1) @0)
672 (bit_ior @0 @1))
673
674 /* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0. */
675 #if GIMPLE
676 (simplify
677 (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
678 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
679 && (get_nonzero_bits (@0) & wi::bit_not (@1)) == 0)
680 (bit_xor @0 @1)))
681 #endif
682
683 /* X % Y is smaller than Y (for unsigned X and Y).  */
684 (for cmp (lt ge)
685 (simplify
686 (cmp (trunc_mod @0 @1) @1)
687 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
688 { constant_boolean_node (cmp == LT_EXPR, type); })))
689 (for cmp (gt le)
690 (simplify
691 (cmp @1 (trunc_mod @0 @1))
692 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
693 { constant_boolean_node (cmp == GT_EXPR, type); })))
694
695 /* x | ~0 -> ~0 */
696 (simplify
697 (bit_ior @0 integer_all_onesp@1)
698 @1)
699
700 /* x | 0 -> x */
701 (simplify
702 (bit_ior @0 integer_zerop)
703 @0)
704
705 /* x & 0 -> 0 */
706 (simplify
707 (bit_and @0 integer_zerop@1)
708 @1)
709
710 /* ~x | x -> -1 */
711 /* ~x ^ x -> -1 */
712 /* ~x + x -> -1 */
713 (for op (bit_ior bit_xor plus)
714 (simplify
715 (op:c (convert? @0) (convert? (bit_not @0)))
716 (convert { build_all_ones_cst (TREE_TYPE (@0)); })))
717
718 /* x ^ x -> 0 */
719 (simplify
720 (bit_xor @0 @0)
721 { build_zero_cst (type); })
722
723 /* Canonicalize X ^ ~0 to ~X. */
724 (simplify
725 (bit_xor @0 integer_all_onesp@1)
726 (bit_not @0))
727
728 /* x & ~0 -> x */
729 (simplify
730 (bit_and @0 integer_all_onesp)
731 (non_lvalue @0))
732
733 /* x & x -> x, x | x -> x */
734 (for bitop (bit_and bit_ior)
735 (simplify
736 (bitop @0 @0)
737 (non_lvalue @0)))
738
739 /* x & C -> x if we know that x & ~C == 0. */
740 #if GIMPLE
741 (simplify
742 (bit_and SSA_NAME@0 INTEGER_CST@1)
743 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
744 && (get_nonzero_bits (@0) & wi::bit_not (@1)) == 0)
745 @0))
746 #endif
747
748 /* x + (x & 1) -> (x + 1) & ~1 */
749 (simplify
750 (plus:c @0 (bit_and:s @0 integer_onep@1))
751 (bit_and (plus @0 @1) (bit_not @1)))
752
753 /* x & ~(x & y) -> x & ~y */
754 /* x | ~(x | y) -> x | ~y */
755 (for bitop (bit_and bit_ior)
756 (simplify
757 (bitop:c @0 (bit_not (bitop:cs @0 @1)))
758 (bitop @0 (bit_not @1))))
759
760 /* (x | y) & ~x -> y & ~x */
761 /* (x & y) | ~x -> y | ~x */
762 (for bitop (bit_and bit_ior)
763 rbitop (bit_ior bit_and)
764 (simplify
765 (bitop:c (rbitop:c @0 @1) (bit_not@2 @0))
766 (bitop @1 @2)))
767
768 /* (x & y) ^ (x | y) -> x ^ y */
769 (simplify
770 (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1))
771 (bit_xor @0 @1))
772
773 /* (x ^ y) ^ (x | y) -> x & y */
774 (simplify
775 (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1))
776 (bit_and @0 @1))
777
778 /* (x & y) + (x ^ y) -> x | y */
779 /* (x & y) | (x ^ y) -> x | y */
780 /* (x & y) ^ (x ^ y) -> x | y */
781 (for op (plus bit_ior bit_xor)
782 (simplify
783 (op:c (bit_and @0 @1) (bit_xor @0 @1))
784 (bit_ior @0 @1)))
785
786 /* (x & y) + (x | y) -> x + y */
787 (simplify
788 (plus:c (bit_and @0 @1) (bit_ior @0 @1))
789 (plus @0 @1))
790
791 /* (x + y) - (x | y) -> x & y */
792 (simplify
793 (minus (plus @0 @1) (bit_ior @0 @1))
794 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
795 && !TYPE_SATURATING (type))
796 (bit_and @0 @1)))
797
798 /* (x + y) - (x & y) -> x | y */
799 (simplify
800 (minus (plus @0 @1) (bit_and @0 @1))
801 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
802 && !TYPE_SATURATING (type))
803 (bit_ior @0 @1)))
804
805 /* (x | y) - (x ^ y) -> x & y */
806 (simplify
807 (minus (bit_ior @0 @1) (bit_xor @0 @1))
808 (bit_and @0 @1))
809
810 /* (x | y) - (x & y) -> x ^ y */
811 (simplify
812 (minus (bit_ior @0 @1) (bit_and @0 @1))
813 (bit_xor @0 @1))
814
815 /* (x | y) & ~(x & y) -> x ^ y */
816 (simplify
817 (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1)))
818 (bit_xor @0 @1))
819
820 /* (x | y) & (~x ^ y) -> x & y */
821 (simplify
822 (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0)))
823 (bit_and @0 @1))
824
825 /* ~x & ~y -> ~(x | y)
826 ~x | ~y -> ~(x & y) */
827 (for op (bit_and bit_ior)
828 rop (bit_ior bit_and)
829 (simplify
830 (op (convert1? (bit_not @0)) (convert2? (bit_not @1)))
831 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
832 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
833 (bit_not (rop (convert @0) (convert @1))))))
834
835 /* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing
836 with a constant, and the two constants have no bits in common,
837 we should treat this as a BIT_IOR_EXPR since this may produce more
838 simplifications. */
839 (for op (bit_xor plus)
840 (simplify
841 (op (convert1? (bit_and@4 @0 INTEGER_CST@1))
842 (convert2? (bit_and@5 @2 INTEGER_CST@3)))
843 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
844 && tree_nop_conversion_p (type, TREE_TYPE (@2))
845 && wi::bit_and (@1, @3) == 0)
846 (bit_ior (convert @4) (convert @5)))))
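/* E.g. (x & 0xF0) + (y & 0x0F) can be treated as (x & 0xF0) | (y & 0x0F):
   the two masked values have no set bits in common, so the addition can
   never produce a carry and behaves exactly like IOR (likewise for XOR).  */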
847
848 /* (X | Y) ^ X -> Y & ~X.  */
849 (simplify
850 (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0))
851 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
852 (convert (bit_and @1 (bit_not @0)))))
853
854 /* Convert ~X ^ ~Y to X ^ Y. */
855 (simplify
856 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1)))
857 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
858 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
859 (bit_xor (convert @0) (convert @1))))
860
861 /* Convert ~X ^ C to X ^ ~C. */
862 (simplify
863 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1)
864 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
865 (bit_xor (convert @0) (bit_not @1))))
866
867 /* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y. */
868 (for opo (bit_and bit_xor)
869 opi (bit_xor bit_and)
870 (simplify
871 (opo:c (opi:c @0 @1) @1)
872 (bit_and (bit_not @0) @1)))
873
874 /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
875 operands are another bit-wise operation with a common input. If so,
876 distribute the bit operations to save an operation and possibly two if
877 constants are involved. For example, convert
878 (A | B) & (A | C) into A | (B & C)
879 Further simplification will occur if B and C are constants. */
880 (for op (bit_and bit_ior bit_xor)
881 rop (bit_ior bit_and bit_and)
882 (simplify
883 (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2)))
884 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
885 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
886 (rop (convert @0) (op (convert @1) (convert @2))))))
887
888 /* Some simple reassociation for bit operations, also handled in reassoc. */
889 /* (X & Y) & Y -> X & Y
890 (X | Y) | Y -> X | Y */
891 (for op (bit_and bit_ior)
892 (simplify
893 (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1))
894 @2))
895 /* (X ^ Y) ^ Y -> X */
896 (simplify
897 (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1))
898 (convert @0))
899 /* (X & Y) & (X & Z) -> (X & Y) & Z
900 (X | Y) | (X | Z) -> (X | Y) | Z */
901 (for op (bit_and bit_ior)
902 (simplify
903 (op (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2)))
904 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
905 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
906 (if (single_use (@5) && single_use (@6))
907 (op @3 (convert @2))
908 (if (single_use (@3) && single_use (@4))
909 (op (convert @1) @5))))))
910 /* (X ^ Y) ^ (X ^ Z) -> Y ^ Z */
911 (simplify
912 (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2)))
913 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
914 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
915 (bit_xor (convert @1) (convert @2))))
916
917 (simplify
918 (abs (abs@1 @0))
919 @1)
920 (simplify
921 (abs (negate @0))
922 (abs @0))
923 (simplify
924 (abs tree_expr_nonnegative_p@0)
925 @0)
926
927 /* A few cases of fold-const.c negate_expr_p predicate. */
928 (match negate_expr_p
929 INTEGER_CST
930 (if ((INTEGRAL_TYPE_P (type)
931 && TYPE_UNSIGNED (type))
932 || (!TYPE_OVERFLOW_SANITIZED (type)
933 && may_negate_without_overflow_p (t)))))
934 (match negate_expr_p
935 FIXED_CST)
936 (match negate_expr_p
937 (negate @0)
938 (if (!TYPE_OVERFLOW_SANITIZED (type))))
939 (match negate_expr_p
940 REAL_CST
941 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)))))
942 /* VECTOR_CST handling of non-wrapping types would recurse in unsupported
943 ways. */
944 (match negate_expr_p
945 VECTOR_CST
946 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))
947
948 /* (-A) * (-B) -> A * B */
949 (simplify
950 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1))
951 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
952 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
953 (mult (convert @0) (convert (negate @1)))))
954
955 /* -(A + B) -> (-B) - A. */
956 (simplify
957 (negate (plus:c @0 negate_expr_p@1))
958 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
959 && !HONOR_SIGNED_ZEROS (element_mode (type)))
960 (minus (negate @1) @0)))
961
962 /* A - B -> A + (-B) if B is easily negatable. */
963 (simplify
964 (minus @0 negate_expr_p@1)
965 (if (!FIXED_POINT_TYPE_P (type))
966 (plus @0 (negate @1))))
967
968 /* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
969 when profitable.
970 For bitwise binary operations apply operand conversions to the
971 binary operation result instead of to the operands. This allows
972 to combine successive conversions and bitwise binary operations.
973 We combine the above two cases by using a conditional convert. */
974 (for bitop (bit_and bit_ior bit_xor)
975 (simplify
976 (bitop (convert @0) (convert? @1))
977 (if (((TREE_CODE (@1) == INTEGER_CST
978 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
979 && int_fits_type_p (@1, TREE_TYPE (@0)))
980 || types_match (@0, @1))
981 /* ??? This transform conflicts with fold-const.c doing
982 Convert (T)(x & c) into (T)x & (T)c, if c is an integer
983 constants (if x has signed type, the sign bit cannot be set
984 in c). This folds extension into the BIT_AND_EXPR.
985 Restrict it to GIMPLE to avoid endless recursions. */
986 && (bitop != BIT_AND_EXPR || GIMPLE)
987 && (/* That's a good idea if the conversion widens the operand, thus
988 after hoisting the conversion the operation will be narrower. */
989 TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
990 /* It's also a good idea if the conversion is to a non-integer
991 mode. */
992 || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
993 /* Or if the precision of TO is not the same as the precision
994 of its mode. */
995 || !type_has_mode_precision_p (type)))
996 (convert (bitop @0 (convert @1))))))
997
998 (for bitop (bit_and bit_ior)
999 rbitop (bit_ior bit_and)
1000 /* (x | y) & x -> x */
1001 /* (x & y) | x -> x */
1002 (simplify
1003 (bitop:c (rbitop:c @0 @1) @0)
1004 @0)
1005 /* (~x | y) & x -> x & y */
1006 /* (~x & y) | x -> x | y */
1007 (simplify
1008 (bitop:c (rbitop:c (bit_not @0) @1) @0)
1009 (bitop @0 @1)))
1010
1011 /* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
1012 (simplify
1013 (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
1014 (bit_ior (bit_and @0 @2) (bit_and @1 @2)))
1015
1016 /* Combine successive equal operations with constants. */
1017 (for bitop (bit_and bit_ior bit_xor)
1018 (simplify
1019 (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
1020 (bitop @0 (bitop @1 @2))))
1021
1022 /* Try simple folding for X op !X, and X op X with the help
1023 of the truth_valued_p and logical_inverted_value predicates. */
1024 (match truth_valued_p
1025 @0
1026 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
1027 (for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
1028 (match truth_valued_p
1029 (op @0 @1)))
1030 (match truth_valued_p
1031 (truth_not @0))
1032
1033 (match (logical_inverted_value @0)
1034 (truth_not @0))
1035 (match (logical_inverted_value @0)
1036 (bit_not truth_valued_p@0))
1037 (match (logical_inverted_value @0)
1038 (eq @0 integer_zerop))
1039 (match (logical_inverted_value @0)
1040 (ne truth_valued_p@0 integer_truep))
1041 (match (logical_inverted_value @0)
1042 (bit_xor truth_valued_p@0 integer_truep))
1043
1044 /* X & !X -> 0. */
1045 (simplify
1046 (bit_and:c @0 (logical_inverted_value @0))
1047 { build_zero_cst (type); })
1048 /* X | !X and X ^ !X -> 1, if X is truth-valued.  */
1049 (for op (bit_ior bit_xor)
1050 (simplify
1051 (op:c truth_valued_p@0 (logical_inverted_value @0))
1052 { constant_boolean_node (true, type); }))
1053 /* X ==/!= !X is false/true. */
1054 (for op (eq ne)
1055 (simplify
1056 (op:c truth_valued_p@0 (logical_inverted_value @0))
1057 { constant_boolean_node (op == NE_EXPR ? true : false, type); }))
1058
1059 /* ~~x -> x */
1060 (simplify
1061 (bit_not (bit_not @0))
1062 @0)
1063
1064 /* Convert ~ (-A) to A - 1. */
1065 (simplify
1066 (bit_not (convert? (negate @0)))
1067 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1068 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
1069 (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); }))))
1070
1071 /* Convert ~ (A - 1) or ~ (A + -1) to -A. */
1072 (simplify
1073 (bit_not (convert? (minus @0 integer_each_onep)))
1074 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1075 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
1076 (convert (negate @0))))
1077 (simplify
1078 (bit_not (convert? (plus @0 integer_all_onesp)))
1079 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1080 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
1081 (convert (negate @0))))
1082
1083 /* Part of converting ~(X ^ Y) to ~X ^ Y or X ^ ~Y when ~X or ~Y simplify.  */
1084 (simplify
1085 (bit_not (convert? (bit_xor @0 INTEGER_CST@1)))
1086 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1087 (convert (bit_xor @0 (bit_not @1)))))
1088 (simplify
1089 (bit_not (convert? (bit_xor:c (bit_not @0) @1)))
1090 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1091 (convert (bit_xor @0 @1))))
1092
1093 /* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
1094 (simplify
1095 (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2))
1096 (bit_xor (bit_and (bit_xor @0 @1) @2) @0))
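/* This is the branchless bit-select idiom: ((x ^ y) & m) ^ x picks the bits
   of y where m is 1 and the bits of x where m is 0, using three operations
   instead of the four in (x & ~m) | (y & m).  */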
1097
1098 /* Fold A - (A & B) into ~B & A. */
1099 (simplify
1100 (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1)))
1101 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1102 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
1103 (convert (bit_and (bit_not @1) @0))))
1104
1105 /* (m1 CMP m2) * d -> (m1 CMP m2) ? d : 0 */
1106 (for cmp (gt lt ge le)
1107 (simplify
1108 (mult (convert (cmp @0 @1)) @2)
1109 (cond (cmp @0 @1) @2 { build_zero_cst (type); })))
1110
1111 /* For integral types with undefined overflow and C != 0 fold
1112 x * C EQ/NE y * C into x EQ/NE y. */
1113 (for cmp (eq ne)
1114 (simplify
1115 (cmp (mult:c @0 @1) (mult:c @2 @1))
1116 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1117 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1118 && tree_expr_nonzero_p (@1))
1119 (cmp @0 @2))))
1120
1121 /* For integral types with wrapping overflow and C odd fold
1122 x * C EQ/NE y * C into x EQ/NE y. */
1123 (for cmp (eq ne)
1124 (simplify
1125 (cmp (mult @0 INTEGER_CST@1) (mult @2 @1))
1126 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1127 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
1128 && (TREE_INT_CST_LOW (@1) & 1) != 0)
1129 (cmp @0 @2))))
1130
1131 /* For integral types with undefined overflow and C != 0 fold
1132 x * C RELOP y * C into:
1133
1134 x RELOP y for nonnegative C
1135 y RELOP x for negative C */
1136 (for cmp (lt gt le ge)
1137 (simplify
1138 (cmp (mult:c @0 @1) (mult:c @2 @1))
1139 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1140 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1141 (if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1))
1142 (cmp @0 @2)
1143 (if (TREE_CODE (@1) == INTEGER_CST
1144 && wi::neg_p (@1, TYPE_SIGN (TREE_TYPE (@1))))
1145 (cmp @2 @0))))))
1146
1147 /* (X - 1U) <= INT_MAX-1U into (int) X > 0. */
1148 (for cmp (le gt)
1149 icmp (gt le)
1150 (simplify
1151 (cmp (plus @0 integer_minus_onep@1) INTEGER_CST@2)
1152 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1153 && TYPE_UNSIGNED (TREE_TYPE (@0))
1154 && TYPE_PRECISION (TREE_TYPE (@0)) > 1
1155 && wi::eq_p (@2, wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)),
1156 SIGNED) - 1))
1157 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
1158 (icmp (convert:stype @0) { build_int_cst (stype, 0); })))))
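/* Sketch with 32-bit types: for unsigned x, x - 1u <= 2147483646u holds
   exactly when x is in [1, 2147483647], which is the same as (int) x > 0.  */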
1159
1160 /* X / 4 < Y / 4 iff X < Y when the division is known to be exact. */
1161 (for cmp (simple_comparison)
1162 (simplify
1163 (cmp (exact_div @0 INTEGER_CST@2) (exact_div @1 @2))
1164 (if (wi::gt_p(@2, 0, TYPE_SIGN (TREE_TYPE (@2))))
1165 (cmp @0 @1))))
1166
1167 /* X / C1 op C2 into a simple range test. */
1168 (for cmp (simple_comparison)
1169 (simplify
1170 (cmp (trunc_div:s @0 INTEGER_CST@1) INTEGER_CST@2)
1171 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1172 && integer_nonzerop (@1)
1173 && !TREE_OVERFLOW (@1)
1174 && !TREE_OVERFLOW (@2))
1175 (with { tree lo, hi; bool neg_overflow;
1176 enum tree_code code = fold_div_compare (cmp, @1, @2, &lo, &hi,
1177 &neg_overflow); }
1178 (switch
1179 (if (code == LT_EXPR || code == GE_EXPR)
1180 (if (TREE_OVERFLOW (lo))
1181 { build_int_cst (type, (code == LT_EXPR) ^ neg_overflow); }
1182 (if (code == LT_EXPR)
1183 (lt @0 { lo; })
1184 (ge @0 { lo; }))))
1185 (if (code == LE_EXPR || code == GT_EXPR)
1186 (if (TREE_OVERFLOW (hi))
1187 { build_int_cst (type, (code == LE_EXPR) ^ neg_overflow); }
1188 (if (code == LE_EXPR)
1189 (le @0 { hi; })
1190 (gt @0 { hi; }))))
1191 (if (!lo && !hi)
1192 { build_int_cst (type, code == NE_EXPR); })
1193 (if (code == EQ_EXPR && !hi)
1194 (ge @0 { lo; }))
1195 (if (code == EQ_EXPR && !lo)
1196 (le @0 { hi; }))
1197 (if (code == NE_EXPR && !hi)
1198 (lt @0 { lo; }))
1199 (if (code == NE_EXPR && !lo)
1200 (gt @0 { hi; }))
1201 (if (GENERIC)
1202 { build_range_check (UNKNOWN_LOCATION, type, @0, code == EQ_EXPR,
1203 lo, hi); })
1204 (with
1205 {
1206 tree etype = range_check_type (TREE_TYPE (@0));
1207 if (etype)
1208 {
1209 if (! TYPE_UNSIGNED (etype))
1210 etype = unsigned_type_for (etype);
1211 hi = fold_convert (etype, hi);
1212 lo = fold_convert (etype, lo);
1213 hi = const_binop (MINUS_EXPR, etype, hi, lo);
1214 }
1215 }
1216 (if (etype && hi && !TREE_OVERFLOW (hi))
1217 (if (code == EQ_EXPR)
1218 (le (minus (convert:etype @0) { lo; }) { hi; })
1219 (gt (minus (convert:etype @0) { lo; }) { hi; })))))))))
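/* Informal example: for unsigned x, x / 10 == 3 is equivalent to
   30 <= x && x <= 39, which the last alternative emits as the single
   unsigned range test x - 30 <= 9.  */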
1220
1221 /* X + Z < Y + Z is the same as X < Y when there is no overflow. */
1222 (for op (lt le ge gt)
1223 (simplify
1224 (op (plus:c @0 @2) (plus:c @1 @2))
1225 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1226 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1227 (op @0 @1))))
1228 /* For equality and subtraction, this is also true with wrapping overflow. */
1229 (for op (eq ne minus)
1230 (simplify
1231 (op (plus:c @0 @2) (plus:c @1 @2))
1232 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1233 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1234 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1235 (op @0 @1))))
1236
1237 /* X - Z < Y - Z is the same as X < Y when there is no overflow. */
1238 (for op (lt le ge gt)
1239 (simplify
1240 (op (minus @0 @2) (minus @1 @2))
1241 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1242 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1243 (op @0 @1))))
1244 /* For equality and subtraction, this is also true with wrapping overflow. */
1245 (for op (eq ne minus)
1246 (simplify
1247 (op (minus @0 @2) (minus @1 @2))
1248 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1249 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1250 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1251 (op @0 @1))))
1252
1253 /* Z - X < Z - Y is the same as Y < X when there is no overflow. */
1254 (for op (lt le ge gt)
1255 (simplify
1256 (op (minus @2 @0) (minus @2 @1))
1257 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1258 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1259 (op @1 @0))))
1260 /* For equality and subtraction, this is also true with wrapping overflow. */
1261 (for op (eq ne minus)
1262 (simplify
1263 (op (minus @2 @0) (minus @2 @1))
1264 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1265 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1266 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1267 (op @1 @0))))
1268
1269 /* X == C - X can never be true if C is odd. */
1270 (for cmp (eq ne)
1271 (simplify
1272 (cmp:c (convert? @0) (convert1? (minus INTEGER_CST@1 (convert2? @0))))
1273 (if (TREE_INT_CST_LOW (@1) & 1)
1274 { constant_boolean_node (cmp == NE_EXPR, type); })))
1275
1276 /* Arguments on which one can call get_nonzero_bits to get the bits
1277 possibly set. */
1278 (match with_possible_nonzero_bits
1279 INTEGER_CST@0)
1280 (match with_possible_nonzero_bits
1281 SSA_NAME@0
1282 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))))
1283 /* Slightly extended version, do not make it recursive to keep it cheap. */
1284 (match (with_possible_nonzero_bits2 @0)
1285 with_possible_nonzero_bits@0)
1286 (match (with_possible_nonzero_bits2 @0)
1287 (bit_and:c with_possible_nonzero_bits@0 @2))
1288
1289 /* Same for bits that are known to be set, but we do not have
1290 an equivalent to get_nonzero_bits yet. */
1291 (match (with_certain_nonzero_bits2 @0)
1292 INTEGER_CST@0)
1293 (match (with_certain_nonzero_bits2 @0)
1294 (bit_ior @1 INTEGER_CST@0))
1295
1296 /* X == C (or X & Z == Y | C) is impossible if ~nonzero(X) & C != 0. */
1297 (for cmp (eq ne)
1298 (simplify
1299 (cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1))
1300 (if ((~get_nonzero_bits (@0) & @1) != 0)
1301 { constant_boolean_node (cmp == NE_EXPR, type); })))
1302
1303 /* ((X inner_op C0) outer_op C1)
1304 With X being a tree where value_range has reasoned certain bits to always be
1305 zero throughout its computed value range,
1306 inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op
1307    where zero_mask has 1's for all bits of X that are sure to be 0
1308    and 0's otherwise:
1309    if (inner_op == '^') C0 &= ~C1;
1310    if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1));
1311    if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1)).
1312 */
1313 (for inner_op (bit_ior bit_xor)
1314 outer_op (bit_xor bit_ior)
1315 (simplify
1316 (outer_op
1317 (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1)
1318 (with
1319 {
1320 bool fail = false;
1321 wide_int zero_mask_not;
1322 wide_int C0;
1323 wide_int cst_emit;
1324
1325 if (TREE_CODE (@2) == SSA_NAME)
1326 zero_mask_not = get_nonzero_bits (@2);
1327 else
1328 fail = true;
1329
1330 if (inner_op == BIT_XOR_EXPR)
1331 {
1332 C0 = wi::bit_and_not (@0, @1);
1333 cst_emit = wi::bit_or (C0, @1);
1334 }
1335 else
1336 {
1337 C0 = @0;
1338 cst_emit = wi::bit_xor (@0, @1);
1339 }
1340 }
1341 (if (!fail && wi::bit_and (C0, zero_mask_not) == 0)
1342 (outer_op @2 { wide_int_to_tree (type, cst_emit); })
1343 (if (!fail && wi::bit_and (@1, zero_mask_not) == 0)
1344 (inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))
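/* Example (a sketch): if value-range information proves that bit 0x10 of X
   is always zero, then (X | 0x10) ^ 0x03 satisfies the first condition and
   is emitted as X ^ 0x13, because OR-ing in a bit known to be clear is the
   same as XOR-ing it in.  */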
1345
1346 /* Associate (p +p off1) +p off2 as (p +p (off1 + off2)). */
1347 (simplify
1348 (pointer_plus (pointer_plus:s @0 @1) @3)
1349 (pointer_plus @0 (plus @1 @3)))
1350
1351 /* Pattern match
1352 tem1 = (long) ptr1;
1353 tem2 = (long) ptr2;
1354 tem3 = tem2 - tem1;
1355 tem4 = (unsigned long) tem3;
1356 tem5 = ptr1 + tem4;
1357 and produce
1358 tem5 = ptr2; */
1359 (simplify
1360 (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
1361 /* Conditionally look through a sign-changing conversion. */
1362 (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
1363 && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
1364 || (GENERIC && type == TREE_TYPE (@1))))
1365 @1))
1366
1367 /* Pattern match
1368 tem = (sizetype) ptr;
1369 tem = tem & algn;
1370 tem = -tem;
1371 ... = ptr p+ tem;
1372    and produce the simpler form, which is easier to analyze with respect to alignment:
1373 ... = ptr & ~algn; */
1374 (simplify
1375 (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
1376 (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), wi::bit_not (@1)); }
1377 (bit_and @0 { algn; })))
1378
1379 /* Try folding difference of addresses. */
1380 (simplify
1381 (minus (convert ADDR_EXPR@0) (convert @1))
1382 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1383 (with { HOST_WIDE_INT diff; }
1384 (if (ptr_difference_const (@0, @1, &diff))
1385 { build_int_cst_type (type, diff); }))))
1386 (simplify
1387 (minus (convert @0) (convert ADDR_EXPR@1))
1388 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1389 (with { HOST_WIDE_INT diff; }
1390 (if (ptr_difference_const (@0, @1, &diff))
1391 { build_int_cst_type (type, diff); }))))
1392
1393 /* If arg0 is derived from the address of an object or function, we may
1394 be able to fold this expression using the object or function's
1395 alignment. */
1396 (simplify
1397 (bit_and (convert? @0) INTEGER_CST@1)
1398 (if (POINTER_TYPE_P (TREE_TYPE (@0))
1399 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1400 (with
1401 {
1402 unsigned int align;
1403 unsigned HOST_WIDE_INT bitpos;
1404 get_pointer_alignment_1 (@0, &align, &bitpos);
1405 }
1406 (if (wi::ltu_p (@1, align / BITS_PER_UNIT))
1407 { wide_int_to_tree (type, wi::bit_and (@1, bitpos / BITS_PER_UNIT)); }))))
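/* E.g. if get_pointer_alignment_1 proves that the pointer is 16-byte aligned
   with a zero bit offset, then ((uintptr_t) p & 15) folds to 0; more
   generally the result is @1 & (bitpos / BITS_PER_UNIT) whenever @1 is below
   the known alignment in bytes.  */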
1408
1409
1410 /* We can't reassociate at all for saturating types. */
1411 (if (!TYPE_SATURATING (type))
1412
1413 /* Contract negates. */
1414 /* A + (-B) -> A - B */
1415 (simplify
1416 (plus:c @0 (convert? (negate @1)))
1417 /* Apply STRIP_NOPS on the negate. */
1418 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1419 && !TYPE_OVERFLOW_SANITIZED (type))
1420 (with
1421 {
1422 tree t1 = type;
1423 if (INTEGRAL_TYPE_P (type)
1424 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
1425 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
1426 }
1427 (convert (minus (convert:t1 @0) (convert:t1 @1))))))
1428 /* A - (-B) -> A + B */
1429 (simplify
1430 (minus @0 (convert? (negate @1)))
1431 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1432 && !TYPE_OVERFLOW_SANITIZED (type))
1433 (with
1434 {
1435 tree t1 = type;
1436 if (INTEGRAL_TYPE_P (type)
1437 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
1438 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
1439 }
1440 (convert (plus (convert:t1 @0) (convert:t1 @1))))))
1441 /* -(-A) -> A */
1442 (simplify
1443 (negate (convert? (negate @1)))
1444 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1445 && !TYPE_OVERFLOW_SANITIZED (type))
1446 (convert @1)))
1447
1448 /* We can't reassociate floating-point unless -fassociative-math
1449 or fixed-point plus or minus because of saturation to +-Inf. */
1450 (if ((!FLOAT_TYPE_P (type) || flag_associative_math)
1451 && !FIXED_POINT_TYPE_P (type))
1452
1453 /* Match patterns that allow contracting a plus-minus pair
1454 irrespective of overflow issues. */
1455 /* (A +- B) - A -> +- B */
1456 /* (A +- B) -+ B -> A */
1457 /* A - (A +- B) -> -+ B */
1458 /* A +- (B -+ A) -> +- B */
1459 (simplify
1460 (minus (plus:c @0 @1) @0)
1461 @1)
1462 (simplify
1463 (minus (minus @0 @1) @0)
1464 (negate @1))
1465 (simplify
1466 (plus:c (minus @0 @1) @1)
1467 @0)
1468 (simplify
1469 (minus @0 (plus:c @0 @1))
1470 (negate @1))
1471 (simplify
1472 (minus @0 (minus @0 @1))
1473 @1)
1474 /* (A +- B) + (C - A) -> C +- B */
1475 /* (A + B) - (A - C) -> B + C */
1476 /* More cases are handled with comparisons. */
1477 (simplify
1478 (plus:c (plus:c @0 @1) (minus @2 @0))
1479 (plus @2 @1))
1480 (simplify
1481 (plus:c (minus @0 @1) (minus @2 @0))
1482 (minus @2 @1))
1483 (simplify
1484 (minus (plus:c @0 @1) (minus @0 @2))
1485 (plus @1 @2))
1486
1487 /* (A +- CST1) +- CST2 -> A + CST3
1488 Use view_convert because it is safe for vectors and equivalent for
1489 scalars. */
1490 (for outer_op (plus minus)
1491 (for inner_op (plus minus)
1492 neg_inner_op (minus plus)
1493 (simplify
1494 (outer_op (nop_convert (inner_op @0 CONSTANT_CLASS_P@1))
1495 CONSTANT_CLASS_P@2)
1496 /* If one of the types wraps, use that one. */
1497 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
1498 (if (outer_op == PLUS_EXPR)
1499 (plus (view_convert @0) (inner_op @2 (view_convert @1)))
1500 (minus (view_convert @0) (neg_inner_op @2 (view_convert @1))))
1501 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1502 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
1503 (if (outer_op == PLUS_EXPR)
1504 (view_convert (plus @0 (inner_op (view_convert @2) @1)))
1505 (view_convert (minus @0 (neg_inner_op (view_convert @2) @1))))
1506 /* If the constant operation overflows we cannot do the transform
1507 directly as we would introduce undefined overflow, for example
1508 with (a - 1) + INT_MIN. */
1509 (if (types_match (type, @0))
1510 (with { tree cst = const_binop (outer_op == inner_op
1511 ? PLUS_EXPR : MINUS_EXPR,
1512 type, @1, @2); }
1513 (if (cst && !TREE_OVERFLOW (cst))
1514 (inner_op @0 { cst; } )
1515 /* X+INT_MAX+1 is X-INT_MIN. */
1516 (if (INTEGRAL_TYPE_P (type) && cst
1517 && wi::eq_p (cst, wi::min_value (type)))
1518 (neg_inner_op @0 { wide_int_to_tree (type, cst); })
1519 /* Last resort, use some unsigned type. */
1520 (with { tree utype = unsigned_type_for (type); }
1521 (view_convert (inner_op
1522 (view_convert:utype @0)
1523 (view_convert:utype
1524 { drop_tree_overflow (cst); })))))))))))))
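/* E.g. (x + 3) + 5 folds to x + 8.  When the combined constant would
   overflow a signed type, as in (x - 1) + INT_MIN, the arithmetic is instead
   carried out in the corresponding unsigned type (the last alternative
   above) so that no new undefined overflow is introduced.  */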
1525
1526 /* (CST1 - A) +- CST2 -> CST3 - A */
1527 (for outer_op (plus minus)
1528 (simplify
1529 (outer_op (minus CONSTANT_CLASS_P@1 @0) CONSTANT_CLASS_P@2)
1530 (with { tree cst = const_binop (outer_op, type, @1, @2); }
1531 (if (cst && !TREE_OVERFLOW (cst))
1532 (minus { cst; } @0)))))
1533
1534 /* CST1 - (CST2 - A) -> CST3 + A */
1535 (simplify
1536 (minus CONSTANT_CLASS_P@1 (minus CONSTANT_CLASS_P@2 @0))
1537 (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
1538 (if (cst && !TREE_OVERFLOW (cst))
1539 (plus { cst; } @0))))
1540
1541 /* ~A + A -> -1 */
1542 (simplify
1543 (plus:c (bit_not @0) @0)
1544 (if (!TYPE_OVERFLOW_TRAPS (type))
1545 { build_all_ones_cst (type); }))
1546
1547 /* ~A + 1 -> -A */
1548 (simplify
1549 (plus (convert? (bit_not @0)) integer_each_onep)
1550 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1551 (negate (convert @0))))
1552
1553 /* -A - 1 -> ~A */
1554 (simplify
1555 (minus (convert? (negate @0)) integer_each_onep)
1556 (if (!TYPE_OVERFLOW_TRAPS (type)
1557 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1558 (bit_not (convert @0))))
1559
1560 /* -1 - A -> ~A */
1561 (simplify
1562 (minus integer_all_onesp @0)
1563 (bit_not @0))
1564
1565 /* (T)(P + A) - (T)P -> (T) A */
1566 (for add (plus pointer_plus)
1567 (simplify
1568 (minus (convert (add @@0 @1))
1569 (convert @0))
1570 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1571 /* For integer types, if A has a smaller type
1572 than T the result depends on the possible
1573 overflow in P + A.
1574    E.g. T=size_t, A=(unsigned)4294967295, P>0.
1575 However, if an overflow in P + A would cause
1576 undefined behavior, we can assume that there
1577 is no overflow. */
1578 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1579 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1580 /* For pointer types, if the conversion of A to the
1581 final type requires a sign- or zero-extension,
1582 then we have to punt - it is not defined which
1583 one is correct. */
1584 || (POINTER_TYPE_P (TREE_TYPE (@0))
1585 && TREE_CODE (@1) == INTEGER_CST
1586 && tree_int_cst_sign_bit (@1) == 0))
1587 (convert @1))))
1588
1589 /* (T)P - (T)(P + A) -> -(T) A */
1590 (for add (plus pointer_plus)
1591 (simplify
1592 (minus (convert @0)
1593 (convert (add @@0 @1)))
1594 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1595 /* For integer types, if A has a smaller type
1596 than T the result depends on the possible
1597 overflow in P + A.
1598    E.g. T=size_t, A=(unsigned)4294967295, P>0.
1599 However, if an overflow in P + A would cause
1600 undefined behavior, we can assume that there
1601 is no overflow. */
1602 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1603 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1604 /* For pointer types, if the conversion of A to the
1605 final type requires a sign- or zero-extension,
1606 then we have to punt - it is not defined which
1607 one is correct. */
1608 || (POINTER_TYPE_P (TREE_TYPE (@0))
1609 && TREE_CODE (@1) == INTEGER_CST
1610 && tree_int_cst_sign_bit (@1) == 0))
1611 (negate (convert @1)))))
1612
1613 /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
1614 (for add (plus pointer_plus)
1615 (simplify
1616 (minus (convert (add @@0 @1))
1617 (convert (add @0 @2)))
1618 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
1619 /* For integer types, if A has a smaller type
1620 than T the result depends on the possible
1621 overflow in P + A.
1622    E.g. T=size_t, A=(unsigned)4294967295, P>0.
1623 However, if an overflow in P + A would cause
1624 undefined behavior, we can assume that there
1625 is no overflow. */
1626 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1627 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1628 /* For pointer types, if the conversion of A to the
1629 final type requires a sign- or zero-extension,
1630 then we have to punt - it is not defined which
1631 one is correct. */
1632 || (POINTER_TYPE_P (TREE_TYPE (@0))
1633 && TREE_CODE (@1) == INTEGER_CST
1634 && tree_int_cst_sign_bit (@1) == 0
1635 && TREE_CODE (@2) == INTEGER_CST
1636 && tree_int_cst_sign_bit (@2) == 0))
1637 (minus (convert @1) (convert @2)))))))
1638
1639
1640 /* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax(). */
1641
1642 (for minmax (min max FMIN FMAX)
1643 (simplify
1644 (minmax @0 @0)
1645 @0))
1646 /* min(max(x,y),y) -> y. */
1647 (simplify
1648 (min:c (max:c @0 @1) @1)
1649 @1)
1650 /* max(min(x,y),y) -> y. */
1651 (simplify
1652 (max:c (min:c @0 @1) @1)
1653 @1)
1654 /* max(a,-a) -> abs(a). */
1655 (simplify
1656 (max:c @0 (negate @0))
1657 (if (TREE_CODE (type) != COMPLEX_TYPE
1658 && (! ANY_INTEGRAL_TYPE_P (type)
1659 || TYPE_OVERFLOW_UNDEFINED (type)))
1660 (abs @0)))
1661 /* min(a,-a) -> -abs(a). */
1662 (simplify
1663 (min:c @0 (negate @0))
1664 (if (TREE_CODE (type) != COMPLEX_TYPE
1665 && (! ANY_INTEGRAL_TYPE_P (type)
1666 || TYPE_OVERFLOW_UNDEFINED (type)))
1667 (negate (abs @0))))
1668 (simplify
1669 (min @0 @1)
1670 (switch
1671 (if (INTEGRAL_TYPE_P (type)
1672 && TYPE_MIN_VALUE (type)
1673 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
1674 @1)
1675 (if (INTEGRAL_TYPE_P (type)
1676 && TYPE_MAX_VALUE (type)
1677 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
1678 @0)))
1679 (simplify
1680 (max @0 @1)
1681 (switch
1682 (if (INTEGRAL_TYPE_P (type)
1683 && TYPE_MAX_VALUE (type)
1684 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
1685 @1)
1686 (if (INTEGRAL_TYPE_P (type)
1687 && TYPE_MIN_VALUE (type)
1688 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
1689 @0)))
1690
1691 /* max (a, a + CST) -> a + CST where CST is positive. */
1692 /* max (a, a + CST) -> a where CST is negative. */
1693 (simplify
1694 (max:c @0 (plus@2 @0 INTEGER_CST@1))
1695 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1696 (if (tree_int_cst_sgn (@1) > 0)
1697 @2
1698 @0)))
1699
1700 /* min (a, a + CST) -> a where CST is positive. */
1701 /* min (a, a + CST) -> a + CST where CST is negative. */
1702 (simplify
1703 (min:c @0 (plus@2 @0 INTEGER_CST@1))
1704 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1705 (if (tree_int_cst_sgn (@1) > 0)
1706 @0
1707 @2)))
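
/* Illustrative example (not itself a pattern): for signed int a, where
   signed overflow is undefined,
     MAX (a, a + 3) -> a + 3     MAX (a, a - 3) -> a
     MIN (a, a + 3) -> a         MIN (a, a - 3) -> a - 3  */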
1708
1709 /* (convert (minmax (convert x) c)) -> minmax (x, c) if x is promoted
1710 and the outer convert demotes the expression back to x's type. */
1711 (for minmax (min max)
1712 (simplify
1713 (convert (minmax@0 (convert @1) INTEGER_CST@2))
1714 (if (INTEGRAL_TYPE_P (type)
1715 && types_match (@1, type) && int_fits_type_p (@2, type)
1716 && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type)
1717 && TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
1718 (minmax @1 (convert @2)))))
1719
1720 (for minmax (FMIN FMAX)
1721 /* If either argument is NaN, return the other one. Avoid the
1722 transformation if we get (and honor) a signalling NaN. */
1723 (simplify
1724 (minmax:c @0 REAL_CST@1)
1725 (if (real_isnan (TREE_REAL_CST_PTR (@1))
1726 && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling))
1727 @0)))
1728 /* Convert fmin/fmax to MIN_EXPR/MAX_EXPR. C99 requires these
1729 functions to return the numeric arg if the other one is NaN.
1730 MIN and MAX don't honor that, so only transform if -ffinite-math-only
1731 is set. C99 doesn't require -0.0 to be handled, so we don't have to
1732 worry about it either. */
1733 (if (flag_finite_math_only)
1734 (simplify
1735 (FMIN @0 @1)
1736 (min @0 @1))
1737 (simplify
1738 (FMAX @0 @1)
1739 (max @0 @1)))
1740 /* min (-A, -B) -> -max (A, B) */
1741 (for minmax (min max FMIN FMAX)
1742 maxmin (max min FMAX FMIN)
1743 (simplify
1744 (minmax (negate:s@2 @0) (negate:s@3 @1))
1745 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
1746 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1747 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
1748 (negate (maxmin @0 @1)))))
1749 /* MIN (~X, ~Y) -> ~MAX (X, Y)
1750 MAX (~X, ~Y) -> ~MIN (X, Y) */
1751 (for minmax (min max)
1752 maxmin (max min)
1753 (simplify
1754 (minmax (bit_not:s@2 @0) (bit_not:s@3 @1))
1755 (bit_not (maxmin @0 @1))))
1756
1757 /* MIN (X, Y) == X -> X <= Y */
1758 (for minmax (min min max max)
1759 cmp (eq ne eq ne )
1760 out (le gt ge lt )
1761 (simplify
1762 (cmp:c (minmax:c @0 @1) @0)
1763 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)))
1764 (out @0 @1))))
1765 /* MIN (X, 5) == 0 -> X == 0
1766 MIN (X, 5) == 7 -> false */
1767 (for cmp (eq ne)
1768 (simplify
1769 (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
1770 (if (wi::lt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
1771 { constant_boolean_node (cmp == NE_EXPR, type); }
1772 (if (wi::gt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
1773 (cmp @0 @2)))))
1774 (for cmp (eq ne)
1775 (simplify
1776 (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
1777 (if (wi::gt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
1778 { constant_boolean_node (cmp == NE_EXPR, type); }
1779 (if (wi::lt_p (@1, @2, TYPE_SIGN (TREE_TYPE (@0))))
1780 (cmp @0 @2)))))
1781 /* MIN (X, C1) < C2 -> X < C2 || C1 < C2 */
1782 (for minmax (min min max max min min max max )
1783 cmp (lt le gt ge gt ge lt le )
1784 comb (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and)
1785 (simplify
1786 (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2)
1787 (comb (cmp @0 @2) (cmp @1 @2))))
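
/* Illustrative example (not itself a pattern): with int x,
     MIN (x, 9) < 5  ->  (x < 5) | (9 < 5)  ->  x < 5
     MAX (x, 9) < 5  ->  (x < 5) & (9 < 5)  ->  false
   The constant sub-comparison folds away in a later step.  */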
1788
1789 /* Simplifications of shift and rotates. */
1790
1791 (for rotate (lrotate rrotate)
1792 (simplify
1793 (rotate integer_all_onesp@0 @1)
1794 @0))
1795
1796 /* Optimize -1 >> x for arithmetic right shifts. */
1797 (simplify
1798 (rshift integer_all_onesp@0 @1)
1799 (if (!TYPE_UNSIGNED (type)
1800 && tree_expr_nonnegative_p (@1))
1801 @0))
1802
1803 /* Optimize (x >> c) << c into x & (-1<<c). */
1804 (simplify
1805 (lshift (rshift @0 INTEGER_CST@1) @1)
1806 (if (wi::ltu_p (@1, element_precision (type)))
1807 (bit_and @0 (lshift { build_minus_one_cst (type); } @1))))
1808
1809 /* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned
1810 types. */
1811 (simplify
1812 (rshift (lshift @0 INTEGER_CST@1) @1)
1813 (if (TYPE_UNSIGNED (type)
1814 && (wi::ltu_p (@1, element_precision (type))))
1815 (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))
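
/* Illustrative example (not itself a pattern): assuming a 32-bit unsigned
   int x,
     (x >> 4) << 4  ->  x & (-1 << 4)            i.e.  x & 0xfffffff0
     (x << 4) >> 4  ->  x & ((unsigned)-1 >> 4)  i.e.  x & 0x0fffffff  */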
1816
1817 (for shiftrotate (lrotate rrotate lshift rshift)
1818 (simplify
1819 (shiftrotate @0 integer_zerop)
1820 (non_lvalue @0))
1821 (simplify
1822 (shiftrotate integer_zerop@0 @1)
1823 @0)
1824 /* Prefer vector1 << scalar to vector1 << vector2
1825 if vector2 is uniform. */
1826 (for vec (VECTOR_CST CONSTRUCTOR)
1827 (simplify
1828 (shiftrotate @0 vec@1)
1829 (with { tree tem = uniform_vector_p (@1); }
1830 (if (tem)
1831 (shiftrotate @0 { tem; }))))))
1832
1833 /* Simplify X << Y to X when the low `width' bits of Y are known to be
1834 zero, as the only valid Y is then 0. Similarly for X >> Y. */
1835 #if GIMPLE
1836 (for shift (lshift rshift)
1837 (simplify
1838 (shift @0 SSA_NAME@1)
1839 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
1840 (with {
1841 int width = ceil_log2 (element_precision (TREE_TYPE (@0)));
1842 int prec = TYPE_PRECISION (TREE_TYPE (@1));
1843 }
1844 (if ((get_nonzero_bits (@1) & wi::mask (width, false, prec)) == 0)
1845 @0)))))
1846 #endif
1847
1848 /* Rewrite an LROTATE_EXPR by a constant into an
1849 RROTATE_EXPR by a new constant. */
1850 (simplify
1851 (lrotate @0 INTEGER_CST@1)
1852 (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1),
1853 build_int_cst (TREE_TYPE (@1),
1854 element_precision (type)), @1); }))
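
/* Illustrative example (not itself a pattern): on a 32-bit type a rotate
   left by 5 becomes a rotate right by 32 - 5 = 27.  */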
1855
1856 /* Turn (a OP c1) OP c2 into a OP (c1+c2). */
1857 (for op (lrotate rrotate rshift lshift)
1858 (simplify
1859 (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
1860 (with { unsigned int prec = element_precision (type); }
1861 (if (wi::ge_p (@1, 0, TYPE_SIGN (TREE_TYPE (@1)))
1862 && wi::lt_p (@1, prec, TYPE_SIGN (TREE_TYPE (@1)))
1863 && wi::ge_p (@2, 0, TYPE_SIGN (TREE_TYPE (@2)))
1864 && wi::lt_p (@2, prec, TYPE_SIGN (TREE_TYPE (@2))))
1865 (with { unsigned int low = wi::add (@1, @2).to_uhwi (); }
1866 /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
1867 being well defined. */
1868 (if (low >= prec)
1869 (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
1870 (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
1871 (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
1872 { build_zero_cst (type); }
1873 (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
1874 (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))
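
/* Illustrative examples (not themselves patterns): for a 32-bit unsigned
   int x,
     (x << 3) << 4    ->  x << 7
     (x >> 20) >> 20  ->  0                  (combined count 40 >= 32)
     rotate-left by 20 twice  ->  rotate-left by 8   (40 % 32)  */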
1875
1876
1877 /* ((1 << A) & 1) != 0 -> A == 0
1878 ((1 << A) & 1) == 0 -> A != 0 */
1879 (for cmp (ne eq)
1880 icmp (eq ne)
1881 (simplify
1882 (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop)
1883 (icmp @0 { build_zero_cst (TREE_TYPE (@0)); })))
1884
1885 /* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
1886 (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1)
1887 if CST2 != 0. */
1888 (for cmp (ne eq)
1889 (simplify
1890 (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
1891 (with { int cand = wi::ctz (@2) - wi::ctz (@0); }
1892 (if (cand < 0
1893 || (!integer_zerop (@2)
1894 && wi::ne_p (wi::lshift (@0, cand), @2)))
1895 { constant_boolean_node (cmp == NE_EXPR, type); }
1896 (if (!integer_zerop (@2)
1897 && wi::eq_p (wi::lshift (@0, cand), @2))
1898 (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
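
/* Illustrative example (not itself a pattern): with unsigned A,
     (4 << A) == 32  ->  A == 3      since ctz (32) - ctz (4) = 5 - 2 = 3
     (4 << A) == 6   ->  false       as 6 is not 4 shifted left  */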
1899
1900 /* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
1901 (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
1902 if the new mask might be further optimized. */
1903 (for shift (lshift rshift)
1904 (simplify
1905 (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
1906 INTEGER_CST@2)
1907 (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
1908 && TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
1909 && tree_fits_uhwi_p (@1)
1910 && tree_to_uhwi (@1) > 0
1911 && tree_to_uhwi (@1) < TYPE_PRECISION (type))
1912 (with
1913 {
1914 unsigned int shiftc = tree_to_uhwi (@1);
1915 unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
1916 unsigned HOST_WIDE_INT newmask, zerobits = 0;
1917 tree shift_type = TREE_TYPE (@3);
1918 unsigned int prec;
1919
1920 if (shift == LSHIFT_EXPR)
1921 zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
1922 else if (shift == RSHIFT_EXPR
1923 && type_has_mode_precision_p (shift_type))
1924 {
1925 prec = TYPE_PRECISION (TREE_TYPE (@3));
1926 tree arg00 = @0;
1927 /* See if more bits can be proven as zero because of
1928 zero extension. */
1929 if (@3 != @0
1930 && TYPE_UNSIGNED (TREE_TYPE (@0)))
1931 {
1932 tree inner_type = TREE_TYPE (@0);
1933 if (type_has_mode_precision_p (inner_type)
1934 && TYPE_PRECISION (inner_type) < prec)
1935 {
1936 prec = TYPE_PRECISION (inner_type);
1937 /* See if we can shorten the right shift. */
1938 if (shiftc < prec)
1939 shift_type = inner_type;
1940 /* Otherwise X >> C1 is all zeros, so we'll optimize
1941 it into (X, 0) later on by making sure zerobits
1942 is all ones. */
1943 }
1944 }
1945 zerobits = HOST_WIDE_INT_M1U;
1946 if (shiftc < prec)
1947 {
1948 zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
1949 zerobits <<= prec - shiftc;
1950 }
1951 /* For an arithmetic shift, if the sign bit could be set, zerobits
1952 can actually contain sign bits, so no transformation is
1953 possible, unless MASK masks them all away. In that
1954 case the shift needs to be converted into a logical shift. */
1955 if (!TYPE_UNSIGNED (TREE_TYPE (@3))
1956 && prec == TYPE_PRECISION (TREE_TYPE (@3)))
1957 {
1958 if ((mask & zerobits) == 0)
1959 shift_type = unsigned_type_for (TREE_TYPE (@3));
1960 else
1961 zerobits = 0;
1962 }
1963 }
1964 }
1965 /* ((X << 16) & 0xff00) is (X, 0). */
1966 (if ((mask & zerobits) == mask)
1967 { build_int_cst (type, 0); }
1968 (with { newmask = mask | zerobits; }
1969 (if (newmask != mask && (newmask & (newmask + 1)) == 0)
1970 (with
1971 {
1972 /* Only do the transformation if NEWMASK is some integer
1973 mode's mask. */
1974 for (prec = BITS_PER_UNIT;
1975 prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
1976 if (newmask == (HOST_WIDE_INT_1U << prec) - 1)
1977 break;
1978 }
1979 (if (prec < HOST_BITS_PER_WIDE_INT
1980 || newmask == HOST_WIDE_INT_M1U)
1981 (with
1982 { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
1983 (if (!tree_int_cst_equal (newmaskt, @2))
1984 (if (shift_type != TREE_TYPE (@3))
1985 (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
1986 (bit_and @4 { newmaskt; })))))))))))))
1987
1988 /* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
1989 (X {&,^,|} C2) >> C1 into (X >> C1) {&,^,|} (C2 >> C1). */
1990 (for shift (lshift rshift)
1991 (for bit_op (bit_and bit_xor bit_ior)
1992 (simplify
1993 (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
1994 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1995 (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
1996 (bit_op (shift (convert @0) @1) { mask; }))))))
1997
1998 /* ~(~X >> Y) -> X >> Y (for arithmetic shift). */
1999 (simplify
2000 (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
2001 (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
2002 && (element_precision (TREE_TYPE (@0))
2003 <= element_precision (TREE_TYPE (@1))
2004 || !TYPE_UNSIGNED (TREE_TYPE (@1))))
2005 (with
2006 { tree shift_type = TREE_TYPE (@0); }
2007 (convert (rshift (convert:shift_type @1) @2)))))
2008
2009 /* ~(~X >>r Y) -> X >>r Y
2010 ~(~X <<r Y) -> X <<r Y */
2011 (for rotate (lrotate rrotate)
2012 (simplify
2013 (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
2014 (if ((element_precision (TREE_TYPE (@0))
2015 <= element_precision (TREE_TYPE (@1))
2016 || !TYPE_UNSIGNED (TREE_TYPE (@1)))
2017 && (element_precision (type) <= element_precision (TREE_TYPE (@0))
2018 || !TYPE_UNSIGNED (TREE_TYPE (@0))))
2019 (with
2020 { tree rotate_type = TREE_TYPE (@0); }
2021 (convert (rotate (convert:rotate_type @1) @2))))))
2022
2023 /* Simplifications of conversions. */
2024
2025 /* Basic strip-useless-type-conversions / strip_nops. */
2026 (for cvt (convert view_convert float fix_trunc)
2027 (simplify
2028 (cvt @0)
2029 (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
2030 || (GENERIC && type == TREE_TYPE (@0)))
2031 @0)))
2032
2033 /* Contract view-conversions. */
2034 (simplify
2035 (view_convert (view_convert @0))
2036 (view_convert @0))
2037
2038 /* For integral conversions with the same precision or pointer
2039 conversions use a NOP_EXPR instead. */
2040 (simplify
2041 (view_convert @0)
2042 (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
2043 && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
2044 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
2045 (convert @0)))
2046
2047 /* Strip inner integral conversions that do not change precision or size, or
2048 zero-extend while keeping the same size (for bool-to-char). */
2049 (simplify
2050 (view_convert (convert@0 @1))
2051 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
2052 && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
2053 && TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))
2054 && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))
2055 || (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@1))
2056 && TYPE_UNSIGNED (TREE_TYPE (@1)))))
2057 (view_convert @1)))
2058
2059 /* Re-association barriers around constants and other re-association
2060 barriers can be removed. */
2061 (simplify
2062 (paren CONSTANT_CLASS_P@0)
2063 @0)
2064 (simplify
2065 (paren (paren@1 @0))
2066 @1)
2067
2068 /* Handle cases of two conversions in a row. */
2069 (for ocvt (convert float fix_trunc)
2070 (for icvt (convert float)
2071 (simplify
2072 (ocvt (icvt@1 @0))
2073 (with
2074 {
2075 tree inside_type = TREE_TYPE (@0);
2076 tree inter_type = TREE_TYPE (@1);
2077 int inside_int = INTEGRAL_TYPE_P (inside_type);
2078 int inside_ptr = POINTER_TYPE_P (inside_type);
2079 int inside_float = FLOAT_TYPE_P (inside_type);
2080 int inside_vec = VECTOR_TYPE_P (inside_type);
2081 unsigned int inside_prec = TYPE_PRECISION (inside_type);
2082 int inside_unsignedp = TYPE_UNSIGNED (inside_type);
2083 int inter_int = INTEGRAL_TYPE_P (inter_type);
2084 int inter_ptr = POINTER_TYPE_P (inter_type);
2085 int inter_float = FLOAT_TYPE_P (inter_type);
2086 int inter_vec = VECTOR_TYPE_P (inter_type);
2087 unsigned int inter_prec = TYPE_PRECISION (inter_type);
2088 int inter_unsignedp = TYPE_UNSIGNED (inter_type);
2089 int final_int = INTEGRAL_TYPE_P (type);
2090 int final_ptr = POINTER_TYPE_P (type);
2091 int final_float = FLOAT_TYPE_P (type);
2092 int final_vec = VECTOR_TYPE_P (type);
2093 unsigned int final_prec = TYPE_PRECISION (type);
2094 int final_unsignedp = TYPE_UNSIGNED (type);
2095 }
2096 (switch
2097 /* In addition to the cases of two conversions in a row
2098 handled below, if we are converting something to its own
2099 type via an object of identical or wider precision, neither
2100 conversion is needed. */
2101 (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
2102 || (GENERIC
2103 && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
2104 && (((inter_int || inter_ptr) && final_int)
2105 || (inter_float && final_float))
2106 && inter_prec >= final_prec)
2107 (ocvt @0))
2108
2109 /* Likewise, if the intermediate and initial types are either both
2110 float or both integer, we don't need the middle conversion if the
2111 former is wider than the latter and doesn't change the signedness
2112 (for integers). Avoid this if the final type is a pointer since
2113 then we sometimes need the middle conversion. */
2114 (if (((inter_int && inside_int) || (inter_float && inside_float))
2115 && (final_int || final_float)
2116 && inter_prec >= inside_prec
2117 && (inter_float || inter_unsignedp == inside_unsignedp))
2118 (ocvt @0))
2119
2120 /* If we have a sign-extension of a zero-extended value, we can
2121 replace that by a single zero-extension. Likewise if the
2122 final conversion does not change precision we can drop the
2123 intermediate conversion. */
2124 (if (inside_int && inter_int && final_int
2125 && ((inside_prec < inter_prec && inter_prec < final_prec
2126 && inside_unsignedp && !inter_unsignedp)
2127 || final_prec == inter_prec))
2128 (ocvt @0))
2129
2130 /* Two conversions in a row are not needed unless:
2131 - some conversion is floating-point (overstrict for now), or
2132 - some conversion is a vector (overstrict for now), or
2133 - the intermediate type is narrower than both initial and
2134 final, or
2135 - the intermediate type and innermost type differ in signedness,
2136 and the outermost type is wider than the intermediate, or
2137 - the initial type is a pointer type and the precisions of the
2138 intermediate and final types differ, or
2139 - the final type is a pointer type and the precisions of the
2140 initial and intermediate types differ. */
2141 (if (! inside_float && ! inter_float && ! final_float
2142 && ! inside_vec && ! inter_vec && ! final_vec
2143 && (inter_prec >= inside_prec || inter_prec >= final_prec)
2144 && ! (inside_int && inter_int
2145 && inter_unsignedp != inside_unsignedp
2146 && inter_prec < final_prec)
2147 && ((inter_unsignedp && inter_prec > inside_prec)
2148 == (final_unsignedp && final_prec > inter_prec))
2149 && ! (inside_ptr && inter_prec != final_prec)
2150 && ! (final_ptr && inside_prec != inter_prec))
2151 (ocvt @0))
2152
2153 /* A truncation to an unsigned type (a zero-extension) should be
2154 canonicalized as a bitwise AND with a mask. */
2155 (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion. */
2156 && final_int && inter_int && inside_int
2157 && final_prec == inside_prec
2158 && final_prec > inter_prec
2159 && inter_unsignedp)
2160 (convert (bit_and @0 { wide_int_to_tree
2161 (inside_type,
2162 wi::mask (inter_prec, false,
2163 TYPE_PRECISION (inside_type))); })))
2164
2165 /* If we are converting an integer to a floating-point type that can
2166 represent it exactly and back to an integer, we can skip the
2167 floating-point conversion. */
2168 (if (GIMPLE /* PR66211 */
2169 && inside_int && inter_float && final_int &&
2170 (unsigned) significand_size (TYPE_MODE (inter_type))
2171 >= inside_prec - !inside_unsignedp)
2172 (convert @0)))))))
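
/* Illustrative examples (not themselves patterns): with unsigned char c,
     (int)(unsigned short)c  ->  (int)c    (middle widening conversion dropped)
     (long)(int)c            ->  (long)c   (sign-extension of a zero-extended
                                            value is a single zero-extension)  */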
2173
2174 /* If we have a narrowing conversion to an integral type that is fed by a
2175 BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
2176 masks off bits outside the final type (and nothing else). */
2177 (simplify
2178 (convert (bit_and @0 INTEGER_CST@1))
2179 (if (INTEGRAL_TYPE_P (type)
2180 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
2181 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
2182 && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
2183 TYPE_PRECISION (type)), 0))
2184 (convert @0)))
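
/* Illustrative example (not itself a pattern): with int x,
     (unsigned char)(x & 0xff)  ->  (unsigned char)x
   since the mask only clears bits the narrowing conversion discards anyway.  */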
2185
2186
2187 /* (X /[ex] A) * A -> X. */
2188 (simplify
2189 (mult (convert1? (exact_div @0 @@1)) (convert2? @1))
2190 (convert @0))
2191
2192 /* Canonicalization of binary operations. */
2193
2194 /* Convert X + -C into X - C. */
2195 (simplify
2196 (plus @0 REAL_CST@1)
2197 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
2198 (with { tree tem = const_unop (NEGATE_EXPR, type, @1); }
2199 (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
2200 (minus @0 { tem; })))))
2201
2202 /* Convert x+x into x*2. */
2203 (simplify
2204 (plus @0 @0)
2205 (if (SCALAR_FLOAT_TYPE_P (type))
2206 (mult @0 { build_real (type, dconst2); })
2207 (if (INTEGRAL_TYPE_P (type))
2208 (mult @0 { build_int_cst (type, 2); }))))
2209
2210 (simplify
2211 (minus integer_zerop @1)
2212 (negate @1))
2213
2214 /* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether
2215 ARG0 is zero and X + ARG0 reduces to X, since that would mean
2216 (-ARG1 + ARG0) reduces to -ARG1. */
2217 (simplify
2218 (minus real_zerop@0 @1)
2219 (if (fold_real_zero_addition_p (type, @0, 0))
2220 (negate @1)))
2221
2222 /* Transform x * -1 into -x. */
2223 (simplify
2224 (mult @0 integer_minus_onep)
2225 (negate @0))
2226
2227 /* Reassociate (X * CST) * Y to (X * Y) * CST. This does not introduce
2228 signed overflow for CST != 0 && CST != -1. */
2229 (simplify
2230 (mult:c (mult:s @0 INTEGER_CST@1) @2)
2231 (if (TREE_CODE (@2) != INTEGER_CST
2232 && !integer_zerop (@1) && !integer_minus_onep (@1))
2233 (mult (mult @0 @2) @1)))
2234
2235 /* True if we can easily extract the real and imaginary parts of a complex
2236 number. */
2237 (match compositional_complex
2238 (convert? (complex @0 @1)))
2239
2240 /* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */
2241 (simplify
2242 (complex (realpart @0) (imagpart @0))
2243 @0)
2244 (simplify
2245 (realpart (complex @0 @1))
2246 @0)
2247 (simplify
2248 (imagpart (complex @0 @1))
2249 @1)
2250
2251 /* Sometimes we only care about half of a complex expression. */
2252 (simplify
2253 (realpart (convert?:s (conj:s @0)))
2254 (convert (realpart @0)))
2255 (simplify
2256 (imagpart (convert?:s (conj:s @0)))
2257 (convert (negate (imagpart @0))))
2258 (for part (realpart imagpart)
2259 (for op (plus minus)
2260 (simplify
2261 (part (convert?:s@2 (op:s @0 @1)))
2262 (convert (op (part @0) (part @1))))))
2263 (simplify
2264 (realpart (convert?:s (CEXPI:s @0)))
2265 (convert (COS @0)))
2266 (simplify
2267 (imagpart (convert?:s (CEXPI:s @0)))
2268 (convert (SIN @0)))
2269
2270 /* conj(conj(x)) -> x */
2271 (simplify
2272 (conj (convert? (conj @0)))
2273 (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
2274 (convert @0)))
2275
2276 /* conj({x,y}) -> {x,-y} */
2277 (simplify
2278 (conj (convert?:s (complex:s @0 @1)))
2279 (with { tree itype = TREE_TYPE (type); }
2280 (complex (convert:itype @0) (negate (convert:itype @1)))))
2281
2282 /* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. */
2283 (for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
2284 (simplify
2285 (bswap (bswap @0))
2286 @0)
2287 (simplify
2288 (bswap (bit_not (bswap @0)))
2289 (bit_not @0))
2290 (for bitop (bit_xor bit_ior bit_and)
2291 (simplify
2292 (bswap (bitop:c (bswap @0) @1))
2293 (bitop @0 (bswap @1)))))
2294
2295
2296 /* Combine COND_EXPRs and VEC_COND_EXPRs. */
2297
2298 /* Simplify constant conditions.
2299 Only optimize constant conditions when the selected branch
2300 has the same type as the COND_EXPR. This avoids optimizing
2301 away "c ? x : throw", where the throw has a void type.
2302 Note that we cannot throw away the fold-const.c variant nor
2303 this one as we depend on doing this transform before possibly
2304 A ? B : B -> B triggers and the fold-const.c one can optimize
2305 0 ? A : B to B even if A has side-effects, something
2306 genmatch cannot handle. */
2307 (simplify
2308 (cond INTEGER_CST@0 @1 @2)
2309 (if (integer_zerop (@0))
2310 (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
2311 @2)
2312 (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
2313 @1)))
2314 (simplify
2315 (vec_cond VECTOR_CST@0 @1 @2)
2316 (if (integer_all_onesp (@0))
2317 @1
2318 (if (integer_zerop (@0))
2319 @2)))
2320
2321 /* Simplification moved from fold_cond_expr_with_comparison. It may also
2322 be extended. */
2323 /* This pattern implements two kinds of simplification:
2324
2325 Case 1)
2326 (cond (cmp (convert1? x) c1) (convert2? x) c2) -> (minmax (x c)) if:
2327 1) Conversions are type widening from a smaller type.
2328 2) Const c1 equals c2 after canonicalizing the comparison.
2329 3) Comparison has tree code LT, LE, GT or GE.
2330 This specific pattern is needed when (cmp (convert x) c) may not
2331 be simplified by comparison patterns because of multiple uses of
2332 x. It also makes sense here because simplifying across multiple
2333 referred variables is always beneficial for complicated cases.
2334
2335 Case 2)
2336 (cond (eq (convert1? x) c1) (convert2? x) c2) -> (cond (eq x c1) c1 c2). */
2337 (for cmp (lt le gt ge eq)
2338 (simplify
2339 (cond (cmp (convert1? @1) INTEGER_CST@3) (convert2? @1) INTEGER_CST@2)
2340 (with
2341 {
2342 tree from_type = TREE_TYPE (@1);
2343 tree c1_type = TREE_TYPE (@3), c2_type = TREE_TYPE (@2);
2344 enum tree_code code = ERROR_MARK;
2345
2346 if (INTEGRAL_TYPE_P (from_type)
2347 && int_fits_type_p (@2, from_type)
2348 && (types_match (c1_type, from_type)
2349 || (TYPE_PRECISION (c1_type) > TYPE_PRECISION (from_type)
2350 && (TYPE_UNSIGNED (from_type)
2351 || TYPE_SIGN (c1_type) == TYPE_SIGN (from_type))))
2352 && (types_match (c2_type, from_type)
2353 || (TYPE_PRECISION (c2_type) > TYPE_PRECISION (from_type)
2354 && (TYPE_UNSIGNED (from_type)
2355 || TYPE_SIGN (c2_type) == TYPE_SIGN (from_type)))))
2356 {
2357 if (cmp != EQ_EXPR)
2358 {
2359 if (wi::to_widest (@3) == (wi::to_widest (@2) - 1))
2360 {
2361 /* X <= Y - 1 equals to X < Y. */
2362 if (cmp == LE_EXPR)
2363 code = LT_EXPR;
2364 /* X > Y - 1 equals to X >= Y. */
2365 if (cmp == GT_EXPR)
2366 code = GE_EXPR;
2367 }
2368 if (wi::to_widest (@3) == (wi::to_widest (@2) + 1))
2369 {
2370 /* X < Y + 1 equals to X <= Y. */
2371 if (cmp == LT_EXPR)
2372 code = LE_EXPR;
2373 /* X >= Y + 1 equals to X > Y. */
2374 if (cmp == GE_EXPR)
2375 code = GT_EXPR;
2376 }
2377 if (code != ERROR_MARK
2378 || wi::to_widest (@2) == wi::to_widest (@3))
2379 {
2380 if (cmp == LT_EXPR || cmp == LE_EXPR)
2381 code = MIN_EXPR;
2382 if (cmp == GT_EXPR || cmp == GE_EXPR)
2383 code = MAX_EXPR;
2384 }
2385 }
2386 /* Can do A == C1 ? A : C2 -> A == C1 ? C1 : C2? */
2387 else if (int_fits_type_p (@3, from_type))
2388 code = EQ_EXPR;
2389 }
2390 }
2391 (if (code == MAX_EXPR)
2392 (convert (max @1 (convert @2)))
2393 (if (code == MIN_EXPR)
2394 (convert (min @1 (convert @2)))
2395 (if (code == EQ_EXPR)
2396 (convert (cond (eq @1 (convert @3))
2397 (convert:from_type @3) (convert:from_type @2)))))))))
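
/* Illustrative examples (not themselves patterns): with int x,
     x < 3 ? x : 3    ->  MIN (x, 3)
     x <= 2 ? x : 3   ->  MIN (x, 3)        (c1 = c2 - 1, LE becomes LT)
     x == 4 ? x : 10  ->  x == 4 ? 4 : 10   (case 2)  */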
2398
2399 /* (cond (cmp (convert? x) c1) (op x c2) c3) -> (op (minmax x c1) c2) if:
2400
2401 1) OP is PLUS or MINUS.
2402 2) CMP is LT, LE, GT or GE.
2403 3) C3 == (C1 op C2), and computation doesn't have undefined behavior.
2404
2405 This pattern also handles special cases like:
2406
2407 A) Operand x is an unsigned-to-signed type conversion and c1 is
2408 integer zero. In this case,
2409 (signed type)x < 0 <=> x > MAX_VAL(signed type)
2410 (signed type)x >= 0 <=> x <= MAX_VAL(signed type)
2411 B) Const c1 may not equal (C3 op' C2). In this case we also
2412 check equality for (c1+1) and (c1-1) by adjusting comparison
2413 code.
2414
2415 TODO: Though the signed type is handled by this pattern, it cannot be
2416 simplified at the moment because the C standard requires additional
2417 type promotion. In order to match & simplify it here, the IR needs
2418 to be cleaned up by other optimizers, e.g., VRP. */
2419 (for op (plus minus)
2420 (for cmp (lt le gt ge)
2421 (simplify
2422 (cond (cmp (convert? @X) INTEGER_CST@1) (op @X INTEGER_CST@2) INTEGER_CST@3)
2423 (with { tree from_type = TREE_TYPE (@X), to_type = TREE_TYPE (@1); }
2424 (if (types_match (from_type, to_type)
2425 /* Check if it is special case A). */
2426 || (TYPE_UNSIGNED (from_type)
2427 && !TYPE_UNSIGNED (to_type)
2428 && TYPE_PRECISION (from_type) == TYPE_PRECISION (to_type)
2429 && integer_zerop (@1)
2430 && (cmp == LT_EXPR || cmp == GE_EXPR)))
2431 (with
2432 {
2433 bool overflow = false;
2434 enum tree_code code, cmp_code = cmp;
2435 wide_int real_c1, c1 = @1, c2 = @2, c3 = @3;
2436 signop sgn = TYPE_SIGN (from_type);
2437
2438 /* Handle special case A), given x of unsigned type:
2439 ((signed type)x < 0) <=> (x > MAX_VAL(signed type))
2440 ((signed type)x >= 0) <=> (x <= MAX_VAL(signed type)) */
2441 if (!types_match (from_type, to_type))
2442 {
2443 if (cmp_code == LT_EXPR)
2444 cmp_code = GT_EXPR;
2445 if (cmp_code == GE_EXPR)
2446 cmp_code = LE_EXPR;
2447 c1 = wi::max_value (to_type);
2448 }
2449 /* To simplify this pattern, we require c3 = (c1 op c2). Here we
2450 compute (c3 op' c2) and check if it equals c1, with op' being
2451 the inverted operator of op. Make sure overflow doesn't happen
2452 if it is undefined. */
2453 if (op == PLUS_EXPR)
2454 real_c1 = wi::sub (c3, c2, sgn, &overflow);
2455 else
2456 real_c1 = wi::add (c3, c2, sgn, &overflow);
2457
2458 code = cmp_code;
2459 if (!overflow || !TYPE_OVERFLOW_UNDEFINED (from_type))
2460 {
2461 /* Check if c1 equals real_c1. The boundary condition is handled
2462 by adjusting the comparison operation if necessary. */
2463 if (!wi::cmp (wi::sub (real_c1, 1, sgn, &overflow), c1, sgn)
2464 && !overflow)
2465 {
2466 /* X <= Y - 1 equals to X < Y. */
2467 if (cmp_code == LE_EXPR)
2468 code = LT_EXPR;
2469 /* X > Y - 1 equals to X >= Y. */
2470 if (cmp_code == GT_EXPR)
2471 code = GE_EXPR;
2472 }
2473 if (!wi::cmp (wi::add (real_c1, 1, sgn, &overflow), c1, sgn)
2474 && !overflow)
2475 {
2476 /* X < Y + 1 equals to X <= Y. */
2477 if (cmp_code == LT_EXPR)
2478 code = LE_EXPR;
2479 /* X >= Y + 1 equals to X > Y. */
2480 if (cmp_code == GE_EXPR)
2481 code = GT_EXPR;
2482 }
2483 if (code != cmp_code || !wi::cmp (real_c1, c1, sgn))
2484 {
2485 if (cmp_code == LT_EXPR || cmp_code == LE_EXPR)
2486 code = MIN_EXPR;
2487 if (cmp_code == GT_EXPR || cmp_code == GE_EXPR)
2488 code = MAX_EXPR;
2489 }
2490 }
2491 }
2492 (if (code == MAX_EXPR)
2493 (op (max @X { wide_int_to_tree (from_type, real_c1); })
2494 { wide_int_to_tree (from_type, c2); })
2495 (if (code == MIN_EXPR)
2496 (op (min @X { wide_int_to_tree (from_type, real_c1); })
2497 { wide_int_to_tree (from_type, c2); })))))))))
2498
2499 (for cnd (cond vec_cond)
2500 /* A ? B : (A ? X : C) -> A ? B : C. */
2501 (simplify
2502 (cnd @0 (cnd @0 @1 @2) @3)
2503 (cnd @0 @1 @3))
2504 (simplify
2505 (cnd @0 @1 (cnd @0 @2 @3))
2506 (cnd @0 @1 @3))
2507 /* A ? B : (!A ? C : X) -> A ? B : C. */
2508 /* ??? This matches embedded conditions open-coded because genmatch
2509 would generate matching code for conditions in separate stmts only.
2510 The following is still important to merge then and else arm cases
2511 from if-conversion. */
2512 (simplify
2513 (cnd @0 @1 (cnd @2 @3 @4))
2514 (if (COMPARISON_CLASS_P (@0)
2515 && COMPARISON_CLASS_P (@2)
2516 && invert_tree_comparison
2517 (TREE_CODE (@0), HONOR_NANS (TREE_OPERAND (@0, 0))) == TREE_CODE (@2)
2518 && operand_equal_p (TREE_OPERAND (@0, 0), TREE_OPERAND (@2, 0), 0)
2519 && operand_equal_p (TREE_OPERAND (@0, 1), TREE_OPERAND (@2, 1), 0))
2520 (cnd @0 @1 @3)))
2521 (simplify
2522 (cnd @0 (cnd @1 @2 @3) @4)
2523 (if (COMPARISON_CLASS_P (@0)
2524 && COMPARISON_CLASS_P (@1)
2525 && invert_tree_comparison
2526 (TREE_CODE (@0), HONOR_NANS (TREE_OPERAND (@0, 0))) == TREE_CODE (@1)
2527 && operand_equal_p (TREE_OPERAND (@0, 0), TREE_OPERAND (@1, 0), 0)
2528 && operand_equal_p (TREE_OPERAND (@0, 1), TREE_OPERAND (@1, 1), 0))
2529 (cnd @0 @3 @4)))
2530
2531 /* A ? B : B -> B. */
2532 (simplify
2533 (cnd @0 @1 @1)
2534 @1)
2535
2536 /* !A ? B : C -> A ? C : B. */
2537 (simplify
2538 (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
2539 (cnd @0 @2 @1)))
2540
2541 /* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons
2542 return all -1 or all 0 results. */
2543 /* ??? We could instead convert all instances of the vec_cond to negate,
2544 but that isn't necessarily a win on its own. */
2545 (simplify
2546 (plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
2547 (if (VECTOR_TYPE_P (type)
2548 && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1))
2549 && (TYPE_MODE (TREE_TYPE (type))
2550 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
2551 (minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
2552
2553 /* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0). */
2554 (simplify
2555 (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
2556 (if (VECTOR_TYPE_P (type)
2557 && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1))
2558 && (TYPE_MODE (TREE_TYPE (type))
2559 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
2560 (plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
2561
2562
2563 /* Simplifications of comparisons. */
2564
2565 /* See if we can reduce the magnitude of a constant involved in a
2566 comparison by changing the comparison code. This is a canonicalization
2567 formerly done by maybe_canonicalize_comparison_1. */
2568 (for cmp (le gt)
2569 acmp (lt ge)
2570 (simplify
2571 (cmp @0 INTEGER_CST@1)
2572 (if (tree_int_cst_sgn (@1) == -1)
2573 (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))))
2574 (for cmp (ge lt)
2575 acmp (gt le)
2576 (simplify
2577 (cmp @0 INTEGER_CST@1)
2578 (if (tree_int_cst_sgn (@1) == 1)
2579 (acmp @0 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))))
2580
2581
2582 /* We can simplify a logical negation of a comparison to the
2583 inverted comparison. As we cannot compute an expression
2584 operator using invert_tree_comparison we have to simulate
2585 that with expression code iteration. */
2586 (for cmp (tcc_comparison)
2587 icmp (inverted_tcc_comparison)
2588 ncmp (inverted_tcc_comparison_with_nans)
2589 /* Ideally we'd like to combine the following two patterns
2590 and handle some more cases by using
2591 (logical_inverted_value (cmp @0 @1))
2592 here but for that genmatch would need to "inline" that.
2593 For now implement what forward_propagate_comparison did. */
2594 (simplify
2595 (bit_not (cmp @0 @1))
2596 (if (VECTOR_TYPE_P (type)
2597 || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
2598 /* Comparison inversion may be impossible for trapping math,
2599 invert_tree_comparison will tell us. But we can't use
2600 a computed operator in the replacement tree thus we have
2601 to play the trick below. */
2602 (with { enum tree_code ic = invert_tree_comparison
2603 (cmp, HONOR_NANS (@0)); }
2604 (if (ic == icmp)
2605 (icmp @0 @1)
2606 (if (ic == ncmp)
2607 (ncmp @0 @1))))))
2608 (simplify
2609 (bit_xor (cmp @0 @1) integer_truep)
2610 (with { enum tree_code ic = invert_tree_comparison
2611 (cmp, HONOR_NANS (@0)); }
2612 (if (ic == icmp)
2613 (icmp @0 @1)
2614 (if (ic == ncmp)
2615 (ncmp @0 @1))))))
2616
2617 /* Transform comparisons of the form X - Y CMP 0 to X CMP Y.
2618 ??? The transformation is valid for the other operators if overflow
2619 is undefined for the type, but performing it here badly interacts
2620 with the transformation in fold_cond_expr_with_comparison which
2621 attempts to synthesize ABS_EXPR. */
2622 (for cmp (eq ne)
2623 (simplify
2624 (cmp (minus@2 @0 @1) integer_zerop)
2625 (if (single_use (@2))
2626 (cmp @0 @1))))
2627
2628 /* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the
2629 signed arithmetic case. That form is created by the compiler
2630 often enough for folding it to be of value. One example is in
2631 computing loop trip counts after Operator Strength Reduction. */
2632 (for cmp (simple_comparison)
2633 scmp (swapped_simple_comparison)
2634 (simplify
2635 (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2)
2636 /* Handle unfolded multiplication by zero. */
2637 (if (integer_zerop (@1))
2638 (cmp @1 @2)
2639 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2640 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2641 && single_use (@3))
2642 /* If @1 is negative we swap the sense of the comparison. */
2643 (if (tree_int_cst_sgn (@1) < 0)
2644 (scmp @0 @2)
2645 (cmp @0 @2))))))
2646
2647 /* Simplify comparison of something with itself. For IEEE
2648 floating-point, we can only do some of these simplifications. */
2649 (for cmp (eq ge le)
2650 (simplify
2651 (cmp @0 @0)
2652 (if (! FLOAT_TYPE_P (TREE_TYPE (@0))
2653 || ! HONOR_NANS (@0))
2654 { constant_boolean_node (true, type); }
2655 (if (cmp != EQ_EXPR)
2656 (eq @0 @0)))))
2657 (for cmp (ne gt lt)
2658 (simplify
2659 (cmp @0 @0)
2660 (if (cmp != NE_EXPR
2661 || ! FLOAT_TYPE_P (TREE_TYPE (@0))
2662 || ! HONOR_NANS (@0))
2663 { constant_boolean_node (false, type); })))
2664 (for cmp (unle unge uneq)
2665 (simplify
2666 (cmp @0 @0)
2667 { constant_boolean_node (true, type); }))
2668 (for cmp (unlt ungt)
2669 (simplify
2670 (cmp @0 @0)
2671 (unordered @0 @0)))
2672 (simplify
2673 (ltgt @0 @0)
2674 (if (!flag_trapping_math)
2675 { constant_boolean_node (false, type); }))
2676
2677 /* Fold ~X op ~Y as Y op X. */
2678 (for cmp (simple_comparison)
2679 (simplify
2680 (cmp (bit_not@2 @0) (bit_not@3 @1))
2681 (if (single_use (@2) && single_use (@3))
2682 (cmp @1 @0))))
2683
2684 /* Fold ~X op C as X op' ~C, where op' is the swapped comparison. */
2685 (for cmp (simple_comparison)
2686 scmp (swapped_simple_comparison)
2687 (simplify
2688 (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1)
2689 (if (single_use (@2)
2690 && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST))
2691 (scmp @0 (bit_not @1)))))
2692
2693 (for cmp (simple_comparison)
2694 /* Fold (double)float1 CMP (double)float2 into float1 CMP float2. */
2695 (simplify
2696 (cmp (convert@2 @0) (convert? @1))
2697 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2698 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
2699 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
2700 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
2701 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))))
2702 (with
2703 {
2704 tree type1 = TREE_TYPE (@1);
2705 if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
2706 {
2707 REAL_VALUE_TYPE orig = TREE_REAL_CST (@1);
2708 if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
2709 && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
2710 type1 = float_type_node;
2711 if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
2712 && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
2713 type1 = double_type_node;
2714 }
2715 tree newtype
2716 = (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1)
2717 ? TREE_TYPE (@0) : type1);
2718 }
2719 (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype))
2720 (cmp (convert:newtype @0) (convert:newtype @1))))))
2721
2722 (simplify
2723 (cmp @0 REAL_CST@1)
2724 /* IEEE doesn't distinguish +0 and -0 in comparisons. */
2725 (switch
2726 /* a CMP (-0) -> a CMP 0 */
2727 (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
2728 (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
2729 /* x != NaN is always true, other ops are always false. */
2730 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
2731 && ! HONOR_SNANS (@1))
2732 { constant_boolean_node (cmp == NE_EXPR, type); })
2733 /* Fold comparisons against infinity. */
2734 (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
2735 && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
2736 (with
2737 {
2738 REAL_VALUE_TYPE max;
2739 enum tree_code code = cmp;
2740 bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
2741 if (neg)
2742 code = swap_tree_comparison (code);
2743 }
2744 (switch
2745 /* x > +Inf is always false, if we ignore sNaNs. */
2746 (if (code == GT_EXPR
2747 && ! HONOR_SNANS (@0))
2748 { constant_boolean_node (false, type); })
2749 (if (code == LE_EXPR)
2750 /* x <= +Inf is always true, if we don't care about NaNs. */
2751 (if (! HONOR_NANS (@0))
2752 { constant_boolean_node (true, type); }
2753 /* x <= +Inf is the same as x == x, i.e. !isnan(x). */
2754 (eq @0 @0)))
2755 /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX. */
2756 (if (code == EQ_EXPR || code == GE_EXPR)
2757 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
2758 (if (neg)
2759 (lt @0 { build_real (TREE_TYPE (@0), max); })
2760 (gt @0 { build_real (TREE_TYPE (@0), max); }))))
2761 /* x < +Inf is always equal to x <= DBL_MAX. */
2762 (if (code == LT_EXPR)
2763 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
2764 (if (neg)
2765 (ge @0 { build_real (TREE_TYPE (@0), max); })
2766 (le @0 { build_real (TREE_TYPE (@0), max); }))))
2767 /* x != +Inf is always equal to !(x > DBL_MAX). */
2768 (if (code == NE_EXPR)
2769 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
2770 (if (! HONOR_NANS (@0))
2771 (if (neg)
2772 (ge @0 { build_real (TREE_TYPE (@0), max); })
2773 (le @0 { build_real (TREE_TYPE (@0), max); }))
2774 (if (neg)
2775 (bit_xor (lt @0 { build_real (TREE_TYPE (@0), max); })
2776 { build_one_cst (type); })
2777 (bit_xor (gt @0 { build_real (TREE_TYPE (@0), max); })
2778 { build_one_cst (type); }))))))))))
2779
2780 /* If this is a comparison of a real constant with a PLUS_EXPR
2781 or a MINUS_EXPR of a real constant, we can convert it into a
2782 comparison with a revised real constant as long as no overflow
2783 occurs when unsafe_math_optimizations are enabled. */
2784 (if (flag_unsafe_math_optimizations)
2785 (for op (plus minus)
2786 (simplify
2787 (cmp (op @0 REAL_CST@1) REAL_CST@2)
2788 (with
2789 {
2790 tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
2791 TREE_TYPE (@1), @2, @1);
2792 }
2793 (if (tem && !TREE_OVERFLOW (tem))
2794 (cmp @0 { tem; }))))))
2795
2796 /* Likewise, we can simplify a comparison of a real constant with
2797 a MINUS_EXPR whose first operand is also a real constant, i.e.
2798 (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
2799 floating-point types only if -fassociative-math is set. */
2800 (if (flag_associative_math)
2801 (simplify
2802 (cmp (minus REAL_CST@0 @1) REAL_CST@2)
2803 (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
2804 (if (tem && !TREE_OVERFLOW (tem))
2805 (cmp { tem; } @1)))))
2806
2807 /* Fold comparisons against built-in math functions. */
2808 (if (flag_unsafe_math_optimizations
2809 && ! flag_errno_math)
2810 (for sq (SQRT)
2811 (simplify
2812 (cmp (sq @0) REAL_CST@1)
2813 (switch
2814 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
2815 (switch
2816 /* sqrt(x) ==, < or <= y is always false, if y is negative. */
2817 (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
2818 { constant_boolean_node (false, type); })
2819 /* sqrt(x) > y is always true, if y is negative and we
2820 don't care about NaNs, i.e. negative values of x. */
2821 (if (cmp == NE_EXPR || !HONOR_NANS (@0))
2822 { constant_boolean_node (true, type); })
2823 /* sqrt(x) > y is the same as x >= 0, if y is negative. */
2824 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
2825 (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0))
2826 (switch
2827 /* sqrt(x) < 0 is always false. */
2828 (if (cmp == LT_EXPR)
2829 { constant_boolean_node (false, type); })
2830 /* sqrt(x) >= 0 is always true if we don't care about NaNs. */
2831 (if (cmp == GE_EXPR && !HONOR_NANS (@0))
2832 { constant_boolean_node (true, type); })
2833 /* sqrt(x) <= 0 -> x == 0. */
2834 (if (cmp == LE_EXPR)
2835 (eq @0 @1))
2836 /* Otherwise sqrt(x) cmp 0 -> x cmp 0. Here cmp can be >=, >,
2837 == or !=. In the last case:
2838
2839 (sqrt(x) != 0) == (NaN != 0) == true == (x != 0)
2840
2841 if x is negative or NaN. Due to -funsafe-math-optimizations,
2842 the results for other x follow from natural arithmetic. */
2843 (cmp @0 @1)))
2844 (if (cmp == GT_EXPR || cmp == GE_EXPR)
2845 (with
2846 {
2847 REAL_VALUE_TYPE c2;
2848 real_arithmetic (&c2, MULT_EXPR,
2849 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
2850 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
2851 }
2852 (if (REAL_VALUE_ISINF (c2))
2853 /* sqrt(x) > y is x == +Inf, when y is very large. */
2854 (if (HONOR_INFINITIES (@0))
2855 (eq @0 { build_real (TREE_TYPE (@0), c2); })
2856 { constant_boolean_node (false, type); })
2857 /* sqrt(x) > c is the same as x > c*c. */
2858 (cmp @0 { build_real (TREE_TYPE (@0), c2); }))))
2859 (if (cmp == LT_EXPR || cmp == LE_EXPR)
2860 (with
2861 {
2862 REAL_VALUE_TYPE c2;
2863 real_arithmetic (&c2, MULT_EXPR,
2864 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
2865 real_convert (&c2, TYPE_MODE (TREE_TYPE (@0)), &c2);
2866 }
2867 (if (REAL_VALUE_ISINF (c2))
2868 (switch
2869 /* sqrt(x) < y is always true, when y is a very large
2870 value and we don't care about NaNs or Infinities. */
2871 (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
2872 { constant_boolean_node (true, type); })
2873 /* sqrt(x) < y is x != +Inf when y is very large and we
2874 don't care about NaNs. */
2875 (if (! HONOR_NANS (@0))
2876 (ne @0 { build_real (TREE_TYPE (@0), c2); }))
2877 /* sqrt(x) < y is x >= 0 when y is very large and we
2878 don't care about Infinities. */
2879 (if (! HONOR_INFINITIES (@0))
2880 (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
2881 /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */
2882 (if (GENERIC)
2883 (truth_andif
2884 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
2885 (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
2886 /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */
2887 (if (! HONOR_NANS (@0))
2888 (cmp @0 { build_real (TREE_TYPE (@0), c2); })
2889 /* sqrt(x) < c is the same as x >= 0 && x < c*c. */
2890 (if (GENERIC)
2891 (truth_andif
2892 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
2893 (cmp @0 { build_real (TREE_TYPE (@0), c2); })))))))))
2894 /* Transform sqrt(x) cmp sqrt(y) -> x cmp y. */
2895 (simplify
2896 (cmp (sq @0) (sq @1))
2897 (if (! HONOR_NANS (@0))
2898 (cmp @0 @1))))))
2899
2900 /* Optimize various special cases of (FTYPE) N CMP CST. */
2901 (for cmp (lt le eq ne ge gt)
2902 icmp (le le eq ne ge ge)
2903 (simplify
2904 (cmp (float @0) REAL_CST@1)
2905 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@1))
2906 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1)))
2907 (with
2908 {
2909 tree itype = TREE_TYPE (@0);
2910 signop isign = TYPE_SIGN (itype);
2911 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@1))));
2912 const REAL_VALUE_TYPE *cst = TREE_REAL_CST_PTR (@1);
2913 /* Be careful to preserve any potential exceptions due to
2914 NaNs. qNaNs are ok in == or != context.
2915 TODO: relax under -fno-trapping-math or
2916 -fno-signaling-nans. */
2917 bool exception_p
2918 = real_isnan (cst) && (cst->signalling
2919 || (cmp != EQ_EXPR && cmp != NE_EXPR));
2920 /* INT?_MIN is a power of two, so it takes
2921 only one mantissa bit. */
2922 bool signed_p = isign == SIGNED;
2923 bool itype_fits_ftype_p
2924 = TYPE_PRECISION (itype) - signed_p <= significand_size (fmt);
2925 }
2926 /* TODO: allow non-fitting itype and SNaNs when
2927 -fno-trapping-math. */
2928 (if (itype_fits_ftype_p && ! exception_p)
2929 (with
2930 {
2931 REAL_VALUE_TYPE imin, imax;
2932 real_from_integer (&imin, fmt, wi::min_value (itype), isign);
2933 real_from_integer (&imax, fmt, wi::max_value (itype), isign);
2934
2935 REAL_VALUE_TYPE icst;
2936 if (cmp == GT_EXPR || cmp == GE_EXPR)
2937 real_ceil (&icst, fmt, cst);
2938 else if (cmp == LT_EXPR || cmp == LE_EXPR)
2939 real_floor (&icst, fmt, cst);
2940 else
2941 real_trunc (&icst, fmt, cst);
2942
2943 bool cst_int_p = real_identical (&icst, cst);
2944
2945 bool overflow_p = false;
2946 wide_int icst_val
2947 = real_to_integer (&icst, &overflow_p, TYPE_PRECISION (itype));
2948 }
2949 (switch
2950 /* Optimize cases when CST is outside of ITYPE's range. */
2951 (if (real_compare (LT_EXPR, cst, &imin))
2952 { constant_boolean_node (cmp == GT_EXPR || cmp == GE_EXPR || cmp == NE_EXPR,
2953 type); })
2954 (if (real_compare (GT_EXPR, cst, &imax))
2955 { constant_boolean_node (cmp == LT_EXPR || cmp == LE_EXPR || cmp == NE_EXPR,
2956 type); })
2957 /* Remove cast if CST is an integer representable by ITYPE. */
2958 (if (cst_int_p)
2959 (cmp @0 { gcc_assert (!overflow_p);
2960 wide_int_to_tree (itype, icst_val); })
2961 )
2962 /* When CST is fractional, optimize
2963 (FTYPE) N == CST -> 0
2964 (FTYPE) N != CST -> 1. */
2965 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
2966 { constant_boolean_node (cmp == NE_EXPR, type); })
2967 /* Otherwise replace with a sensible integer constant. */
2968 (with
2969 {
2970 gcc_checking_assert (!overflow_p);
2971 }
2972 (icmp @0 { wide_int_to_tree (itype, icst_val); })))))))))
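
/* Illustrative examples (not themselves patterns): with int i and an IEEE
   binary double,
     (double)i == 0.5  ->  false      (0.5 is not an integer)
     (double)i < 2.5   ->  i <= 2     (constant floored, comparison adjusted)
     (double)i > 1e20  ->  false      (constant is above INT_MAX)  */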
2973
2974 /* Fold A /[ex] B CMP C to A CMP B * C. */
2975 (for cmp (eq ne)
2976 (simplify
2977 (cmp (exact_div @0 @1) INTEGER_CST@2)
2978 (if (!integer_zerop (@1))
2979 (if (wi::eq_p (@2, 0))
2980 (cmp @0 @2)
2981 (if (TREE_CODE (@1) == INTEGER_CST)
2982 (with
2983 {
2984 bool ovf;
2985 wide_int prod = wi::mul (@2, @1, TYPE_SIGN (TREE_TYPE (@1)), &ovf);
2986 }
2987 (if (ovf)
2988 { constant_boolean_node (cmp == NE_EXPR, type); }
2989 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))))
2990 (for cmp (lt le gt ge)
2991 (simplify
2992 (cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2)
2993 (if (wi::gt_p (@1, 0, TYPE_SIGN (TREE_TYPE (@1))))
2994 (with
2995 {
2996 bool ovf;
2997 wide_int prod = wi::mul (@2, @1, TYPE_SIGN (TREE_TYPE (@1)), &ovf);
2998 }
2999 (if (ovf)
3000 { constant_boolean_node (wi::lt_p (@2, 0, TYPE_SIGN (TREE_TYPE (@2)))
3001 != (cmp == LT_EXPR || cmp == LE_EXPR), type); }
3002 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))
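
/* Illustrative example (not itself a pattern): with int x known to be
   exactly divisible by 4,
     x /[ex] 4 == 5  ->  x == 20
     x /[ex] 4 > 5   ->  x > 20  */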
3003
3004 /* Unordered tests if either argument is a NaN. */
3005 (simplify
3006 (bit_ior (unordered @0 @0) (unordered @1 @1))
3007 (if (types_match (@0, @1))
3008 (unordered @0 @1)))
3009 (simplify
3010 (bit_and (ordered @0 @0) (ordered @1 @1))
3011 (if (types_match (@0, @1))
3012 (ordered @0 @1)))
3013 (simplify
3014 (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1))
3015 @2)
3016 (simplify
3017 (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1))
3018 @2)
3019
3020 /* Simple range test simplifications. */
3021 /* A < B || A >= B -> true. */
3022 (for test1 (lt le le le ne ge)
3023 test2 (ge gt ge ne eq ne)
3024 (simplify
3025 (bit_ior:c (test1 @0 @1) (test2 @0 @1))
3026 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3027 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
3028 { constant_boolean_node (true, type); })))
3029 /* A < B && A >= B -> false. */
3030 (for test1 (lt lt lt le ne eq)
3031 test2 (ge gt eq gt eq gt)
3032 (simplify
3033 (bit_and:c (test1 @0 @1) (test2 @0 @1))
3034 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3035 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
3036 { constant_boolean_node (false, type); })))
3037
3038 /* A & (2**N - 1) <= 2**K - 1 -> A & (2**N - 2**K) == 0
3039 A & (2**N - 1) > 2**K - 1 -> A & (2**N - 2**K) != 0
3040
3041 Note that comparisons
3042 A & (2**N - 1) < 2**K -> A & (2**N - 2**K) == 0
3043 A & (2**N - 1) >= 2**K -> A & (2**N - 2**K) != 0
3044 will be canonicalized to above so there's no need to
3045 consider them here.
3046 */
3047
3048 (for cmp (le gt)
3049 eqcmp (eq ne)
3050 (simplify
3051 (cmp (bit_and@0 @1 INTEGER_CST@2) INTEGER_CST@3)
3052 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
3053 (with
3054 {
3055 tree ty = TREE_TYPE (@0);
3056 unsigned prec = TYPE_PRECISION (ty);
3057 wide_int mask = wi::to_wide (@2, prec);
3058 wide_int rhs = wi::to_wide (@3, prec);
3059 signop sgn = TYPE_SIGN (ty);
3060 }
3061 (if ((mask & (mask + 1)) == 0 && wi::gt_p (rhs, 0, sgn)
3062 && (rhs & (rhs + 1)) == 0 && wi::ge_p (mask, rhs, sgn))
3063 (eqcmp (bit_and @1 { wide_int_to_tree (ty, mask - rhs); })
3064 { build_zero_cst (ty); }))))))
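
/* Illustrative example (not itself a pattern): with unsigned x,
     (x & 15) <= 3  ->  (x & 12) == 0       (N = 4, K = 2, 15 - 3 = 12)
     (x & 15) > 3   ->  (x & 12) != 0  */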
3065
3066 /* -A CMP -B -> B CMP A. */
3067 (for cmp (tcc_comparison)
3068 scmp (swapped_tcc_comparison)
3069 (simplify
3070 (cmp (negate @0) (negate @1))
3071 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3072 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3073 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
3074 (scmp @0 @1)))
3075 (simplify
3076 (cmp (negate @0) CONSTANT_CLASS_P@1)
3077 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3078 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3079 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
3080 (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); }
3081 (if (tem && !TREE_OVERFLOW (tem))
3082 (scmp @0 { tem; }))))))
3083
3084 /* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0. */
3085 (for op (eq ne)
3086 (simplify
3087 (op (abs @0) zerop@1)
3088 (op @0 @1)))
3089
3090 /* From fold_sign_changed_comparison and fold_widened_comparison. */
3091 (for cmp (simple_comparison)
3092 (simplify
3093 (cmp (convert@0 @00) (convert?@1 @10))
3094 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3095 /* Disable this optimization if we're casting a function pointer
3096 type on targets that require function pointer canonicalization. */
3097 && !(targetm.have_canonicalize_funcptr_for_compare ()
3098 && TREE_CODE (TREE_TYPE (@00)) == POINTER_TYPE
3099 && TREE_CODE (TREE_TYPE (TREE_TYPE (@00))) == FUNCTION_TYPE)
3100 && single_use (@0))
3101 (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
3102 && (TREE_CODE (@10) == INTEGER_CST
3103 || (@1 != @10 && types_match (TREE_TYPE (@10), TREE_TYPE (@00))))
3104 && (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
3105 || cmp == NE_EXPR
3106 || cmp == EQ_EXPR)
3107 && (POINTER_TYPE_P (TREE_TYPE (@00)) == POINTER_TYPE_P (TREE_TYPE (@0))))
3108 /* ??? The special-casing of INTEGER_CST conversion was in the original
3109 code and is here to avoid a spurious overflow flag on the resulting
3110 constant which fold_convert produces. */
3111 (if (TREE_CODE (@1) == INTEGER_CST)
3112 (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
3113 TREE_OVERFLOW (@1)); })
3114 (cmp @00 (convert @1)))
3115
3116 (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
3117 /* If possible, express the comparison in the shorter mode. */
3118 (if ((cmp == EQ_EXPR || cmp == NE_EXPR
3119 || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00))
3120 || (!TYPE_UNSIGNED (TREE_TYPE (@0))
3121 && TYPE_UNSIGNED (TREE_TYPE (@00))))
3122 && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
3123 || ((TYPE_PRECISION (TREE_TYPE (@00))
3124 >= TYPE_PRECISION (TREE_TYPE (@10)))
3125 && (TYPE_UNSIGNED (TREE_TYPE (@00))
3126 == TYPE_UNSIGNED (TREE_TYPE (@10))))
3127 || (TREE_CODE (@10) == INTEGER_CST
3128 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
3129 && int_fits_type_p (@10, TREE_TYPE (@00)))))
3130 (cmp @00 (convert @10))
3131 (if (TREE_CODE (@10) == INTEGER_CST
3132 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
3133 && !int_fits_type_p (@10, TREE_TYPE (@00)))
3134 (with
3135 {
3136 tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
3137 tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
3138 bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
3139 bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
3140 }
3141 (if (above || below)
3142 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
3143 { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
3144 (if (cmp == LT_EXPR || cmp == LE_EXPR)
3145 { constant_boolean_node (above ? true : false, type); }
3146 (if (cmp == GT_EXPR || cmp == GE_EXPR)
3147 { constant_boolean_node (above ? false : true, type); }))))))))))))
3148
3149 (for cmp (eq ne)
3150 /* A local variable can never be pointed to by
3151 the default SSA name of an incoming parameter.
3152 SSA names are canonicalized to the second operand position. */
3153 (simplify
3154 (cmp addr@0 SSA_NAME@1)
3155 (if (SSA_NAME_IS_DEFAULT_DEF (@1)
3156 && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL)
3157 (with { tree base = get_base_address (TREE_OPERAND (@0, 0)); }
3158 (if (TREE_CODE (base) == VAR_DECL
3159 && auto_var_in_fn_p (base, current_function_decl))
3160 (if (cmp == NE_EXPR)
3161 { constant_boolean_node (true, type); }
3162 { constant_boolean_node (false, type); }))))))
3163
3164 /* Equality compare simplifications from fold_binary */
3165 (for cmp (eq ne)
3166
3167 /* If we have (A | C) == D where C & ~D != 0, convert this into 0.
3168 Similarly for NE_EXPR. */
3169 (simplify
3170 (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
3171 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
3172 && wi::bit_and_not (@1, @2) != 0)
3173 { constant_boolean_node (cmp == NE_EXPR, type); }))
3174
3175 /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y. */
3176 (simplify
3177 (cmp (bit_xor @0 @1) integer_zerop)
3178 (cmp @0 @1))
3179
3180 /* (X ^ Y) == Y becomes X == 0.
3181 Likewise (X ^ Y) == X becomes Y == 0. */
3182 (simplify
3183 (cmp:c (bit_xor:c @0 @1) @0)
3184 (cmp @1 { build_zero_cst (TREE_TYPE (@1)); }))
3185
3186 /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2). */
3187 (simplify
3188 (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
3189 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
3190 (cmp @0 (bit_xor @1 (convert @2)))))
3191
3192 (simplify
3193 (cmp (convert? addr@0) integer_zerop)
3194 (if (tree_single_nonzero_warnv_p (@0, NULL))
3195 { constant_boolean_node (cmp == NE_EXPR, type); })))
3196
3197 /* If we have (A & C) == C where C is a power of 2, convert this into
3198 (A & C) != 0. Similarly for NE_EXPR. */
3199 (for cmp (eq ne)
3200 icmp (ne eq)
3201 (simplify
3202 (cmp (bit_and@2 @0 integer_pow2p@1) @1)
3203 (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))
3204
3205 /* If we have (A & C) != 0 ? D : 0 where C and D are powers of 2,
3206 convert this into a shift followed by ANDing with D. */
3207 (simplify
3208 (cond
3209 (ne (bit_and @0 integer_pow2p@1) integer_zerop)
3210 integer_pow2p@2 integer_zerop)
3211 (with {
3212 int shift = wi::exact_log2 (@2) - wi::exact_log2 (@1);
3213 }
3214 (if (shift > 0)
3215 (bit_and
3216 (lshift (convert @0) { build_int_cst (integer_type_node, shift); }) @2)
3217 (bit_and
3218 (convert (rshift @0 { build_int_cst (integer_type_node, -shift); })) @2))))
3219
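/* Illustrative sketch (editor's addition, not part of the original patterns):
   with C == 4 and D == 16, the transform above turns

     unsigned int f (unsigned int a)
     {
       return (a & 4) != 0 ? 16 : 0;
     }

   into roughly ((int) a << 2) & 16 (with a conversion to the result type):
   bit 2 is moved up to bit 4 and then masked, avoiding the conditional.
   If D were the smaller power of two, the shift would go right instead.  */
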
3220 /* If we have (A & C) != 0 where C is the sign bit of A, convert
3221 this into A < 0. Similarly for (A & C) == 0 into A >= 0. */
3222 (for cmp (eq ne)
3223 ncmp (ge lt)
3224 (simplify
3225 (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
3226 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
3227 && type_has_mode_precision_p (TREE_TYPE (@0))
3228 && element_precision (@2) >= element_precision (@0)
3229 && wi::only_sign_bit_p (@1, element_precision (@0)))
3230 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
3231 (ncmp (convert:stype @0) { build_zero_cst (stype); })))))
3232
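/* Illustrative sketch (editor's addition): assuming a 32-bit unsigned int,
   the sign-bit test above

     int sign_set (unsigned int a)
     {
       return (a & 0x80000000u) != 0;
     }

   is rewritten as (int) a < 0, and the corresponding == 0 form as
   (int) a >= 0.  */
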
3233 /* If we have A < 0 ? C : 0 where C is a power of 2, convert
3234 this into a right shift or sign extension followed by ANDing with C. */
3235 (simplify
3236 (cond
3237 (lt @0 integer_zerop)
3238 integer_pow2p@1 integer_zerop)
3239 (if (!TYPE_UNSIGNED (TREE_TYPE (@0)))
3240 (with {
3241 int shift = element_precision (@0) - wi::exact_log2 (@1) - 1;
3242 }
3243 (if (shift >= 0)
3244 (bit_and
3245 (convert (rshift @0 { build_int_cst (integer_type_node, shift); }))
3246 @1)
3247 /* Otherwise the result type must be wider than TREE_TYPE (@0) and pure
3248 sign extension followed by ANDing with C will achieve the effect.  */
3249 (bit_and (convert @0) @1)))))
3250
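/* Illustrative sketch (editor's addition): assuming a 32-bit signed int and
   C == 16, the transform above rewrites

     int f (int a)
     {
       return a < 0 ? 16 : 0;
     }

   as (a >> 27) & 16: the arithmetic right shift copies the sign bit down to
   bit 4, and the AND discards the other sign-extended bits.  */
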
3251 /* When the addresses are not directly of decls, compare base and offset.
3252 This implements some remaining parts of the fold_comparison address
3253 comparisons, but is not yet a complete replacement for them.  Still, it is
3254 good enough to keep fold_stmt from regressing when not dispatching to fold_binary.  */
3255 (for cmp (simple_comparison)
3256 (simplify
3257 (cmp (convert1?@2 addr@0) (convert2? addr@1))
3258 (with
3259 {
3260 HOST_WIDE_INT off0, off1;
3261 tree base0 = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off0);
3262 tree base1 = get_addr_base_and_unit_offset (TREE_OPERAND (@1, 0), &off1);
3263 if (base0 && TREE_CODE (base0) == MEM_REF)
3264 {
3265 off0 += mem_ref_offset (base0).to_short_addr ();
3266 base0 = TREE_OPERAND (base0, 0);
3267 }
3268 if (base1 && TREE_CODE (base1) == MEM_REF)
3269 {
3270 off1 += mem_ref_offset (base1).to_short_addr ();
3271 base1 = TREE_OPERAND (base1, 0);
3272 }
3273 }
3274 (if (base0 && base1)
3275 (with
3276 {
3277 int equal = 2;
3278 /* Punt in GENERIC on variables with value expressions;
3279 the value expressions might point to fields/elements
3280 of other vars etc. */
3281 if (GENERIC
3282 && ((VAR_P (base0) && DECL_HAS_VALUE_EXPR_P (base0))
3283 || (VAR_P (base1) && DECL_HAS_VALUE_EXPR_P (base1))))
3284 ;
3285 else if (decl_in_symtab_p (base0)
3286 && decl_in_symtab_p (base1))
3287 equal = symtab_node::get_create (base0)
3288 ->equal_address_to (symtab_node::get_create (base1));
3289 else if ((DECL_P (base0)
3290 || TREE_CODE (base0) == SSA_NAME
3291 || TREE_CODE (base0) == STRING_CST)
3292 && (DECL_P (base1)
3293 || TREE_CODE (base1) == SSA_NAME
3294 || TREE_CODE (base1) == STRING_CST))
3295 equal = (base0 == base1);
3296 }
3297 (if (equal == 1)
3298 (switch
3299 (if (cmp == EQ_EXPR)
3300 { constant_boolean_node (off0 == off1, type); })
3301 (if (cmp == NE_EXPR)
3302 { constant_boolean_node (off0 != off1, type); })
3303 (if (cmp == LT_EXPR)
3304 { constant_boolean_node (off0 < off1, type); })
3305 (if (cmp == LE_EXPR)
3306 { constant_boolean_node (off0 <= off1, type); })
3307 (if (cmp == GE_EXPR)
3308 { constant_boolean_node (off0 >= off1, type); })
3309 (if (cmp == GT_EXPR)
3310 { constant_boolean_node (off0 > off1, type); }))
3311 (if (equal == 0
3312 && DECL_P (base0) && DECL_P (base1)
3313 /* If we compare these as integers, require equal offsets.  */
3314 && (!INTEGRAL_TYPE_P (TREE_TYPE (@2))
3315 || off0 == off1))
3316 (switch
3317 (if (cmp == EQ_EXPR)
3318 { constant_boolean_node (false, type); })
3319 (if (cmp == NE_EXPR)
3320 { constant_boolean_node (true, type); })))))))))
3321
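/* Illustrative sketch (editor's addition): with local arrays
   int a[4], b[4]; and 4-byte int, the base/offset logic above folds
   &a[1] < &a[3] to true (same base, offsets 4 and 12) and folds
   &a[0] == &b[0] to false, since the two decls cannot share an address.  */
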
3322 /* Simplify pointer equality compares using PTA. */
3323 (for neeq (ne eq)
3324 (simplify
3325 (neeq @0 @1)
3326 (if (POINTER_TYPE_P (TREE_TYPE (@0))
3327 && ptrs_compare_unequal (@0, @1))
3328 { neeq == EQ_EXPR ? boolean_false_node : boolean_true_node; })))
3329
3330 /* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST,
3331 and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST.
3332 Disable the transform if either operand is a pointer to a function.
3333 This broke pr22051-2.c for arm, where function pointer
3334 canonicalization is not wanted.  */
3335
3336 (for cmp (ne eq)
3337 (simplify
3338 (cmp (convert @0) INTEGER_CST@1)
3339 (if ((POINTER_TYPE_P (TREE_TYPE (@0)) && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0)))
3340 && INTEGRAL_TYPE_P (TREE_TYPE (@1)))
3341 || (INTEGRAL_TYPE_P (TREE_TYPE (@0)) && POINTER_TYPE_P (TREE_TYPE (@1))
3342 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1)))))
3343 (cmp @0 (convert @1)))))
3344
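/* Illustrative sketch (editor's addition): for an object pointer such as
   char *p, the transform above rewrites

     (intptr_t) p == 0

   as p == (char *) 0, keeping the test in pointer form.  It is skipped when
   the pointed-to type is a function type (see the arm note above).  */
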
3345 /* Non-equality compare simplifications from fold_binary */
3346 (for cmp (lt gt le ge)
3347 /* Comparisons with the highest or lowest possible integer of
3348 the specified precision will have known values. */
3349 (simplify
3350 (cmp (convert?@2 @0) INTEGER_CST@1)
3351 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
3352 && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)))
3353 (with
3354 {
3355 tree arg1_type = TREE_TYPE (@1);
3356 unsigned int prec = TYPE_PRECISION (arg1_type);
3357 wide_int max = wi::max_value (arg1_type);
3358 wide_int signed_max = wi::max_value (prec, SIGNED);
3359 wide_int min = wi::min_value (arg1_type);
3360 }
3361 (switch
3362 (if (wi::eq_p (@1, max))
3363 (switch
3364 (if (cmp == GT_EXPR)
3365 { constant_boolean_node (false, type); })
3366 (if (cmp == GE_EXPR)
3367 (eq @2 @1))
3368 (if (cmp == LE_EXPR)
3369 { constant_boolean_node (true, type); })
3370 (if (cmp == LT_EXPR)
3371 (ne @2 @1))))
3372 (if (wi::eq_p (@1, min))
3373 (switch
3374 (if (cmp == LT_EXPR)
3375 { constant_boolean_node (false, type); })
3376 (if (cmp == LE_EXPR)
3377 (eq @2 @1))
3378 (if (cmp == GE_EXPR)
3379 { constant_boolean_node (true, type); })
3380 (if (cmp == GT_EXPR)
3381 (ne @2 @1))))
3382 (if (wi::eq_p (@1, max - 1))
3383 (switch
3384 (if (cmp == GT_EXPR)
3385 (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))
3386 (if (cmp == LE_EXPR)
3387 (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::add (@1, 1)); }))))
3388 (if (wi::eq_p (@1, min + 1))
3389 (switch
3390 (if (cmp == GE_EXPR)
3391 (ne @2 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))
3392 (if (cmp == LT_EXPR)
3393 (eq @2 { wide_int_to_tree (TREE_TYPE (@1), wi::sub (@1, 1)); }))))
3394 (if (wi::eq_p (@1, signed_max)
3395 && TYPE_UNSIGNED (arg1_type)
3396 /* We will flip the signedness of the comparison operator
3397 associated with the mode of @1, so the sign bit is
3398 specified by this mode. Check that @1 is the signed
3399 max associated with this sign bit. */
3400 && prec == GET_MODE_PRECISION (SCALAR_INT_TYPE_MODE (arg1_type))
3401 /* signed_type does not work on pointer types. */
3402 && INTEGRAL_TYPE_P (arg1_type))
3403 /* The following case also applies to X < signed_max+1
3404 and X >= signed_max+1 because of previous transformations.  */
3405 (if (cmp == LE_EXPR || cmp == GT_EXPR)
3406 (with { tree st = signed_type_for (arg1_type); }
3407 (if (cmp == LE_EXPR)
3408 (ge (convert:st @0) { build_zero_cst (st); })
3409 (lt (convert:st @0) { build_zero_cst (st); }))))))))))
3410
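/* Illustrative sketch (editor's addition): for unsigned int x the rules
   above give, for example:

     x >  UINT_MAX      ->  false
     x >= UINT_MAX      ->  x == UINT_MAX
     x <  UINT_MAX      ->  x != UINT_MAX
     x >  UINT_MAX - 1  ->  x == UINT_MAX

   and symmetrically for comparisons against the type's minimum.  */
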
3411 (for cmp (unordered ordered unlt unle ungt unge uneq ltgt)
3412 /* If the second operand is NaN, the result is constant. */
3413 (simplify
3414 (cmp @0 REAL_CST@1)
3415 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
3416 && (cmp != LTGT_EXPR || ! flag_trapping_math))
3417 { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR
3418 ? false : true, type); })))
3419
3420 /* bool_var != 0 becomes bool_var. */
3421 (simplify
3422 (ne @0 integer_zerop)
3423 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
3424 && types_match (type, TREE_TYPE (@0)))
3425 (non_lvalue @0)))
3426 /* bool_var == 1 becomes bool_var. */
3427 (simplify
3428 (eq @0 integer_onep)
3429 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
3430 && types_match (type, TREE_TYPE (@0)))
3431 (non_lvalue @0)))
3432 /* Do not handle
3433 bool_var == 0 becomes !bool_var or
3434 bool_var != 1 becomes !bool_var
3435 here, because that is only profitable in assignment context.  As long
3436 as we require a tcc_comparison in GIMPLE_CONDs, we would otherwise
3437 replace if (x == 0) with tem = ~x; if (tem != 0), which is
3438 clearly less optimal and which forwprop would only transform back again.  */
3439
3440 /* When one argument is a constant, overflow detection can be simplified.
3441 Currently restricted to single use so as not to interfere too much with
3442 ADD_OVERFLOW detection in tree-ssa-math-opts.c.
3443 A + CST CMP A -> A CMP' CST' */
3444 (for cmp (lt le ge gt)
3445 out (gt gt le le)
3446 (simplify
3447 (cmp:c (plus@2 @0 INTEGER_CST@1) @0)
3448 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
3449 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
3450 && wi::ne_p (@1, 0)
3451 && single_use (@2))
3452 (out @0 { wide_int_to_tree (TREE_TYPE (@0), wi::max_value
3453 (TYPE_PRECISION (TREE_TYPE (@0)), UNSIGNED) - @1); }))))
3454
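/* Illustrative sketch (editor's addition): for unsigned int a, the wrapping
   overflow test

     a + 10 < a

   is rewritten above as a > UINT_MAX - 10 (the single-use restriction keeps
   the ADD_OVERFLOW detection in tree-ssa-math-opts.c working).  */
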
3455 /* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
3456 However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.c
3457 expects the long form, so we restrict the transformation for now. */
3458 (for cmp (gt le)
3459 (simplify
3460 (cmp:c (minus@2 @0 @1) @0)
3461 (if (single_use (@2)
3462 && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3463 && TYPE_UNSIGNED (TREE_TYPE (@0))
3464 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
3465 (cmp @1 @0))))
3466
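/* Illustrative sketch (editor's addition): for unsigned int a and b, the
   single-use overflow test a - b > a is rewritten above as b > a, which is
   the direct statement that the subtraction wraps.  */
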
3467 /* Testing for overflow is unnecessary if we already know the result. */
3468 /* A - B > A */
3469 (for cmp (gt le)
3470 out (ne eq)
3471 (simplify
3472 (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0)
3473 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
3474 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
3475 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
3476 /* A + B < A */
3477 (for cmp (lt ge)
3478 out (ne eq)
3479 (simplify
3480 (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0)
3481 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
3482 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
3483 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
3484
3485 /* For unsigned operands, -1 / B < A checks whether A * B would overflow.
3486 Simplify it to __builtin_mul_overflow (A, B, <unused>). */
3487 (for cmp (lt ge)
3488 out (ne eq)
3489 (simplify
3490 (cmp:c (trunc_div:s integer_all_onesp @1) @0)
3491 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0)))
3492 (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
3493 (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
3494
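/* Illustrative sketch (editor's addition): for unsigned int a and b, the
   idiom UINT_MAX / b < a (i.e. "a * b would overflow") is rewritten above
   into an IFN_MUL_OVERFLOW call whose overflow flag is tested, conceptually
   the same as

     unsigned int tmp;
     bool ovf = __builtin_mul_overflow (a, b, &tmp);

   so that it can be expanded more cheaply later.  */
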
3495 /* Simplification of math builtins. These rules must all be optimizations
3496 as well as IL simplifications. If there is a possibility that the new
3497 form could be a pessimization, the rule should go in the canonicalization
3498 section that follows this one.
3499
3500 Rules can generally go in this section if they satisfy one of
3501 the following:
3502
3503 - the rule describes an identity
3504
3505 - the rule replaces calls with something as simple as addition or
3506 multiplication
3507
3508 - the rule contains unary calls only and simplifies the surrounding
3509 arithmetic. (The idea here is to exclude non-unary calls in which
3510 one operand is constant and in which the call is known to be cheap
3511 when the operand has that value.) */
3512
3513 (if (flag_unsafe_math_optimizations)
3514 /* Simplify sqrt(x) * sqrt(x) -> x. */
3515 (simplify
3516 (mult (SQRT@1 @0) @1)
3517 (if (!HONOR_SNANS (type))
3518 @0))
3519
3520 (for op (plus minus)
3521 /* Simplify (A / C) +- (B / C) -> (A +- B) / C. */
3522 (simplify
3523 (op (rdiv @0 @1)
3524 (rdiv @2 @1))
3525 (rdiv (op @0 @2) @1)))
3526
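/* Illustrative sketch (editor's addition): under -funsafe-math-optimizations
   the rule above turns a / c + b / c into (a + b) / c for floating-point
   operands, saving one division.  */
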
3527 /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y). */
3528 (for root (SQRT CBRT)
3529 (simplify
3530 (mult (root:s @0) (root:s @1))
3531 (root (mult @0 @1))))
3532
3533 /* Simplify expN(x) * expN(y) -> expN(x+y). */
3534 (for exps (EXP EXP2 EXP10 POW10)
3535 (simplify
3536 (mult (exps:s @0) (exps:s @1))
3537 (exps (plus @0 @1))))
3538
3539 /* Simplify a/root(b/c) into a*root(c/b). */
3540 (for root (SQRT CBRT)
3541 (simplify
3542 (rdiv @0 (root:s (rdiv:s @1 @2)))
3543 (mult @0 (root (rdiv @2 @1)))))
3544
3545 /* Simplify x/expN(y) into x*expN(-y). */
3546 (for exps (EXP EXP2 EXP10 POW10)
3547 (simplify
3548 (rdiv @0 (exps:s @1))
3549 (mult @0 (exps (negate @1)))))
3550
3551 (for logs (LOG LOG2 LOG10 LOG10)
3552 exps (EXP EXP2 EXP10 POW10)
3553 /* logN(expN(x)) -> x. */
3554 (simplify
3555 (logs (exps @0))
3556 @0)
3557 /* expN(logN(x)) -> x. */
3558 (simplify
3559 (exps (logs @0))
3560 @0))
3561
3562 /* Optimize logN(func()) for various exponential functions. We
3563 want to determine the value "x" and the power "exponent" in
3564 order to transform logN(x**exponent) into exponent*logN(x). */
3565 (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10)
3566 exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2)
3567 (simplify
3568 (logs (exps @0))
3569 (if (SCALAR_FLOAT_TYPE_P (type))
3570 (with {
3571 tree x;
3572 switch (exps)
3573 {
3574 CASE_CFN_EXP:
3575 /* Prepare to do logN(exp(exponent)) -> exponent*logN(e). */
3576 x = build_real_truncate (type, dconst_e ());
3577 break;
3578 CASE_CFN_EXP2:
3579 /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2). */
3580 x = build_real (type, dconst2);
3581 break;
3582 CASE_CFN_EXP10:
3583 CASE_CFN_POW10:
3584 /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10). */
3585 {
3586 REAL_VALUE_TYPE dconst10;
3587 real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
3588 x = build_real (type, dconst10);
3589 }
3590 break;
3591 default:
3592 gcc_unreachable ();
3593 }
3594 }
3595 (mult (logs { x; }) @0)))))
3596
3597 (for logs (LOG LOG
3598 LOG2 LOG2
3599 LOG10 LOG10)
3600 exps (SQRT CBRT)
3601 (simplify
3602 (logs (exps @0))
3603 (if (SCALAR_FLOAT_TYPE_P (type))
3604 (with {
3605 tree x;
3606 switch (exps)
3607 {
3608 CASE_CFN_SQRT:
3609 /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x). */
3610 x = build_real (type, dconsthalf);
3611 break;
3612 CASE_CFN_CBRT:
3613 /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x). */
3614 x = build_real_truncate (type, dconst_third ());
3615 break;
3616 default:
3617 gcc_unreachable ();
3618 }
3619 }
3620 (mult { x; } (logs @0))))))
3621
3622 /* logN(pow(x,exponent)) -> exponent*logN(x). */
3623 (for logs (LOG LOG2 LOG10)
3624 pows (POW)
3625 (simplify
3626 (logs (pows @0 @1))
3627 (mult @1 (logs @0))))
3628
3629 /* pow(C,x) -> exp(log(C)*x) if C > 0. */
3630 (for pows (POW)
3631 exps (EXP)
3632 logs (LOG)
3633 (simplify
3634 (pows REAL_CST@0 @1)
3635 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
3636 && real_isfinite (TREE_REAL_CST_PTR (@0)))
3637 (exps (mult (logs @0) @1)))))
3638
3639 (for sqrts (SQRT)
3640 cbrts (CBRT)
3641 pows (POW)
3642 exps (EXP EXP2 EXP10 POW10)
3643 /* sqrt(expN(x)) -> expN(x*0.5). */
3644 (simplify
3645 (sqrts (exps @0))
3646 (exps (mult @0 { build_real (type, dconsthalf); })))
3647 /* cbrt(expN(x)) -> expN(x/3). */
3648 (simplify
3649 (cbrts (exps @0))
3650 (exps (mult @0 { build_real_truncate (type, dconst_third ()); })))
3651 /* pow(expN(x), y) -> expN(x*y). */
3652 (simplify
3653 (pows (exps @0) @1)
3654 (exps (mult @0 @1))))
3655
3656 /* tan(atan(x)) -> x. */
3657 (for tans (TAN)
3658 atans (ATAN)
3659 (simplify
3660 (tans (atans @0))
3661 @0)))
3662
3663 /* cabs(x+0i) or cabs(0+xi) -> abs(x). */
3664 (simplify
3665 (CABS (complex:C @0 real_zerop@1))
3666 (abs @0))
3667
3668 /* trunc(trunc(x)) -> trunc(x), etc. */
3669 (for fns (TRUNC FLOOR CEIL ROUND NEARBYINT RINT)
3670 (simplify
3671 (fns (fns @0))
3672 (fns @0)))
3673 /* f(x) -> x if x is integer valued and f does nothing for such values. */
3674 (for fns (TRUNC FLOOR CEIL ROUND NEARBYINT RINT)
3675 (simplify
3676 (fns integer_valued_real_p@0)
3677 @0))
3678
3679 /* hypot(x,0) and hypot(0,x) -> abs(x). */
3680 (simplify
3681 (HYPOT:c @0 real_zerop@1)
3682 (abs @0))
3683
3684 /* pow(1,x) -> 1. */
3685 (simplify
3686 (POW real_onep@0 @1)
3687 @0)
3688
3689 (simplify
3690 /* copysign(x,x) -> x. */
3691 (COPYSIGN @0 @0)
3692 @0)
3693
3694 (simplify
3695 /* copysign(x,y) -> fabs(x) if y is nonnegative. */
3696 (COPYSIGN @0 tree_expr_nonnegative_p@1)
3697 (abs @0))
3698
3699 (for scale (LDEXP SCALBN SCALBLN)
3700 /* ldexp(0, x) -> 0. */
3701 (simplify
3702 (scale real_zerop@0 @1)
3703 @0)
3704 /* ldexp(x, 0) -> x. */
3705 (simplify
3706 (scale @0 integer_zerop@1)
3707 @0)
3708 /* ldexp(x, y) -> x if x is +-Inf or NaN. */
3709 (simplify
3710 (scale REAL_CST@0 @1)
3711 (if (!real_isfinite (TREE_REAL_CST_PTR (@0)))
3712 @0)))
3713
3714 /* Canonicalization of sequences of math builtins. These rules represent
3715 IL simplifications but are not necessarily optimizations.
3716
3717 The sincos pass is responsible for picking "optimal" implementations
3718 of math builtins, which may be more complicated and can sometimes go
3719 the other way, e.g. converting pow into a sequence of sqrts.
3720 We only want to do these canonicalizations before the pass has run. */
3721
3722 (if (flag_unsafe_math_optimizations && canonicalize_math_p ())
3723 /* Simplify tan(x) * cos(x) -> sin(x). */
3724 (simplify
3725 (mult:c (TAN:s @0) (COS:s @0))
3726 (SIN @0))
3727
3728 /* Simplify x * pow(x,c) -> pow(x,c+1). */
3729 (simplify
3730 (mult:c @0 (POW:s @0 REAL_CST@1))
3731 (if (!TREE_OVERFLOW (@1))
3732 (POW @0 (plus @1 { build_one_cst (type); }))))
3733
3734 /* Simplify sin(x) / cos(x) -> tan(x). */
3735 (simplify
3736 (rdiv (SIN:s @0) (COS:s @0))
3737 (TAN @0))
3738
3739 /* Simplify cos(x) / sin(x) -> 1 / tan(x). */
3740 (simplify
3741 (rdiv (COS:s @0) (SIN:s @0))
3742 (rdiv { build_one_cst (type); } (TAN @0)))
3743
3744 /* Simplify sin(x) / tan(x) -> cos(x). */
3745 (simplify
3746 (rdiv (SIN:s @0) (TAN:s @0))
3747 (if (! HONOR_NANS (@0)
3748 && ! HONOR_INFINITIES (@0))
3749 (COS @0)))
3750
3751 /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */
3752 (simplify
3753 (rdiv (TAN:s @0) (SIN:s @0))
3754 (if (! HONOR_NANS (@0)
3755 && ! HONOR_INFINITIES (@0))
3756 (rdiv { build_one_cst (type); } (COS @0))))
3757
3758 /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */
3759 (simplify
3760 (mult (POW:s @0 @1) (POW:s @0 @2))
3761 (POW @0 (plus @1 @2)))
3762
3763 /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */
3764 (simplify
3765 (mult (POW:s @0 @1) (POW:s @2 @1))
3766 (POW (mult @0 @2) @1))
3767
3768 /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y). */
3769 (simplify
3770 (mult (POWI:s @0 @1) (POWI:s @2 @1))
3771 (POWI (mult @0 @2) @1))
3772
3773 /* Simplify pow(x,c) / x -> pow(x,c-1). */
3774 (simplify
3775 (rdiv (POW:s @0 REAL_CST@1) @0)
3776 (if (!TREE_OVERFLOW (@1))
3777 (POW @0 (minus @1 { build_one_cst (type); }))))
3778
3779 /* Simplify x / pow (y,z) -> x * pow(y,-z). */
3780 (simplify
3781 (rdiv @0 (POW:s @1 @2))
3782 (mult @0 (POW @1 (negate @2))))
3783
3784 (for sqrts (SQRT)
3785 cbrts (CBRT)
3786 pows (POW)
3787 /* sqrt(sqrt(x)) -> pow(x,1/4). */
3788 (simplify
3789 (sqrts (sqrts @0))
3790 (pows @0 { build_real (type, dconst_quarter ()); }))
3791 /* sqrt(cbrt(x)) -> pow(x,1/6). */
3792 (simplify
3793 (sqrts (cbrts @0))
3794 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
3795 /* cbrt(sqrt(x)) -> pow(x,1/6). */
3796 (simplify
3797 (cbrts (sqrts @0))
3798 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
3799 /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative. */
3800 (simplify
3801 (cbrts (cbrts tree_expr_nonnegative_p@0))
3802 (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
3803 /* sqrt(pow(x,y)) -> pow(|x|,y*0.5). */
3804 (simplify
3805 (sqrts (pows @0 @1))
3806 (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
3807 /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative. */
3808 (simplify
3809 (cbrts (pows tree_expr_nonnegative_p@0 @1))
3810 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
3811 /* pow(sqrt(x),y) -> pow(x,y*0.5). */
3812 (simplify
3813 (pows (sqrts @0) @1)
3814 (pows @0 (mult @1 { build_real (type, dconsthalf); })))
3815 /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative. */
3816 (simplify
3817 (pows (cbrts tree_expr_nonnegative_p@0) @1)
3818 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
3819 /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative. */
3820 (simplify
3821 (pows (pows tree_expr_nonnegative_p@0 @1) @2)
3822 (pows @0 (mult @1 @2))))
3823
3824 /* cabs(x+xi) -> fabs(x)*sqrt(2). */
3825 (simplify
3826 (CABS (complex @0 @0))
3827 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
3828
3829 /* hypot(x,x) -> fabs(x)*sqrt(2). */
3830 (simplify
3831 (HYPOT @0 @0)
3832 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
3833
3834 /* cexp(x+yi) -> exp(x)*cexpi(y). */
3835 (for cexps (CEXP)
3836 exps (EXP)
3837 cexpis (CEXPI)
3838 (simplify
3839 (cexps compositional_complex@0)
3840 (if (targetm.libc_has_function (function_c99_math_complex))
3841 (complex
3842 (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0))))
3843 (mult @1 (imagpart @2)))))))
3844
3845 (if (canonicalize_math_p ())
3846 /* floor(x) -> trunc(x) if x is nonnegative. */
3847 (for floors (FLOOR)
3848 truncs (TRUNC)
3849 (simplify
3850 (floors tree_expr_nonnegative_p@0)
3851 (truncs @0))))
3852
3853 (match double_value_p
3854 @0
3855 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
3856 (for froms (BUILT_IN_TRUNCL
3857 BUILT_IN_FLOORL
3858 BUILT_IN_CEILL
3859 BUILT_IN_ROUNDL
3860 BUILT_IN_NEARBYINTL
3861 BUILT_IN_RINTL)
3862 tos (BUILT_IN_TRUNC
3863 BUILT_IN_FLOOR
3864 BUILT_IN_CEIL
3865 BUILT_IN_ROUND
3866 BUILT_IN_NEARBYINT
3867 BUILT_IN_RINT)
3868 /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double. */
3869 (if (optimize && canonicalize_math_p ())
3870 (simplify
3871 (froms (convert double_value_p@0))
3872 (convert (tos @0)))))
3873
3874 (match float_value_p
3875 @0
3876 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
3877 (for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
3878 BUILT_IN_FLOORL BUILT_IN_FLOOR
3879 BUILT_IN_CEILL BUILT_IN_CEIL
3880 BUILT_IN_ROUNDL BUILT_IN_ROUND
3881 BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
3882 BUILT_IN_RINTL BUILT_IN_RINT)
3883 tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
3884 BUILT_IN_FLOORF BUILT_IN_FLOORF
3885 BUILT_IN_CEILF BUILT_IN_CEILF
3886 BUILT_IN_ROUNDF BUILT_IN_ROUNDF
3887 BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
3888 BUILT_IN_RINTF BUILT_IN_RINTF)
3889 /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc.,
3890 if x is a float. */
3891 (if (optimize && canonicalize_math_p ()
3892 && targetm.libc_has_function (function_c99_misc))
3893 (simplify
3894 (froms (convert float_value_p@0))
3895 (convert (tos @0)))))
3896
3897 (for froms (XFLOORL XCEILL XROUNDL XRINTL)
3898 tos (XFLOOR XCEIL XROUND XRINT)
3899 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double. */
3900 (if (optimize && canonicalize_math_p ())
3901 (simplify
3902 (froms (convert double_value_p@0))
3903 (tos @0))))
3904
3905 (for froms (XFLOORL XCEILL XROUNDL XRINTL
3906 XFLOOR XCEIL XROUND XRINT)
3907 tos (XFLOORF XCEILF XROUNDF XRINTF)
3908 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc.,
3909 if x is a float. */
3910 (if (optimize && canonicalize_math_p ())
3911 (simplify
3912 (froms (convert float_value_p@0))
3913 (tos @0))))
3914
3915 (if (canonicalize_math_p ())
3916 /* xfloor(x) -> fix_trunc(x) if x is nonnegative. */
3917 (for floors (IFLOOR LFLOOR LLFLOOR)
3918 (simplify
3919 (floors tree_expr_nonnegative_p@0)
3920 (fix_trunc @0))))
3921
3922 (if (canonicalize_math_p ())
3923 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued. */
3924 (for fns (IFLOOR LFLOOR LLFLOOR
3925 ICEIL LCEIL LLCEIL
3926 IROUND LROUND LLROUND)
3927 (simplify
3928 (fns integer_valued_real_p@0)
3929 (fix_trunc @0)))
3930 (if (!flag_errno_math)
3931 /* xrint(x) -> fix_trunc(x), etc., if x is integer valued. */
3932 (for rints (IRINT LRINT LLRINT)
3933 (simplify
3934 (rints integer_valued_real_p@0)
3935 (fix_trunc @0)))))
3936
3937 (if (canonicalize_math_p ())
3938 (for ifn (IFLOOR ICEIL IROUND IRINT)
3939 lfn (LFLOOR LCEIL LROUND LRINT)
3940 llfn (LLFLOOR LLCEIL LLROUND LLRINT)
3941 /* Canonicalize iround (x) to lround (x) on ILP32 targets where
3942 sizeof (int) == sizeof (long). */
3943 (if (TYPE_PRECISION (integer_type_node)
3944 == TYPE_PRECISION (long_integer_type_node))
3945 (simplify
3946 (ifn @0)
3947 (lfn:long_integer_type_node @0)))
3948 /* Canonicalize llround (x) to lround (x) on LP64 targets where
3949 sizeof (long long) == sizeof (long). */
3950 (if (TYPE_PRECISION (long_long_integer_type_node)
3951 == TYPE_PRECISION (long_integer_type_node))
3952 (simplify
3953 (llfn @0)
3954 (lfn:long_integer_type_node @0)))))
3955
3956 /* cproj(x) -> x if we're ignoring infinities. */
3957 (simplify
3958 (CPROJ @0)
3959 (if (!HONOR_INFINITIES (type))
3960 @0))
3961
3962 /* If the real part is inf and the imag part is known to be
3963 nonnegative, return (inf + 0i). */
3964 (simplify
3965 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
3966 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
3967 { build_complex_inf (type, false); }))
3968
3969 /* If the imag part is inf, return (inf+I*copysign(0,imag)). */
3970 (simplify
3971 (CPROJ (complex @0 REAL_CST@1))
3972 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
3973 { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))
3974
3975 (for pows (POW)
3976 sqrts (SQRT)
3977 cbrts (CBRT)
3978 (simplify
3979 (pows @0 REAL_CST@1)
3980 (with {
3981 const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
3982 REAL_VALUE_TYPE tmp;
3983 }
3984 (switch
3985 /* pow(x,0) -> 1. */
3986 (if (real_equal (value, &dconst0))
3987 { build_real (type, dconst1); })
3988 /* pow(x,1) -> x. */
3989 (if (real_equal (value, &dconst1))
3990 @0)
3991 /* pow(x,-1) -> 1/x. */
3992 (if (real_equal (value, &dconstm1))
3993 (rdiv { build_real (type, dconst1); } @0))
3994 /* pow(x,0.5) -> sqrt(x). */
3995 (if (flag_unsafe_math_optimizations
3996 && canonicalize_math_p ()
3997 && real_equal (value, &dconsthalf))
3998 (sqrts @0))
3999 /* pow(x,1/3) -> cbrt(x). */
4000 (if (flag_unsafe_math_optimizations
4001 && canonicalize_math_p ()
4002 && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
4003 real_equal (value, &tmp)))
4004 (cbrts @0))))))
4005
4006 /* powi(1,x) -> 1. */
4007 (simplify
4008 (POWI real_onep@0 @1)
4009 @0)
4010
4011 (simplify
4012 (POWI @0 INTEGER_CST@1)
4013 (switch
4014 /* powi(x,0) -> 1. */
4015 (if (wi::eq_p (@1, 0))
4016 { build_real (type, dconst1); })
4017 /* powi(x,1) -> x. */
4018 (if (wi::eq_p (@1, 1))
4019 @0)
4020 /* powi(x,-1) -> 1/x. */
4021 (if (wi::eq_p (@1, -1))
4022 (rdiv { build_real (type, dconst1); } @0))))
4023
4024 /* Narrowing of arithmetic and logical operations.
4025
4026 These are conceptually similar to the transformations performed for
4027 the C/C++ front-ends by shorten_binary_op and shorten_compare. Long
4028 term we want to move all that code out of the front-ends into here. */
4029
4030 /* If we have a narrowing conversion of an arithmetic operation where
4031 both operands are widening conversions from the same type as the outer
4032 narrowing conversion, then convert the innermost operands to a suitable
4033 unsigned type (to avoid introducing undefined behavior), perform the
4034 operation and convert the result to the desired type.  */
4035 (for op (plus minus)
4036 (simplify
4037 (convert (op:s (convert@2 @0) (convert?@3 @1)))
4038 (if (INTEGRAL_TYPE_P (type)
4039 /* We check for type compatibility between @0 and @1 below,
4040 so there's no need to check that @1/@3 are integral types. */
4041 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
4042 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
4043 /* The precision of the type of each operand must match the
4044 precision of the mode of each operand, similarly for the
4045 result. */
4046 && type_has_mode_precision_p (TREE_TYPE (@0))
4047 && type_has_mode_precision_p (TREE_TYPE (@1))
4048 && type_has_mode_precision_p (type)
4049 /* The inner conversion must be a widening conversion. */
4050 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
4051 && types_match (@0, type)
4052 && (types_match (@0, @1)
4053 /* Or the second operand is const integer or converted const
4054 integer from valueize. */
4055 || TREE_CODE (@1) == INTEGER_CST))
4056 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
4057 (op @0 (convert @1))
4058 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
4059 (convert (op (convert:utype @0)
4060 (convert:utype @1))))))))
4061
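/* Illustrative sketch (editor's addition): assuming 16-bit short and 32-bit
   int, C computes a + b for short a, b in int and then narrows the result.
   The rule above rewrites that as

     (short) ((unsigned short) a + (unsigned short) b)

   doing the addition in the unsigned type so that no signed overflow is
   introduced at the narrower width.  */
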
4062 /* This is another case of narrowing, specifically when there's an outer
4063 BIT_AND_EXPR which masks off bits outside the type of the innermost
4064 operands.  As in the previous case, we have to convert the operands
4065 to unsigned types to avoid introducing undefined behavior for the
4066 arithmetic operation. */
4067 (for op (minus plus)
4068 (simplify
4069 (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
4070 (if (INTEGRAL_TYPE_P (type)
4071 /* We check for type compatibility between @0 and @1 below,
4072 so there's no need to check that @1/@3 are integral types. */
4073 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
4074 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
4075 /* The precision of the type of each operand must match the
4076 precision of the mode of each operand, similarly for the
4077 result. */
4078 && type_has_mode_precision_p (TREE_TYPE (@0))
4079 && type_has_mode_precision_p (TREE_TYPE (@1))
4080 && type_has_mode_precision_p (type)
4081 /* The inner conversion must be a widening conversion. */
4082 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
4083 && types_match (@0, @1)
4084 && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
4085 <= TYPE_PRECISION (TREE_TYPE (@0)))
4086 && (wi::bit_and (@4, wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
4087 true, TYPE_PRECISION (type))) == 0))
4088 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
4089 (with { tree ntype = TREE_TYPE (@0); }
4090 (convert (bit_and (op @0 @1) (convert:ntype @4))))
4091 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
4092 (convert (bit_and (op (convert:utype @0) (convert:utype @1))
4093 (convert:utype @4))))))))
4094
4095 /* Transform (@0 < @1 and @0 < @2) to use min,
4096 (@0 > @1 and @0 > @2) to use max.  */
4097 (for op (lt le gt ge)
4098 ext (min min max max)
4099 (simplify
4100 (bit_and (op:cs @0 @1) (op:cs @0 @2))
4101 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4102 && TREE_CODE (@0) != INTEGER_CST)
4103 (op @0 (ext @1 @2)))))
4104
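/* Illustrative sketch (editor's addition): once && has been lowered to a
   bitwise AND of the two tests, the rule above rewrites

     (x < y) & (x < z)

   for an integer variable x as x < MIN (y, z), and the > forms use MAX
   instead.  */
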
4105 (simplify
4106 /* signbit(x) -> 0 if x is nonnegative. */
4107 (SIGNBIT tree_expr_nonnegative_p@0)
4108 { integer_zero_node; })
4109
4110 (simplify
4111 /* signbit(x) -> x<0 if x doesn't have signed zeros. */
4112 (SIGNBIT @0)
4113 (if (!HONOR_SIGNED_ZEROS (@0))
4114 (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); }))))
4115
4116 /* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1. */
4117 (for cmp (eq ne)
4118 (for op (plus minus)
4119 rop (minus plus)
4120 (simplify
4121 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
4122 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
4123 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
4124 && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0))
4125 && !TYPE_SATURATING (TREE_TYPE (@0)))
4126 (with { tree res = int_const_binop (rop, @2, @1); }
4127 (if (TREE_OVERFLOW (res)
4128 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
4129 { constant_boolean_node (cmp == NE_EXPR, type); }
4130 (if (single_use (@3))
4131 (cmp @0 { res; }))))))))
4132 (for cmp (lt le gt ge)
4133 (for op (plus minus)
4134 rop (minus plus)
4135 (simplify
4136 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
4137 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
4138 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
4139 (with { tree res = int_const_binop (rop, @2, @1); }
4140 (if (TREE_OVERFLOW (res))
4141 {
4142 fold_overflow_warning (("assuming signed overflow does not occur "
4143 "when simplifying conditional to constant"),
4144 WARN_STRICT_OVERFLOW_CONDITIONAL);
4145 bool less = cmp == LE_EXPR || cmp == LT_EXPR;
4146 /* wi::ges_p (@2, 0) should be sufficient for a signed type. */
4147 bool ovf_high = wi::lt_p (@1, 0, TYPE_SIGN (TREE_TYPE (@1)))
4148 != (op == MINUS_EXPR);
4149 constant_boolean_node (less == ovf_high, type);
4150 }
4151 (if (single_use (@3))
4152 (with
4153 {
4154 fold_overflow_warning (("assuming signed overflow does not occur "
4155 "when changing X +- C1 cmp C2 to "
4156 "X cmp C2 -+ C1"),
4157 WARN_STRICT_OVERFLOW_COMPARISON);
4158 }
4159 (cmp @0 { res; })))))))))
4160
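/* Illustrative sketch (editor's addition): when x + 5 has a single use,
   x + 5 == 20 is rewritten above as x == 15; if the adjusted constant would
   overflow and signed overflow is undefined, the comparison folds to a
   constant instead (with the usual -Wstrict-overflow warning).  */
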
4161 /* Canonicalizations of BIT_FIELD_REFs. */
4162
4163 (simplify
4164 (BIT_FIELD_REF @0 @1 @2)
4165 (switch
4166 (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE
4167 && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
4168 (switch
4169 (if (integer_zerop (@2))
4170 (view_convert (realpart @0)))
4171 (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
4172 (view_convert (imagpart @0)))))
4173 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4174 && INTEGRAL_TYPE_P (type)
4175 /* On GIMPLE this should only apply to register arguments. */
4176 && (! GIMPLE || is_gimple_reg (@0))
4177 /* A bit-field-ref that referenced the full argument can be stripped. */
4178 && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0
4179 && integer_zerop (@2))
4180 /* Low-parts can be reduced to integral conversions.
4181 ??? The following doesn't work for PDP endian. */
4182 || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
4183 /* Don't even think about BITS_BIG_ENDIAN. */
4184 && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0
4185 && TYPE_PRECISION (type) % BITS_PER_UNIT == 0
4186 && compare_tree_int (@2, (BYTES_BIG_ENDIAN
4187 ? (TYPE_PRECISION (TREE_TYPE (@0))
4188 - TYPE_PRECISION (type))
4189 : 0)) == 0)))
4190 (convert @0))))
4191
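/* Illustrative sketch (editor's addition): for _Complex float c with 32-bit
   float, a BIT_FIELD_REF of 32 bits at offset 0 from c becomes the real
   part and one at offset 32 becomes the imaginary part (each through a
   VIEW_CONVERT), while a full-width or low-part BIT_FIELD_REF of an
   integral register becomes a plain conversion.  */
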
4192 /* Simplify vector extracts. */
4193
4194 (simplify
4195 (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2)
4196 (if (VECTOR_TYPE_P (TREE_TYPE (@0))
4197 && (types_match (type, TREE_TYPE (TREE_TYPE (@0)))
4198 || (VECTOR_TYPE_P (type)
4199 && types_match (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
4200 (with
4201 {
4202 tree ctor = (TREE_CODE (@0) == SSA_NAME
4203 ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
4204 tree eltype = TREE_TYPE (TREE_TYPE (ctor));
4205 unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype));
4206 unsigned HOST_WIDE_INT n = tree_to_uhwi (@1);
4207 unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2);
4208 }
4209 (if (n != 0
4210 && (idx % width) == 0
4211 && (n % width) == 0
4212 && ((idx + n) / width) <= TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor)))
4213 (with
4214 {
4215 idx = idx / width;
4216 n = n / width;
4217 /* Constructor elements can be subvectors. */
4218 unsigned HOST_WIDE_INT k = 1;
4219 if (CONSTRUCTOR_NELTS (ctor) != 0)
4220 {
4221 tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value);
4222 if (TREE_CODE (cons_elem) == VECTOR_TYPE)
4223 k = TYPE_VECTOR_SUBPARTS (cons_elem);
4224 }
4225 }
4226 (switch
4227 /* We keep an exact subset of the constructor elements. */
4228 (if ((idx % k) == 0 && (n % k) == 0)
4229 (if (CONSTRUCTOR_NELTS (ctor) == 0)
4230 { build_constructor (type, NULL); }
4231 (with
4232 {
4233 idx /= k;
4234 n /= k;
4235 }
4236 (if (n == 1)
4237 (if (idx < CONSTRUCTOR_NELTS (ctor))
4238 { CONSTRUCTOR_ELT (ctor, idx)->value; }
4239 { build_zero_cst (type); })
4240 {
4241 vec<constructor_elt, va_gc> *vals;
4242 vec_alloc (vals, n);
4243 for (unsigned i = 0;
4244 i < n && idx + i < CONSTRUCTOR_NELTS (ctor); ++i)
4245 CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE,
4246 CONSTRUCTOR_ELT (ctor, idx + i)->value);
4247 build_constructor (type, vals);
4248 }))))
4249 /* The bitfield references a single constructor element. */
4250 (if (idx + n <= (idx / k + 1) * k)
4251 (switch
4252 (if (CONSTRUCTOR_NELTS (ctor) <= idx / k)
4253 { build_zero_cst (type); })
4254 (if (n == k)
4255 { CONSTRUCTOR_ELT (ctor, idx / k)->value; })
4256 (BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / k)->value; }
4257 @1 { bitsize_int ((idx % k) * width); })))))))))
4258
4259 /* Simplify a bit extraction from a bit insertion for the cases with
4260 the inserted element fully covering the extraction or the insertion
4261 not touching the extraction. */
4262 (simplify
4263 (BIT_FIELD_REF (bit_insert @0 @1 @ipos) @rsize @rpos)
4264 (with
4265 {
4266 unsigned HOST_WIDE_INT isize;
4267 if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
4268 isize = TYPE_PRECISION (TREE_TYPE (@1));
4269 else
4270 isize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (@1)));
4271 }
4272 (switch
4273 (if (wi::leu_p (@ipos, @rpos)
4274 && wi::leu_p (wi::add (@rpos, @rsize), wi::add (@ipos, isize)))
4275 (BIT_FIELD_REF @1 @rsize { wide_int_to_tree (bitsizetype,
4276 wi::sub (@rpos, @ipos)); }))
4277 (if (wi::geu_p (@ipos, wi::add (@rpos, @rsize))
4278 || wi::geu_p (@rpos, wi::add (@ipos, isize)))
4279 (BIT_FIELD_REF @0 @rsize @rpos)))))