43f3d7ab0fa5838c543c795ca283d90d8cecd62a
[gcc.git] / gcc / match.pd
1 /* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
2 This file is consumed by genmatch which produces gimple-match.c
3 and generic-match.c from it.
4
5 Copyright (C) 2014-2020 Free Software Foundation, Inc.
6 Contributed by Richard Biener <rguenther@suse.de>
7 and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
15
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
24
25
26 /* Generic tree predicates we inherit. */
27 (define_predicates
28 integer_onep integer_zerop integer_all_onesp integer_minus_onep
29 integer_each_onep integer_truep integer_nonzerop
30 real_zerop real_onep real_minus_onep
31 zerop
32 initializer_each_zero_or_onep
33 CONSTANT_CLASS_P
34 tree_expr_nonnegative_p
35 tree_expr_nonzero_p
36 integer_valued_real_p
37 integer_pow2p
38 uniform_integer_cst_p
39 HONOR_NANS
40 uniform_vector_p)
41
42 /* Operator lists. */
43 (define_operator_list tcc_comparison
44 lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
45 (define_operator_list inverted_tcc_comparison
46 ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
47 (define_operator_list inverted_tcc_comparison_with_nans
48 unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)
49 (define_operator_list swapped_tcc_comparison
50 gt ge eq ne le lt unordered ordered ungt unge unlt unle uneq ltgt)
51 (define_operator_list simple_comparison lt le eq ne ge gt)
52 (define_operator_list swapped_simple_comparison gt ge eq ne le lt)
53
54 #include "cfn-operators.pd"
55
56 /* Define operand lists for math rounding functions {,i,l,ll}FN,
57 where the versions prefixed with "i" return an int, those prefixed with
58 "l" return a long and those prefixed with "ll" return a long long.
59
60 Also define operand lists:
61
62 X<FN>F for all float functions, in the order i, l, ll
63 X<FN> for all double functions, in the same order
64 X<FN>L for all long double functions, in the same order. */
65 #define DEFINE_INT_AND_FLOAT_ROUND_FN(FN) \
66 (define_operator_list X##FN##F BUILT_IN_I##FN##F \
67 BUILT_IN_L##FN##F \
68 BUILT_IN_LL##FN##F) \
69 (define_operator_list X##FN BUILT_IN_I##FN \
70 BUILT_IN_L##FN \
71 BUILT_IN_LL##FN) \
72 (define_operator_list X##FN##L BUILT_IN_I##FN##L \
73 BUILT_IN_L##FN##L \
74 BUILT_IN_LL##FN##L)
75
76 DEFINE_INT_AND_FLOAT_ROUND_FN (FLOOR)
77 DEFINE_INT_AND_FLOAT_ROUND_FN (CEIL)
78 DEFINE_INT_AND_FLOAT_ROUND_FN (ROUND)
79 DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
80
81 /* Binary operations and their associated IFN_COND_* function. */
82 (define_operator_list UNCOND_BINARY
83 plus minus
84 mult trunc_div trunc_mod rdiv
85 min max
86 bit_and bit_ior bit_xor
87 lshift rshift)
88 (define_operator_list COND_BINARY
89 IFN_COND_ADD IFN_COND_SUB
90 IFN_COND_MUL IFN_COND_DIV IFN_COND_MOD IFN_COND_RDIV
91 IFN_COND_MIN IFN_COND_MAX
92 IFN_COND_AND IFN_COND_IOR IFN_COND_XOR
93 IFN_COND_SHL IFN_COND_SHR)
94
95 /* Same for ternary operations. */
96 (define_operator_list UNCOND_TERNARY
97 IFN_FMA IFN_FMS IFN_FNMA IFN_FNMS)
98 (define_operator_list COND_TERNARY
99 IFN_COND_FMA IFN_COND_FMS IFN_COND_FNMA IFN_COND_FNMS)
100
101 /* With nop_convert? combine convert? and view_convert? in one pattern
102 plus conditionalize on tree_nop_conversion_p conversions. */
103 (match (nop_convert @0)
104 (convert @0)
105 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))))
106 (match (nop_convert @0)
107 (view_convert @0)
108 (if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0))
109 && known_eq (TYPE_VECTOR_SUBPARTS (type),
110 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0)))
111 && tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
112
113 /* Transform likes of (char) ABS_EXPR <(int) x> into (char) ABSU_EXPR <x>
114 ABSU_EXPR returns unsigned absolute value of the operand and the operand
115 of the ABSU_EXPR will have the corresponding signed type. */
116 (simplify (abs (convert @0))
117 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
118 && !TYPE_UNSIGNED (TREE_TYPE (@0))
119 && element_precision (type) > element_precision (TREE_TYPE (@0)))
120 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
121 (convert (absu:utype @0)))))
122
123 #if GIMPLE
124 /* Optimize (X + (X >> (prec - 1))) ^ (X >> (prec - 1)) into abs (X). */
125 (simplify
126 (bit_xor:c (plus:c @0 (rshift@2 @0 INTEGER_CST@1)) @2)
127 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
128 && !TYPE_UNSIGNED (TREE_TYPE (@0))
129 && wi::to_widest (@1) == element_precision (TREE_TYPE (@0)) - 1)
130 (abs @0)))
131 #endif
132
133 /* Simplifications of operations with one constant operand and
134 simplifications to constants or single values. */
135
136 (for op (plus pointer_plus minus bit_ior bit_xor)
137 (simplify
138 (op @0 integer_zerop)
139 (non_lvalue @0)))
140
141 /* 0 +p index -> (type)index */
142 (simplify
143 (pointer_plus integer_zerop @1)
144 (non_lvalue (convert @1)))
145
146 /* ptr - 0 -> (type)ptr */
147 (simplify
148 (pointer_diff @0 integer_zerop)
149 (convert @0))
150
151 /* See if ARG1 is zero and X + ARG1 reduces to X.
152 Likewise if the operands are reversed. */
153 (simplify
154 (plus:c @0 real_zerop@1)
155 (if (fold_real_zero_addition_p (type, @1, 0))
156 (non_lvalue @0)))
157
158 /* See if ARG1 is zero and X - ARG1 reduces to X. */
159 (simplify
160 (minus @0 real_zerop@1)
161 (if (fold_real_zero_addition_p (type, @1, 1))
162 (non_lvalue @0)))
163
164 /* Even if the fold_real_zero_addition_p can't simplify X + 0.0
165 into X, we can optimize (X + 0.0) + 0.0 or (X + 0.0) - 0.0
166 or (X - 0.0) + 0.0 into X + 0.0 and (X - 0.0) - 0.0 into X - 0.0
167 if not -frounding-math. For sNaNs the first operation would raise
168 exceptions but turn the result into qNan, so the second operation
169 would not raise it. */
170 (for inner_op (plus minus)
171 (for outer_op (plus minus)
172 (simplify
173 (outer_op (inner_op@3 @0 REAL_CST@1) REAL_CST@2)
174 (if (real_zerop (@1)
175 && real_zerop (@2)
176 && !HONOR_SIGN_DEPENDENT_ROUNDING (type))
177 (with { bool inner_plus = ((inner_op == PLUS_EXPR)
178 ^ REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)));
179 bool outer_plus
180 = ((outer_op == PLUS_EXPR)
181 ^ REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@2))); }
182 (if (outer_plus && !inner_plus)
183 (outer_op @0 @2)
184 @3))))))
185
186 /* Simplify x - x.
187 This is unsafe for certain floats even in non-IEEE formats.
188 In IEEE, it is unsafe because it does wrong for NaNs.
189 Also note that operand_equal_p is always false if an operand
190 is volatile. */
191 (simplify
192 (minus @0 @0)
193 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
194 { build_zero_cst (type); }))
195 (simplify
196 (pointer_diff @@0 @0)
197 { build_zero_cst (type); })
198
199 (simplify
200 (mult @0 integer_zerop@1)
201 @1)
202
203 /* Maybe fold x * 0 to 0. The expressions aren't the same
204 when x is NaN, since x * 0 is also NaN. Nor are they the
205 same in modes with signed zeros, since multiplying a
206 negative value by 0 gives -0, not +0. */
207 (simplify
208 (mult @0 real_zerop@1)
209 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
210 @1))
211
212 /* In IEEE floating point, x*1 is not equivalent to x for snans.
213 Likewise for complex arithmetic with signed zeros. */
214 (simplify
215 (mult @0 real_onep)
216 (if (!HONOR_SNANS (type)
217 && (!HONOR_SIGNED_ZEROS (type)
218 || !COMPLEX_FLOAT_TYPE_P (type)))
219 (non_lvalue @0)))
220
221 /* Transform x * -1.0 into -x. */
222 (simplify
223 (mult @0 real_minus_onep)
224 (if (!HONOR_SNANS (type)
225 && (!HONOR_SIGNED_ZEROS (type)
226 || !COMPLEX_FLOAT_TYPE_P (type)))
227 (negate @0)))
228
229 /* Transform { 0 or 1 } * { 0 or 1 } into { 0 or 1 } & { 0 or 1 } */
230 (simplify
231 (mult SSA_NAME@1 SSA_NAME@2)
232 (if (INTEGRAL_TYPE_P (type)
233 && get_nonzero_bits (@1) == 1
234 && get_nonzero_bits (@2) == 1)
235 (bit_and @1 @2)))
236
237 /* Transform x * { 0 or 1, 0 or 1, ... } into x & { 0 or -1, 0 or -1, ...},
238 unless the target has native support for the former but not the latter. */
239 (simplify
240 (mult @0 VECTOR_CST@1)
241 (if (initializer_each_zero_or_onep (@1)
242 && !HONOR_SNANS (type)
243 && !HONOR_SIGNED_ZEROS (type))
244 (with { tree itype = FLOAT_TYPE_P (type) ? unsigned_type_for (type) : type; }
245 (if (itype
246 && (!VECTOR_MODE_P (TYPE_MODE (type))
247 || (VECTOR_MODE_P (TYPE_MODE (itype))
248 && optab_handler (and_optab,
249 TYPE_MODE (itype)) != CODE_FOR_nothing)))
250 (view_convert (bit_and:itype (view_convert @0)
251 (ne @1 { build_zero_cst (type); })))))))
252
253 (for cmp (gt ge lt le)
254 outp (convert convert negate negate)
255 outn (negate negate convert convert)
256 /* Transform (X > 0.0 ? 1.0 : -1.0) into copysign(1, X). */
257 /* Transform (X >= 0.0 ? 1.0 : -1.0) into copysign(1, X). */
258 /* Transform (X < 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
259 /* Transform (X <= 0.0 ? 1.0 : -1.0) into copysign(1,-X). */
260 (simplify
261 (cond (cmp @0 real_zerop) real_onep@1 real_minus_onep)
262 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
263 && types_match (type, TREE_TYPE (@0)))
264 (switch
265 (if (types_match (type, float_type_node))
266 (BUILT_IN_COPYSIGNF @1 (outp @0)))
267 (if (types_match (type, double_type_node))
268 (BUILT_IN_COPYSIGN @1 (outp @0)))
269 (if (types_match (type, long_double_type_node))
270 (BUILT_IN_COPYSIGNL @1 (outp @0))))))
271 /* Transform (X > 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
272 /* Transform (X >= 0.0 ? -1.0 : 1.0) into copysign(1,-X). */
273 /* Transform (X < 0.0 ? -1.0 : 1.0) into copysign(1,X). */
274 /* Transform (X <= 0.0 ? -1.0 : 1.0) into copysign(1,X). */
275 (simplify
276 (cond (cmp @0 real_zerop) real_minus_onep real_onep@1)
277 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type)
278 && types_match (type, TREE_TYPE (@0)))
279 (switch
280 (if (types_match (type, float_type_node))
281 (BUILT_IN_COPYSIGNF @1 (outn @0)))
282 (if (types_match (type, double_type_node))
283 (BUILT_IN_COPYSIGN @1 (outn @0)))
284 (if (types_match (type, long_double_type_node))
285 (BUILT_IN_COPYSIGNL @1 (outn @0)))))))
286
287 /* Transform X * copysign (1.0, X) into abs(X). */
288 (simplify
289 (mult:c @0 (COPYSIGN_ALL real_onep @0))
290 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
291 (abs @0)))
292
293 /* Transform X * copysign (1.0, -X) into -abs(X). */
294 (simplify
295 (mult:c @0 (COPYSIGN_ALL real_onep (negate @0)))
296 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (type))
297 (negate (abs @0))))
298
299 /* Transform copysign (CST, X) into copysign (ABS(CST), X). */
300 (simplify
301 (COPYSIGN_ALL REAL_CST@0 @1)
302 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@0)))
303 (COPYSIGN_ALL (negate @0) @1)))
304
305 /* X * 1, X / 1 -> X. */
306 (for op (mult trunc_div ceil_div floor_div round_div exact_div)
307 (simplify
308 (op @0 integer_onep)
309 (non_lvalue @0)))
310
311 /* (A / (1 << B)) -> (A >> B).
312 Only for unsigned A. For signed A, this would not preserve rounding
313 toward zero.
314 For example: (-1 / ( 1 << B)) != -1 >> B.
315 Also also widening conversions, like:
316 (A / (unsigned long long) (1U << B)) -> (A >> B)
317 or
318 (A / (unsigned long long) (1 << B)) -> (A >> B).
319 If the left shift is signed, it can be done only if the upper bits
320 of A starting from shift's type sign bit are zero, as
321 (unsigned long long) (1 << 31) is -2147483648ULL, not 2147483648ULL,
322 so it is valid only if A >> 31 is zero. */
323 (simplify
324 (trunc_div @0 (convert? (lshift integer_onep@1 @2)))
325 (if ((TYPE_UNSIGNED (type) || tree_expr_nonnegative_p (@0))
326 && (!VECTOR_TYPE_P (type)
327 || target_supports_op_p (type, RSHIFT_EXPR, optab_vector)
328 || target_supports_op_p (type, RSHIFT_EXPR, optab_scalar))
329 && (useless_type_conversion_p (type, TREE_TYPE (@1))
330 || (element_precision (type) >= element_precision (TREE_TYPE (@1))
331 && (TYPE_UNSIGNED (TREE_TYPE (@1))
332 || (element_precision (type)
333 == element_precision (TREE_TYPE (@1)))
334 || (INTEGRAL_TYPE_P (type)
335 && (tree_nonzero_bits (@0)
336 & wi::mask (element_precision (TREE_TYPE (@1)) - 1,
337 true,
338 element_precision (type))) == 0)))))
339 (rshift @0 @2)))
340
341 /* Preserve explicit divisions by 0: the C++ front-end wants to detect
342 undefined behavior in constexpr evaluation, and assuming that the division
343 traps enables better optimizations than these anyway. */
344 (for div (trunc_div ceil_div floor_div round_div exact_div)
345 /* 0 / X is always zero. */
346 (simplify
347 (div integer_zerop@0 @1)
348 /* But not for 0 / 0 so that we can get the proper warnings and errors. */
349 (if (!integer_zerop (@1))
350 @0))
351 /* X / -1 is -X. */
352 (simplify
353 (div @0 integer_minus_onep@1)
354 (if (!TYPE_UNSIGNED (type))
355 (negate @0)))
356 /* X / X is one. */
357 (simplify
358 (div @0 @0)
359 /* But not for 0 / 0 so that we can get the proper warnings and errors.
360 And not for _Fract types where we can't build 1. */
361 (if (!integer_zerop (@0) && !ALL_FRACT_MODE_P (TYPE_MODE (type)))
362 { build_one_cst (type); }))
363 /* X / abs (X) is X < 0 ? -1 : 1. */
364 (simplify
365 (div:C @0 (abs @0))
366 (if (INTEGRAL_TYPE_P (type)
367 && TYPE_OVERFLOW_UNDEFINED (type))
368 (cond (lt @0 { build_zero_cst (type); })
369 { build_minus_one_cst (type); } { build_one_cst (type); })))
370 /* X / -X is -1. */
371 (simplify
372 (div:C @0 (negate @0))
373 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
374 && TYPE_OVERFLOW_UNDEFINED (type))
375 { build_minus_one_cst (type); })))
376
377 /* For unsigned integral types, FLOOR_DIV_EXPR is the same as
378 TRUNC_DIV_EXPR. Rewrite into the latter in this case. */
379 (simplify
380 (floor_div @0 @1)
381 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
382 && TYPE_UNSIGNED (type))
383 (trunc_div @0 @1)))
384
385 /* Combine two successive divisions. Note that combining ceil_div
386 and floor_div is trickier and combining round_div even more so. */
387 (for div (trunc_div exact_div)
388 (simplify
389 (div (div@3 @0 INTEGER_CST@1) INTEGER_CST@2)
390 (with {
391 wi::overflow_type overflow;
392 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
393 TYPE_SIGN (type), &overflow);
394 }
395 (if (div == EXACT_DIV_EXPR
396 || optimize_successive_divisions_p (@2, @3))
397 (if (!overflow)
398 (div @0 { wide_int_to_tree (type, mul); })
399 (if (TYPE_UNSIGNED (type)
400 || mul != wi::min_value (TYPE_PRECISION (type), SIGNED))
401 { build_zero_cst (type); }))))))
402
403 /* Combine successive multiplications. Similar to above, but handling
404 overflow is different. */
405 (simplify
406 (mult (mult @0 INTEGER_CST@1) INTEGER_CST@2)
407 (with {
408 wi::overflow_type overflow;
409 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
410 TYPE_SIGN (type), &overflow);
411 }
412 /* Skip folding on overflow: the only special case is @1 * @2 == -INT_MIN,
413 otherwise undefined overflow implies that @0 must be zero. */
414 (if (!overflow || TYPE_OVERFLOW_WRAPS (type))
415 (mult @0 { wide_int_to_tree (type, mul); }))))
416
417 /* Optimize A / A to 1.0 if we don't care about
418 NaNs or Infinities. */
419 (simplify
420 (rdiv @0 @0)
421 (if (FLOAT_TYPE_P (type)
422 && ! HONOR_NANS (type)
423 && ! HONOR_INFINITIES (type))
424 { build_one_cst (type); }))
425
426 /* Optimize -A / A to -1.0 if we don't care about
427 NaNs or Infinities. */
428 (simplify
429 (rdiv:C @0 (negate @0))
430 (if (FLOAT_TYPE_P (type)
431 && ! HONOR_NANS (type)
432 && ! HONOR_INFINITIES (type))
433 { build_minus_one_cst (type); }))
434
435 /* PR71078: x / abs(x) -> copysign (1.0, x) */
436 (simplify
437 (rdiv:C (convert? @0) (convert? (abs @0)))
438 (if (SCALAR_FLOAT_TYPE_P (type)
439 && ! HONOR_NANS (type)
440 && ! HONOR_INFINITIES (type))
441 (switch
442 (if (types_match (type, float_type_node))
443 (BUILT_IN_COPYSIGNF { build_one_cst (type); } (convert @0)))
444 (if (types_match (type, double_type_node))
445 (BUILT_IN_COPYSIGN { build_one_cst (type); } (convert @0)))
446 (if (types_match (type, long_double_type_node))
447 (BUILT_IN_COPYSIGNL { build_one_cst (type); } (convert @0))))))
448
449 /* In IEEE floating point, x/1 is not equivalent to x for snans. */
450 (simplify
451 (rdiv @0 real_onep)
452 (if (!HONOR_SNANS (type))
453 (non_lvalue @0)))
454
455 /* In IEEE floating point, x/-1 is not equivalent to -x for snans. */
456 (simplify
457 (rdiv @0 real_minus_onep)
458 (if (!HONOR_SNANS (type))
459 (negate @0)))
460
461 (if (flag_reciprocal_math)
462 /* Convert (A/B)/C to A/(B*C). */
463 (simplify
464 (rdiv (rdiv:s @0 @1) @2)
465 (rdiv @0 (mult @1 @2)))
466
467 /* Canonicalize x / (C1 * y) to (x * C2) / y. */
468 (simplify
469 (rdiv @0 (mult:s @1 REAL_CST@2))
470 (with
471 { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @2); }
472 (if (tem)
473 (rdiv (mult @0 { tem; } ) @1))))
474
475 /* Convert A/(B/C) to (A/B)*C */
476 (simplify
477 (rdiv @0 (rdiv:s @1 @2))
478 (mult (rdiv @0 @1) @2)))
479
480 /* Simplify x / (- y) to -x / y. */
481 (simplify
482 (rdiv @0 (negate @1))
483 (rdiv (negate @0) @1))
484
485 (if (flag_unsafe_math_optimizations)
486 /* Simplify (C / x op 0.0) to x op 0.0 for C != 0, C != Inf/Nan.
487 Since C / x may underflow to zero, do this only for unsafe math. */
488 (for op (lt le gt ge)
489 neg_op (gt ge lt le)
490 (simplify
491 (op (rdiv REAL_CST@0 @1) real_zerop@2)
492 (if (!HONOR_SIGNED_ZEROS (@1) && !HONOR_INFINITIES (@1))
493 (switch
494 (if (real_less (&dconst0, TREE_REAL_CST_PTR (@0)))
495 (op @1 @2))
496 /* For C < 0, use the inverted operator. */
497 (if (real_less (TREE_REAL_CST_PTR (@0), &dconst0))
498 (neg_op @1 @2)))))))
499
500 /* Optimize (X & (-A)) / A where A is a power of 2, to X >> log2(A) */
501 (for div (trunc_div ceil_div floor_div round_div exact_div)
502 (simplify
503 (div (convert? (bit_and @0 INTEGER_CST@1)) INTEGER_CST@2)
504 (if (integer_pow2p (@2)
505 && tree_int_cst_sgn (@2) > 0
506 && tree_nop_conversion_p (type, TREE_TYPE (@0))
507 && wi::to_wide (@2) + wi::to_wide (@1) == 0)
508 (rshift (convert @0)
509 { build_int_cst (integer_type_node,
510 wi::exact_log2 (wi::to_wide (@2))); }))))
511
512 /* If ARG1 is a constant, we can convert this to a multiply by the
513 reciprocal. This does not have the same rounding properties,
514 so only do this if -freciprocal-math. We can actually
515 always safely do it if ARG1 is a power of two, but it's hard to
516 tell if it is or not in a portable manner. */
517 (for cst (REAL_CST COMPLEX_CST VECTOR_CST)
518 (simplify
519 (rdiv @0 cst@1)
520 (if (optimize)
521 (if (flag_reciprocal_math
522 && !real_zerop (@1))
523 (with
524 { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
525 (if (tem)
526 (mult @0 { tem; } )))
527 (if (cst != COMPLEX_CST)
528 (with { tree inverse = exact_inverse (type, @1); }
529 (if (inverse)
530 (mult @0 { inverse; } ))))))))
531
532 (for mod (ceil_mod floor_mod round_mod trunc_mod)
533 /* 0 % X is always zero. */
534 (simplify
535 (mod integer_zerop@0 @1)
536 /* But not for 0 % 0 so that we can get the proper warnings and errors. */
537 (if (!integer_zerop (@1))
538 @0))
539 /* X % 1 is always zero. */
540 (simplify
541 (mod @0 integer_onep)
542 { build_zero_cst (type); })
543 /* X % -1 is zero. */
544 (simplify
545 (mod @0 integer_minus_onep@1)
546 (if (!TYPE_UNSIGNED (type))
547 { build_zero_cst (type); }))
548 /* X % X is zero. */
549 (simplify
550 (mod @0 @0)
551 /* But not for 0 % 0 so that we can get the proper warnings and errors. */
552 (if (!integer_zerop (@0))
553 { build_zero_cst (type); }))
554 /* (X % Y) % Y is just X % Y. */
555 (simplify
556 (mod (mod@2 @0 @1) @1)
557 @2)
558 /* From extract_muldiv_1: (X * C1) % C2 is zero if C1 is a multiple of C2. */
559 (simplify
560 (mod (mult @0 INTEGER_CST@1) INTEGER_CST@2)
561 (if (ANY_INTEGRAL_TYPE_P (type)
562 && TYPE_OVERFLOW_UNDEFINED (type)
563 && wi::multiple_of_p (wi::to_wide (@1), wi::to_wide (@2),
564 TYPE_SIGN (type)))
565 { build_zero_cst (type); }))
566 /* For (X % C) == 0, if X is signed and C is power of 2, use unsigned
567 modulo and comparison, since it is simpler and equivalent. */
568 (for cmp (eq ne)
569 (simplify
570 (cmp (mod @0 integer_pow2p@2) integer_zerop@1)
571 (if (!TYPE_UNSIGNED (TREE_TYPE (@0)))
572 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
573 (cmp (mod (convert:utype @0) (convert:utype @2)) (convert:utype @1)))))))
574
575 /* X % -C is the same as X % C. */
576 (simplify
577 (trunc_mod @0 INTEGER_CST@1)
578 (if (TYPE_SIGN (type) == SIGNED
579 && !TREE_OVERFLOW (@1)
580 && wi::neg_p (wi::to_wide (@1))
581 && !TYPE_OVERFLOW_TRAPS (type)
582 /* Avoid this transformation if C is INT_MIN, i.e. C == -C. */
583 && !sign_bit_p (@1, @1))
584 (trunc_mod @0 (negate @1))))
585
586 /* X % -Y is the same as X % Y. */
587 (simplify
588 (trunc_mod @0 (convert? (negate @1)))
589 (if (INTEGRAL_TYPE_P (type)
590 && !TYPE_UNSIGNED (type)
591 && !TYPE_OVERFLOW_TRAPS (type)
592 && tree_nop_conversion_p (type, TREE_TYPE (@1))
593 /* Avoid this transformation if X might be INT_MIN or
594 Y might be -1, because we would then change valid
595 INT_MIN % -(-1) into invalid INT_MIN % -1. */
596 && (expr_not_equal_to (@0, wi::to_wide (TYPE_MIN_VALUE (type)))
597 || expr_not_equal_to (@1, wi::minus_one (TYPE_PRECISION
598 (TREE_TYPE (@1))))))
599 (trunc_mod @0 (convert @1))))
600
601 /* X - (X / Y) * Y is the same as X % Y. */
602 (simplify
603 (minus (convert1? @0) (convert2? (mult:c (trunc_div @@0 @@1) @1)))
604 (if (INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
605 (convert (trunc_mod @0 @1))))
606
607 /* Optimize TRUNC_MOD_EXPR by a power of two into a BIT_AND_EXPR,
608 i.e. "X % C" into "X & (C - 1)", if X and C are positive.
609 Also optimize A % (C << N) where C is a power of 2,
610 to A & ((C << N) - 1). */
611 (match (power_of_two_cand @1)
612 INTEGER_CST@1)
613 (match (power_of_two_cand @1)
614 (lshift INTEGER_CST@1 @2))
615 (for mod (trunc_mod floor_mod)
616 (simplify
617 (mod @0 (convert?@3 (power_of_two_cand@1 @2)))
618 (if ((TYPE_UNSIGNED (type)
619 || tree_expr_nonnegative_p (@0))
620 && tree_nop_conversion_p (type, TREE_TYPE (@3))
621 && integer_pow2p (@2) && tree_int_cst_sgn (@2) > 0)
622 (bit_and @0 (convert (minus @1 { build_int_cst (TREE_TYPE (@1), 1); }))))))
623
624 /* Simplify (unsigned t * 2)/2 -> unsigned t & 0x7FFFFFFF. */
625 (simplify
626 (trunc_div (mult @0 integer_pow2p@1) @1)
627 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
628 (bit_and @0 { wide_int_to_tree
629 (type, wi::mask (TYPE_PRECISION (type)
630 - wi::exact_log2 (wi::to_wide (@1)),
631 false, TYPE_PRECISION (type))); })))
632
633 /* Simplify (unsigned t / 2) * 2 -> unsigned t & ~1. */
634 (simplify
635 (mult (trunc_div @0 integer_pow2p@1) @1)
636 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
637 (bit_and @0 (negate @1))))
638
639 /* Simplify (t * 2) / 2) -> t. */
640 (for div (trunc_div ceil_div floor_div round_div exact_div)
641 (simplify
642 (div (mult:c @0 @1) @1)
643 (if (ANY_INTEGRAL_TYPE_P (type)
644 && TYPE_OVERFLOW_UNDEFINED (type))
645 @0)))
646
647 (for op (negate abs)
648 /* Simplify cos(-x) and cos(|x|) -> cos(x). Similarly for cosh. */
649 (for coss (COS COSH)
650 (simplify
651 (coss (op @0))
652 (coss @0)))
653 /* Simplify pow(-x, y) and pow(|x|,y) -> pow(x,y) if y is an even integer. */
654 (for pows (POW)
655 (simplify
656 (pows (op @0) REAL_CST@1)
657 (with { HOST_WIDE_INT n; }
658 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
659 (pows @0 @1)))))
660 /* Likewise for powi. */
661 (for pows (POWI)
662 (simplify
663 (pows (op @0) INTEGER_CST@1)
664 (if ((wi::to_wide (@1) & 1) == 0)
665 (pows @0 @1))))
666 /* Strip negate and abs from both operands of hypot. */
667 (for hypots (HYPOT)
668 (simplify
669 (hypots (op @0) @1)
670 (hypots @0 @1))
671 (simplify
672 (hypots @0 (op @1))
673 (hypots @0 @1)))
674 /* copysign(-x, y) and copysign(abs(x), y) -> copysign(x, y). */
675 (for copysigns (COPYSIGN_ALL)
676 (simplify
677 (copysigns (op @0) @1)
678 (copysigns @0 @1))))
679
680 /* abs(x)*abs(x) -> x*x. Should be valid for all types. */
681 (simplify
682 (mult (abs@1 @0) @1)
683 (mult @0 @0))
684
685 /* Convert absu(x)*absu(x) -> x*x. */
686 (simplify
687 (mult (absu@1 @0) @1)
688 (mult (convert@2 @0) @2))
689
690 /* cos(copysign(x, y)) -> cos(x). Similarly for cosh. */
691 (for coss (COS COSH)
692 copysigns (COPYSIGN)
693 (simplify
694 (coss (copysigns @0 @1))
695 (coss @0)))
696
697 /* pow(copysign(x, y), z) -> pow(x, z) if z is an even integer. */
698 (for pows (POW)
699 copysigns (COPYSIGN)
700 (simplify
701 (pows (copysigns @0 @2) REAL_CST@1)
702 (with { HOST_WIDE_INT n; }
703 (if (real_isinteger (&TREE_REAL_CST (@1), &n) && (n & 1) == 0)
704 (pows @0 @1)))))
705 /* Likewise for powi. */
706 (for pows (POWI)
707 copysigns (COPYSIGN)
708 (simplify
709 (pows (copysigns @0 @2) INTEGER_CST@1)
710 (if ((wi::to_wide (@1) & 1) == 0)
711 (pows @0 @1))))
712
713 (for hypots (HYPOT)
714 copysigns (COPYSIGN)
715 /* hypot(copysign(x, y), z) -> hypot(x, z). */
716 (simplify
717 (hypots (copysigns @0 @1) @2)
718 (hypots @0 @2))
719 /* hypot(x, copysign(y, z)) -> hypot(x, y). */
720 (simplify
721 (hypots @0 (copysigns @1 @2))
722 (hypots @0 @1)))
723
724 /* copysign(x, CST) -> [-]abs (x). */
725 (for copysigns (COPYSIGN_ALL)
726 (simplify
727 (copysigns @0 REAL_CST@1)
728 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
729 (negate (abs @0))
730 (abs @0))))
731
732 /* copysign(copysign(x, y), z) -> copysign(x, z). */
733 (for copysigns (COPYSIGN_ALL)
734 (simplify
735 (copysigns (copysigns @0 @1) @2)
736 (copysigns @0 @2)))
737
738 /* copysign(x,y)*copysign(x,y) -> x*x. */
739 (for copysigns (COPYSIGN_ALL)
740 (simplify
741 (mult (copysigns@2 @0 @1) @2)
742 (mult @0 @0)))
743
744 /* ccos(-x) -> ccos(x). Similarly for ccosh. */
745 (for ccoss (CCOS CCOSH)
746 (simplify
747 (ccoss (negate @0))
748 (ccoss @0)))
749
750 /* cabs(-x) and cos(conj(x)) -> cabs(x). */
751 (for ops (conj negate)
752 (for cabss (CABS)
753 (simplify
754 (cabss (ops @0))
755 (cabss @0))))
756
757 /* Fold (a * (1 << b)) into (a << b) */
758 (simplify
759 (mult:c @0 (convert? (lshift integer_onep@1 @2)))
760 (if (! FLOAT_TYPE_P (type)
761 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
762 (lshift @0 @2)))
763
764 /* Fold (1 << (C - x)) where C = precision(type) - 1
765 into ((1 << C) >> x). */
766 (simplify
767 (lshift integer_onep@0 (minus@1 INTEGER_CST@2 @3))
768 (if (INTEGRAL_TYPE_P (type)
769 && wi::eq_p (wi::to_wide (@2), TYPE_PRECISION (type) - 1)
770 && single_use (@1))
771 (if (TYPE_UNSIGNED (type))
772 (rshift (lshift @0 @2) @3)
773 (with
774 { tree utype = unsigned_type_for (type); }
775 (convert (rshift (lshift (convert:utype @0) @2) @3))))))
776
777 /* Fold (C1/X)*C2 into (C1*C2)/X. */
778 (simplify
779 (mult (rdiv@3 REAL_CST@0 @1) REAL_CST@2)
780 (if (flag_associative_math
781 && single_use (@3))
782 (with
783 { tree tem = const_binop (MULT_EXPR, type, @0, @2); }
784 (if (tem)
785 (rdiv { tem; } @1)))))
786
787 /* Simplify ~X & X as zero. */
788 (simplify
789 (bit_and:c (convert? @0) (convert? (bit_not @0)))
790 { build_zero_cst (type); })
791
792 /* PR71636: Transform x & ((1U << b) - 1) -> x & ~(~0U << b); */
793 (simplify
794 (bit_and:c @0 (plus:s (lshift:s integer_onep @1) integer_minus_onep))
795 (if (TYPE_UNSIGNED (type))
796 (bit_and @0 (bit_not (lshift { build_all_ones_cst (type); } @1)))))
797
798 (for bitop (bit_and bit_ior)
799 cmp (eq ne)
800 /* PR35691: Transform
801 (x == 0 & y == 0) -> (x | typeof(x)(y)) == 0.
802 (x != 0 | y != 0) -> (x | typeof(x)(y)) != 0. */
803 (simplify
804 (bitop (cmp @0 integer_zerop@2) (cmp @1 integer_zerop))
805 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
806 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
807 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
808 (cmp (bit_ior @0 (convert @1)) @2)))
809 /* Transform:
810 (x == -1 & y == -1) -> (x & typeof(x)(y)) == -1.
811 (x != -1 | y != -1) -> (x & typeof(x)(y)) != -1. */
812 (simplify
813 (bitop (cmp @0 integer_all_onesp@2) (cmp @1 integer_all_onesp))
814 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
815 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
816 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
817 (cmp (bit_and @0 (convert @1)) @2))))
818
819 /* Fold (A & ~B) - (A & B) into (A ^ B) - B. */
820 (simplify
821 (minus (bit_and:cs @0 (bit_not @1)) (bit_and:cs @0 @1))
822 (minus (bit_xor @0 @1) @1))
823 (simplify
824 (minus (bit_and:s @0 INTEGER_CST@2) (bit_and:s @0 INTEGER_CST@1))
825 (if (~wi::to_wide (@2) == wi::to_wide (@1))
826 (minus (bit_xor @0 @1) @1)))
827
828 /* Fold (A & B) - (A & ~B) into B - (A ^ B). */
829 (simplify
830 (minus (bit_and:cs @0 @1) (bit_and:cs @0 (bit_not @1)))
831 (minus @1 (bit_xor @0 @1)))
832
833 /* Simplify (X & ~Y) |^+ (~X & Y) -> X ^ Y. */
834 (for op (bit_ior bit_xor plus)
835 (simplify
836 (op (bit_and:c @0 (bit_not @1)) (bit_and:c (bit_not @0) @1))
837 (bit_xor @0 @1))
838 (simplify
839 (op:c (bit_and @0 INTEGER_CST@2) (bit_and (bit_not @0) INTEGER_CST@1))
840 (if (~wi::to_wide (@2) == wi::to_wide (@1))
841 (bit_xor @0 @1))))
842
843 /* PR53979: Transform ((a ^ b) | a) -> (a | b) */
844 (simplify
845 (bit_ior:c (bit_xor:c @0 @1) @0)
846 (bit_ior @0 @1))
847
848 /* (a & ~b) | (a ^ b) --> a ^ b */
849 (simplify
850 (bit_ior:c (bit_and:c @0 (bit_not @1)) (bit_xor:c@2 @0 @1))
851 @2)
852
853 /* (a & ~b) ^ ~a --> ~(a & b) */
854 (simplify
855 (bit_xor:c (bit_and:cs @0 (bit_not @1)) (bit_not @0))
856 (bit_not (bit_and @0 @1)))
857
858 /* (~a & b) ^ a --> (a | b) */
859 (simplify
860 (bit_xor:c (bit_and:cs (bit_not @0) @1) @0)
861 (bit_ior @0 @1))
862
863 /* (a | b) & ~(a ^ b) --> a & b */
864 (simplify
865 (bit_and:c (bit_ior @0 @1) (bit_not (bit_xor:c @0 @1)))
866 (bit_and @0 @1))
867
868 /* a | ~(a ^ b) --> a | ~b */
869 (simplify
870 (bit_ior:c @0 (bit_not:s (bit_xor:c @0 @1)))
871 (bit_ior @0 (bit_not @1)))
872
873 /* (a | b) | (a &^ b) --> a | b */
874 (for op (bit_and bit_xor)
875 (simplify
876 (bit_ior:c (bit_ior@2 @0 @1) (op:c @0 @1))
877 @2))
878
879 /* (a & b) | ~(a ^ b) --> ~(a ^ b) */
880 (simplify
881 (bit_ior:c (bit_and:c @0 @1) (bit_not@2 (bit_xor @0 @1)))
882 @2)
883
884 /* ~(~a & b) --> a | ~b */
885 (simplify
886 (bit_not (bit_and:cs (bit_not @0) @1))
887 (bit_ior @0 (bit_not @1)))
888
889 /* ~(~a | b) --> a & ~b */
890 (simplify
891 (bit_not (bit_ior:cs (bit_not @0) @1))
892 (bit_and @0 (bit_not @1)))
893
894 /* Simplify (~X & Y) to X ^ Y if we know that (X & ~Y) is 0. */
895 #if GIMPLE
896 (simplify
897 (bit_and (bit_not SSA_NAME@0) INTEGER_CST@1)
898 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
899 && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
900 (bit_xor @0 @1)))
901 #endif
902
903 /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
904 ((A & N) + B) & M -> (A + B) & M
905 Similarly if (N & M) == 0,
906 ((A | N) + B) & M -> (A + B) & M
907 and for - instead of + (or unary - instead of +)
908 and/or ^ instead of |.
909 If B is constant and (B & M) == 0, fold into A & M. */
910 (for op (plus minus)
911 (for bitop (bit_and bit_ior bit_xor)
912 (simplify
913 (bit_and (op:s (bitop:s@0 @3 INTEGER_CST@4) @1) INTEGER_CST@2)
914 (with
915 { tree pmop[2];
916 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, bitop,
917 @3, @4, @1, ERROR_MARK, NULL_TREE,
918 NULL_TREE, pmop); }
919 (if (utype)
920 (convert (bit_and (op (convert:utype { pmop[0]; })
921 (convert:utype { pmop[1]; }))
922 (convert:utype @2))))))
923 (simplify
924 (bit_and (op:s @0 (bitop:s@1 @3 INTEGER_CST@4)) INTEGER_CST@2)
925 (with
926 { tree pmop[2];
927 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
928 NULL_TREE, NULL_TREE, @1, bitop, @3,
929 @4, pmop); }
930 (if (utype)
931 (convert (bit_and (op (convert:utype { pmop[0]; })
932 (convert:utype { pmop[1]; }))
933 (convert:utype @2)))))))
934 (simplify
935 (bit_and (op:s @0 @1) INTEGER_CST@2)
936 (with
937 { tree pmop[2];
938 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @2, op, @0, ERROR_MARK,
939 NULL_TREE, NULL_TREE, @1, ERROR_MARK,
940 NULL_TREE, NULL_TREE, pmop); }
941 (if (utype)
942 (convert (bit_and (op (convert:utype { pmop[0]; })
943 (convert:utype { pmop[1]; }))
944 (convert:utype @2)))))))
945 (for bitop (bit_and bit_ior bit_xor)
946 (simplify
947 (bit_and (negate:s (bitop:s@0 @2 INTEGER_CST@3)) INTEGER_CST@1)
948 (with
949 { tree pmop[2];
950 tree utype = fold_bit_and_mask (TREE_TYPE (@0), @1, NEGATE_EXPR, @0,
951 bitop, @2, @3, NULL_TREE, ERROR_MARK,
952 NULL_TREE, NULL_TREE, pmop); }
953 (if (utype)
954 (convert (bit_and (negate (convert:utype { pmop[0]; }))
955 (convert:utype @1)))))))
956
957 /* X % Y is smaller than Y. */
958 (for cmp (lt ge)
959 (simplify
960 (cmp (trunc_mod @0 @1) @1)
961 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
962 { constant_boolean_node (cmp == LT_EXPR, type); })))
963 (for cmp (gt le)
964 (simplify
965 (cmp @1 (trunc_mod @0 @1))
966 (if (TYPE_UNSIGNED (TREE_TYPE (@0)))
967 { constant_boolean_node (cmp == GT_EXPR, type); })))
968
969 /* x | ~0 -> ~0 */
970 (simplify
971 (bit_ior @0 integer_all_onesp@1)
972 @1)
973
974 /* x | 0 -> x */
975 (simplify
976 (bit_ior @0 integer_zerop)
977 @0)
978
979 /* x & 0 -> 0 */
980 (simplify
981 (bit_and @0 integer_zerop@1)
982 @1)
983
984 /* ~x | x -> -1 */
985 /* ~x ^ x -> -1 */
986 /* ~x + x -> -1 */
987 (for op (bit_ior bit_xor plus)
988 (simplify
989 (op:c (convert? @0) (convert? (bit_not @0)))
990 (convert { build_all_ones_cst (TREE_TYPE (@0)); })))
991
992 /* x ^ x -> 0 */
993 (simplify
994 (bit_xor @0 @0)
995 { build_zero_cst (type); })
996
997 /* Canonicalize X ^ ~0 to ~X. */
998 (simplify
999 (bit_xor @0 integer_all_onesp@1)
1000 (bit_not @0))
1001
1002 /* x & ~0 -> x */
1003 (simplify
1004 (bit_and @0 integer_all_onesp)
1005 (non_lvalue @0))
1006
1007 /* x & x -> x, x | x -> x */
1008 (for bitop (bit_and bit_ior)
1009 (simplify
1010 (bitop @0 @0)
1011 (non_lvalue @0)))
1012
1013 /* x & C -> x if we know that x & ~C == 0. */
1014 #if GIMPLE
1015 (simplify
1016 (bit_and SSA_NAME@0 INTEGER_CST@1)
1017 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1018 && wi::bit_and_not (get_nonzero_bits (@0), wi::to_wide (@1)) == 0)
1019 @0))
1020 #endif
1021
1022 /* ~(~X - Y) -> X + Y and ~(~X + Y) -> X - Y. */
1023 (simplify
1024 (bit_not (minus (bit_not @0) @1))
1025 (plus @0 @1))
1026 (simplify
1027 (bit_not (plus:c (bit_not @0) @1))
1028 (minus @0 @1))
1029
1030 /* x + (x & 1) -> (x + 1) & ~1 */
1031 (simplify
1032 (plus:c @0 (bit_and:s @0 integer_onep@1))
1033 (bit_and (plus @0 @1) (bit_not @1)))
1034
1035 /* x & ~(x & y) -> x & ~y */
1036 /* x | ~(x | y) -> x | ~y */
1037 (for bitop (bit_and bit_ior)
1038 (simplify
1039 (bitop:c @0 (bit_not (bitop:cs @0 @1)))
1040 (bitop @0 (bit_not @1))))
1041
1042 /* (~x & y) | ~(x | y) -> ~x */
1043 (simplify
1044 (bit_ior:c (bit_and:c (bit_not@2 @0) @1) (bit_not (bit_ior:c @0 @1)))
1045 @2)
1046
1047 /* (x | y) ^ (x | ~y) -> ~x */
1048 (simplify
1049 (bit_xor:c (bit_ior:c @0 @1) (bit_ior:c @0 (bit_not @1)))
1050 (bit_not @0))
1051
1052 /* (x & y) | ~(x | y) -> ~(x ^ y) */
1053 (simplify
1054 (bit_ior:c (bit_and:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
1055 (bit_not (bit_xor @0 @1)))
1056
1057 /* (~x | y) ^ (x ^ y) -> x | ~y */
1058 (simplify
1059 (bit_xor:c (bit_ior:cs (bit_not @0) @1) (bit_xor:s @0 @1))
1060 (bit_ior @0 (bit_not @1)))
1061
1062 /* (x ^ y) | ~(x | y) -> ~(x & y) */
1063 (simplify
1064 (bit_ior:c (bit_xor:s @0 @1) (bit_not:s (bit_ior:s @0 @1)))
1065 (bit_not (bit_and @0 @1)))
1066
1067 /* (x | y) & ~x -> y & ~x */
1068 /* (x & y) | ~x -> y | ~x */
1069 (for bitop (bit_and bit_ior)
1070 rbitop (bit_ior bit_and)
1071 (simplify
1072 (bitop:c (rbitop:c @0 @1) (bit_not@2 @0))
1073 (bitop @1 @2)))
1074
1075 /* (x & y) ^ (x | y) -> x ^ y */
1076 (simplify
1077 (bit_xor:c (bit_and @0 @1) (bit_ior @0 @1))
1078 (bit_xor @0 @1))
1079
1080 /* (x ^ y) ^ (x | y) -> x & y */
1081 (simplify
1082 (bit_xor:c (bit_xor @0 @1) (bit_ior @0 @1))
1083 (bit_and @0 @1))
1084
1085 /* (x & y) + (x ^ y) -> x | y */
1086 /* (x & y) | (x ^ y) -> x | y */
1087 /* (x & y) ^ (x ^ y) -> x | y */
1088 (for op (plus bit_ior bit_xor)
1089 (simplify
1090 (op:c (bit_and @0 @1) (bit_xor @0 @1))
1091 (bit_ior @0 @1)))
1092
1093 /* (x & y) + (x | y) -> x + y */
1094 (simplify
1095 (plus:c (bit_and @0 @1) (bit_ior @0 @1))
1096 (plus @0 @1))
1097
1098 /* (x + y) - (x | y) -> x & y */
1099 (simplify
1100 (minus (plus @0 @1) (bit_ior @0 @1))
1101 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
1102 && !TYPE_SATURATING (type))
1103 (bit_and @0 @1)))
1104
1105 /* (x + y) - (x & y) -> x | y */
1106 (simplify
1107 (minus (plus @0 @1) (bit_and @0 @1))
1108 (if (!TYPE_OVERFLOW_SANITIZED (type) && !TYPE_OVERFLOW_TRAPS (type)
1109 && !TYPE_SATURATING (type))
1110 (bit_ior @0 @1)))
1111
1112 /* (x | y) - y -> (x & ~y) */
1113 (simplify
1114 (minus (bit_ior:cs @0 @1) @1)
1115 (bit_and @0 (bit_not @1)))
1116
1117 /* (x | y) - (x ^ y) -> x & y */
1118 (simplify
1119 (minus (bit_ior @0 @1) (bit_xor @0 @1))
1120 (bit_and @0 @1))
1121
1122 /* (x | y) - (x & y) -> x ^ y */
1123 (simplify
1124 (minus (bit_ior @0 @1) (bit_and @0 @1))
1125 (bit_xor @0 @1))
1126
1127 /* (x | y) & ~(x & y) -> x ^ y */
1128 (simplify
1129 (bit_and:c (bit_ior @0 @1) (bit_not (bit_and @0 @1)))
1130 (bit_xor @0 @1))
1131
1132 /* (x | y) & (~x ^ y) -> x & y */
1133 (simplify
1134 (bit_and:c (bit_ior:c @0 @1) (bit_xor:c @1 (bit_not @0)))
1135 (bit_and @0 @1))
1136
1137 /* (~x | y) & (x | ~y) -> ~(x ^ y) */
1138 (simplify
1139 (bit_and (bit_ior:cs (bit_not @0) @1) (bit_ior:cs @0 (bit_not @1)))
1140 (bit_not (bit_xor @0 @1)))
1141
1142 /* (~x | y) ^ (x | ~y) -> x ^ y */
1143 (simplify
1144 (bit_xor (bit_ior:c (bit_not @0) @1) (bit_ior:c @0 (bit_not @1)))
1145 (bit_xor @0 @1))
1146
1147 /* ~x & ~y -> ~(x | y)
1148 ~x | ~y -> ~(x & y) */
1149 (for op (bit_and bit_ior)
1150 rop (bit_ior bit_and)
1151 (simplify
1152 (op (convert1? (bit_not @0)) (convert2? (bit_not @1)))
1153 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1154 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
1155 (bit_not (rop (convert @0) (convert @1))))))
1156
1157 /* If we are XORing or adding two BIT_AND_EXPR's, both of which are and'ing
1158 with a constant, and the two constants have no bits in common,
1159 we should treat this as a BIT_IOR_EXPR since this may produce more
1160 simplifications. */
1161 (for op (bit_xor plus)
1162 (simplify
1163 (op (convert1? (bit_and@4 @0 INTEGER_CST@1))
1164 (convert2? (bit_and@5 @2 INTEGER_CST@3)))
1165 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1166 && tree_nop_conversion_p (type, TREE_TYPE (@2))
1167 && (wi::to_wide (@1) & wi::to_wide (@3)) == 0)
1168 (bit_ior (convert @4) (convert @5)))))
1169
1170 /* (X | Y) ^ X -> Y & ~ X*/
1171 (simplify
1172 (bit_xor:c (convert1? (bit_ior:c @@0 @1)) (convert2? @0))
1173 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1174 (convert (bit_and @1 (bit_not @0)))))
1175
1176 /* Convert ~X ^ ~Y to X ^ Y. */
1177 (simplify
1178 (bit_xor (convert1? (bit_not @0)) (convert2? (bit_not @1)))
1179 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1180 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
1181 (bit_xor (convert @0) (convert @1))))
1182
1183 /* Convert ~X ^ C to X ^ ~C. */
1184 (simplify
1185 (bit_xor (convert? (bit_not @0)) INTEGER_CST@1)
1186 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1187 (bit_xor (convert @0) (bit_not @1))))
1188
1189 /* Fold (X & Y) ^ Y and (X ^ Y) & Y as ~X & Y. */
1190 (for opo (bit_and bit_xor)
1191 opi (bit_xor bit_and)
1192 (simplify
1193 (opo:c (opi:cs @0 @1) @1)
1194 (bit_and (bit_not @0) @1)))
1195
1196 /* Given a bit-wise operation CODE applied to ARG0 and ARG1, see if both
1197 operands are another bit-wise operation with a common input. If so,
1198 distribute the bit operations to save an operation and possibly two if
1199 constants are involved. For example, convert
1200 (A | B) & (A | C) into A | (B & C)
1201 Further simplification will occur if B and C are constants. */
1202 (for op (bit_and bit_ior bit_xor)
1203 rop (bit_ior bit_and bit_and)
1204 (simplify
1205 (op (convert? (rop:c @@0 @1)) (convert? (rop:c @0 @2)))
1206 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1207 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
1208 (rop (convert @0) (op (convert @1) (convert @2))))))
1209
1210 /* Some simple reassociation for bit operations, also handled in reassoc. */
1211 /* (X & Y) & Y -> X & Y
1212 (X | Y) | Y -> X | Y */
1213 (for op (bit_and bit_ior)
1214 (simplify
1215 (op:c (convert1?@2 (op:c @0 @@1)) (convert2? @1))
1216 @2))
1217 /* (X ^ Y) ^ Y -> X */
1218 (simplify
1219 (bit_xor:c (convert1? (bit_xor:c @0 @@1)) (convert2? @1))
1220 (convert @0))
1221 /* (X & Y) & (X & Z) -> (X & Y) & Z
1222 (X | Y) | (X | Z) -> (X | Y) | Z */
1223 (for op (bit_and bit_ior)
1224 (simplify
1225 (op (convert1?@3 (op:c@4 @0 @1)) (convert2?@5 (op:c@6 @0 @2)))
1226 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1227 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
1228 (if (single_use (@5) && single_use (@6))
1229 (op @3 (convert @2))
1230 (if (single_use (@3) && single_use (@4))
1231 (op (convert @1) @5))))))
1232 /* (X ^ Y) ^ (X ^ Z) -> Y ^ Z */
1233 (simplify
1234 (bit_xor (convert1? (bit_xor:c @0 @1)) (convert2? (bit_xor:c @0 @2)))
1235 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
1236 && tree_nop_conversion_p (type, TREE_TYPE (@2)))
1237 (bit_xor (convert @1) (convert @2))))
1238
1239 /* Convert abs (abs (X)) into abs (X).
1240 also absu (absu (X)) into absu (X). */
1241 (simplify
1242 (abs (abs@1 @0))
1243 @1)
1244
1245 (simplify
1246 (absu (convert@2 (absu@1 @0)))
1247 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@1)))
1248 @1))
1249
1250 /* Convert abs[u] (-X) -> abs[u] (X). */
1251 (simplify
1252 (abs (negate @0))
1253 (abs @0))
1254
1255 (simplify
1256 (absu (negate @0))
1257 (absu @0))
1258
1259 /* Convert abs[u] (X) where X is nonnegative -> (X). */
1260 (simplify
1261 (abs tree_expr_nonnegative_p@0)
1262 @0)
1263
1264 (simplify
1265 (absu tree_expr_nonnegative_p@0)
1266 (convert @0))
1267
1268 /* A few cases of fold-const.c negate_expr_p predicate. */
1269 (match negate_expr_p
1270 INTEGER_CST
1271 (if ((INTEGRAL_TYPE_P (type)
1272 && TYPE_UNSIGNED (type))
1273 || (!TYPE_OVERFLOW_SANITIZED (type)
1274 && may_negate_without_overflow_p (t)))))
1275 (match negate_expr_p
1276 FIXED_CST)
1277 (match negate_expr_p
1278 (negate @0)
1279 (if (!TYPE_OVERFLOW_SANITIZED (type))))
1280 (match negate_expr_p
1281 REAL_CST
1282 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (t)))))
1283 /* VECTOR_CST handling of non-wrapping types would recurse in unsupported
1284 ways. */
1285 (match negate_expr_p
1286 VECTOR_CST
1287 (if (FLOAT_TYPE_P (TREE_TYPE (type)) || TYPE_OVERFLOW_WRAPS (type))))
1288 (match negate_expr_p
1289 (minus @0 @1)
1290 (if ((ANY_INTEGRAL_TYPE_P (type) && TYPE_OVERFLOW_WRAPS (type))
1291 || (FLOAT_TYPE_P (type)
1292 && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
1293 && !HONOR_SIGNED_ZEROS (type)))))
1294
1295 /* (-A) * (-B) -> A * B */
1296 (simplify
1297 (mult:c (convert1? (negate @0)) (convert2? negate_expr_p@1))
1298 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1299 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
1300 (mult (convert @0) (convert (negate @1)))))
1301
1302 /* -(A + B) -> (-B) - A. */
1303 (simplify
1304 (negate (plus:c @0 negate_expr_p@1))
1305 (if (!HONOR_SIGN_DEPENDENT_ROUNDING (element_mode (type))
1306 && !HONOR_SIGNED_ZEROS (element_mode (type)))
1307 (minus (negate @1) @0)))
1308
1309 /* -(A - B) -> B - A. */
1310 (simplify
1311 (negate (minus @0 @1))
1312 (if ((ANY_INTEGRAL_TYPE_P (type) && !TYPE_OVERFLOW_SANITIZED (type))
1313 || (FLOAT_TYPE_P (type)
1314 && !HONOR_SIGN_DEPENDENT_ROUNDING (type)
1315 && !HONOR_SIGNED_ZEROS (type)))
1316 (minus @1 @0)))
1317 (simplify
1318 (negate (pointer_diff @0 @1))
1319 (if (TYPE_OVERFLOW_UNDEFINED (type))
1320 (pointer_diff @1 @0)))
1321
1322 /* A - B -> A + (-B) if B is easily negatable. */
1323 (simplify
1324 (minus @0 negate_expr_p@1)
1325 (if (!FIXED_POINT_TYPE_P (type))
1326 (plus @0 (negate @1))))
1327
1328 /* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
1329 when profitable.
1330 For bitwise binary operations apply operand conversions to the
1331 binary operation result instead of to the operands. This allows
1332 to combine successive conversions and bitwise binary operations.
1333 We combine the above two cases by using a conditional convert. */
1334 (for bitop (bit_and bit_ior bit_xor)
1335 (simplify
1336 (bitop (convert@2 @0) (convert?@3 @1))
1337 (if (((TREE_CODE (@1) == INTEGER_CST
1338 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
1339 && int_fits_type_p (@1, TREE_TYPE (@0)))
1340 || types_match (@0, @1))
1341 /* ??? This transform conflicts with fold-const.c doing
1342 Convert (T)(x & c) into (T)x & (T)c, if c is an integer
1343 constants (if x has signed type, the sign bit cannot be set
1344 in c). This folds extension into the BIT_AND_EXPR.
1345 Restrict it to GIMPLE to avoid endless recursions. */
1346 && (bitop != BIT_AND_EXPR || GIMPLE)
1347 && (/* That's a good idea if the conversion widens the operand, thus
1348 after hoisting the conversion the operation will be narrower. */
1349 TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
1350 /* It's also a good idea if the conversion is to a non-integer
1351 mode. */
1352 || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
1353 /* Or if the precision of TO is not the same as the precision
1354 of its mode. */
1355 || !type_has_mode_precision_p (type)
1356 /* In GIMPLE, getting rid of 2 conversions for one new results
1357 in smaller IL. */
1358 || (GIMPLE
1359 && TREE_CODE (@1) != INTEGER_CST
1360 && tree_nop_conversion_p (type, TREE_TYPE (@0))
1361 && single_use (@2)
1362 && single_use (@3))))
1363 (convert (bitop @0 (convert @1)))))
1364 /* In GIMPLE, getting rid of 2 conversions for one new results
1365 in smaller IL. */
1366 (simplify
1367 (convert (bitop:cs@2 (nop_convert:s @0) @1))
1368 (if (GIMPLE
1369 && TREE_CODE (@1) != INTEGER_CST
1370 && tree_nop_conversion_p (type, TREE_TYPE (@2))
1371 && types_match (type, @0))
1372 (bitop @0 (convert @1)))))
1373
1374 (for bitop (bit_and bit_ior)
1375 rbitop (bit_ior bit_and)
1376 /* (x | y) & x -> x */
1377 /* (x & y) | x -> x */
1378 (simplify
1379 (bitop:c (rbitop:c @0 @1) @0)
1380 @0)
1381 /* (~x | y) & x -> x & y */
1382 /* (~x & y) | x -> x | y */
1383 (simplify
1384 (bitop:c (rbitop:c (bit_not @0) @1) @0)
1385 (bitop @0 @1)))
1386
1387 /* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
1388 (simplify
1389 (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
1390 (bit_ior (bit_and @0 @2) (bit_and @1 @2)))
1391
1392 /* Combine successive equal operations with constants. */
1393 (for bitop (bit_and bit_ior bit_xor)
1394 (simplify
1395 (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
1396 (if (!CONSTANT_CLASS_P (@0))
1397 /* This is the canonical form regardless of whether (bitop @1 @2) can be
1398 folded to a constant. */
1399 (bitop @0 (bitop @1 @2))
1400 /* In this case we have three constants and (bitop @0 @1) doesn't fold
1401 to a constant. This can happen if @0 or @1 is a POLY_INT_CST and if
1402 the values involved are such that the operation can't be decided at
1403 compile time. Try folding one of @0 or @1 with @2 to see whether
1404 that combination can be decided at compile time.
1405
1406 Keep the existing form if both folds fail, to avoid endless
1407 oscillation. */
1408 (with { tree cst1 = const_binop (bitop, type, @0, @2); }
1409 (if (cst1)
1410 (bitop @1 { cst1; })
1411 (with { tree cst2 = const_binop (bitop, type, @1, @2); }
1412 (if (cst2)
1413 (bitop @0 { cst2; }))))))))
1414
1415 /* Try simple folding for X op !X, and X op X with the help
1416 of the truth_valued_p and logical_inverted_value predicates. */
1417 (match truth_valued_p
1418 @0
1419 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
1420 (for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
1421 (match truth_valued_p
1422 (op @0 @1)))
1423 (match truth_valued_p
1424 (truth_not @0))
1425
1426 (match (logical_inverted_value @0)
1427 (truth_not @0))
1428 (match (logical_inverted_value @0)
1429 (bit_not truth_valued_p@0))
1430 (match (logical_inverted_value @0)
1431 (eq @0 integer_zerop))
1432 (match (logical_inverted_value @0)
1433 (ne truth_valued_p@0 integer_truep))
1434 (match (logical_inverted_value @0)
1435 (bit_xor truth_valued_p@0 integer_truep))
1436
1437 /* X & !X -> 0. */
1438 (simplify
1439 (bit_and:c @0 (logical_inverted_value @0))
1440 { build_zero_cst (type); })
1441 /* X | !X and X ^ !X -> 1, , if X is truth-valued. */
1442 (for op (bit_ior bit_xor)
1443 (simplify
1444 (op:c truth_valued_p@0 (logical_inverted_value @0))
1445 { constant_boolean_node (true, type); }))
1446 /* X ==/!= !X is false/true. */
1447 (for op (eq ne)
1448 (simplify
1449 (op:c truth_valued_p@0 (logical_inverted_value @0))
1450 { constant_boolean_node (op == NE_EXPR ? true : false, type); }))
1451
1452 /* ~~x -> x */
1453 (simplify
1454 (bit_not (bit_not @0))
1455 @0)
1456
1457 /* Convert ~ (-A) to A - 1. */
1458 (simplify
1459 (bit_not (convert? (negate @0)))
1460 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1461 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
1462 (convert (minus @0 { build_each_one_cst (TREE_TYPE (@0)); }))))
1463
1464 /* Convert - (~A) to A + 1. */
1465 (simplify
1466 (negate (nop_convert? (bit_not @0)))
1467 (plus (view_convert @0) { build_each_one_cst (type); }))
1468
1469 /* Convert ~ (A - 1) or ~ (A + -1) to -A. */
1470 (simplify
1471 (bit_not (convert? (minus @0 integer_each_onep)))
1472 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1473 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
1474 (convert (negate @0))))
1475 (simplify
1476 (bit_not (convert? (plus @0 integer_all_onesp)))
1477 (if (element_precision (type) <= element_precision (TREE_TYPE (@0))
1478 || !TYPE_UNSIGNED (TREE_TYPE (@0)))
1479 (convert (negate @0))))
1480
1481 /* Part of convert ~(X ^ Y) to ~X ^ Y or X ^ ~Y if ~X or ~Y simplify. */
1482 (simplify
1483 (bit_not (convert? (bit_xor @0 INTEGER_CST@1)))
1484 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1485 (convert (bit_xor @0 (bit_not @1)))))
1486 (simplify
1487 (bit_not (convert? (bit_xor:c (bit_not @0) @1)))
1488 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1489 (convert (bit_xor @0 @1))))
1490
1491 /* Otherwise prefer ~(X ^ Y) to ~X ^ Y as more canonical. */
1492 (simplify
1493 (bit_xor:c (nop_convert?:s (bit_not:s @0)) @1)
1494 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1495 (bit_not (bit_xor (view_convert @0) @1))))
1496
1497 /* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
1498 (simplify
1499 (bit_ior:c (bit_and:cs @0 (bit_not @2)) (bit_and:cs @1 @2))
1500 (bit_xor (bit_and (bit_xor @0 @1) @2) @0))
1501
1502 /* Fold A - (A & B) into ~B & A. */
1503 (simplify
1504 (minus (convert1? @0) (convert2?:s (bit_and:cs @@0 @1)))
1505 (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
1506 && tree_nop_conversion_p (type, TREE_TYPE (@1)))
1507 (convert (bit_and (bit_not @1) @0))))
1508
1509 /* (m1 CMP m2) * d -> (m1 CMP m2) ? d : 0 */
1510 (for cmp (gt lt ge le)
1511 (simplify
1512 (mult (convert (cmp @0 @1)) @2)
1513 (if (GIMPLE || !TREE_SIDE_EFFECTS (@2))
1514 (cond (cmp @0 @1) @2 { build_zero_cst (type); }))))
1515
1516 /* For integral types with undefined overflow and C != 0 fold
1517 x * C EQ/NE y * C into x EQ/NE y. */
1518 (for cmp (eq ne)
1519 (simplify
1520 (cmp (mult:c @0 @1) (mult:c @2 @1))
1521 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1522 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1523 && tree_expr_nonzero_p (@1))
1524 (cmp @0 @2))))
1525
1526 /* For integral types with wrapping overflow and C odd fold
1527 x * C EQ/NE y * C into x EQ/NE y. */
1528 (for cmp (eq ne)
1529 (simplify
1530 (cmp (mult @0 INTEGER_CST@1) (mult @2 @1))
1531 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1532 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
1533 && (TREE_INT_CST_LOW (@1) & 1) != 0)
1534 (cmp @0 @2))))
1535
1536 /* For integral types with undefined overflow and C != 0 fold
1537 x * C RELOP y * C into:
1538
1539 x RELOP y for nonnegative C
1540 y RELOP x for negative C */
1541 (for cmp (lt gt le ge)
1542 (simplify
1543 (cmp (mult:c @0 @1) (mult:c @2 @1))
1544 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
1545 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1546 (if (tree_expr_nonnegative_p (@1) && tree_expr_nonzero_p (@1))
1547 (cmp @0 @2)
1548 (if (TREE_CODE (@1) == INTEGER_CST
1549 && wi::neg_p (wi::to_wide (@1), TYPE_SIGN (TREE_TYPE (@1))))
1550 (cmp @2 @0))))))
1551
1552 /* (X - 1U) <= INT_MAX-1U into (int) X > 0. */
1553 (for cmp (le gt)
1554 icmp (gt le)
1555 (simplify
1556 (cmp (plus @0 integer_minus_onep@1) INTEGER_CST@2)
1557 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1558 && TYPE_UNSIGNED (TREE_TYPE (@0))
1559 && TYPE_PRECISION (TREE_TYPE (@0)) > 1
1560 && (wi::to_wide (@2)
1561 == wi::max_value (TYPE_PRECISION (TREE_TYPE (@0)), SIGNED) - 1))
1562 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
1563 (icmp (convert:stype @0) { build_int_cst (stype, 0); })))))
1564
1565 /* X / 4 < Y / 4 iff X < Y when the division is known to be exact. */
1566 (for cmp (simple_comparison)
1567 (simplify
1568 (cmp (convert?@3 (exact_div @0 INTEGER_CST@2)) (convert? (exact_div @1 @2)))
1569 (if (element_precision (@3) >= element_precision (@0)
1570 && types_match (@0, @1))
1571 (if (wi::lt_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2))))
1572 (if (!TYPE_UNSIGNED (TREE_TYPE (@3)))
1573 (cmp @1 @0)
1574 (if (tree_expr_nonzero_p (@0) && tree_expr_nonzero_p (@1))
1575 (with
1576 {
1577 tree utype = unsigned_type_for (TREE_TYPE (@0));
1578 }
1579 (cmp (convert:utype @1) (convert:utype @0)))))
1580 (if (wi::gt_p (wi::to_wide (@2), 1, TYPE_SIGN (TREE_TYPE (@2))))
1581 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) || !TYPE_UNSIGNED (TREE_TYPE (@3)))
1582 (cmp @0 @1)
1583 (with
1584 {
1585 tree utype = unsigned_type_for (TREE_TYPE (@0));
1586 }
1587 (cmp (convert:utype @0) (convert:utype @1)))))))))
1588
1589 /* X / C1 op C2 into a simple range test. */
1590 (for cmp (simple_comparison)
1591 (simplify
1592 (cmp (trunc_div:s @0 INTEGER_CST@1) INTEGER_CST@2)
1593 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
1594 && integer_nonzerop (@1)
1595 && !TREE_OVERFLOW (@1)
1596 && !TREE_OVERFLOW (@2))
1597 (with { tree lo, hi; bool neg_overflow;
1598 enum tree_code code = fold_div_compare (cmp, @1, @2, &lo, &hi,
1599 &neg_overflow); }
1600 (switch
1601 (if (code == LT_EXPR || code == GE_EXPR)
1602 (if (TREE_OVERFLOW (lo))
1603 { build_int_cst (type, (code == LT_EXPR) ^ neg_overflow); }
1604 (if (code == LT_EXPR)
1605 (lt @0 { lo; })
1606 (ge @0 { lo; }))))
1607 (if (code == LE_EXPR || code == GT_EXPR)
1608 (if (TREE_OVERFLOW (hi))
1609 { build_int_cst (type, (code == LE_EXPR) ^ neg_overflow); }
1610 (if (code == LE_EXPR)
1611 (le @0 { hi; })
1612 (gt @0 { hi; }))))
1613 (if (!lo && !hi)
1614 { build_int_cst (type, code == NE_EXPR); })
1615 (if (code == EQ_EXPR && !hi)
1616 (ge @0 { lo; }))
1617 (if (code == EQ_EXPR && !lo)
1618 (le @0 { hi; }))
1619 (if (code == NE_EXPR && !hi)
1620 (lt @0 { lo; }))
1621 (if (code == NE_EXPR && !lo)
1622 (gt @0 { hi; }))
1623 (if (GENERIC)
1624 { build_range_check (UNKNOWN_LOCATION, type, @0, code == EQ_EXPR,
1625 lo, hi); })
1626 (with
1627 {
1628 tree etype = range_check_type (TREE_TYPE (@0));
1629 if (etype)
1630 {
1631 hi = fold_convert (etype, hi);
1632 lo = fold_convert (etype, lo);
1633 hi = const_binop (MINUS_EXPR, etype, hi, lo);
1634 }
1635 }
1636 (if (etype && hi && !TREE_OVERFLOW (hi))
1637 (if (code == EQ_EXPR)
1638 (le (minus (convert:etype @0) { lo; }) { hi; })
1639 (gt (minus (convert:etype @0) { lo; }) { hi; })))))))))
1640
1641 /* X + Z < Y + Z is the same as X < Y when there is no overflow. */
1642 (for op (lt le ge gt)
1643 (simplify
1644 (op (plus:c @0 @2) (plus:c @1 @2))
1645 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1646 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1647 (op @0 @1))))
1648 /* For equality and subtraction, this is also true with wrapping overflow. */
1649 (for op (eq ne minus)
1650 (simplify
1651 (op (plus:c @0 @2) (plus:c @1 @2))
1652 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1653 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1654 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1655 (op @0 @1))))
1656
1657 /* X - Z < Y - Z is the same as X < Y when there is no overflow. */
1658 (for op (lt le ge gt)
1659 (simplify
1660 (op (minus @0 @2) (minus @1 @2))
1661 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1662 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1663 (op @0 @1))))
1664 /* For equality and subtraction, this is also true with wrapping overflow. */
1665 (for op (eq ne minus)
1666 (simplify
1667 (op (minus @0 @2) (minus @1 @2))
1668 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1669 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1670 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1671 (op @0 @1))))
1672 /* And for pointers... */
1673 (for op (simple_comparison)
1674 (simplify
1675 (op (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
1676 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1677 (op @0 @1))))
1678 (simplify
1679 (minus (pointer_diff@3 @0 @2) (pointer_diff @1 @2))
1680 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
1681 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1682 (pointer_diff @0 @1)))
1683
1684 /* Z - X < Z - Y is the same as Y < X when there is no overflow. */
1685 (for op (lt le ge gt)
1686 (simplify
1687 (op (minus @2 @0) (minus @2 @1))
1688 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1689 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
1690 (op @1 @0))))
1691 /* For equality and subtraction, this is also true with wrapping overflow. */
1692 (for op (eq ne minus)
1693 (simplify
1694 (op (minus @2 @0) (minus @2 @1))
1695 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1696 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1697 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1698 (op @1 @0))))
1699 /* And for pointers... */
1700 (for op (simple_comparison)
1701 (simplify
1702 (op (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
1703 (if (!TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1704 (op @1 @0))))
1705 (simplify
1706 (minus (pointer_diff@3 @2 @0) (pointer_diff @2 @1))
1707 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@3))
1708 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@2)))
1709 (pointer_diff @1 @0)))
1710
1711 /* X + Y < Y is the same as X < 0 when there is no overflow. */
1712 (for op (lt le gt ge)
1713 (simplify
1714 (op:c (plus:c@2 @0 @1) @1)
1715 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1716 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1717 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
1718 && (CONSTANT_CLASS_P (@0) || single_use (@2)))
1719 (op @0 { build_zero_cst (TREE_TYPE (@0)); }))))
1720 /* For equality, this is also true with wrapping overflow. */
1721 (for op (eq ne)
1722 (simplify
1723 (op:c (nop_convert?@3 (plus:c@2 @0 (convert1? @1))) (convert2? @1))
1724 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1725 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1726 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
1727 && (CONSTANT_CLASS_P (@0) || (single_use (@2) && single_use (@3)))
1728 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@2))
1729 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@1)))
1730 (op @0 { build_zero_cst (TREE_TYPE (@0)); })))
1731 (simplify
1732 (op:c (nop_convert?@3 (pointer_plus@2 (convert1? @0) @1)) (convert2? @0))
1733 (if (tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0))
1734 && tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
1735 && (CONSTANT_CLASS_P (@1) || (single_use (@2) && single_use (@3))))
1736 (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
1737
1738 /* X - Y < X is the same as Y > 0 when there is no overflow.
1739 For equality, this is also true with wrapping overflow. */
1740 (for op (simple_comparison)
1741 (simplify
1742 (op:c @0 (minus@2 @0 @1))
1743 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
1744 && (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
1745 || ((op == EQ_EXPR || op == NE_EXPR)
1746 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))))
1747 && (CONSTANT_CLASS_P (@1) || single_use (@2)))
1748 (op @1 { build_zero_cst (TREE_TYPE (@1)); }))))
1749
1750 /* Transform:
1751 (X / Y) == 0 -> X < Y if X, Y are unsigned.
1752 (X / Y) != 0 -> X >= Y, if X, Y are unsigned. */
1753 (for cmp (eq ne)
1754 ocmp (lt ge)
1755 (simplify
1756 (cmp (trunc_div @0 @1) integer_zerop)
1757 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
1758 /* Complex ==/!= is allowed, but not </>=. */
1759 && TREE_CODE (TREE_TYPE (@0)) != COMPLEX_TYPE
1760 && (VECTOR_TYPE_P (type) || !VECTOR_TYPE_P (TREE_TYPE (@0))))
1761 (ocmp @0 @1))))
1762
1763 /* X == C - X can never be true if C is odd. */
1764 (for cmp (eq ne)
1765 (simplify
1766 (cmp:c (convert? @0) (convert1? (minus INTEGER_CST@1 (convert2? @0))))
1767 (if (TREE_INT_CST_LOW (@1) & 1)
1768 { constant_boolean_node (cmp == NE_EXPR, type); })))
1769
1770 /* Arguments on which one can call get_nonzero_bits to get the bits
1771 possibly set. */
1772 (match with_possible_nonzero_bits
1773 INTEGER_CST@0)
1774 (match with_possible_nonzero_bits
1775 SSA_NAME@0
1776 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))))
1777 /* Slightly extended version, do not make it recursive to keep it cheap. */
1778 (match (with_possible_nonzero_bits2 @0)
1779 with_possible_nonzero_bits@0)
1780 (match (with_possible_nonzero_bits2 @0)
1781 (bit_and:c with_possible_nonzero_bits@0 @2))
1782
1783 /* Same for bits that are known to be set, but we do not have
1784 an equivalent to get_nonzero_bits yet. */
1785 (match (with_certain_nonzero_bits2 @0)
1786 INTEGER_CST@0)
1787 (match (with_certain_nonzero_bits2 @0)
1788 (bit_ior @1 INTEGER_CST@0))
1789
1790 /* X == C (or X & Z == Y | C) is impossible if ~nonzero(X) & C != 0. */
1791 (for cmp (eq ne)
1792 (simplify
1793 (cmp:c (with_possible_nonzero_bits2 @0) (with_certain_nonzero_bits2 @1))
1794 (if (wi::bit_and_not (wi::to_wide (@1), get_nonzero_bits (@0)) != 0)
1795 { constant_boolean_node (cmp == NE_EXPR, type); })))
1796
1797 /* ((X inner_op C0) outer_op C1)
1798 With X being a tree where value_range has reasoned certain bits to always be
1799 zero throughout its computed value range,
1800 inner_op = {|,^}, outer_op = {|,^} and inner_op != outer_op
1801 where zero_mask has 1's for all bits that are sure to be 0 in
1802 and 0's otherwise.
1803 if (inner_op == '^') C0 &= ~C1;
1804 if ((C0 & ~zero_mask) == 0) then emit (X outer_op (C0 outer_op C1)
1805 if ((C1 & ~zero_mask) == 0) then emit (X inner_op (C0 outer_op C1)
1806 */
1807 (for inner_op (bit_ior bit_xor)
1808 outer_op (bit_xor bit_ior)
1809 (simplify
1810 (outer_op
1811 (inner_op:s @2 INTEGER_CST@0) INTEGER_CST@1)
1812 (with
1813 {
1814 bool fail = false;
1815 wide_int zero_mask_not;
1816 wide_int C0;
1817 wide_int cst_emit;
1818
1819 if (TREE_CODE (@2) == SSA_NAME)
1820 zero_mask_not = get_nonzero_bits (@2);
1821 else
1822 fail = true;
1823
1824 if (inner_op == BIT_XOR_EXPR)
1825 {
1826 C0 = wi::bit_and_not (wi::to_wide (@0), wi::to_wide (@1));
1827 cst_emit = C0 | wi::to_wide (@1);
1828 }
1829 else
1830 {
1831 C0 = wi::to_wide (@0);
1832 cst_emit = C0 ^ wi::to_wide (@1);
1833 }
1834 }
1835 (if (!fail && (C0 & zero_mask_not) == 0)
1836 (outer_op @2 { wide_int_to_tree (type, cst_emit); })
1837 (if (!fail && (wi::to_wide (@1) & zero_mask_not) == 0)
1838 (inner_op @2 { wide_int_to_tree (type, cst_emit); }))))))
1839
1840 /* Associate (p +p off1) +p off2 as (p +p (off1 + off2)). */
1841 (simplify
1842 (pointer_plus (pointer_plus:s @0 @1) @3)
1843 (pointer_plus @0 (plus @1 @3)))
1844
1845 /* Pattern match
1846 tem1 = (long) ptr1;
1847 tem2 = (long) ptr2;
1848 tem3 = tem2 - tem1;
1849 tem4 = (unsigned long) tem3;
1850 tem5 = ptr1 + tem4;
1851 and produce
1852 tem5 = ptr2; */
1853 (simplify
1854 (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
1855 /* Conditionally look through a sign-changing conversion. */
1856 (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
1857 && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
1858 || (GENERIC && type == TREE_TYPE (@1))))
1859 @1))
1860 (simplify
1861 (pointer_plus @0 (convert?@2 (pointer_diff@3 @1 @@0)))
1862 (if (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (TREE_TYPE (@3)))
1863 (convert @1)))
1864
1865 /* Pattern match
1866 tem = (sizetype) ptr;
1867 tem = tem & algn;
1868 tem = -tem;
1869 ... = ptr p+ tem;
1870 and produce the simpler and easier to analyze with respect to alignment
1871 ... = ptr & ~algn; */
1872 (simplify
1873 (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
1874 (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), ~wi::to_wide (@1)); }
1875 (bit_and @0 { algn; })))
1876
1877 /* Try folding difference of addresses. */
1878 (simplify
1879 (minus (convert ADDR_EXPR@0) (convert @1))
1880 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1881 (with { poly_int64 diff; }
1882 (if (ptr_difference_const (@0, @1, &diff))
1883 { build_int_cst_type (type, diff); }))))
1884 (simplify
1885 (minus (convert @0) (convert ADDR_EXPR@1))
1886 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
1887 (with { poly_int64 diff; }
1888 (if (ptr_difference_const (@0, @1, &diff))
1889 { build_int_cst_type (type, diff); }))))
1890 (simplify
1891 (pointer_diff (convert?@2 ADDR_EXPR@0) (convert1?@3 @1))
1892 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
1893 && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
1894 (with { poly_int64 diff; }
1895 (if (ptr_difference_const (@0, @1, &diff))
1896 { build_int_cst_type (type, diff); }))))
1897 (simplify
1898 (pointer_diff (convert?@2 @0) (convert1?@3 ADDR_EXPR@1))
1899 (if (tree_nop_conversion_p (TREE_TYPE(@2), TREE_TYPE (@0))
1900 && tree_nop_conversion_p (TREE_TYPE(@3), TREE_TYPE (@1)))
1901 (with { poly_int64 diff; }
1902 (if (ptr_difference_const (@0, @1, &diff))
1903 { build_int_cst_type (type, diff); }))))
1904
1905 /* Canonicalize (T *)(ptr - ptr-cst) to &MEM[ptr + -ptr-cst]. */
1906 (simplify
1907 (convert (pointer_diff @0 INTEGER_CST@1))
1908 (if (POINTER_TYPE_P (type))
1909 { build_fold_addr_expr_with_type
1910 (build2 (MEM_REF, char_type_node, @0,
1911 wide_int_to_tree (ptr_type_node, wi::neg (wi::to_wide (@1)))),
1912 type); }))
1913
1914 /* If arg0 is derived from the address of an object or function, we may
1915 be able to fold this expression using the object or function's
1916 alignment. */
1917 (simplify
1918 (bit_and (convert? @0) INTEGER_CST@1)
1919 (if (POINTER_TYPE_P (TREE_TYPE (@0))
1920 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
1921 (with
1922 {
1923 unsigned int align;
1924 unsigned HOST_WIDE_INT bitpos;
1925 get_pointer_alignment_1 (@0, &align, &bitpos);
1926 }
1927 (if (wi::ltu_p (wi::to_wide (@1), align / BITS_PER_UNIT))
1928 { wide_int_to_tree (type, (wi::to_wide (@1)
1929 & (bitpos / BITS_PER_UNIT))); }))))
1930
1931 (match min_value
1932 INTEGER_CST
1933 (if (INTEGRAL_TYPE_P (type)
1934 && wi::eq_p (wi::to_wide (t), wi::min_value (type)))))
1935
1936 (match max_value
1937 INTEGER_CST
1938 (if (INTEGRAL_TYPE_P (type)
1939 && wi::eq_p (wi::to_wide (t), wi::max_value (type)))))
1940
1941 /* x > y && x != XXX_MIN --> x > y
1942 x > y && x == XXX_MIN --> false . */
1943 (for eqne (eq ne)
1944 (simplify
1945 (bit_and:c (gt:c@2 @0 @1) (eqne @0 min_value))
1946 (switch
1947 (if (eqne == EQ_EXPR)
1948 { constant_boolean_node (false, type); })
1949 (if (eqne == NE_EXPR)
1950 @2)
1951 )))
1952
1953 /* x < y && x != XXX_MAX --> x < y
1954 x < y && x == XXX_MAX --> false. */
1955 (for eqne (eq ne)
1956 (simplify
1957 (bit_and:c (lt:c@2 @0 @1) (eqne @0 max_value))
1958 (switch
1959 (if (eqne == EQ_EXPR)
1960 { constant_boolean_node (false, type); })
1961 (if (eqne == NE_EXPR)
1962 @2)
1963 )))
1964
1965 /* x <= y && x == XXX_MIN --> x == XXX_MIN. */
1966 (simplify
1967 (bit_and:c (le:c @0 @1) (eq@2 @0 min_value))
1968 @2)
1969
1970 /* x >= y && x == XXX_MAX --> x == XXX_MAX. */
1971 (simplify
1972 (bit_and:c (ge:c @0 @1) (eq@2 @0 max_value))
1973 @2)
1974
1975 /* x > y || x != XXX_MIN --> x != XXX_MIN. */
1976 (simplify
1977 (bit_ior:c (gt:c @0 @1) (ne@2 @0 min_value))
1978 @2)
1979
1980 /* x <= y || x != XXX_MIN --> true. */
1981 (simplify
1982 (bit_ior:c (le:c @0 @1) (ne @0 min_value))
1983 { constant_boolean_node (true, type); })
1984
1985 /* x <= y || x == XXX_MIN --> x <= y. */
1986 (simplify
1987 (bit_ior:c (le:c@2 @0 @1) (eq @0 min_value))
1988 @2)
1989
1990 /* x < y || x != XXX_MAX --> x != XXX_MAX. */
1991 (simplify
1992 (bit_ior:c (lt:c @0 @1) (ne@2 @0 max_value))
1993 @2)
1994
1995 /* x >= y || x != XXX_MAX --> true
1996 x >= y || x == XXX_MAX --> x >= y. */
1997 (for eqne (eq ne)
1998 (simplify
1999 (bit_ior:c (ge:c@2 @0 @1) (eqne @0 max_value))
2000 (switch
2001 (if (eqne == EQ_EXPR)
2002 @2)
2003 (if (eqne == NE_EXPR)
2004 { constant_boolean_node (true, type); }))))
2005
2006 /* Convert (X == CST1) && (X OP2 CST2) to a known value
2007 based on CST1 OP2 CST2. Similarly for (X != CST1). */
2008
2009 (for code1 (eq ne)
2010 (for code2 (eq ne lt gt le ge)
2011 (simplify
2012 (bit_and:c (code1@3 @0 INTEGER_CST@1) (code2@4 @0 INTEGER_CST@2))
2013 (with
2014 {
2015 int cmp = tree_int_cst_compare (@1, @2);
2016 bool val;
2017 switch (code2)
2018 {
2019 case EQ_EXPR: val = (cmp == 0); break;
2020 case NE_EXPR: val = (cmp != 0); break;
2021 case LT_EXPR: val = (cmp < 0); break;
2022 case GT_EXPR: val = (cmp > 0); break;
2023 case LE_EXPR: val = (cmp <= 0); break;
2024 case GE_EXPR: val = (cmp >= 0); break;
2025 default: gcc_unreachable ();
2026 }
2027 }
2028 (switch
2029 (if (code1 == EQ_EXPR && val) @3)
2030 (if (code1 == EQ_EXPR && !val) { constant_boolean_node (false, type); })
2031 (if (code1 == NE_EXPR && !val) @4))))))
2032
2033 /* Convert (X OP1 CST1) && (X OP2 CST2). */
2034
2035 (for code1 (lt le gt ge)
2036 (for code2 (lt le gt ge)
2037 (simplify
2038 (bit_and (code1:c@3 @0 INTEGER_CST@1) (code2:c@4 @0 INTEGER_CST@2))
2039 (with
2040 {
2041 int cmp = tree_int_cst_compare (@1, @2);
2042 }
2043 (switch
2044 /* Choose the more restrictive of two < or <= comparisons. */
2045 (if ((code1 == LT_EXPR || code1 == LE_EXPR)
2046 && (code2 == LT_EXPR || code2 == LE_EXPR))
2047 (if ((cmp < 0) || (cmp == 0 && code1 == LT_EXPR))
2048 @3
2049 @4))
2050 /* Likewise chose the more restrictive of two > or >= comparisons. */
2051 (if ((code1 == GT_EXPR || code1 == GE_EXPR)
2052 && (code2 == GT_EXPR || code2 == GE_EXPR))
2053 (if ((cmp > 0) || (cmp == 0 && code1 == GT_EXPR))
2054 @3
2055 @4))
2056 /* Check for singleton ranges. */
2057 (if (cmp == 0
2058 && ((code1 == LE_EXPR && code2 == GE_EXPR)
2059 || (code1 == GE_EXPR && code2 == LE_EXPR)))
2060 (eq @0 @1))
2061 /* Check for disjoint ranges. */
2062 (if (cmp <= 0
2063 && (code1 == LT_EXPR || code1 == LE_EXPR)
2064 && (code2 == GT_EXPR || code2 == GE_EXPR))
2065 { constant_boolean_node (false, type); })
2066 (if (cmp >= 0
2067 && (code1 == GT_EXPR || code1 == GE_EXPR)
2068 && (code2 == LT_EXPR || code2 == LE_EXPR))
2069 { constant_boolean_node (false, type); })
2070 )))))
2071
2072 /* Convert (X == CST1) || (X OP2 CST2) to a known value
2073 based on CST1 OP2 CST2. Similarly for (X != CST1). */
2074
2075 (for code1 (eq ne)
2076 (for code2 (eq ne lt gt le ge)
2077 (simplify
2078 (bit_ior:c (code1@3 @0 INTEGER_CST@1) (code2@4 @0 INTEGER_CST@2))
2079 (with
2080 {
2081 int cmp = tree_int_cst_compare (@1, @2);
2082 bool val;
2083 switch (code2)
2084 {
2085 case EQ_EXPR: val = (cmp == 0); break;
2086 case NE_EXPR: val = (cmp != 0); break;
2087 case LT_EXPR: val = (cmp < 0); break;
2088 case GT_EXPR: val = (cmp > 0); break;
2089 case LE_EXPR: val = (cmp <= 0); break;
2090 case GE_EXPR: val = (cmp >= 0); break;
2091 default: gcc_unreachable ();
2092 }
2093 }
2094 (switch
2095 (if (code1 == EQ_EXPR && val) @4)
2096 (if (code1 == NE_EXPR && val) { constant_boolean_node (true, type); })
2097 (if (code1 == NE_EXPR && !val) @3))))))
2098
2099 /* Convert (X OP1 CST1) || (X OP2 CST2). */
2100
2101 (for code1 (lt le gt ge)
2102 (for code2 (lt le gt ge)
2103 (simplify
2104 (bit_ior (code1@3 @0 INTEGER_CST@1) (code2@4 @0 INTEGER_CST@2))
2105 (with
2106 {
2107 int cmp = tree_int_cst_compare (@1, @2);
2108 }
2109 (switch
2110 /* Choose the more restrictive of two < or <= comparisons. */
2111 (if ((code1 == LT_EXPR || code1 == LE_EXPR)
2112 && (code2 == LT_EXPR || code2 == LE_EXPR))
2113 (if ((cmp < 0) || (cmp == 0 && code1 == LT_EXPR))
2114 @4
2115 @3))
2116 /* Likewise chose the more restrictive of two > or >= comparisons. */
2117 (if ((code1 == GT_EXPR || code1 == GE_EXPR)
2118 && (code2 == GT_EXPR || code2 == GE_EXPR))
2119 (if ((cmp > 0) || (cmp == 0 && code1 == GT_EXPR))
2120 @4
2121 @3))
2122 /* Check for singleton ranges. */
2123 (if (cmp == 0
2124 && ((code1 == LT_EXPR && code2 == GT_EXPR)
2125 || (code1 == GT_EXPR && code2 == LT_EXPR)))
2126 (ne @0 @2))
2127 /* Check for disjoint ranges. */
2128 (if (cmp >= 0
2129 && (code1 == LT_EXPR || code1 == LE_EXPR)
2130 && (code2 == GT_EXPR || code2 == GE_EXPR))
2131 { constant_boolean_node (true, type); })
2132 (if (cmp <= 0
2133 && (code1 == GT_EXPR || code1 == GE_EXPR)
2134 && (code2 == LT_EXPR || code2 == LE_EXPR))
2135 { constant_boolean_node (true, type); })
2136 )))))
2137
2138 /* We can't reassociate at all for saturating types. */
2139 (if (!TYPE_SATURATING (type))
2140
2141 /* Contract negates. */
2142 /* A + (-B) -> A - B */
2143 (simplify
2144 (plus:c @0 (convert? (negate @1)))
2145 /* Apply STRIP_NOPS on the negate. */
2146 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
2147 && !TYPE_OVERFLOW_SANITIZED (type))
2148 (with
2149 {
2150 tree t1 = type;
2151 if (INTEGRAL_TYPE_P (type)
2152 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
2153 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
2154 }
2155 (convert (minus (convert:t1 @0) (convert:t1 @1))))))
2156 /* A - (-B) -> A + B */
2157 (simplify
2158 (minus @0 (convert? (negate @1)))
2159 (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
2160 && !TYPE_OVERFLOW_SANITIZED (type))
2161 (with
2162 {
2163 tree t1 = type;
2164 if (INTEGRAL_TYPE_P (type)
2165 && TYPE_OVERFLOW_WRAPS (type) != TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
2166 t1 = TYPE_OVERFLOW_WRAPS (type) ? type : TREE_TYPE (@1);
2167 }
2168 (convert (plus (convert:t1 @0) (convert:t1 @1))))))
2169 /* -(T)(-A) -> (T)A
2170 Sign-extension is ok except for INT_MIN, which thankfully cannot
2171 happen without overflow. */
2172 (simplify
2173 (negate (convert (negate @1)))
2174 (if (INTEGRAL_TYPE_P (type)
2175 && (TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@1))
2176 || (!TYPE_UNSIGNED (TREE_TYPE (@1))
2177 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
2178 && !TYPE_OVERFLOW_SANITIZED (type)
2179 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
2180 (convert @1)))
2181 (simplify
2182 (negate (convert negate_expr_p@1))
2183 (if (SCALAR_FLOAT_TYPE_P (type)
2184 && ((DECIMAL_FLOAT_TYPE_P (type)
2185 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))
2186 && TYPE_PRECISION (type) >= TYPE_PRECISION (TREE_TYPE (@1)))
2187 || !HONOR_SIGN_DEPENDENT_ROUNDING (type)))
2188 (convert (negate @1))))
2189 (simplify
2190 (negate (nop_convert? (negate @1)))
2191 (if (!TYPE_OVERFLOW_SANITIZED (type)
2192 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@1)))
2193 (view_convert @1)))
2194
2195 /* We can't reassociate floating-point unless -fassociative-math
2196 or fixed-point plus or minus because of saturation to +-Inf. */
2197 (if ((!FLOAT_TYPE_P (type) || flag_associative_math)
2198 && !FIXED_POINT_TYPE_P (type))
2199
2200 /* Match patterns that allow contracting a plus-minus pair
2201 irrespective of overflow issues. */
2202 /* (A +- B) - A -> +- B */
2203 /* (A +- B) -+ B -> A */
2204 /* A - (A +- B) -> -+ B */
2205 /* A +- (B -+ A) -> +- B */
2206 (simplify
2207 (minus (nop_convert1? (plus:c (nop_convert2? @0) @1)) @0)
2208 (view_convert @1))
2209 (simplify
2210 (minus (nop_convert1? (minus (nop_convert2? @0) @1)) @0)
2211 (if (!ANY_INTEGRAL_TYPE_P (type)
2212 || TYPE_OVERFLOW_WRAPS (type))
2213 (negate (view_convert @1))
2214 (view_convert (negate @1))))
2215 (simplify
2216 (plus:c (nop_convert1? (minus @0 (nop_convert2? @1))) @1)
2217 (view_convert @0))
2218 (simplify
2219 (minus @0 (nop_convert1? (plus:c (nop_convert2? @0) @1)))
2220 (if (!ANY_INTEGRAL_TYPE_P (type)
2221 || TYPE_OVERFLOW_WRAPS (type))
2222 (negate (view_convert @1))
2223 (view_convert (negate @1))))
2224 (simplify
2225 (minus @0 (nop_convert1? (minus (nop_convert2? @0) @1)))
2226 (view_convert @1))
2227 /* (A +- B) + (C - A) -> C +- B */
2228 /* (A + B) - (A - C) -> B + C */
2229 /* More cases are handled with comparisons. */
2230 (simplify
2231 (plus:c (plus:c @0 @1) (minus @2 @0))
2232 (plus @2 @1))
2233 (simplify
2234 (plus:c (minus @0 @1) (minus @2 @0))
2235 (minus @2 @1))
2236 (simplify
2237 (plus:c (pointer_diff @0 @1) (pointer_diff @2 @0))
2238 (if (TYPE_OVERFLOW_UNDEFINED (type)
2239 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0)))
2240 (pointer_diff @2 @1)))
2241 (simplify
2242 (minus (plus:c @0 @1) (minus @0 @2))
2243 (plus @1 @2))
2244
2245 /* (A +- CST1) +- CST2 -> A + CST3
2246 Use view_convert because it is safe for vectors and equivalent for
2247 scalars. */
2248 (for outer_op (plus minus)
2249 (for inner_op (plus minus)
2250 neg_inner_op (minus plus)
2251 (simplify
2252 (outer_op (nop_convert? (inner_op @0 CONSTANT_CLASS_P@1))
2253 CONSTANT_CLASS_P@2)
2254 /* If one of the types wraps, use that one. */
2255 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
2256 /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
2257 forever if something doesn't simplify into a constant. */
2258 (if (!CONSTANT_CLASS_P (@0))
2259 (if (outer_op == PLUS_EXPR)
2260 (plus (view_convert @0) (inner_op @2 (view_convert @1)))
2261 (minus (view_convert @0) (neg_inner_op @2 (view_convert @1)))))
2262 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2263 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
2264 (if (outer_op == PLUS_EXPR)
2265 (view_convert (plus @0 (inner_op (view_convert @2) @1)))
2266 (view_convert (minus @0 (neg_inner_op (view_convert @2) @1))))
2267 /* If the constant operation overflows we cannot do the transform
2268 directly as we would introduce undefined overflow, for example
2269 with (a - 1) + INT_MIN. */
2270 (if (types_match (type, @0))
2271 (with { tree cst = const_binop (outer_op == inner_op
2272 ? PLUS_EXPR : MINUS_EXPR,
2273 type, @1, @2); }
2274 (if (cst && !TREE_OVERFLOW (cst))
2275 (inner_op @0 { cst; } )
2276 /* X+INT_MAX+1 is X-INT_MIN. */
2277 (if (INTEGRAL_TYPE_P (type) && cst
2278 && wi::to_wide (cst) == wi::min_value (type))
2279 (neg_inner_op @0 { wide_int_to_tree (type, wi::to_wide (cst)); })
2280 /* Last resort, use some unsigned type. */
2281 (with { tree utype = unsigned_type_for (type); }
2282 (if (utype)
2283 (view_convert (inner_op
2284 (view_convert:utype @0)
2285 (view_convert:utype
2286 { drop_tree_overflow (cst); }))))))))))))))
2287
2288 /* (CST1 - A) +- CST2 -> CST3 - A */
2289 (for outer_op (plus minus)
2290 (simplify
2291 (outer_op (nop_convert? (minus CONSTANT_CLASS_P@1 @0)) CONSTANT_CLASS_P@2)
2292 /* If one of the types wraps, use that one. */
2293 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
2294 /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
2295 forever if something doesn't simplify into a constant. */
2296 (if (!CONSTANT_CLASS_P (@0))
2297 (minus (outer_op (view_convert @1) @2) (view_convert @0)))
2298 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2299 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
2300 (view_convert (minus (outer_op @1 (view_convert @2)) @0))
2301 (if (types_match (type, @0))
2302 (with { tree cst = const_binop (outer_op, type, @1, @2); }
2303 (if (cst && !TREE_OVERFLOW (cst))
2304 (minus { cst; } @0))))))))
2305
2306 /* CST1 - (CST2 - A) -> CST3 + A
2307 Use view_convert because it is safe for vectors and equivalent for
2308 scalars. */
2309 (simplify
2310 (minus CONSTANT_CLASS_P@1 (nop_convert? (minus CONSTANT_CLASS_P@2 @0)))
2311 /* If one of the types wraps, use that one. */
2312 (if (!ANY_INTEGRAL_TYPE_P (type) || TYPE_OVERFLOW_WRAPS (type))
2313 /* If all 3 captures are CONSTANT_CLASS_P, punt, as we might recurse
2314 forever if something doesn't simplify into a constant. */
2315 (if (!CONSTANT_CLASS_P (@0))
2316 (plus (view_convert @0) (minus @1 (view_convert @2))))
2317 (if (!ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2318 || TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
2319 (view_convert (plus @0 (minus (view_convert @1) @2)))
2320 (if (types_match (type, @0))
2321 (with { tree cst = const_binop (MINUS_EXPR, type, @1, @2); }
2322 (if (cst && !TREE_OVERFLOW (cst))
2323 (plus { cst; } @0)))))))
2324
2325 /* ((T)(A)) + CST -> (T)(A + CST) */
2326 #if GIMPLE
2327 (simplify
2328 (plus (convert SSA_NAME@0) INTEGER_CST@1)
2329 (if (TREE_CODE (TREE_TYPE (@0)) == INTEGER_TYPE
2330 && TREE_CODE (type) == INTEGER_TYPE
2331 && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (@0))
2332 && int_fits_type_p (@1, TREE_TYPE (@0)))
2333 /* Perform binary operation inside the cast if the constant fits
2334 and (A + CST)'s range does not overflow. */
2335 (with
2336 {
2337 wi::overflow_type min_ovf = wi::OVF_OVERFLOW,
2338 max_ovf = wi::OVF_OVERFLOW;
2339 tree inner_type = TREE_TYPE (@0);
2340
2341 wide_int w1
2342 = wide_int::from (wi::to_wide (@1), TYPE_PRECISION (inner_type),
2343 TYPE_SIGN (inner_type));
2344
2345 wide_int wmin0, wmax0;
2346 if (get_range_info (@0, &wmin0, &wmax0) == VR_RANGE)
2347 {
2348 wi::add (wmin0, w1, TYPE_SIGN (inner_type), &min_ovf);
2349 wi::add (wmax0, w1, TYPE_SIGN (inner_type), &max_ovf);
2350 }
2351 }
2352 (if (min_ovf == wi::OVF_NONE && max_ovf == wi::OVF_NONE)
2353 (convert (plus @0 { wide_int_to_tree (TREE_TYPE (@0), w1); } )))
2354 )))
2355 #endif
2356
2357 /* ((T)(A + CST1)) + CST2 -> (T)(A) + (T)CST1 + CST2 */
2358 #if GIMPLE
2359 (for op (plus minus)
2360 (simplify
2361 (plus (convert:s (op:s @0 INTEGER_CST@1)) INTEGER_CST@2)
2362 (if (TREE_CODE (TREE_TYPE (@0)) == INTEGER_TYPE
2363 && TREE_CODE (type) == INTEGER_TYPE
2364 && TYPE_PRECISION (type) > TYPE_PRECISION (TREE_TYPE (@0))
2365 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
2366 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
2367 && TYPE_OVERFLOW_WRAPS (type))
2368 (plus (convert @0) (op @2 (convert @1))))))
2369 #endif
2370
2371 /* ~A + A -> -1 */
2372 (simplify
2373 (plus:c (bit_not @0) @0)
2374 (if (!TYPE_OVERFLOW_TRAPS (type))
2375 { build_all_ones_cst (type); }))
2376
2377 /* ~A + 1 -> -A */
2378 (simplify
2379 (plus (convert? (bit_not @0)) integer_each_onep)
2380 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
2381 (negate (convert @0))))
2382
2383 /* -A - 1 -> ~A */
2384 (simplify
2385 (minus (convert? (negate @0)) integer_each_onep)
2386 (if (!TYPE_OVERFLOW_TRAPS (type)
2387 && tree_nop_conversion_p (type, TREE_TYPE (@0)))
2388 (bit_not (convert @0))))
2389
2390 /* -1 - A -> ~A */
2391 (simplify
2392 (minus integer_all_onesp @0)
2393 (bit_not @0))
2394
2395 /* (T)(P + A) - (T)P -> (T) A */
2396 (simplify
2397 (minus (convert (plus:c @@0 @1))
2398 (convert? @0))
2399 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2400 /* For integer types, if A has a smaller type
2401 than T the result depends on the possible
2402 overflow in P + A.
2403 E.g. T=size_t, A=(unsigned)429497295, P>0.
2404 However, if an overflow in P + A would cause
2405 undefined behavior, we can assume that there
2406 is no overflow. */
2407 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2408 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
2409 (convert @1)))
2410 (simplify
2411 (minus (convert (pointer_plus @@0 @1))
2412 (convert @0))
2413 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2414 /* For pointer types, if the conversion of A to the
2415 final type requires a sign- or zero-extension,
2416 then we have to punt - it is not defined which
2417 one is correct. */
2418 || (POINTER_TYPE_P (TREE_TYPE (@0))
2419 && TREE_CODE (@1) == INTEGER_CST
2420 && tree_int_cst_sign_bit (@1) == 0))
2421 (convert @1)))
2422 (simplify
2423 (pointer_diff (pointer_plus @@0 @1) @0)
2424 /* The second argument of pointer_plus must be interpreted as signed, and
2425 thus sign-extended if necessary. */
2426 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
2427 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2428 second arg is unsigned even when we need to consider it as signed,
2429 we don't want to diagnose overflow here. */
2430 (convert (view_convert:stype @1))))
2431
2432 /* (T)P - (T)(P + A) -> -(T) A */
2433 (simplify
2434 (minus (convert? @0)
2435 (convert (plus:c @@0 @1)))
2436 (if (INTEGRAL_TYPE_P (type)
2437 && TYPE_OVERFLOW_UNDEFINED (type)
2438 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2439 (with { tree utype = unsigned_type_for (type); }
2440 (convert (negate (convert:utype @1))))
2441 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2442 /* For integer types, if A has a smaller type
2443 than T the result depends on the possible
2444 overflow in P + A.
2445 E.g. T=size_t, A=(unsigned)429497295, P>0.
2446 However, if an overflow in P + A would cause
2447 undefined behavior, we can assume that there
2448 is no overflow. */
2449 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2450 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))))
2451 (negate (convert @1)))))
2452 (simplify
2453 (minus (convert @0)
2454 (convert (pointer_plus @@0 @1)))
2455 (if (INTEGRAL_TYPE_P (type)
2456 && TYPE_OVERFLOW_UNDEFINED (type)
2457 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2458 (with { tree utype = unsigned_type_for (type); }
2459 (convert (negate (convert:utype @1))))
2460 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2461 /* For pointer types, if the conversion of A to the
2462 final type requires a sign- or zero-extension,
2463 then we have to punt - it is not defined which
2464 one is correct. */
2465 || (POINTER_TYPE_P (TREE_TYPE (@0))
2466 && TREE_CODE (@1) == INTEGER_CST
2467 && tree_int_cst_sign_bit (@1) == 0))
2468 (negate (convert @1)))))
2469 (simplify
2470 (pointer_diff @0 (pointer_plus @@0 @1))
2471 /* The second argument of pointer_plus must be interpreted as signed, and
2472 thus sign-extended if necessary. */
2473 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
2474 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2475 second arg is unsigned even when we need to consider it as signed,
2476 we don't want to diagnose overflow here. */
2477 (negate (convert (view_convert:stype @1)))))
2478
2479 /* (T)(P + A) - (T)(P + B) -> (T)A - (T)B */
2480 (simplify
2481 (minus (convert (plus:c @@0 @1))
2482 (convert (plus:c @0 @2)))
2483 (if (INTEGRAL_TYPE_P (type)
2484 && TYPE_OVERFLOW_UNDEFINED (type)
2485 && element_precision (type) <= element_precision (TREE_TYPE (@1))
2486 && element_precision (type) <= element_precision (TREE_TYPE (@2)))
2487 (with { tree utype = unsigned_type_for (type); }
2488 (convert (minus (convert:utype @1) (convert:utype @2))))
2489 (if (((element_precision (type) <= element_precision (TREE_TYPE (@1)))
2490 == (element_precision (type) <= element_precision (TREE_TYPE (@2))))
2491 && (element_precision (type) <= element_precision (TREE_TYPE (@1))
2492 /* For integer types, if A has a smaller type
2493 than T the result depends on the possible
2494 overflow in P + A.
2495 E.g. T=size_t, A=(unsigned)429497295, P>0.
2496 However, if an overflow in P + A would cause
2497 undefined behavior, we can assume that there
2498 is no overflow. */
2499 || (INTEGRAL_TYPE_P (TREE_TYPE (@1))
2500 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
2501 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@1))
2502 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@2)))))
2503 (minus (convert @1) (convert @2)))))
2504 (simplify
2505 (minus (convert (pointer_plus @@0 @1))
2506 (convert (pointer_plus @0 @2)))
2507 (if (INTEGRAL_TYPE_P (type)
2508 && TYPE_OVERFLOW_UNDEFINED (type)
2509 && element_precision (type) <= element_precision (TREE_TYPE (@1)))
2510 (with { tree utype = unsigned_type_for (type); }
2511 (convert (minus (convert:utype @1) (convert:utype @2))))
2512 (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
2513 /* For pointer types, if the conversion of A to the
2514 final type requires a sign- or zero-extension,
2515 then we have to punt - it is not defined which
2516 one is correct. */
2517 || (POINTER_TYPE_P (TREE_TYPE (@0))
2518 && TREE_CODE (@1) == INTEGER_CST
2519 && tree_int_cst_sign_bit (@1) == 0
2520 && TREE_CODE (@2) == INTEGER_CST
2521 && tree_int_cst_sign_bit (@2) == 0))
2522 (minus (convert @1) (convert @2)))))
2523 (simplify
2524 (pointer_diff (pointer_plus @@0 @1) (pointer_plus @0 @2))
2525 /* The second argument of pointer_plus must be interpreted as signed, and
2526 thus sign-extended if necessary. */
2527 (with { tree stype = signed_type_for (TREE_TYPE (@1)); }
2528 /* Use view_convert instead of convert here, as POINTER_PLUS_EXPR
2529 second arg is unsigned even when we need to consider it as signed,
2530 we don't want to diagnose overflow here. */
2531 (minus (convert (view_convert:stype @1))
2532 (convert (view_convert:stype @2)))))))
2533
2534 /* (A * C) +- (B * C) -> (A+-B) * C and (A * C) +- A -> A * (C+-1).
2535 Modeled after fold_plusminus_mult_expr. */
2536 (if (!TYPE_SATURATING (type)
2537 && (!FLOAT_TYPE_P (type) || flag_associative_math))
2538 (for plusminus (plus minus)
2539 (simplify
2540 (plusminus (mult:cs@3 @0 @1) (mult:cs@4 @0 @2))
2541 (if ((!ANY_INTEGRAL_TYPE_P (type)
2542 || TYPE_OVERFLOW_WRAPS (type)
2543 || (INTEGRAL_TYPE_P (type)
2544 && tree_expr_nonzero_p (@0)
2545 && expr_not_equal_to (@0, wi::minus_one (TYPE_PRECISION (type)))))
2546 /* If @1 +- @2 is constant require a hard single-use on either
2547 original operand (but not on both). */
2548 && (single_use (@3) || single_use (@4)))
2549 (mult (plusminus @1 @2) @0)))
2550 /* We cannot generate constant 1 for fract. */
2551 (if (!ALL_FRACT_MODE_P (TYPE_MODE (type)))
2552 (simplify
2553 (plusminus @0 (mult:c@3 @0 @2))
2554 (if ((!ANY_INTEGRAL_TYPE_P (type)
2555 || TYPE_OVERFLOW_WRAPS (type)
2556 /* For @0 + @0*@2 this transformation would introduce UB
2557 (where there was none before) for @0 in [-1,0] and @2 max.
2558 For @0 - @0*@2 this transformation would introduce UB
2559 for @0 0 and @2 in [min,min+1] or @0 -1 and @2 min+1. */
2560 || (INTEGRAL_TYPE_P (type)
2561 && ((tree_expr_nonzero_p (@0)
2562 && expr_not_equal_to (@0,
2563 wi::minus_one (TYPE_PRECISION (type))))
2564 || (plusminus == PLUS_EXPR
2565 ? expr_not_equal_to (@2,
2566 wi::max_value (TYPE_PRECISION (type), SIGNED))
2567 /* Let's ignore the @0 -1 and @2 min case. */
2568 : (expr_not_equal_to (@2,
2569 wi::min_value (TYPE_PRECISION (type), SIGNED))
2570 && expr_not_equal_to (@2,
2571 wi::min_value (TYPE_PRECISION (type), SIGNED)
2572 + 1))))))
2573 && single_use (@3))
2574 (mult (plusminus { build_one_cst (type); } @2) @0)))
2575 (simplify
2576 (plusminus (mult:c@3 @0 @2) @0)
2577 (if ((!ANY_INTEGRAL_TYPE_P (type)
2578 || TYPE_OVERFLOW_WRAPS (type)
2579 /* For @0*@2 + @0 this transformation would introduce UB
2580 (where there was none before) for @0 in [-1,0] and @2 max.
2581 For @0*@2 - @0 this transformation would introduce UB
2582 for @0 0 and @2 min. */
2583 || (INTEGRAL_TYPE_P (type)
2584 && ((tree_expr_nonzero_p (@0)
2585 && (plusminus == MINUS_EXPR
2586 || expr_not_equal_to (@0,
2587 wi::minus_one (TYPE_PRECISION (type)))))
2588 || expr_not_equal_to (@2,
2589 (plusminus == PLUS_EXPR
2590 ? wi::max_value (TYPE_PRECISION (type), SIGNED)
2591 : wi::min_value (TYPE_PRECISION (type), SIGNED))))))
2592 && single_use (@3))
2593 (mult (plusminus @2 { build_one_cst (type); }) @0))))))
2594
2595 #if GIMPLE
2596 /* Canonicalize X + (X << C) into X * (1 + (1 << C)) and
2597 (X << C1) + (X << C2) into X * ((1 << C1) + (1 << C2)). */
2598 (simplify
2599 (plus:c @0 (lshift:s @0 INTEGER_CST@1))
2600 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2601 && tree_fits_uhwi_p (@1)
2602 && tree_to_uhwi (@1) < element_precision (type))
2603 (with { tree t = type;
2604 if (!TYPE_OVERFLOW_WRAPS (t)) t = unsigned_type_for (t);
2605 wide_int w = wi::set_bit_in_zero (tree_to_uhwi (@1),
2606 element_precision (type));
2607 w += 1;
2608 tree cst = wide_int_to_tree (VECTOR_TYPE_P (t) ? TREE_TYPE (t)
2609 : t, w);
2610 cst = build_uniform_cst (t, cst); }
2611 (convert (mult (convert:t @0) { cst; })))))
2612 (simplify
2613 (plus (lshift:s @0 INTEGER_CST@1) (lshift:s @0 INTEGER_CST@2))
2614 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2615 && tree_fits_uhwi_p (@1)
2616 && tree_to_uhwi (@1) < element_precision (type)
2617 && tree_fits_uhwi_p (@2)
2618 && tree_to_uhwi (@2) < element_precision (type))
2619 (with { tree t = type;
2620 if (!TYPE_OVERFLOW_WRAPS (t)) t = unsigned_type_for (t);
2621 unsigned int prec = element_precision (type);
2622 wide_int w = wi::set_bit_in_zero (tree_to_uhwi (@1), prec);
2623 w += wi::set_bit_in_zero (tree_to_uhwi (@2), prec);
2624 tree cst = wide_int_to_tree (VECTOR_TYPE_P (t) ? TREE_TYPE (t)
2625 : t, w);
2626 cst = build_uniform_cst (t, cst); }
2627 (convert (mult (convert:t @0) { cst; })))))
2628 #endif
2629
2630 /* Simplifications of MIN_EXPR, MAX_EXPR, fmin() and fmax(). */
2631
2632 (for minmax (min max FMIN_ALL FMAX_ALL)
2633 (simplify
2634 (minmax @0 @0)
2635 @0))
2636 /* min(max(x,y),y) -> y. */
2637 (simplify
2638 (min:c (max:c @0 @1) @1)
2639 @1)
2640 /* max(min(x,y),y) -> y. */
2641 (simplify
2642 (max:c (min:c @0 @1) @1)
2643 @1)
2644 /* max(a,-a) -> abs(a). */
2645 (simplify
2646 (max:c @0 (negate @0))
2647 (if (TREE_CODE (type) != COMPLEX_TYPE
2648 && (! ANY_INTEGRAL_TYPE_P (type)
2649 || TYPE_OVERFLOW_UNDEFINED (type)))
2650 (abs @0)))
2651 /* min(a,-a) -> -abs(a). */
2652 (simplify
2653 (min:c @0 (negate @0))
2654 (if (TREE_CODE (type) != COMPLEX_TYPE
2655 && (! ANY_INTEGRAL_TYPE_P (type)
2656 || TYPE_OVERFLOW_UNDEFINED (type)))
2657 (negate (abs @0))))
2658 (simplify
2659 (min @0 @1)
2660 (switch
2661 (if (INTEGRAL_TYPE_P (type)
2662 && TYPE_MIN_VALUE (type)
2663 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
2664 @1)
2665 (if (INTEGRAL_TYPE_P (type)
2666 && TYPE_MAX_VALUE (type)
2667 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
2668 @0)))
2669 (simplify
2670 (max @0 @1)
2671 (switch
2672 (if (INTEGRAL_TYPE_P (type)
2673 && TYPE_MAX_VALUE (type)
2674 && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
2675 @1)
2676 (if (INTEGRAL_TYPE_P (type)
2677 && TYPE_MIN_VALUE (type)
2678 && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
2679 @0)))
2680
2681 /* max (a, a + CST) -> a + CST where CST is positive. */
2682 /* max (a, a + CST) -> a where CST is negative. */
2683 (simplify
2684 (max:c @0 (plus@2 @0 INTEGER_CST@1))
2685 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2686 (if (tree_int_cst_sgn (@1) > 0)
2687 @2
2688 @0)))
2689
2690 /* min (a, a + CST) -> a where CST is positive. */
2691 /* min (a, a + CST) -> a + CST where CST is negative. */
2692 (simplify
2693 (min:c @0 (plus@2 @0 INTEGER_CST@1))
2694 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
2695 (if (tree_int_cst_sgn (@1) > 0)
2696 @0
2697 @2)))
2698
2699 /* (convert (minmax ((convert (x) c)))) -> minmax (x c) if x is promoted
2700 and the outer convert demotes the expression back to x's type. */
2701 (for minmax (min max)
2702 (simplify
2703 (convert (minmax@0 (convert @1) INTEGER_CST@2))
2704 (if (INTEGRAL_TYPE_P (type)
2705 && types_match (@1, type) && int_fits_type_p (@2, type)
2706 && TYPE_SIGN (TREE_TYPE (@0)) == TYPE_SIGN (type)
2707 && TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type))
2708 (minmax @1 (convert @2)))))
2709
2710 (for minmax (FMIN_ALL FMAX_ALL)
2711 /* If either argument is NaN, return the other one. Avoid the
2712 transformation if we get (and honor) a signalling NaN. */
2713 (simplify
2714 (minmax:c @0 REAL_CST@1)
2715 (if (real_isnan (TREE_REAL_CST_PTR (@1))
2716 && (!HONOR_SNANS (@1) || !TREE_REAL_CST (@1).signalling))
2717 @0)))
2718 /* Convert fmin/fmax to MIN_EXPR/MAX_EXPR. C99 requires these
2719 functions to return the numeric arg if the other one is NaN.
2720 MIN and MAX don't honor that, so only transform if -ffinite-math-only
2721 is set. C99 doesn't require -0.0 to be handled, so we don't have to
2722 worry about it either. */
2723 (if (flag_finite_math_only)
2724 (simplify
2725 (FMIN_ALL @0 @1)
2726 (min @0 @1))
2727 (simplify
2728 (FMAX_ALL @0 @1)
2729 (max @0 @1)))
2730 /* min (-A, -B) -> -max (A, B) */
2731 (for minmax (min max FMIN_ALL FMAX_ALL)
2732 maxmin (max min FMAX_ALL FMIN_ALL)
2733 (simplify
2734 (minmax (negate:s@2 @0) (negate:s@3 @1))
2735 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
2736 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
2737 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
2738 (negate (maxmin @0 @1)))))
2739 /* MIN (~X, ~Y) -> ~MAX (X, Y)
2740 MAX (~X, ~Y) -> ~MIN (X, Y) */
2741 (for minmax (min max)
2742 maxmin (max min)
2743 (simplify
2744 (minmax (bit_not:s@2 @0) (bit_not:s@3 @1))
2745 (bit_not (maxmin @0 @1))))
2746
2747 /* MIN (X, Y) == X -> X <= Y */
2748 (for minmax (min min max max)
2749 cmp (eq ne eq ne )
2750 out (le gt ge lt )
2751 (simplify
2752 (cmp:c (minmax:c @0 @1) @0)
2753 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0)))
2754 (out @0 @1))))
2755 /* MIN (X, 5) == 0 -> X == 0
2756 MIN (X, 5) == 7 -> false */
2757 (for cmp (eq ne)
2758 (simplify
2759 (cmp (min @0 INTEGER_CST@1) INTEGER_CST@2)
2760 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
2761 TYPE_SIGN (TREE_TYPE (@0))))
2762 { constant_boolean_node (cmp == NE_EXPR, type); }
2763 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
2764 TYPE_SIGN (TREE_TYPE (@0))))
2765 (cmp @0 @2)))))
2766 (for cmp (eq ne)
2767 (simplify
2768 (cmp (max @0 INTEGER_CST@1) INTEGER_CST@2)
2769 (if (wi::gt_p (wi::to_wide (@1), wi::to_wide (@2),
2770 TYPE_SIGN (TREE_TYPE (@0))))
2771 { constant_boolean_node (cmp == NE_EXPR, type); }
2772 (if (wi::lt_p (wi::to_wide (@1), wi::to_wide (@2),
2773 TYPE_SIGN (TREE_TYPE (@0))))
2774 (cmp @0 @2)))))
2775 /* MIN (X, C1) < C2 -> X < C2 || C1 < C2 */
2776 (for minmax (min min max max min min max max )
2777 cmp (lt le gt ge gt ge lt le )
2778 comb (bit_ior bit_ior bit_ior bit_ior bit_and bit_and bit_and bit_and)
2779 (simplify
2780 (cmp (minmax @0 INTEGER_CST@1) INTEGER_CST@2)
2781 (comb (cmp @0 @2) (cmp @1 @2))))
2782
2783 /* Undo fancy way of writing max/min or other ?: expressions,
2784 like a - ((a - b) & -(a < b)), in this case into (a < b) ? b : a.
2785 People normally use ?: and that is what we actually try to optimize. */
2786 (for cmp (simple_comparison)
2787 (simplify
2788 (minus @0 (bit_and:c (minus @0 @1)
2789 (convert? (negate@4 (convert? (cmp@5 @2 @3))))))
2790 (if (INTEGRAL_TYPE_P (type)
2791 && INTEGRAL_TYPE_P (TREE_TYPE (@4))
2792 && TREE_CODE (TREE_TYPE (@4)) != BOOLEAN_TYPE
2793 && INTEGRAL_TYPE_P (TREE_TYPE (@5))
2794 && (TYPE_PRECISION (TREE_TYPE (@4)) >= TYPE_PRECISION (type)
2795 || !TYPE_UNSIGNED (TREE_TYPE (@4)))
2796 && (GIMPLE || !TREE_SIDE_EFFECTS (@1)))
2797 (cond (cmp @2 @3) @1 @0)))
2798 (simplify
2799 (plus:c @0 (bit_and:c (minus @1 @0)
2800 (convert? (negate@4 (convert? (cmp@5 @2 @3))))))
2801 (if (INTEGRAL_TYPE_P (type)
2802 && INTEGRAL_TYPE_P (TREE_TYPE (@4))
2803 && TREE_CODE (TREE_TYPE (@4)) != BOOLEAN_TYPE
2804 && INTEGRAL_TYPE_P (TREE_TYPE (@5))
2805 && (TYPE_PRECISION (TREE_TYPE (@4)) >= TYPE_PRECISION (type)
2806 || !TYPE_UNSIGNED (TREE_TYPE (@4)))
2807 && (GIMPLE || !TREE_SIDE_EFFECTS (@1)))
2808 (cond (cmp @2 @3) @1 @0)))
2809 /* Similarly with ^ instead of - though in that case with :c. */
2810 (simplify
2811 (bit_xor:c @0 (bit_and:c (bit_xor:c @0 @1)
2812 (convert? (negate@4 (convert? (cmp@5 @2 @3))))))
2813 (if (INTEGRAL_TYPE_P (type)
2814 && INTEGRAL_TYPE_P (TREE_TYPE (@4))
2815 && TREE_CODE (TREE_TYPE (@4)) != BOOLEAN_TYPE
2816 && INTEGRAL_TYPE_P (TREE_TYPE (@5))
2817 && (TYPE_PRECISION (TREE_TYPE (@4)) >= TYPE_PRECISION (type)
2818 || !TYPE_UNSIGNED (TREE_TYPE (@4)))
2819 && (GIMPLE || !TREE_SIDE_EFFECTS (@1)))
2820 (cond (cmp @2 @3) @1 @0))))
2821
2822 /* Simplifications of shift and rotates. */
2823
2824 (for rotate (lrotate rrotate)
2825 (simplify
2826 (rotate integer_all_onesp@0 @1)
2827 @0))
2828
2829 /* Optimize -1 >> x for arithmetic right shifts. */
2830 (simplify
2831 (rshift integer_all_onesp@0 @1)
2832 (if (!TYPE_UNSIGNED (type)
2833 && tree_expr_nonnegative_p (@1))
2834 @0))
2835
2836 /* Optimize (x >> c) << c into x & (-1<<c). */
2837 (simplify
2838 (lshift (nop_convert? (rshift @0 INTEGER_CST@1)) @1)
2839 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type)))
2840 /* It doesn't matter if the right shift is arithmetic or logical. */
2841 (bit_and (view_convert @0) (lshift { build_minus_one_cst (type); } @1))))
2842
2843 (simplify
2844 (lshift (convert (convert@2 (rshift @0 INTEGER_CST@1))) @1)
2845 (if (wi::ltu_p (wi::to_wide (@1), element_precision (type))
2846 /* Allow intermediate conversion to integral type with whatever sign, as
2847 long as the low TYPE_PRECISION (type)
2848 - TYPE_PRECISION (TREE_TYPE (@2)) bits are preserved. */
2849 && INTEGRAL_TYPE_P (type)
2850 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
2851 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
2852 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0))
2853 && (TYPE_PRECISION (TREE_TYPE (@2)) >= TYPE_PRECISION (type)
2854 || wi::geu_p (wi::to_wide (@1),
2855 TYPE_PRECISION (type)
2856 - TYPE_PRECISION (TREE_TYPE (@2)))))
2857 (bit_and (convert @0) (lshift { build_minus_one_cst (type); } @1))))
2858
2859 /* Optimize (x << c) >> c into x & ((unsigned)-1 >> c) for unsigned
2860 types. */
2861 (simplify
2862 (rshift (lshift @0 INTEGER_CST@1) @1)
2863 (if (TYPE_UNSIGNED (type)
2864 && (wi::ltu_p (wi::to_wide (@1), element_precision (type))))
2865 (bit_and @0 (rshift { build_minus_one_cst (type); } @1))))
2866
2867 (for shiftrotate (lrotate rrotate lshift rshift)
2868 (simplify
2869 (shiftrotate @0 integer_zerop)
2870 (non_lvalue @0))
2871 (simplify
2872 (shiftrotate integer_zerop@0 @1)
2873 @0)
2874 /* Prefer vector1 << scalar to vector1 << vector2
2875 if vector2 is uniform. */
2876 (for vec (VECTOR_CST CONSTRUCTOR)
2877 (simplify
2878 (shiftrotate @0 vec@1)
2879 (with { tree tem = uniform_vector_p (@1); }
2880 (if (tem)
2881 (shiftrotate @0 { tem; }))))))
2882
2883 /* Simplify X << Y where Y's low width bits are 0 to X, as only valid
2884 Y is 0. Similarly for X >> Y. */
2885 #if GIMPLE
2886 (for shift (lshift rshift)
2887 (simplify
2888 (shift @0 SSA_NAME@1)
2889 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
2890 (with {
2891 int width = ceil_log2 (element_precision (TREE_TYPE (@0)));
2892 int prec = TYPE_PRECISION (TREE_TYPE (@1));
2893 }
2894 (if ((get_nonzero_bits (@1) & wi::mask (width, false, prec)) == 0)
2895 @0)))))
2896 #endif
2897
2898 /* Rewrite an LROTATE_EXPR by a constant into an
2899 RROTATE_EXPR by a new constant. */
2900 (simplify
2901 (lrotate @0 INTEGER_CST@1)
2902 (rrotate @0 { const_binop (MINUS_EXPR, TREE_TYPE (@1),
2903 build_int_cst (TREE_TYPE (@1),
2904 element_precision (type)), @1); }))
2905
2906 /* Turn (a OP c1) OP c2 into a OP (c1+c2). */
2907 (for op (lrotate rrotate rshift lshift)
2908 (simplify
2909 (op (op @0 INTEGER_CST@1) INTEGER_CST@2)
2910 (with { unsigned int prec = element_precision (type); }
2911 (if (wi::ge_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1)))
2912 && wi::lt_p (wi::to_wide (@1), prec, TYPE_SIGN (TREE_TYPE (@1)))
2913 && wi::ge_p (wi::to_wide (@2), 0, TYPE_SIGN (TREE_TYPE (@2)))
2914 && wi::lt_p (wi::to_wide (@2), prec, TYPE_SIGN (TREE_TYPE (@2))))
2915 (with { unsigned int low = (tree_to_uhwi (@1)
2916 + tree_to_uhwi (@2)); }
2917 /* Deal with a OP (c1 + c2) being undefined but (a OP c1) OP c2
2918 being well defined. */
2919 (if (low >= prec)
2920 (if (op == LROTATE_EXPR || op == RROTATE_EXPR)
2921 (op @0 { build_int_cst (TREE_TYPE (@1), low % prec); })
2922 (if (TYPE_UNSIGNED (type) || op == LSHIFT_EXPR)
2923 { build_zero_cst (type); }
2924 (op @0 { build_int_cst (TREE_TYPE (@1), prec - 1); })))
2925 (op @0 { build_int_cst (TREE_TYPE (@1), low); })))))))
2926
2927
2928 /* ((1 << A) & 1) != 0 -> A == 0
2929 ((1 << A) & 1) == 0 -> A != 0 */
2930 (for cmp (ne eq)
2931 icmp (eq ne)
2932 (simplify
2933 (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop)
2934 (icmp @0 { build_zero_cst (TREE_TYPE (@0)); })))
2935
2936 /* (CST1 << A) == CST2 -> A == ctz (CST2) - ctz (CST1)
2937 (CST1 << A) != CST2 -> A != ctz (CST2) - ctz (CST1)
2938 if CST2 != 0. */
2939 (for cmp (ne eq)
2940 (simplify
2941 (cmp (lshift INTEGER_CST@0 @1) INTEGER_CST@2)
2942 (with { int cand = wi::ctz (wi::to_wide (@2)) - wi::ctz (wi::to_wide (@0)); }
2943 (if (cand < 0
2944 || (!integer_zerop (@2)
2945 && wi::lshift (wi::to_wide (@0), cand) != wi::to_wide (@2)))
2946 { constant_boolean_node (cmp == NE_EXPR, type); }
2947 (if (!integer_zerop (@2)
2948 && wi::lshift (wi::to_wide (@0), cand) == wi::to_wide (@2))
2949 (cmp @1 { build_int_cst (TREE_TYPE (@1), cand); }))))))
2950
2951 /* Fold (X << C1) & C2 into (X << C1) & (C2 | ((1 << C1) - 1))
2952 (X >> C1) & C2 into (X >> C1) & (C2 | ~((type) -1 >> C1))
2953 if the new mask might be further optimized. */
2954 (for shift (lshift rshift)
2955 (simplify
2956 (bit_and (convert?:s@4 (shift:s@5 (convert1?@3 @0) INTEGER_CST@1))
2957 INTEGER_CST@2)
2958 (if (tree_nop_conversion_p (TREE_TYPE (@4), TREE_TYPE (@5))
2959 && TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
2960 && tree_fits_uhwi_p (@1)
2961 && tree_to_uhwi (@1) > 0
2962 && tree_to_uhwi (@1) < TYPE_PRECISION (type))
2963 (with
2964 {
2965 unsigned int shiftc = tree_to_uhwi (@1);
2966 unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (@2);
2967 unsigned HOST_WIDE_INT newmask, zerobits = 0;
2968 tree shift_type = TREE_TYPE (@3);
2969 unsigned int prec;
2970
2971 if (shift == LSHIFT_EXPR)
2972 zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
2973 else if (shift == RSHIFT_EXPR
2974 && type_has_mode_precision_p (shift_type))
2975 {
2976 prec = TYPE_PRECISION (TREE_TYPE (@3));
2977 tree arg00 = @0;
2978 /* See if more bits can be proven as zero because of
2979 zero extension. */
2980 if (@3 != @0
2981 && TYPE_UNSIGNED (TREE_TYPE (@0)))
2982 {
2983 tree inner_type = TREE_TYPE (@0);
2984 if (type_has_mode_precision_p (inner_type)
2985 && TYPE_PRECISION (inner_type) < prec)
2986 {
2987 prec = TYPE_PRECISION (inner_type);
2988 /* See if we can shorten the right shift. */
2989 if (shiftc < prec)
2990 shift_type = inner_type;
2991 /* Otherwise X >> C1 is all zeros, so we'll optimize
2992 it into (X, 0) later on by making sure zerobits
2993 is all ones. */
2994 }
2995 }
2996 zerobits = HOST_WIDE_INT_M1U;
2997 if (shiftc < prec)
2998 {
2999 zerobits >>= HOST_BITS_PER_WIDE_INT - shiftc;
3000 zerobits <<= prec - shiftc;
3001 }
3002 /* For arithmetic shift if sign bit could be set, zerobits
3003 can contain actually sign bits, so no transformation is
3004 possible, unless MASK masks them all away. In that
3005 case the shift needs to be converted into logical shift. */
3006 if (!TYPE_UNSIGNED (TREE_TYPE (@3))
3007 && prec == TYPE_PRECISION (TREE_TYPE (@3)))
3008 {
3009 if ((mask & zerobits) == 0)
3010 shift_type = unsigned_type_for (TREE_TYPE (@3));
3011 else
3012 zerobits = 0;
3013 }
3014 }
3015 }
3016 /* ((X << 16) & 0xff00) is (X, 0). */
3017 (if ((mask & zerobits) == mask)
3018 { build_int_cst (type, 0); }
3019 (with { newmask = mask | zerobits; }
3020 (if (newmask != mask && (newmask & (newmask + 1)) == 0)
3021 (with
3022 {
3023 /* Only do the transformation if NEWMASK is some integer
3024 mode's mask. */
3025 for (prec = BITS_PER_UNIT;
3026 prec < HOST_BITS_PER_WIDE_INT; prec <<= 1)
3027 if (newmask == (HOST_WIDE_INT_1U << prec) - 1)
3028 break;
3029 }
3030 (if (prec < HOST_BITS_PER_WIDE_INT
3031 || newmask == HOST_WIDE_INT_M1U)
3032 (with
3033 { tree newmaskt = build_int_cst_type (TREE_TYPE (@2), newmask); }
3034 (if (!tree_int_cst_equal (newmaskt, @2))
3035 (if (shift_type != TREE_TYPE (@3))
3036 (bit_and (convert (shift:shift_type (convert @3) @1)) { newmaskt; })
3037 (bit_and @4 { newmaskt; })))))))))))))
3038
3039 /* Fold (X {&,^,|} C2) << C1 into (X << C1) {&,^,|} (C2 << C1)
3040 (X {&,^,|} C2) >> C1 into (X >> C1) & (C2 >> C1). */
3041 (for shift (lshift rshift)
3042 (for bit_op (bit_and bit_xor bit_ior)
3043 (simplify
3044 (shift (convert?:s (bit_op:s @0 INTEGER_CST@2)) INTEGER_CST@1)
3045 (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
3046 (with { tree mask = int_const_binop (shift, fold_convert (type, @2), @1); }
3047 (bit_op (shift (convert @0) @1) { mask; }))))))
3048
3049 /* ~(~X >> Y) -> X >> Y (for arithmetic shift). */
3050 (simplify
3051 (bit_not (convert1?:s (rshift:s (convert2?@0 (bit_not @1)) @2)))
3052 (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
3053 && (element_precision (TREE_TYPE (@0))
3054 <= element_precision (TREE_TYPE (@1))
3055 || !TYPE_UNSIGNED (TREE_TYPE (@1))))
3056 (with
3057 { tree shift_type = TREE_TYPE (@0); }
3058 (convert (rshift (convert:shift_type @1) @2)))))
3059
3060 /* ~(~X >>r Y) -> X >>r Y
3061 ~(~X <<r Y) -> X <<r Y */
3062 (for rotate (lrotate rrotate)
3063 (simplify
3064 (bit_not (convert1?:s (rotate:s (convert2?@0 (bit_not @1)) @2)))
3065 (if ((element_precision (TREE_TYPE (@0))
3066 <= element_precision (TREE_TYPE (@1))
3067 || !TYPE_UNSIGNED (TREE_TYPE (@1)))
3068 && (element_precision (type) <= element_precision (TREE_TYPE (@0))
3069 || !TYPE_UNSIGNED (TREE_TYPE (@0))))
3070 (with
3071 { tree rotate_type = TREE_TYPE (@0); }
3072 (convert (rotate (convert:rotate_type @1) @2))))))
3073
3074 /* Simplifications of conversions. */
3075
3076 /* Basic strip-useless-type-conversions / strip_nops. */
3077 (for cvt (convert view_convert float fix_trunc)
3078 (simplify
3079 (cvt @0)
3080 (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
3081 || (GENERIC && type == TREE_TYPE (@0)))
3082 @0)))
3083
3084 /* Contract view-conversions. */
3085 (simplify
3086 (view_convert (view_convert @0))
3087 (view_convert @0))
3088
3089 /* For integral conversions with the same precision or pointer
3090 conversions use a NOP_EXPR instead. */
3091 (simplify
3092 (view_convert @0)
3093 (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
3094 && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
3095 && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
3096 (convert @0)))
3097
3098 /* Strip inner integral conversions that do not change precision or size, or
3099 zero-extend while keeping the same size (for bool-to-char). */
3100 (simplify
3101 (view_convert (convert@0 @1))
3102 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
3103 && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
3104 && TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))
3105 && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1))
3106 || (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@1))
3107 && TYPE_UNSIGNED (TREE_TYPE (@1)))))
3108 (view_convert @1)))
3109
3110 /* Simplify a view-converted empty constructor. */
3111 (simplify
3112 (view_convert CONSTRUCTOR@0)
3113 (if (TREE_CODE (@0) != SSA_NAME
3114 && CONSTRUCTOR_NELTS (@0) == 0)
3115 { build_zero_cst (type); }))
3116
3117 /* Re-association barriers around constants and other re-association
3118 barriers can be removed. */
3119 (simplify
3120 (paren CONSTANT_CLASS_P@0)
3121 @0)
3122 (simplify
3123 (paren (paren@1 @0))
3124 @1)
3125
3126 /* Handle cases of two conversions in a row. */
3127 (for ocvt (convert float fix_trunc)
3128 (for icvt (convert float)
3129 (simplify
3130 (ocvt (icvt@1 @0))
3131 (with
3132 {
3133 tree inside_type = TREE_TYPE (@0);
3134 tree inter_type = TREE_TYPE (@1);
3135 int inside_int = INTEGRAL_TYPE_P (inside_type);
3136 int inside_ptr = POINTER_TYPE_P (inside_type);
3137 int inside_float = FLOAT_TYPE_P (inside_type);
3138 int inside_vec = VECTOR_TYPE_P (inside_type);
3139 unsigned int inside_prec = TYPE_PRECISION (inside_type);
3140 int inside_unsignedp = TYPE_UNSIGNED (inside_type);
3141 int inter_int = INTEGRAL_TYPE_P (inter_type);
3142 int inter_ptr = POINTER_TYPE_P (inter_type);
3143 int inter_float = FLOAT_TYPE_P (inter_type);
3144 int inter_vec = VECTOR_TYPE_P (inter_type);
3145 unsigned int inter_prec = TYPE_PRECISION (inter_type);
3146 int inter_unsignedp = TYPE_UNSIGNED (inter_type);
3147 int final_int = INTEGRAL_TYPE_P (type);
3148 int final_ptr = POINTER_TYPE_P (type);
3149 int final_float = FLOAT_TYPE_P (type);
3150 int final_vec = VECTOR_TYPE_P (type);
3151 unsigned int final_prec = TYPE_PRECISION (type);
3152 int final_unsignedp = TYPE_UNSIGNED (type);
3153 }
3154 (switch
3155 /* In addition to the cases of two conversions in a row
3156 handled below, if we are converting something to its own
3157 type via an object of identical or wider precision, neither
3158 conversion is needed. */
3159 (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
3160 || (GENERIC
3161 && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
3162 && (((inter_int || inter_ptr) && final_int)
3163 || (inter_float && final_float))
3164 && inter_prec >= final_prec)
3165 (ocvt @0))
3166
3167 /* Likewise, if the intermediate and initial types are either both
3168 float or both integer, we don't need the middle conversion if the
3169 former is wider than the latter and doesn't change the signedness
3170 (for integers). Avoid this if the final type is a pointer since
3171 then we sometimes need the middle conversion. */
3172 (if (((inter_int && inside_int) || (inter_float && inside_float))
3173 && (final_int || final_float)
3174 && inter_prec >= inside_prec
3175 && (inter_float || inter_unsignedp == inside_unsignedp))
3176 (ocvt @0))
3177
3178 /* If we have a sign-extension of a zero-extended value, we can
3179 replace that by a single zero-extension. Likewise if the
3180 final conversion does not change precision we can drop the
3181 intermediate conversion. */
3182 (if (inside_int && inter_int && final_int
3183 && ((inside_prec < inter_prec && inter_prec < final_prec
3184 && inside_unsignedp && !inter_unsignedp)
3185 || final_prec == inter_prec))
3186 (ocvt @0))
3187
3188 /* Two conversions in a row are not needed unless:
3189 - some conversion is floating-point (overstrict for now), or
3190 - some conversion is a vector (overstrict for now), or
3191 - the intermediate type is narrower than both initial and
3192 final, or
3193 - the intermediate type and innermost type differ in signedness,
3194 and the outermost type is wider than the intermediate, or
3195 - the initial type is a pointer type and the precisions of the
3196 intermediate and final types differ, or
3197 - the final type is a pointer type and the precisions of the
3198 initial and intermediate types differ. */
3199 (if (! inside_float && ! inter_float && ! final_float
3200 && ! inside_vec && ! inter_vec && ! final_vec
3201 && (inter_prec >= inside_prec || inter_prec >= final_prec)
3202 && ! (inside_int && inter_int
3203 && inter_unsignedp != inside_unsignedp
3204 && inter_prec < final_prec)
3205 && ((inter_unsignedp && inter_prec > inside_prec)
3206 == (final_unsignedp && final_prec > inter_prec))
3207 && ! (inside_ptr && inter_prec != final_prec)
3208 && ! (final_ptr && inside_prec != inter_prec))
3209 (ocvt @0))
3210
3211 /* A truncation to an unsigned type (a zero-extension) should be
3212 canonicalized as bitwise and of a mask. */
3213 (if (GIMPLE /* PR70366: doing this in GENERIC breaks -Wconversion. */
3214 && final_int && inter_int && inside_int
3215 && final_prec == inside_prec
3216 && final_prec > inter_prec
3217 && inter_unsignedp)
3218 (convert (bit_and @0 { wide_int_to_tree
3219 (inside_type,
3220 wi::mask (inter_prec, false,
3221 TYPE_PRECISION (inside_type))); })))
3222
3223 /* If we are converting an integer to a floating-point that can
3224 represent it exactly and back to an integer, we can skip the
3225 floating-point conversion. */
3226 (if (GIMPLE /* PR66211 */
3227 && inside_int && inter_float && final_int &&
3228 (unsigned) significand_size (TYPE_MODE (inter_type))
3229 >= inside_prec - !inside_unsignedp)
3230 (convert @0)))))))
3231
3232 /* If we have a narrowing conversion to an integral type that is fed by a
3233 BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
3234 masks off bits outside the final type (and nothing else). */
3235 (simplify
3236 (convert (bit_and @0 INTEGER_CST@1))
3237 (if (INTEGRAL_TYPE_P (type)
3238 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
3239 && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
3240 && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
3241 TYPE_PRECISION (type)), 0))
3242 (convert @0)))
3243
3244
3245 /* (X /[ex] A) * A -> X. */
3246 (simplify
3247 (mult (convert1? (exact_div @0 @@1)) (convert2? @1))
3248 (convert @0))
3249
3250 /* Simplify (A / B) * B + (A % B) -> A. */
3251 (for div (trunc_div ceil_div floor_div round_div)
3252 mod (trunc_mod ceil_mod floor_mod round_mod)
3253 (simplify
3254 (plus:c (mult:c (div @0 @1) @1) (mod @0 @1))
3255 @0))
3256
3257 /* ((X /[ex] A) +- B) * A --> X +- A * B. */
3258 (for op (plus minus)
3259 (simplify
3260 (mult (convert1? (op (convert2? (exact_div @0 INTEGER_CST@@1)) INTEGER_CST@2)) @1)
3261 (if (tree_nop_conversion_p (type, TREE_TYPE (@2))
3262 && tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2)))
3263 (with
3264 {
3265 wi::overflow_type overflow;
3266 wide_int mul = wi::mul (wi::to_wide (@1), wi::to_wide (@2),
3267 TYPE_SIGN (type), &overflow);
3268 }
3269 (if (types_match (type, TREE_TYPE (@2))
3270 && types_match (TREE_TYPE (@0), TREE_TYPE (@2)) && !overflow)
3271 (op @0 { wide_int_to_tree (type, mul); })
3272 (with { tree utype = unsigned_type_for (type); }
3273 (convert (op (convert:utype @0)
3274 (mult (convert:utype @1) (convert:utype @2))))))))))
3275
3276 /* Canonicalization of binary operations. */
3277
3278 /* Convert X + -C into X - C. */
3279 (simplify
3280 (plus @0 REAL_CST@1)
3281 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
3282 (with { tree tem = const_unop (NEGATE_EXPR, type, @1); }
3283 (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
3284 (minus @0 { tem; })))))
3285
3286 /* Convert x+x into x*2. */
3287 (simplify
3288 (plus @0 @0)
3289 (if (SCALAR_FLOAT_TYPE_P (type))
3290 (mult @0 { build_real (type, dconst2); })
3291 (if (INTEGRAL_TYPE_P (type))
3292 (mult @0 { build_int_cst (type, 2); }))))
3293
3294 /* 0 - X -> -X. */
3295 (simplify
3296 (minus integer_zerop @1)
3297 (negate @1))
3298 (simplify
3299 (pointer_diff integer_zerop @1)
3300 (negate (convert @1)))
3301
3302 /* (ARG0 - ARG1) is the same as (-ARG1 + ARG0). So check whether
3303 ARG0 is zero and X + ARG0 reduces to X, since that would mean
3304 (-ARG1 + ARG0) reduces to -ARG1. */
3305 (simplify
3306 (minus real_zerop@0 @1)
3307 (if (fold_real_zero_addition_p (type, @0, 0))
3308 (negate @1)))
3309
3310 /* Transform x * -1 into -x. */
3311 (simplify
3312 (mult @0 integer_minus_onep)
3313 (negate @0))
3314
3315 /* Reassociate (X * CST) * Y to (X * Y) * CST. This does not introduce
3316 signed overflow for CST != 0 && CST != -1. */
3317 (simplify
3318 (mult:c (mult:s@3 @0 INTEGER_CST@1) @2)
3319 (if (TREE_CODE (@2) != INTEGER_CST
3320 && single_use (@3)
3321 && !integer_zerop (@1) && !integer_minus_onep (@1))
3322 (mult (mult @0 @2) @1)))
3323
3324 /* True if we can easily extract the real and imaginary parts of a complex
3325 number. */
3326 (match compositional_complex
3327 (convert? (complex @0 @1)))
3328
3329 /* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations. */
3330 (simplify
3331 (complex (realpart @0) (imagpart @0))
3332 @0)
3333 (simplify
3334 (realpart (complex @0 @1))
3335 @0)
3336 (simplify
3337 (imagpart (complex @0 @1))
3338 @1)
3339
3340 /* Sometimes we only care about half of a complex expression. */
3341 (simplify
3342 (realpart (convert?:s (conj:s @0)))
3343 (convert (realpart @0)))
3344 (simplify
3345 (imagpart (convert?:s (conj:s @0)))
3346 (convert (negate (imagpart @0))))
3347 (for part (realpart imagpart)
3348 (for op (plus minus)
3349 (simplify
3350 (part (convert?:s@2 (op:s @0 @1)))
3351 (convert (op (part @0) (part @1))))))
3352 (simplify
3353 (realpart (convert?:s (CEXPI:s @0)))
3354 (convert (COS @0)))
3355 (simplify
3356 (imagpart (convert?:s (CEXPI:s @0)))
3357 (convert (SIN @0)))
3358
3359 /* conj(conj(x)) -> x */
3360 (simplify
3361 (conj (convert? (conj @0)))
3362 (if (tree_nop_conversion_p (TREE_TYPE (@0), type))
3363 (convert @0)))
3364
3365 /* conj({x,y}) -> {x,-y} */
3366 (simplify
3367 (conj (convert?:s (complex:s @0 @1)))
3368 (with { tree itype = TREE_TYPE (type); }
3369 (complex (convert:itype @0) (negate (convert:itype @1)))))
3370
3371 /* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c. */
3372 (for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
3373 (simplify
3374 (bswap (bswap @0))
3375 @0)
3376 (simplify
3377 (bswap (bit_not (bswap @0)))
3378 (bit_not @0))
3379 (for bitop (bit_xor bit_ior bit_and)
3380 (simplify
3381 (bswap (bitop:c (bswap @0) @1))
3382 (bitop @0 (bswap @1)))))
3383
3384
3385 /* Combine COND_EXPRs and VEC_COND_EXPRs. */
3386
3387 /* Simplify constant conditions.
3388 Only optimize constant conditions when the selected branch
3389 has the same type as the COND_EXPR. This avoids optimizing
3390 away "c ? x : throw", where the throw has a void type.
3391 Note that we cannot throw away the fold-const.c variant nor
3392 this one as we depend on doing this transform before possibly
3393 A ? B : B -> B triggers and the fold-const.c one can optimize
3394 0 ? A : B to B even if A has side-effects. Something
3395 genmatch cannot handle. */
3396 (simplify
3397 (cond INTEGER_CST@0 @1 @2)
3398 (if (integer_zerop (@0))
3399 (if (!VOID_TYPE_P (TREE_TYPE (@2)) || VOID_TYPE_P (type))
3400 @2)
3401 (if (!VOID_TYPE_P (TREE_TYPE (@1)) || VOID_TYPE_P (type))
3402 @1)))
3403 (simplify
3404 (vec_cond VECTOR_CST@0 @1 @2)
3405 (if (integer_all_onesp (@0))
3406 @1
3407 (if (integer_zerop (@0))
3408 @2)))
3409
3410 /* Sink unary operations to constant branches, but only if we do fold it to
3411 constants. */
3412 (for op (negate bit_not abs absu)
3413 (simplify
3414 (op (vec_cond @0 VECTOR_CST@1 VECTOR_CST@2))
3415 (with
3416 {
3417 tree cst1, cst2;
3418 cst1 = const_unop (op, type, @1);
3419 if (cst1)
3420 cst2 = const_unop (op, type, @2);
3421 }
3422 (if (cst1 && cst2)
3423 (vec_cond @0 { cst1; } { cst2; })))))
3424
3425 /* Simplification moved from fold_cond_expr_with_comparison. It may also
3426 be extended. */
3427 /* This pattern implements two kinds simplification:
3428
3429 Case 1)
3430 (cond (cmp (convert1? x) c1) (convert2? x) c2) -> (minmax (x c)) if:
3431 1) Conversions are type widening from smaller type.
3432 2) Const c1 equals to c2 after canonicalizing comparison.
3433 3) Comparison has tree code LT, LE, GT or GE.
3434 This specific pattern is needed when (cmp (convert x) c) may not
3435 be simplified by comparison patterns because of multiple uses of
3436 x. It also makes sense here because simplifying across multiple
3437 referred var is always benefitial for complicated cases.
3438
3439 Case 2)
3440 (cond (eq (convert1? x) c1) (convert2? x) c2) -> (cond (eq x c1) c1 c2). */
3441 (for cmp (lt le gt ge eq)
3442 (simplify
3443 (cond (cmp (convert1? @1) INTEGER_CST@3) (convert2? @1) INTEGER_CST@2)
3444 (with
3445 {
3446 tree from_type = TREE_TYPE (@1);
3447 tree c1_type = TREE_TYPE (@3), c2_type = TREE_TYPE (@2);
3448 enum tree_code code = ERROR_MARK;
3449
3450 if (INTEGRAL_TYPE_P (from_type)
3451 && int_fits_type_p (@2, from_type)
3452 && (types_match (c1_type, from_type)
3453 || (TYPE_PRECISION (c1_type) > TYPE_PRECISION (from_type)
3454 && (TYPE_UNSIGNED (from_type)
3455 || TYPE_SIGN (c1_type) == TYPE_SIGN (from_type))))
3456 && (types_match (c2_type, from_type)
3457 || (TYPE_PRECISION (c2_type) > TYPE_PRECISION (from_type)
3458 && (TYPE_UNSIGNED (from_type)
3459 || TYPE_SIGN (c2_type) == TYPE_SIGN (from_type)))))
3460 {
3461 if (cmp != EQ_EXPR)
3462 {
3463 if (wi::to_widest (@3) == (wi::to_widest (@2) - 1))
3464 {
3465 /* X <= Y - 1 equals to X < Y. */
3466 if (cmp == LE_EXPR)
3467 code = LT_EXPR;
3468 /* X > Y - 1 equals to X >= Y. */
3469 if (cmp == GT_EXPR)
3470 code = GE_EXPR;
3471 }
3472 if (wi::to_widest (@3) == (wi::to_widest (@2) + 1))
3473 {
3474 /* X < Y + 1 equals to X <= Y. */
3475 if (cmp == LT_EXPR)
3476 code = LE_EXPR;
3477 /* X >= Y + 1 equals to X > Y. */
3478 if (cmp == GE_EXPR)
3479 code = GT_EXPR;
3480 }
3481 if (code != ERROR_MARK
3482 || wi::to_widest (@2) == wi::to_widest (@3))
3483 {
3484 if (cmp == LT_EXPR || cmp == LE_EXPR)
3485 code = MIN_EXPR;
3486 if (cmp == GT_EXPR || cmp == GE_EXPR)
3487 code = MAX_EXPR;
3488 }
3489 }
3490 /* Can do A == C1 ? A : C2 -> A == C1 ? C1 : C2? */
3491 else if (int_fits_type_p (@3, from_type))
3492 code = EQ_EXPR;
3493 }
3494 }
3495 (if (code == MAX_EXPR)
3496 (convert (max @1 (convert @2)))
3497 (if (code == MIN_EXPR)
3498 (convert (min @1 (convert @2)))
3499 (if (code == EQ_EXPR)
3500 (convert (cond (eq @1 (convert @3))
3501 (convert:from_type @3) (convert:from_type @2)))))))))
3502
3503 /* (cond (cmp (convert? x) c1) (op x c2) c3) -> (op (minmax x c1) c2) if:
3504
3505 1) OP is PLUS or MINUS.
3506 2) CMP is LT, LE, GT or GE.
3507 3) C3 == (C1 op C2), and computation doesn't have undefined behavior.
3508
3509 This pattern also handles special cases like:
3510
3511 A) Operand x is a unsigned to signed type conversion and c1 is
3512 integer zero. In this case,
3513 (signed type)x < 0 <=> x > MAX_VAL(signed type)
3514 (signed type)x >= 0 <=> x <= MAX_VAL(signed type)
3515 B) Const c1 may not equal to (C3 op' C2). In this case we also
3516 check equality for (c1+1) and (c1-1) by adjusting comparison
3517 code.
3518
3519 TODO: Though signed type is handled by this pattern, it cannot be
3520 simplified at the moment because C standard requires additional
3521 type promotion. In order to match&simplify it here, the IR needs
3522 to be cleaned up by other optimizers, i.e, VRP. */
3523 (for op (plus minus)
3524 (for cmp (lt le gt ge)
3525 (simplify
3526 (cond (cmp (convert? @X) INTEGER_CST@1) (op @X INTEGER_CST@2) INTEGER_CST@3)
3527 (with { tree from_type = TREE_TYPE (@X), to_type = TREE_TYPE (@1); }
3528 (if (types_match (from_type, to_type)
3529 /* Check if it is special case A). */
3530 || (TYPE_UNSIGNED (from_type)
3531 && !TYPE_UNSIGNED (to_type)
3532 && TYPE_PRECISION (from_type) == TYPE_PRECISION (to_type)
3533 && integer_zerop (@1)
3534 && (cmp == LT_EXPR || cmp == GE_EXPR)))
3535 (with
3536 {
3537 wi::overflow_type overflow = wi::OVF_NONE;
3538 enum tree_code code, cmp_code = cmp;
3539 wide_int real_c1;
3540 wide_int c1 = wi::to_wide (@1);
3541 wide_int c2 = wi::to_wide (@2);
3542 wide_int c3 = wi::to_wide (@3);
3543 signop sgn = TYPE_SIGN (from_type);
3544
3545 /* Handle special case A), given x of unsigned type:
3546 ((signed type)x < 0) <=> (x > MAX_VAL(signed type))
3547 ((signed type)x >= 0) <=> (x <= MAX_VAL(signed type)) */
3548 if (!types_match (from_type, to_type))
3549 {
3550 if (cmp_code == LT_EXPR)
3551 cmp_code = GT_EXPR;
3552 if (cmp_code == GE_EXPR)
3553 cmp_code = LE_EXPR;
3554 c1 = wi::max_value (to_type);
3555 }
3556 /* To simplify this pattern, we require c3 = (c1 op c2). Here we
3557 compute (c3 op' c2) and check if it equals to c1 with op' being
3558 the inverted operator of op. Make sure overflow doesn't happen
3559 if it is undefined. */
3560 if (op == PLUS_EXPR)
3561 real_c1 = wi::sub (c3, c2, sgn, &overflow);
3562 else
3563 real_c1 = wi::add (c3, c2, sgn, &overflow);
3564
3565 code = cmp_code;
3566 if (!overflow || !TYPE_OVERFLOW_UNDEFINED (from_type))
3567 {
3568 /* Check if c1 equals to real_c1. Boundary condition is handled
3569 by adjusting comparison operation if necessary. */
3570 if (!wi::cmp (wi::sub (real_c1, 1, sgn, &overflow), c1, sgn)
3571 && !overflow)
3572 {
3573 /* X <= Y - 1 equals to X < Y. */
3574 if (cmp_code == LE_EXPR)
3575 code = LT_EXPR;
3576 /* X > Y - 1 equals to X >= Y. */
3577 if (cmp_code == GT_EXPR)
3578 code = GE_EXPR;
3579 }
3580 if (!wi::cmp (wi::add (real_c1, 1, sgn, &overflow), c1, sgn)
3581 && !overflow)
3582 {
3583 /* X < Y + 1 equals to X <= Y. */
3584 if (cmp_code == LT_EXPR)
3585 code = LE_EXPR;
3586 /* X >= Y + 1 equals to X > Y. */
3587 if (cmp_code == GE_EXPR)
3588 code = GT_EXPR;
3589 }
3590 if (code != cmp_code || !wi::cmp (real_c1, c1, sgn))
3591 {
3592 if (cmp_code == LT_EXPR || cmp_code == LE_EXPR)
3593 code = MIN_EXPR;
3594 if (cmp_code == GT_EXPR || cmp_code == GE_EXPR)
3595 code = MAX_EXPR;
3596 }
3597 }
3598 }
3599 (if (code == MAX_EXPR)
3600 (op (max @X { wide_int_to_tree (from_type, real_c1); })
3601 { wide_int_to_tree (from_type, c2); })
3602 (if (code == MIN_EXPR)
3603 (op (min @X { wide_int_to_tree (from_type, real_c1); })
3604 { wide_int_to_tree (from_type, c2); })))))))))
3605
3606 (for cnd (cond vec_cond)
3607 /* A ? B : (A ? X : C) -> A ? B : C. */
3608 (simplify
3609 (cnd @0 (cnd @0 @1 @2) @3)
3610 (cnd @0 @1 @3))
3611 (simplify
3612 (cnd @0 @1 (cnd @0 @2 @3))
3613 (cnd @0 @1 @3))
3614 /* A ? B : (!A ? C : X) -> A ? B : C. */
3615 /* ??? This matches embedded conditions open-coded because genmatch
3616 would generate matching code for conditions in separate stmts only.
3617 The following is still important to merge then and else arm cases
3618 from if-conversion. */
3619 (simplify
3620 (cnd @0 @1 (cnd @2 @3 @4))
3621 (if (inverse_conditions_p (@0, @2))
3622 (cnd @0 @1 @3)))
3623 (simplify
3624 (cnd @0 (cnd @1 @2 @3) @4)
3625 (if (inverse_conditions_p (@0, @1))
3626 (cnd @0 @3 @4)))
3627
3628 /* A ? B : B -> B. */
3629 (simplify
3630 (cnd @0 @1 @1)
3631 @1)
3632
3633 /* !A ? B : C -> A ? C : B. */
3634 (simplify
3635 (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
3636 (cnd @0 @2 @1)))
3637
3638 /* A + (B vcmp C ? 1 : 0) -> A - (B vcmp C ? -1 : 0), since vector comparisons
3639 return all -1 or all 0 results. */
3640 /* ??? We could instead convert all instances of the vec_cond to negate,
3641 but that isn't necessarily a win on its own. */
3642 (simplify
3643 (plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
3644 (if (VECTOR_TYPE_P (type)
3645 && known_eq (TYPE_VECTOR_SUBPARTS (type),
3646 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
3647 && (TYPE_MODE (TREE_TYPE (type))
3648 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
3649 (minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
3650
3651 /* ... likewise A - (B vcmp C ? 1 : 0) -> A + (B vcmp C ? -1 : 0). */
3652 (simplify
3653 (minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
3654 (if (VECTOR_TYPE_P (type)
3655 && known_eq (TYPE_VECTOR_SUBPARTS (type),
3656 TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
3657 && (TYPE_MODE (TREE_TYPE (type))
3658 == TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
3659 (plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
3660
3661
3662 /* Simplifications of comparisons. */
3663
3664 /* See if we can reduce the magnitude of a constant involved in a
3665 comparison by changing the comparison code. This is a canonicalization
3666 formerly done by maybe_canonicalize_comparison_1. */
3667 (for cmp (le gt)
3668 acmp (lt ge)
3669 (simplify
3670 (cmp @0 uniform_integer_cst_p@1)
3671 (with { tree cst = uniform_integer_cst_p (@1); }
3672 (if (tree_int_cst_sgn (cst) == -1)
3673 (acmp @0 { build_uniform_cst (TREE_TYPE (@1),
3674 wide_int_to_tree (TREE_TYPE (cst),
3675 wi::to_wide (cst)
3676 + 1)); })))))
3677 (for cmp (ge lt)
3678 acmp (gt le)
3679 (simplify
3680 (cmp @0 uniform_integer_cst_p@1)
3681 (with { tree cst = uniform_integer_cst_p (@1); }
3682 (if (tree_int_cst_sgn (cst) == 1)
3683 (acmp @0 { build_uniform_cst (TREE_TYPE (@1),
3684 wide_int_to_tree (TREE_TYPE (cst),
3685 wi::to_wide (cst) - 1)); })))))
3686
3687 /* We can simplify a logical negation of a comparison to the
3688 inverted comparison. As we cannot compute an expression
3689 operator using invert_tree_comparison we have to simulate
3690 that with expression code iteration. */
3691 (for cmp (tcc_comparison)
3692 icmp (inverted_tcc_comparison)
3693 ncmp (inverted_tcc_comparison_with_nans)
3694 /* Ideally we'd like to combine the following two patterns
3695 and handle some more cases by using
3696 (logical_inverted_value (cmp @0 @1))
3697 here but for that genmatch would need to "inline" that.
3698 For now implement what forward_propagate_comparison did. */
3699 (simplify
3700 (bit_not (cmp @0 @1))
3701 (if (VECTOR_TYPE_P (type)
3702 || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
3703 /* Comparison inversion may be impossible for trapping math,
3704 invert_tree_comparison will tell us. But we can't use
3705 a computed operator in the replacement tree thus we have
3706 to play the trick below. */
3707 (with { enum tree_code ic = invert_tree_comparison
3708 (cmp, HONOR_NANS (@0)); }
3709 (if (ic == icmp)
3710 (icmp @0 @1)
3711 (if (ic == ncmp)
3712 (ncmp @0 @1))))))
3713 (simplify
3714 (bit_xor (cmp @0 @1) integer_truep)
3715 (with { enum tree_code ic = invert_tree_comparison
3716 (cmp, HONOR_NANS (@0)); }
3717 (if (ic == icmp)
3718 (icmp @0 @1)
3719 (if (ic == ncmp)
3720 (ncmp @0 @1))))))
3721
3722 /* Transform comparisons of the form X - Y CMP 0 to X CMP Y.
3723 ??? The transformation is valid for the other operators if overflow
3724 is undefined for the type, but performing it here badly interacts
3725 with the transformation in fold_cond_expr_with_comparison which
3726 attempts to synthetize ABS_EXPR. */
3727 (for cmp (eq ne)
3728 (for sub (minus pointer_diff)
3729 (simplify
3730 (cmp (sub@2 @0 @1) integer_zerop)
3731 (if (single_use (@2))
3732 (cmp @0 @1)))))
3733
3734 /* Transform comparisons of the form X * C1 CMP 0 to X CMP 0 in the
3735 signed arithmetic case. That form is created by the compiler
3736 often enough for folding it to be of value. One example is in
3737 computing loop trip counts after Operator Strength Reduction. */
3738 (for cmp (simple_comparison)
3739 scmp (swapped_simple_comparison)
3740 (simplify
3741 (cmp (mult@3 @0 INTEGER_CST@1) integer_zerop@2)
3742 /* Handle unfolded multiplication by zero. */
3743 (if (integer_zerop (@1))
3744 (cmp @1 @2)
3745 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
3746 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
3747 && single_use (@3))
3748 /* If @1 is negative we swap the sense of the comparison. */
3749 (if (tree_int_cst_sgn (@1) < 0)
3750 (scmp @0 @2)
3751 (cmp @0 @2))))))
3752
3753 /* Simplify comparison of something with itself. For IEEE
3754 floating-point, we can only do some of these simplifications. */
3755 (for cmp (eq ge le)
3756 (simplify
3757 (cmp @0 @0)
3758 (if (! FLOAT_TYPE_P (TREE_TYPE (@0))
3759 || ! HONOR_NANS (@0))
3760 { constant_boolean_node (true, type); }
3761 (if (cmp != EQ_EXPR)
3762 (eq @0 @0)))))
3763 (for cmp (ne gt lt)
3764 (simplify
3765 (cmp @0 @0)
3766 (if (cmp != NE_EXPR
3767 || ! FLOAT_TYPE_P (TREE_TYPE (@0))
3768 || ! HONOR_NANS (@0))
3769 { constant_boolean_node (false, type); })))
3770 (for cmp (unle unge uneq)
3771 (simplify
3772 (cmp @0 @0)
3773 { constant_boolean_node (true, type); }))
3774 (for cmp (unlt ungt)
3775 (simplify
3776 (cmp @0 @0)
3777 (unordered @0 @0)))
3778 (simplify
3779 (ltgt @0 @0)
3780 (if (!flag_trapping_math)
3781 { constant_boolean_node (false, type); }))
3782
3783 /* Fold ~X op ~Y as Y op X. */
3784 (for cmp (simple_comparison)
3785 (simplify
3786 (cmp (bit_not@2 @0) (bit_not@3 @1))
3787 (if (single_use (@2) && single_use (@3))
3788 (cmp @1 @0))))
3789
3790 /* Fold ~X op C as X op' ~C, where op' is the swapped comparison. */
3791 (for cmp (simple_comparison)
3792 scmp (swapped_simple_comparison)
3793 (simplify
3794 (cmp (bit_not@2 @0) CONSTANT_CLASS_P@1)
3795 (if (single_use (@2)
3796 && (TREE_CODE (@1) == INTEGER_CST || TREE_CODE (@1) == VECTOR_CST))
3797 (scmp @0 (bit_not @1)))))
3798
3799 (for cmp (simple_comparison)
3800 /* Fold (double)float1 CMP (double)float2 into float1 CMP float2. */
3801 (simplify
3802 (cmp (convert@2 @0) (convert? @1))
3803 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
3804 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
3805 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
3806 && (DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@2))
3807 == DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1))))
3808 (with
3809 {
3810 tree type1 = TREE_TYPE (@1);
3811 if (TREE_CODE (@1) == REAL_CST && !DECIMAL_FLOAT_TYPE_P (type1))
3812 {
3813 REAL_VALUE_TYPE orig = TREE_REAL_CST (@1);
3814 if (TYPE_PRECISION (type1) > TYPE_PRECISION (float_type_node)
3815 && exact_real_truncate (TYPE_MODE (float_type_node), &orig))
3816 type1 = float_type_node;
3817 if (TYPE_PRECISION (type1) > TYPE_PRECISION (double_type_node)
3818 && exact_real_truncate (TYPE_MODE (double_type_node), &orig))
3819 type1 = double_type_node;
3820 }
3821 tree newtype
3822 = (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (type1)
3823 ? TREE_TYPE (@0) : type1);
3824 }
3825 (if (TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (newtype))
3826 (cmp (convert:newtype @0) (convert:newtype @1))))))
3827
3828 (simplify
3829 (cmp @0 REAL_CST@1)
3830 /* IEEE doesn't distinguish +0 and -0 in comparisons. */
3831 (switch
3832 /* a CMP (-0) -> a CMP 0 */
3833 (if (REAL_VALUE_MINUS_ZERO (TREE_REAL_CST (@1)))
3834 (cmp @0 { build_real (TREE_TYPE (@1), dconst0); }))
3835 /* x != NaN is always true, other ops are always false. */
3836 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
3837 && ! HONOR_SNANS (@1))
3838 { constant_boolean_node (cmp == NE_EXPR, type); })
3839 /* Fold comparisons against infinity. */
3840 (if (REAL_VALUE_ISINF (TREE_REAL_CST (@1))
3841 && MODE_HAS_INFINITIES (TYPE_MODE (TREE_TYPE (@1))))
3842 (with
3843 {
3844 REAL_VALUE_TYPE max;
3845 enum tree_code code = cmp;
3846 bool neg = REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1));
3847 if (neg)
3848 code = swap_tree_comparison (code);
3849 }
3850 (switch
3851 /* x > +Inf is always false, if we ignore NaNs or exceptions. */
3852 (if (code == GT_EXPR
3853 && !(HONOR_NANS (@0) && flag_trapping_math))
3854 { constant_boolean_node (false, type); })
3855 (if (code == LE_EXPR)
3856 /* x <= +Inf is always true, if we don't care about NaNs. */
3857 (if (! HONOR_NANS (@0))
3858 { constant_boolean_node (true, type); }
3859 /* x <= +Inf is the same as x == x, i.e. !isnan(x), but this loses
3860 an "invalid" exception. */
3861 (if (!flag_trapping_math)
3862 (eq @0 @0))))
3863 /* x == +Inf and x >= +Inf are always equal to x > DBL_MAX, but
3864 for == this introduces an exception for x a NaN. */
3865 (if ((code == EQ_EXPR && !(HONOR_NANS (@0) && flag_trapping_math))
3866 || code == GE_EXPR)
3867 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3868 (if (neg)
3869 (lt @0 { build_real (TREE_TYPE (@0), max); })
3870 (gt @0 { build_real (TREE_TYPE (@0), max); }))))
3871 /* x < +Inf is always equal to x <= DBL_MAX. */
3872 (if (code == LT_EXPR)
3873 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3874 (if (neg)
3875 (ge @0 { build_real (TREE_TYPE (@0), max); })
3876 (le @0 { build_real (TREE_TYPE (@0), max); }))))
3877 /* x != +Inf is always equal to !(x > DBL_MAX), but this introduces
3878 an exception for x a NaN so use an unordered comparison. */
3879 (if (code == NE_EXPR)
3880 (with { real_maxval (&max, neg, TYPE_MODE (TREE_TYPE (@0))); }
3881 (if (! HONOR_NANS (@0))
3882 (if (neg)
3883 (ge @0 { build_real (TREE_TYPE (@0), max); })
3884 (le @0 { build_real (TREE_TYPE (@0), max); }))
3885 (if (neg)
3886 (unge @0 { build_real (TREE_TYPE (@0), max); })
3887 (unle @0 { build_real (TREE_TYPE (@0), max); }))))))))))
3888
3889 /* If this is a comparison of a real constant with a PLUS_EXPR
3890 or a MINUS_EXPR of a real constant, we can convert it into a
3891 comparison with a revised real constant as long as no overflow
3892 occurs when unsafe_math_optimizations are enabled. */
3893 (if (flag_unsafe_math_optimizations)
3894 (for op (plus minus)
3895 (simplify
3896 (cmp (op @0 REAL_CST@1) REAL_CST@2)
3897 (with
3898 {
3899 tree tem = const_binop (op == PLUS_EXPR ? MINUS_EXPR : PLUS_EXPR,
3900 TREE_TYPE (@1), @2, @1);
3901 }
3902 (if (tem && !TREE_OVERFLOW (tem))
3903 (cmp @0 { tem; }))))))
3904
3905 /* Likewise, we can simplify a comparison of a real constant with
3906 a MINUS_EXPR whose first operand is also a real constant, i.e.
3907 (c1 - x) < c2 becomes x > c1-c2. Reordering is allowed on
3908 floating-point types only if -fassociative-math is set. */
3909 (if (flag_associative_math)
3910 (simplify
3911 (cmp (minus REAL_CST@0 @1) REAL_CST@2)
3912 (with { tree tem = const_binop (MINUS_EXPR, TREE_TYPE (@1), @0, @2); }
3913 (if (tem && !TREE_OVERFLOW (tem))
3914 (cmp { tem; } @1)))))
3915
3916 /* Fold comparisons against built-in math functions. */
3917 (if (flag_unsafe_math_optimizations && ! flag_errno_math)
3918 (for sq (SQRT)
3919 (simplify
3920 (cmp (sq @0) REAL_CST@1)
3921 (switch
3922 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
3923 (switch
3924 /* sqrt(x) < y is always false, if y is negative. */
3925 (if (cmp == EQ_EXPR || cmp == LT_EXPR || cmp == LE_EXPR)
3926 { constant_boolean_node (false, type); })
3927 /* sqrt(x) > y is always true, if y is negative and we
3928 don't care about NaNs, i.e. negative values of x. */
3929 (if (cmp == NE_EXPR || !HONOR_NANS (@0))
3930 { constant_boolean_node (true, type); })
3931 /* sqrt(x) > y is the same as x >= 0, if y is negative. */
3932 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })))
3933 (if (real_equal (TREE_REAL_CST_PTR (@1), &dconst0))
3934 (switch
3935 /* sqrt(x) < 0 is always false. */
3936 (if (cmp == LT_EXPR)
3937 { constant_boolean_node (false, type); })
3938 /* sqrt(x) >= 0 is always true if we don't care about NaNs. */
3939 (if (cmp == GE_EXPR && !HONOR_NANS (@0))
3940 { constant_boolean_node (true, type); })
3941 /* sqrt(x) <= 0 -> x == 0. */
3942 (if (cmp == LE_EXPR)
3943 (eq @0 @1))
3944 /* Otherwise sqrt(x) cmp 0 -> x cmp 0. Here cmp can be >=, >,
3945 == or !=. In the last case:
3946
3947 (sqrt(x) != 0) == (NaN != 0) == true == (x != 0)
3948
3949 if x is negative or NaN. Due to -funsafe-math-optimizations,
3950 the results for other x follow from natural arithmetic. */
3951 (cmp @0 @1)))
3952 (if ((cmp == LT_EXPR
3953 || cmp == LE_EXPR
3954 || cmp == GT_EXPR
3955 || cmp == GE_EXPR)
3956 && !REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
3957 /* Give up for -frounding-math. */
3958 && !HONOR_SIGN_DEPENDENT_ROUNDING (TREE_TYPE (@0)))
3959 (with
3960 {
3961 REAL_VALUE_TYPE c2;
3962 enum tree_code ncmp = cmp;
3963 const real_format *fmt
3964 = REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@0)));
3965 real_arithmetic (&c2, MULT_EXPR,
3966 &TREE_REAL_CST (@1), &TREE_REAL_CST (@1));
3967 real_convert (&c2, fmt, &c2);
3968 /* See PR91734: if c2 is inexact and sqrt(c2) < c (or sqrt(c2) >= c),
3969 then change LT_EXPR into LE_EXPR or GE_EXPR into GT_EXPR. */
3970 if (!REAL_VALUE_ISINF (c2))
3971 {
3972 tree c3 = fold_const_call (CFN_SQRT, TREE_TYPE (@0),
3973 build_real (TREE_TYPE (@0), c2));
3974 if (c3 == NULL_TREE || TREE_CODE (c3) != REAL_CST)
3975 ncmp = ERROR_MARK;
3976 else if ((cmp == LT_EXPR || cmp == GE_EXPR)
3977 && real_less (&TREE_REAL_CST (c3), &TREE_REAL_CST (@1)))
3978 ncmp = cmp == LT_EXPR ? LE_EXPR : GT_EXPR;
3979 else if ((cmp == LE_EXPR || cmp == GT_EXPR)
3980 && real_less (&TREE_REAL_CST (@1), &TREE_REAL_CST (c3)))
3981 ncmp = cmp == LE_EXPR ? LT_EXPR : GE_EXPR;
3982 else
3983 {
3984 /* With rounding to even, sqrt of up to 3 different values
3985 gives the same normal result, so in some cases c2 needs
3986 to be adjusted. */
3987 REAL_VALUE_TYPE c2alt, tow;
3988 if (cmp == LT_EXPR || cmp == GE_EXPR)
3989 tow = dconst0;
3990 else
3991 real_inf (&tow);
3992 real_nextafter (&c2alt, fmt, &c2, &tow);
3993 real_convert (&c2alt, fmt, &c2alt);
3994 if (REAL_VALUE_ISINF (c2alt))
3995 ncmp = ERROR_MARK;
3996 else
3997 {
3998 c3 = fold_const_call (CFN_SQRT, TREE_TYPE (@0),
3999 build_real (TREE_TYPE (@0), c2alt));
4000 if (c3 == NULL_TREE || TREE_CODE (c3) != REAL_CST)
4001 ncmp = ERROR_MARK;
4002 else if (real_equal (&TREE_REAL_CST (c3),
4003 &TREE_REAL_CST (@1)))
4004 c2 = c2alt;
4005 }
4006 }
4007 }
4008 }
4009 (if (cmp == GT_EXPR || cmp == GE_EXPR)
4010 (if (REAL_VALUE_ISINF (c2))
4011 /* sqrt(x) > y is x == +Inf, when y is very large. */
4012 (if (HONOR_INFINITIES (@0))
4013 (eq @0 { build_real (TREE_TYPE (@0), c2); })
4014 { constant_boolean_node (false, type); })
4015 /* sqrt(x) > c is the same as x > c*c. */
4016 (if (ncmp != ERROR_MARK)
4017 (if (ncmp == GE_EXPR)
4018 (ge @0 { build_real (TREE_TYPE (@0), c2); })
4019 (gt @0 { build_real (TREE_TYPE (@0), c2); }))))
4020 /* else if (cmp == LT_EXPR || cmp == LE_EXPR) */
4021 (if (REAL_VALUE_ISINF (c2))
4022 (switch
4023 /* sqrt(x) < y is always true, when y is a very large
4024 value and we don't care about NaNs or Infinities. */
4025 (if (! HONOR_NANS (@0) && ! HONOR_INFINITIES (@0))
4026 { constant_boolean_node (true, type); })
4027 /* sqrt(x) < y is x != +Inf when y is very large and we
4028 don't care about NaNs. */
4029 (if (! HONOR_NANS (@0))
4030 (ne @0 { build_real (TREE_TYPE (@0), c2); }))
4031 /* sqrt(x) < y is x >= 0 when y is very large and we
4032 don't care about Infinities. */
4033 (if (! HONOR_INFINITIES (@0))
4034 (ge @0 { build_real (TREE_TYPE (@0), dconst0); }))
4035 /* sqrt(x) < y is x >= 0 && x != +Inf, when y is large. */
4036 (if (GENERIC)
4037 (truth_andif
4038 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
4039 (ne @0 { build_real (TREE_TYPE (@0), c2); }))))
4040 /* sqrt(x) < c is the same as x < c*c, if we ignore NaNs. */
4041 (if (ncmp != ERROR_MARK && ! HONOR_NANS (@0))
4042 (if (ncmp == LT_EXPR)
4043 (lt @0 { build_real (TREE_TYPE (@0), c2); })
4044 (le @0 { build_real (TREE_TYPE (@0), c2); }))
4045 /* sqrt(x) < c is the same as x >= 0 && x < c*c. */
4046 (if (ncmp != ERROR_MARK && GENERIC)
4047 (if (ncmp == LT_EXPR)
4048 (truth_andif
4049 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
4050 (lt @0 { build_real (TREE_TYPE (@0), c2); }))
4051 (truth_andif
4052 (ge @0 { build_real (TREE_TYPE (@0), dconst0); })
4053 (le @0 { build_real (TREE_TYPE (@0), c2); })))))))))))
4054 /* Transform sqrt(x) cmp sqrt(y) -> x cmp y. */
4055 (simplify
4056 (cmp (sq @0) (sq @1))
4057 (if (! HONOR_NANS (@0))
4058 (cmp @0 @1))))))
4059
4060 /* Optimize various special cases of (FTYPE) N CMP (FTYPE) M. */
4061 (for cmp (lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
4062 icmp (lt le eq ne ge gt unordered ordered lt le gt ge eq ne)
4063 (simplify
4064 (cmp (float@0 @1) (float @2))
4065 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@0))
4066 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0)))
4067 (with
4068 {
4069 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@0))));
4070 tree type1 = TREE_TYPE (@1);
4071 bool type1_signed_p = TYPE_SIGN (type1) == SIGNED;
4072 tree type2 = TREE_TYPE (@2);
4073 bool type2_signed_p = TYPE_SIGN (type2) == SIGNED;
4074 }
4075 (if (fmt.can_represent_integral_type_p (type1)
4076 && fmt.can_represent_integral_type_p (type2))
4077 (if (cmp == ORDERED_EXPR || cmp == UNORDERED_EXPR)
4078 { constant_boolean_node (cmp == ORDERED_EXPR, type); }
4079 (if (TYPE_PRECISION (type1) > TYPE_PRECISION (type2)
4080 && type1_signed_p >= type2_signed_p)
4081 (icmp @1 (convert @2))
4082 (if (TYPE_PRECISION (type1) < TYPE_PRECISION (type2)
4083 && type1_signed_p <= type2_signed_p)
4084 (icmp (convert:type2 @1) @2)
4085 (if (TYPE_PRECISION (type1) == TYPE_PRECISION (type2)
4086 && type1_signed_p == type2_signed_p)
4087 (icmp @1 @2))))))))))
4088
4089 /* Optimize various special cases of (FTYPE) N CMP CST. */
4090 (for cmp (lt le eq ne ge gt)
4091 icmp (le le eq ne ge ge)
4092 (simplify
4093 (cmp (float @0) REAL_CST@1)
4094 (if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (@1))
4095 && ! DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@1)))
4096 (with
4097 {
4098 tree itype = TREE_TYPE (@0);
4099 format_helper fmt (REAL_MODE_FORMAT (TYPE_MODE (TREE_TYPE (@1))));
4100 const REAL_VALUE_TYPE *cst = TREE_REAL_CST_PTR (@1);
4101 /* Be careful to preserve any potential exceptions due to
4102 NaNs. qNaNs are ok in == or != context.
4103 TODO: relax under -fno-trapping-math or
4104 -fno-signaling-nans. */
4105 bool exception_p
4106 = real_isnan (cst) && (cst->signalling
4107 || (cmp != EQ_EXPR && cmp != NE_EXPR));
4108 }
4109 /* TODO: allow non-fitting itype and SNaNs when
4110 -fno-trapping-math. */
4111 (if (fmt.can_represent_integral_type_p (itype) && ! exception_p)
4112 (with
4113 {
4114 signop isign = TYPE_SIGN (itype);
4115 REAL_VALUE_TYPE imin, imax;
4116 real_from_integer (&imin, fmt, wi::min_value (itype), isign);
4117 real_from_integer (&imax, fmt, wi::max_value (itype), isign);
4118
4119 REAL_VALUE_TYPE icst;
4120 if (cmp == GT_EXPR || cmp == GE_EXPR)
4121 real_ceil (&icst, fmt, cst);
4122 else if (cmp == LT_EXPR || cmp == LE_EXPR)
4123 real_floor (&icst, fmt, cst);
4124 else
4125 real_trunc (&icst, fmt, cst);
4126
4127 bool cst_int_p = !real_isnan (cst) && real_identical (&icst, cst);
4128
4129 bool overflow_p = false;
4130 wide_int icst_val
4131 = real_to_integer (&icst, &overflow_p, TYPE_PRECISION (itype));
4132 }
4133 (switch
4134 /* Optimize cases when CST is outside of ITYPE's range. */
4135 (if (real_compare (LT_EXPR, cst, &imin))
4136 { constant_boolean_node (cmp == GT_EXPR || cmp == GE_EXPR || cmp == NE_EXPR,
4137 type); })
4138 (if (real_compare (GT_EXPR, cst, &imax))
4139 { constant_boolean_node (cmp == LT_EXPR || cmp == LE_EXPR || cmp == NE_EXPR,
4140 type); })
4141 /* Remove cast if CST is an integer representable by ITYPE. */
4142 (if (cst_int_p)
4143 (cmp @0 { gcc_assert (!overflow_p);
4144 wide_int_to_tree (itype, icst_val); })
4145 )
4146 /* When CST is fractional, optimize
4147 (FTYPE) N == CST -> 0
4148 (FTYPE) N != CST -> 1. */
4149 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
4150 { constant_boolean_node (cmp == NE_EXPR, type); })
4151 /* Otherwise replace with sensible integer constant. */
4152 (with
4153 {
4154 gcc_checking_assert (!overflow_p);
4155 }
4156 (icmp @0 { wide_int_to_tree (itype, icst_val); })))))))))
4157
4158 /* Fold A /[ex] B CMP C to A CMP B * C. */
4159 (for cmp (eq ne)
4160 (simplify
4161 (cmp (exact_div @0 @1) INTEGER_CST@2)
4162 (if (!integer_zerop (@1))
4163 (if (wi::to_wide (@2) == 0)
4164 (cmp @0 @2)
4165 (if (TREE_CODE (@1) == INTEGER_CST)
4166 (with
4167 {
4168 wi::overflow_type ovf;
4169 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
4170 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
4171 }
4172 (if (ovf)
4173 { constant_boolean_node (cmp == NE_EXPR, type); }
4174 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))))
4175 (for cmp (lt le gt ge)
4176 (simplify
4177 (cmp (exact_div @0 INTEGER_CST@1) INTEGER_CST@2)
4178 (if (wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
4179 (with
4180 {
4181 wi::overflow_type ovf;
4182 wide_int prod = wi::mul (wi::to_wide (@2), wi::to_wide (@1),
4183 TYPE_SIGN (TREE_TYPE (@1)), &ovf);
4184 }
4185 (if (ovf)
4186 { constant_boolean_node (wi::lt_p (wi::to_wide (@2), 0,
4187 TYPE_SIGN (TREE_TYPE (@2)))
4188 != (cmp == LT_EXPR || cmp == LE_EXPR), type); }
4189 (cmp @0 { wide_int_to_tree (TREE_TYPE (@0), prod); }))))))
4190
4191 /* Fold (size_t)(A /[ex] B) CMP C to (size_t)A CMP (size_t)B * C or A CMP' 0.
4192
4193 For small C (less than max/B), this is (size_t)A CMP (size_t)B * C.
4194 For large C (more than min/B+2^size), this is also true, with the
4195 multiplication computed modulo 2^size.
4196 For intermediate C, this just tests the sign of A. */
4197 (for cmp (lt le gt ge)
4198 cmp2 (ge ge lt lt)
4199 (simplify
4200 (cmp (convert (exact_div @0 INTEGER_CST@1)) INTEGER_CST@2)
4201 (if (tree_nop_conversion_p (TREE_TYPE (@0), TREE_TYPE (@2))
4202 && TYPE_UNSIGNED (TREE_TYPE (@2)) && !TYPE_UNSIGNED (TREE_TYPE (@0))
4203 && wi::gt_p (wi::to_wide (@1), 0, TYPE_SIGN (TREE_TYPE (@1))))
4204 (with
4205 {
4206 tree utype = TREE_TYPE (@2);
4207 wide_int denom = wi::to_wide (@1);
4208 wide_int right = wi::to_wide (@2);
4209 wide_int smax = wi::sdiv_trunc (wi::max_value (TREE_TYPE (@0)), denom);
4210 wide_int smin = wi::sdiv_trunc (wi::min_value (TREE_TYPE (@0)), denom);
4211 bool small = wi::leu_p (right, smax);
4212 bool large = wi::geu_p (right, smin);
4213 }
4214 (if (small || large)
4215 (cmp (convert:utype @0) (mult @2 (convert @1)))
4216 (cmp2 @0 { build_zero_cst (TREE_TYPE (@0)); }))))))
4217
4218 /* Unordered tests if either argument is a NaN. */
4219 (simplify
4220 (bit_ior (unordered @0 @0) (unordered @1 @1))
4221 (if (types_match (@0, @1))
4222 (unordered @0 @1)))
4223 (simplify
4224 (bit_and (ordered @0 @0) (ordered @1 @1))
4225 (if (types_match (@0, @1))
4226 (ordered @0 @1)))
4227 (simplify
4228 (bit_ior:c (unordered @0 @0) (unordered:c@2 @0 @1))
4229 @2)
4230 (simplify
4231 (bit_and:c (ordered @0 @0) (ordered:c@2 @0 @1))
4232 @2)
4233
4234 /* Simple range test simplifications. */
4235 /* A < B || A >= B -> true. */
4236 (for test1 (lt le le le ne ge)
4237 test2 (ge gt ge ne eq ne)
4238 (simplify
4239 (bit_ior:c (test1 @0 @1) (test2 @0 @1))
4240 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4241 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
4242 { constant_boolean_node (true, type); })))
4243 /* A < B && A >= B -> false. */
4244 (for test1 (lt lt lt le ne eq)
4245 test2 (ge gt eq gt eq gt)
4246 (simplify
4247 (bit_and:c (test1 @0 @1) (test2 @0 @1))
4248 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4249 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@0)))
4250 { constant_boolean_node (false, type); })))
4251
4252 /* A & (2**N - 1) <= 2**K - 1 -> A & (2**N - 2**K) == 0
4253 A & (2**N - 1) > 2**K - 1 -> A & (2**N - 2**K) != 0
4254
4255 Note that comparisons
4256 A & (2**N - 1) < 2**K -> A & (2**N - 2**K) == 0
4257 A & (2**N - 1) >= 2**K -> A & (2**N - 2**K) != 0
4258 will be canonicalized to above so there's no need to
4259 consider them here.
4260 */
4261
4262 (for cmp (le gt)
4263 eqcmp (eq ne)
4264 (simplify
4265 (cmp (bit_and@0 @1 INTEGER_CST@2) INTEGER_CST@3)
4266 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0)))
4267 (with
4268 {
4269 tree ty = TREE_TYPE (@0);
4270 unsigned prec = TYPE_PRECISION (ty);
4271 wide_int mask = wi::to_wide (@2, prec);
4272 wide_int rhs = wi::to_wide (@3, prec);
4273 signop sgn = TYPE_SIGN (ty);
4274 }
4275 (if ((mask & (mask + 1)) == 0 && wi::gt_p (rhs, 0, sgn)
4276 && (rhs & (rhs + 1)) == 0 && wi::ge_p (mask, rhs, sgn))
4277 (eqcmp (bit_and @1 { wide_int_to_tree (ty, mask - rhs); })
4278 { build_zero_cst (ty); }))))))
4279
4280 /* -A CMP -B -> B CMP A. */
4281 (for cmp (tcc_comparison)
4282 scmp (swapped_tcc_comparison)
4283 (simplify
4284 (cmp (negate @0) (negate @1))
4285 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
4286 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
4287 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
4288 (scmp @0 @1)))
4289 (simplify
4290 (cmp (negate @0) CONSTANT_CLASS_P@1)
4291 (if (FLOAT_TYPE_P (TREE_TYPE (@0))
4292 || (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
4293 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))))
4294 (with { tree tem = const_unop (NEGATE_EXPR, TREE_TYPE (@0), @1); }
4295 (if (tem && !TREE_OVERFLOW (tem))
4296 (scmp @0 { tem; }))))))
4297
4298 /* Convert ABS_EXPR<x> == 0 or ABS_EXPR<x> != 0 to x == 0 or x != 0. */
4299 (for op (eq ne)
4300 (simplify
4301 (op (abs @0) zerop@1)
4302 (op @0 @1)))
4303
4304 /* From fold_sign_changed_comparison and fold_widened_comparison.
4305 FIXME: the lack of symmetry is disturbing. */
4306 (for cmp (simple_comparison)
4307 (simplify
4308 (cmp (convert@0 @00) (convert?@1 @10))
4309 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4310 /* Disable this optimization if we're casting a function pointer
4311 type on targets that require function pointer canonicalization. */
4312 && !(targetm.have_canonicalize_funcptr_for_compare ()
4313 && ((POINTER_TYPE_P (TREE_TYPE (@00))
4314 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@00))))
4315 || (POINTER_TYPE_P (TREE_TYPE (@10))
4316 && FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@10))))))
4317 && single_use (@0))
4318 (if (TYPE_PRECISION (TREE_TYPE (@00)) == TYPE_PRECISION (TREE_TYPE (@0))
4319 && (TREE_CODE (@10) == INTEGER_CST
4320 || @1 != @10)
4321 && (TYPE_UNSIGNED (TREE_TYPE (@00)) == TYPE_UNSIGNED (TREE_TYPE (@0))
4322 || cmp == NE_EXPR
4323 || cmp == EQ_EXPR)
4324 && !POINTER_TYPE_P (TREE_TYPE (@00)))
4325 /* ??? The special-casing of INTEGER_CST conversion was in the original
4326 code and here to avoid a spurious overflow flag on the resulting
4327 constant which fold_convert produces. */
4328 (if (TREE_CODE (@1) == INTEGER_CST)
4329 (cmp @00 { force_fit_type (TREE_TYPE (@00), wi::to_widest (@1), 0,
4330 TREE_OVERFLOW (@1)); })
4331 (cmp @00 (convert @1)))
4332
4333 (if (TYPE_PRECISION (TREE_TYPE (@0)) > TYPE_PRECISION (TREE_TYPE (@00)))
4334 /* If possible, express the comparison in the shorter mode. */
4335 (if ((cmp == EQ_EXPR || cmp == NE_EXPR
4336 || TYPE_UNSIGNED (TREE_TYPE (@0)) == TYPE_UNSIGNED (TREE_TYPE (@00))
4337 || (!TYPE_UNSIGNED (TREE_TYPE (@0))
4338 && TYPE_UNSIGNED (TREE_TYPE (@00))))
4339 && (types_match (TREE_TYPE (@10), TREE_TYPE (@00))
4340 || ((TYPE_PRECISION (TREE_TYPE (@00))
4341 >= TYPE_PRECISION (TREE_TYPE (@10)))
4342 && (TYPE_UNSIGNED (TREE_TYPE (@00))
4343 == TYPE_UNSIGNED (TREE_TYPE (@10))))
4344 || (TREE_CODE (@10) == INTEGER_CST
4345 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
4346 && int_fits_type_p (@10, TREE_TYPE (@00)))))
4347 (cmp @00 (convert @10))
4348 (if (TREE_CODE (@10) == INTEGER_CST
4349 && INTEGRAL_TYPE_P (TREE_TYPE (@00))
4350 && !int_fits_type_p (@10, TREE_TYPE (@00)))
4351 (with
4352 {
4353 tree min = lower_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
4354 tree max = upper_bound_in_type (TREE_TYPE (@10), TREE_TYPE (@00));
4355 bool above = integer_nonzerop (const_binop (LT_EXPR, type, max, @10));
4356 bool below = integer_nonzerop (const_binop (LT_EXPR, type, @10, min));
4357 }
4358 (if (above || below)
4359 (if (cmp == EQ_EXPR || cmp == NE_EXPR)
4360 { constant_boolean_node (cmp == EQ_EXPR ? false : true, type); }
4361 (if (cmp == LT_EXPR || cmp == LE_EXPR)
4362 { constant_boolean_node (above ? true : false, type); }
4363 (if (cmp == GT_EXPR || cmp == GE_EXPR)
4364 { constant_boolean_node (above ? false : true, type); }))))))))))))
4365
4366 (for cmp (eq ne)
4367 (simplify
4368 /* SSA names are canonicalized to 2nd place. */
4369 (cmp addr@0 SSA_NAME@1)
4370 (with
4371 { poly_int64 off; tree base; }
4372 /* A local variable can never be pointed to by
4373 the default SSA name of an incoming parameter. */
4374 (if (SSA_NAME_IS_DEFAULT_DEF (@1)
4375 && TREE_CODE (SSA_NAME_VAR (@1)) == PARM_DECL
4376 && (base = get_base_address (TREE_OPERAND (@0, 0)))
4377 && TREE_CODE (base) == VAR_DECL
4378 && auto_var_in_fn_p (base, current_function_decl))
4379 (if (cmp == NE_EXPR)
4380 { constant_boolean_node (true, type); }
4381 { constant_boolean_node (false, type); })
4382 /* If the address is based on @1 decide using the offset. */
4383 (if ((base = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off))
4384 && TREE_CODE (base) == MEM_REF
4385 && TREE_OPERAND (base, 0) == @1)
4386 (with { off += mem_ref_offset (base).force_shwi (); }
4387 (if (known_ne (off, 0))
4388 { constant_boolean_node (cmp == NE_EXPR, type); }
4389 (if (known_eq (off, 0))
4390 { constant_boolean_node (cmp == EQ_EXPR, type); }))))))))
4391
4392 /* Equality compare simplifications from fold_binary */
4393 (for cmp (eq ne)
4394
4395 /* If we have (A | C) == D where C & ~D != 0, convert this into 0.
4396 Similarly for NE_EXPR. */
4397 (simplify
4398 (cmp (convert?@3 (bit_ior @0 INTEGER_CST@1)) INTEGER_CST@2)
4399 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0))
4400 && wi::bit_and_not (wi::to_wide (@1), wi::to_wide (@2)) != 0)
4401 { constant_boolean_node (cmp == NE_EXPR, type); }))
4402
4403 /* (X ^ Y) == 0 becomes X == Y, and (X ^ Y) != 0 becomes X != Y. */
4404 (simplify
4405 (cmp (bit_xor @0 @1) integer_zerop)
4406 (cmp @0 @1))
4407
4408 /* (X ^ Y) == Y becomes X == 0.
4409 Likewise (X ^ Y) == X becomes Y == 0. */
4410 (simplify
4411 (cmp:c (bit_xor:c @0 @1) @0)
4412 (cmp @1 { build_zero_cst (TREE_TYPE (@1)); }))
4413
4414 /* (X ^ C1) op C2 can be rewritten as X op (C1 ^ C2). */
4415 (simplify
4416 (cmp (convert?@3 (bit_xor @0 INTEGER_CST@1)) INTEGER_CST@2)
4417 (if (tree_nop_conversion_p (TREE_TYPE (@3), TREE_TYPE (@0)))
4418 (cmp @0 (bit_xor @1 (convert @2)))))
4419
4420 (simplify
4421 (cmp (convert? addr@0) integer_zerop)
4422 (if (tree_single_nonzero_warnv_p (@0, NULL))
4423 { constant_boolean_node (cmp == NE_EXPR, type); }))
4424
4425 /* (X & C) op (Y & C) into (X ^ Y) & C op 0. */
4426 (simplify
4427 (cmp (bit_and:cs @0 @2) (bit_and:cs @1 @2))
4428 (cmp (bit_and (bit_xor @0 @1) @2) { build_zero_cst (TREE_TYPE (@2)); })))
4429
4430 /* (X < 0) != (Y < 0) into (X ^ Y) < 0.
4431 (X >= 0) != (Y >= 0) into (X ^ Y) < 0.
4432 (X < 0) == (Y < 0) into (X ^ Y) >= 0.
4433 (X >= 0) == (Y >= 0) into (X ^ Y) >= 0. */
4434 (for cmp (eq ne)
4435 ncmp (ge lt)
4436 (for sgncmp (ge lt)
4437 (simplify
4438 (cmp (sgncmp @0 integer_zerop@2) (sgncmp @1 integer_zerop))
4439 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
4440 && !TYPE_UNSIGNED (TREE_TYPE (@0))
4441 && types_match (@0, @1))
4442 (ncmp (bit_xor @0 @1) @2)))))
4443 /* (X < 0) == (Y >= 0) into (X ^ Y) < 0.
4444 (X < 0) != (Y >= 0) into (X ^ Y) >= 0. */
4445 (for cmp (eq ne)
4446 ncmp (lt ge)
4447 (simplify
4448 (cmp:c (lt @0 integer_zerop@2) (ge @1 integer_zerop))
4449 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
4450 && !TYPE_UNSIGNED (TREE_TYPE (@0))
4451 && types_match (@0, @1))
4452 (ncmp (bit_xor @0 @1) @2))))
4453
4454 /* If we have (A & C) == C where C is a power of 2, convert this into
4455 (A & C) != 0. Similarly for NE_EXPR. */
4456 (for cmp (eq ne)
4457 icmp (ne eq)
4458 (simplify
4459 (cmp (bit_and@2 @0 integer_pow2p@1) @1)
4460 (icmp @2 { build_zero_cst (TREE_TYPE (@0)); })))
4461
4462 /* If we have (A & C) != 0 ? D : 0 where C and D are powers of 2,
4463 convert this into a shift followed by ANDing with D. */
4464 (simplify
4465 (cond
4466 (ne (bit_and @0 integer_pow2p@1) integer_zerop)
4467 INTEGER_CST@2 integer_zerop)
4468 (if (integer_pow2p (@2))
4469 (with {
4470 int shift = (wi::exact_log2 (wi::to_wide (@2))
4471 - wi::exact_log2 (wi::to_wide (@1)));
4472 }
4473 (if (shift > 0)
4474 (bit_and
4475 (lshift (convert @0) { build_int_cst (integer_type_node, shift); }) @2)
4476 (bit_and
4477 (convert (rshift @0 { build_int_cst (integer_type_node, -shift); }))
4478 @2)))))
4479
4480 /* If we have (A & C) != 0 where C is the sign bit of A, convert
4481 this into A < 0. Similarly for (A & C) == 0 into A >= 0. */
4482 (for cmp (eq ne)
4483 ncmp (ge lt)
4484 (simplify
4485 (cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
4486 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4487 && type_has_mode_precision_p (TREE_TYPE (@0))
4488 && element_precision (@2) >= element_precision (@0)
4489 && wi::only_sign_bit_p (wi::to_wide (@1), element_precision (@0)))
4490 (with { tree stype = signed_type_for (TREE_TYPE (@0)); }
4491 (ncmp (convert:stype @0) { build_zero_cst (stype); })))))
4492
4493 /* If we have A < 0 ? C : 0 where C is a power of 2, convert
4494 this into a right shift or sign extension followed by ANDing with C. */
4495 (simplify
4496 (cond
4497 (lt @0 integer_zerop)
4498 INTEGER_CST@1 integer_zerop)
4499 (if (integer_pow2p (@1)
4500 && !TYPE_UNSIGNED (TREE_TYPE (@0)))
4501 (with {
4502 int shift = element_precision (@0) - wi::exact_log2 (wi::to_wide (@1)) - 1;
4503 }
4504 (if (shift >= 0)
4505 (bit_and
4506 (convert (rshift @0 { build_int_cst (integer_type_node, shift); }))
4507 @1)
4508 /* Otherwise ctype must be wider than TREE_TYPE (@0) and pure
4509 sign extension followed by AND with C will achieve the effect. */
4510 (bit_and (convert @0) @1)))))
4511
4512 /* When the addresses are not directly of decls compare base and offset.
4513 This implements some remaining parts of fold_comparison address
4514 comparisons but still no complete part of it. Still it is good
4515 enough to make fold_stmt not regress when not dispatching to fold_binary. */
4516 (for cmp (simple_comparison)
4517 (simplify
4518 (cmp (convert1?@2 addr@0) (convert2? addr@1))
4519 (with
4520 {
4521 poly_int64 off0, off1;
4522 tree base0 = get_addr_base_and_unit_offset (TREE_OPERAND (@0, 0), &off0);
4523 tree base1 = get_addr_base_and_unit_offset (TREE_OPERAND (@1, 0), &off1);
4524 if (base0 && TREE_CODE (base0) == MEM_REF)
4525 {
4526 off0 += mem_ref_offset (base0).force_shwi ();
4527 base0 = TREE_OPERAND (base0, 0);
4528 }
4529 if (base1 && TREE_CODE (base1) == MEM_REF)
4530 {
4531 off1 += mem_ref_offset (base1).force_shwi ();
4532 base1 = TREE_OPERAND (base1, 0);
4533 }
4534 }
4535 (if (base0 && base1)
4536 (with
4537 {
4538 int equal = 2;
4539 /* Punt in GENERIC on variables with value expressions;
4540 the value expressions might point to fields/elements
4541 of other vars etc. */
4542 if (GENERIC
4543 && ((VAR_P (base0) && DECL_HAS_VALUE_EXPR_P (base0))
4544 || (VAR_P (base1) && DECL_HAS_VALUE_EXPR_P (base1))))
4545 ;
4546 else if (decl_in_symtab_p (base0)
4547 && decl_in_symtab_p (base1))
4548 equal = symtab_node::get_create (base0)
4549 ->equal_address_to (symtab_node::get_create (base1));
4550 else if ((DECL_P (base0)
4551 || TREE_CODE (base0) == SSA_NAME
4552 || TREE_CODE (base0) == STRING_CST)
4553 && (DECL_P (base1)
4554 || TREE_CODE (base1) == SSA_NAME
4555 || TREE_CODE (base1) == STRING_CST))
4556 equal = (base0 == base1);
4557 if (equal == 0)
4558 {
4559 HOST_WIDE_INT ioff0 = -1, ioff1 = -1;
4560 off0.is_constant (&ioff0);
4561 off1.is_constant (&ioff1);
4562 if ((DECL_P (base0) && TREE_CODE (base1) == STRING_CST)
4563 || (TREE_CODE (base0) == STRING_CST && DECL_P (base1))
4564 || (TREE_CODE (base0) == STRING_CST
4565 && TREE_CODE (base1) == STRING_CST
4566 && ioff0 >= 0 && ioff1 >= 0
4567 && ioff0 < TREE_STRING_LENGTH (base0)
4568 && ioff1 < TREE_STRING_LENGTH (base1)
4569 /* This is a too conservative test that the STRING_CSTs
4570 will not end up being string-merged. */
4571 && strncmp (TREE_STRING_POINTER (base0) + ioff0,
4572 TREE_STRING_POINTER (base1) + ioff1,
4573 MIN (TREE_STRING_LENGTH (base0) - ioff0,
4574 TREE_STRING_LENGTH (base1) - ioff1)) != 0))
4575 ;
4576 else if (!DECL_P (base0) || !DECL_P (base1))
4577 equal = 2;
4578 else if (cmp != EQ_EXPR && cmp != NE_EXPR)
4579 equal = 2;
4580 /* If this is a pointer comparison, ignore for now even
4581 valid equalities where one pointer is the offset zero
4582 of one object and the other to one past end of another one. */
4583 else if (!INTEGRAL_TYPE_P (TREE_TYPE (@2)))
4584 ;
4585 /* Assume that automatic variables can't be adjacent to global
4586 variables. */
4587 else if (is_global_var (base0) != is_global_var (base1))
4588 ;
4589 else
4590 {
4591 tree sz0 = DECL_SIZE_UNIT (base0);
4592 tree sz1 = DECL_SIZE_UNIT (base1);
4593 /* If sizes are unknown, e.g. VLA or not representable,
4594 punt. */
4595 if (!tree_fits_poly_int64_p (sz0)
4596 || !tree_fits_poly_int64_p (sz1))
4597 equal = 2;
4598 else
4599 {
4600 poly_int64 size0 = tree_to_poly_int64 (sz0);
4601 poly_int64 size1 = tree_to_poly_int64 (sz1);
4602 /* If one offset is pointing (or could be) to the beginning
4603 of one object and the other is pointing to one past the
4604 last byte of the other object, punt. */
4605 if (maybe_eq (off0, 0) && maybe_eq (off1, size1))
4606 equal = 2;
4607 else if (maybe_eq (off1, 0) && maybe_eq (off0, size0))
4608 equal = 2;
4609 /* If both offsets are the same, there are some cases
4610 we know that are ok. Either if we know they aren't
4611 zero, or if we know both sizes are no zero. */
4612 if (equal == 2
4613 && known_eq (off0, off1)
4614 && (known_ne (off0, 0)
4615 || (known_ne (size0, 0) && known_ne (size1, 0))))
4616 equal = 0;
4617 }
4618 }
4619 }
4620 }
4621 (if (equal == 1
4622 && (cmp == EQ_EXPR || cmp == NE_EXPR
4623 /* If the offsets are equal we can ignore overflow. */
4624 || known_eq (off0, off1)
4625 || TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
4626 /* Or if we compare using pointers to decls or strings. */
4627 || (POINTER_TYPE_P (TREE_TYPE (@2))
4628 && (DECL_P (base0) || TREE_CODE (base0) == STRING_CST))))
4629 (switch
4630 (if (cmp == EQ_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
4631 { constant_boolean_node (known_eq (off0, off1), type); })
4632 (if (cmp == NE_EXPR && (known_eq (off0, off1) || known_ne (off0, off1)))
4633 { constant_boolean_node (known_ne (off0, off1), type); })
4634 (if (cmp == LT_EXPR && (known_lt (off0, off1) || known_ge (off0, off1)))
4635 { constant_boolean_node (known_lt (off0, off1), type); })
4636 (if (cmp == LE_EXPR && (known_le (off0, off1) || known_gt (off0, off1)))
4637 { constant_boolean_node (known_le (off0, off1), type); })
4638 (if (cmp == GE_EXPR && (known_ge (off0, off1) || known_lt (off0, off1)))
4639 { constant_boolean_node (known_ge (off0, off1), type); })
4640 (if (cmp == GT_EXPR && (known_gt (off0, off1) || known_le (off0, off1)))
4641 { constant_boolean_node (known_gt (off0, off1), type); }))
4642 (if (equal == 0)
4643 (switch
4644 (if (cmp == EQ_EXPR)
4645 { constant_boolean_node (false, type); })
4646 (if (cmp == NE_EXPR)
4647 { constant_boolean_node (true, type); })))))))))
4648
4649 /* Simplify pointer equality compares using PTA. */
4650 (for neeq (ne eq)
4651 (simplify
4652 (neeq @0 @1)
4653 (if (POINTER_TYPE_P (TREE_TYPE (@0))
4654 && ptrs_compare_unequal (@0, @1))
4655 { constant_boolean_node (neeq != EQ_EXPR, type); })))
4656
4657 /* PR70920: Transform (intptr_t)x eq/ne CST to x eq/ne (typeof x) CST.
4658 and (typeof ptr_cst) x eq/ne ptr_cst to x eq/ne (typeof x) CST.
4659 Disable the transform if either operand is pointer to function.
4660 This broke pr22051-2.c for arm where function pointer
4661 canonicalizaion is not wanted. */
4662
4663 (for cmp (ne eq)
4664 (simplify
4665 (cmp (convert @0) INTEGER_CST@1)
4666 (if (((POINTER_TYPE_P (TREE_TYPE (@0))
4667 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@0)))
4668 && INTEGRAL_TYPE_P (TREE_TYPE (@1)))
4669 || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4670 && POINTER_TYPE_P (TREE_TYPE (@1))
4671 && !FUNC_OR_METHOD_TYPE_P (TREE_TYPE (TREE_TYPE (@1)))))
4672 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
4673 (cmp @0 (convert @1)))))
4674
4675 /* Non-equality compare simplifications from fold_binary */
4676 (for cmp (lt gt le ge)
4677 /* Comparisons with the highest or lowest possible integer of
4678 the specified precision will have known values. */
4679 (simplify
4680 (cmp (convert?@2 @0) uniform_integer_cst_p@1)
4681 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@1))
4682 || POINTER_TYPE_P (TREE_TYPE (@1))
4683 || VECTOR_INTEGER_TYPE_P (TREE_TYPE (@1)))
4684 && tree_nop_conversion_p (TREE_TYPE (@2), TREE_TYPE (@0)))
4685 (with
4686 {
4687 tree cst = uniform_integer_cst_p (@1);
4688 tree arg1_type = TREE_TYPE (cst);
4689 unsigned int prec = TYPE_PRECISION (arg1_type);
4690 wide_int max = wi::max_value (arg1_type);
4691 wide_int signed_max = wi::max_value (prec, SIGNED);
4692 wide_int min = wi::min_value (arg1_type);
4693 }
4694 (switch
4695 (if (wi::to_wide (cst) == max)
4696 (switch
4697 (if (cmp == GT_EXPR)
4698 { constant_boolean_node (false, type); })
4699 (if (cmp == GE_EXPR)
4700 (eq @2 @1))
4701 (if (cmp == LE_EXPR)
4702 { constant_boolean_node (true, type); })
4703 (if (cmp == LT_EXPR)
4704 (ne @2 @1))))
4705 (if (wi::to_wide (cst) == min)
4706 (switch
4707 (if (cmp == LT_EXPR)
4708 { constant_boolean_node (false, type); })
4709 (if (cmp == LE_EXPR)
4710 (eq @2 @1))
4711 (if (cmp == GE_EXPR)
4712 { constant_boolean_node (true, type); })
4713 (if (cmp == GT_EXPR)
4714 (ne @2 @1))))
4715 (if (wi::to_wide (cst) == max - 1)
4716 (switch
4717 (if (cmp == GT_EXPR)
4718 (eq @2 { build_uniform_cst (TREE_TYPE (@1),
4719 wide_int_to_tree (TREE_TYPE (cst),
4720 wi::to_wide (cst)
4721 + 1)); }))
4722 (if (cmp == LE_EXPR)
4723 (ne @2 { build_uniform_cst (TREE_TYPE (@1),
4724 wide_int_to_tree (TREE_TYPE (cst),
4725 wi::to_wide (cst)
4726 + 1)); }))))
4727 (if (wi::to_wide (cst) == min + 1)
4728 (switch
4729 (if (cmp == GE_EXPR)
4730 (ne @2 { build_uniform_cst (TREE_TYPE (@1),
4731 wide_int_to_tree (TREE_TYPE (cst),
4732 wi::to_wide (cst)
4733 - 1)); }))
4734 (if (cmp == LT_EXPR)
4735 (eq @2 { build_uniform_cst (TREE_TYPE (@1),
4736 wide_int_to_tree (TREE_TYPE (cst),
4737 wi::to_wide (cst)
4738 - 1)); }))))
4739 (if (wi::to_wide (cst) == signed_max
4740 && TYPE_UNSIGNED (arg1_type)
4741 /* We will flip the signedness of the comparison operator
4742 associated with the mode of @1, so the sign bit is
4743 specified by this mode. Check that @1 is the signed
4744 max associated with this sign bit. */
4745 && prec == GET_MODE_PRECISION (SCALAR_INT_TYPE_MODE (arg1_type))
4746 /* signed_type does not work on pointer types. */
4747 && INTEGRAL_TYPE_P (arg1_type))
4748 /* The following case also applies to X < signed_max+1
4749 and X >= signed_max+1 because previous transformations. */
4750 (if (cmp == LE_EXPR || cmp == GT_EXPR)
4751 (with { tree st = signed_type_for (TREE_TYPE (@1)); }
4752 (switch
4753 (if (cst == @1 && cmp == LE_EXPR)
4754 (ge (convert:st @0) { build_zero_cst (st); }))
4755 (if (cst == @1 && cmp == GT_EXPR)
4756 (lt (convert:st @0) { build_zero_cst (st); }))
4757 (if (cmp == LE_EXPR)
4758 (ge (view_convert:st @0) { build_zero_cst (st); }))
4759 (if (cmp == GT_EXPR)
4760 (lt (view_convert:st @0) { build_zero_cst (st); })))))))))))
4761
4762 (for cmp (unordered ordered unlt unle ungt unge uneq ltgt)
4763 /* If the second operand is NaN, the result is constant. */
4764 (simplify
4765 (cmp @0 REAL_CST@1)
4766 (if (REAL_VALUE_ISNAN (TREE_REAL_CST (@1))
4767 && (cmp != LTGT_EXPR || ! flag_trapping_math))
4768 { constant_boolean_node (cmp == ORDERED_EXPR || cmp == LTGT_EXPR
4769 ? false : true, type); })))
4770
4771 /* bool_var != 0 becomes bool_var. */
4772 (simplify
4773 (ne @0 integer_zerop)
4774 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
4775 && types_match (type, TREE_TYPE (@0)))
4776 (non_lvalue @0)))
4777 /* bool_var == 1 becomes bool_var. */
4778 (simplify
4779 (eq @0 integer_onep)
4780 (if (TREE_CODE (TREE_TYPE (@0)) == BOOLEAN_TYPE
4781 && types_match (type, TREE_TYPE (@0)))
4782 (non_lvalue @0)))
4783 /* Do not handle
4784 bool_var == 0 becomes !bool_var or
4785 bool_var != 1 becomes !bool_var
4786 here because that only is good in assignment context as long
4787 as we require a tcc_comparison in GIMPLE_CONDs where we'd
4788 replace if (x == 0) with tem = ~x; if (tem != 0) which is
4789 clearly less optimal and which we'll transform again in forwprop. */
4790
4791 /* When one argument is a constant, overflow detection can be simplified.
4792 Currently restricted to single use so as not to interfere too much with
4793 ADD_OVERFLOW detection in tree-ssa-math-opts.c.
4794 A + CST CMP A -> A CMP' CST' */
4795 (for cmp (lt le ge gt)
4796 out (gt gt le le)
4797 (simplify
4798 (cmp:c (plus@2 @0 INTEGER_CST@1) @0)
4799 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4800 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0))
4801 && wi::to_wide (@1) != 0
4802 && single_use (@2))
4803 (with { unsigned int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
4804 (out @0 { wide_int_to_tree (TREE_TYPE (@0),
4805 wi::max_value (prec, UNSIGNED)
4806 - wi::to_wide (@1)); })))))
4807
4808 /* To detect overflow in unsigned A - B, A < B is simpler than A - B > A.
4809 However, the detection logic for SUB_OVERFLOW in tree-ssa-math-opts.c
4810 expects the long form, so we restrict the transformation for now. */
4811 (for cmp (gt le)
4812 (simplify
4813 (cmp:c (minus@2 @0 @1) @0)
4814 (if (single_use (@2)
4815 && ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
4816 && TYPE_UNSIGNED (TREE_TYPE (@0)))
4817 (cmp @1 @0))))
4818
4819 /* Optimize A - B + -1 >= A into B >= A for unsigned comparisons. */
4820 (for cmp (ge lt)
4821 (simplify
4822 (cmp:c (plus (minus @0 @1) integer_minus_onep) @0)
4823 (if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (@0))
4824 && TYPE_UNSIGNED (TREE_TYPE (@0)))
4825 (cmp @1 @0))))
4826
4827 /* Testing for overflow is unnecessary if we already know the result. */
4828 /* A - B > A */
4829 (for cmp (gt le)
4830 out (ne eq)
4831 (simplify
4832 (cmp:c (realpart (IFN_SUB_OVERFLOW@2 @0 @1)) @0)
4833 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4834 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
4835 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
4836 /* A + B < A */
4837 (for cmp (lt ge)
4838 out (ne eq)
4839 (simplify
4840 (cmp:c (realpart (IFN_ADD_OVERFLOW:c@2 @0 @1)) @0)
4841 (if (TYPE_UNSIGNED (TREE_TYPE (@0))
4842 && types_match (TREE_TYPE (@0), TREE_TYPE (@1)))
4843 (out (imagpart @2) { build_zero_cst (TREE_TYPE (@0)); }))))
4844
4845 /* For unsigned operands, -1 / B < A checks whether A * B would overflow.
4846 Simplify it to __builtin_mul_overflow (A, B, <unused>). */
4847 (for cmp (lt ge)
4848 out (ne eq)
4849 (simplify
4850 (cmp:c (trunc_div:s integer_all_onesp @1) @0)
4851 (if (TYPE_UNSIGNED (TREE_TYPE (@0)) && !VECTOR_TYPE_P (TREE_TYPE (@0)))
4852 (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
4853 (out (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
4854
4855 /* Similarly, for unsigned operands, (((type) A * B) >> prec) != 0 where type
4856 is at least twice as wide as type of A and B, simplify to
4857 __builtin_mul_overflow (A, B, <unused>). */
4858 (for cmp (eq ne)
4859 (simplify
4860 (cmp (rshift (mult:s (convert@3 @0) (convert @1)) INTEGER_CST@2)
4861 integer_zerop)
4862 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
4863 && INTEGRAL_TYPE_P (TREE_TYPE (@3))
4864 && TYPE_UNSIGNED (TREE_TYPE (@0))
4865 && (TYPE_PRECISION (TREE_TYPE (@3))
4866 >= 2 * TYPE_PRECISION (TREE_TYPE (@0)))
4867 && tree_fits_uhwi_p (@2)
4868 && tree_to_uhwi (@2) == TYPE_PRECISION (TREE_TYPE (@0))
4869 && types_match (@0, @1)
4870 && type_has_mode_precision_p (TREE_TYPE (@0))
4871 && (optab_handler (umulv4_optab, TYPE_MODE (TREE_TYPE (@0)))
4872 != CODE_FOR_nothing))
4873 (with { tree t = TREE_TYPE (@0), cpx = build_complex_type (t); }
4874 (cmp (imagpart (IFN_MUL_OVERFLOW:cpx @0 @1)) { build_zero_cst (t); })))))
4875
4876 /* Simplification of math builtins. These rules must all be optimizations
4877 as well as IL simplifications. If there is a possibility that the new
4878 form could be a pessimization, the rule should go in the canonicalization
4879 section that follows this one.
4880
4881 Rules can generally go in this section if they satisfy one of
4882 the following:
4883
4884 - the rule describes an identity
4885
4886 - the rule replaces calls with something as simple as addition or
4887 multiplication
4888
4889 - the rule contains unary calls only and simplifies the surrounding
4890 arithmetic. (The idea here is to exclude non-unary calls in which
4891 one operand is constant and in which the call is known to be cheap
4892 when the operand has that value.) */
4893
4894 (if (flag_unsafe_math_optimizations)
4895 /* Simplify sqrt(x) * sqrt(x) -> x. */
4896 (simplify
4897 (mult (SQRT_ALL@1 @0) @1)
4898 (if (!HONOR_SNANS (type))
4899 @0))
4900
4901 (for op (plus minus)
4902 /* Simplify (A / C) +- (B / C) -> (A +- B) / C. */
4903 (simplify
4904 (op (rdiv @0 @1)
4905 (rdiv @2 @1))
4906 (rdiv (op @0 @2) @1)))
4907
4908 (for cmp (lt le gt ge)
4909 neg_cmp (gt ge lt le)
4910 /* Simplify (x * C1) cmp C2 -> x cmp (C2 / C1), where C1 != 0. */
4911 (simplify
4912 (cmp (mult @0 REAL_CST@1) REAL_CST@2)
4913 (with
4914 { tree tem = const_binop (RDIV_EXPR, type, @2, @1); }
4915 (if (tem
4916 && !(REAL_VALUE_ISINF (TREE_REAL_CST (tem))
4917 || (real_zerop (tem) && !real_zerop (@1))))
4918 (switch
4919 (if (real_less (&dconst0, TREE_REAL_CST_PTR (@1)))
4920 (cmp @0 { tem; }))
4921 (if (real_less (TREE_REAL_CST_PTR (@1), &dconst0))
4922 (neg_cmp @0 { tem; })))))))
4923
4924 /* Simplify sqrt(x) * sqrt(y) -> sqrt(x*y). */
4925 (for root (SQRT CBRT)
4926 (simplify
4927 (mult (root:s @0) (root:s @1))
4928 (root (mult @0 @1))))
4929
4930 /* Simplify expN(x) * expN(y) -> expN(x+y). */
4931 (for exps (EXP EXP2 EXP10 POW10)
4932 (simplify
4933 (mult (exps:s @0) (exps:s @1))
4934 (exps (plus @0 @1))))
4935
4936 /* Simplify a/root(b/c) into a*root(c/b). */
4937 (for root (SQRT CBRT)
4938 (simplify
4939 (rdiv @0 (root:s (rdiv:s @1 @2)))
4940 (mult @0 (root (rdiv @2 @1)))))
4941
4942 /* Simplify x/expN(y) into x*expN(-y). */
4943 (for exps (EXP EXP2 EXP10 POW10)
4944 (simplify
4945 (rdiv @0 (exps:s @1))
4946 (mult @0 (exps (negate @1)))))
4947
4948 (for logs (LOG LOG2 LOG10 LOG10)
4949 exps (EXP EXP2 EXP10 POW10)
4950 /* logN(expN(x)) -> x. */
4951 (simplify
4952 (logs (exps @0))
4953 @0)
4954 /* expN(logN(x)) -> x. */
4955 (simplify
4956 (exps (logs @0))
4957 @0))
4958
4959 /* Optimize logN(func()) for various exponential functions. We
4960 want to determine the value "x" and the power "exponent" in
4961 order to transform logN(x**exponent) into exponent*logN(x). */
4962 (for logs (LOG LOG LOG LOG2 LOG2 LOG2 LOG10 LOG10)
4963 exps (EXP2 EXP10 POW10 EXP EXP10 POW10 EXP EXP2)
4964 (simplify
4965 (logs (exps @0))
4966 (if (SCALAR_FLOAT_TYPE_P (type))
4967 (with {
4968 tree x;
4969 switch (exps)
4970 {
4971 CASE_CFN_EXP:
4972 /* Prepare to do logN(exp(exponent)) -> exponent*logN(e). */
4973 x = build_real_truncate (type, dconst_e ());
4974 break;
4975 CASE_CFN_EXP2:
4976 /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2). */
4977 x = build_real (type, dconst2);
4978 break;
4979 CASE_CFN_EXP10:
4980 CASE_CFN_POW10:
4981 /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10). */
4982 {
4983 REAL_VALUE_TYPE dconst10;
4984 real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
4985 x = build_real (type, dconst10);
4986 }
4987 break;
4988 default:
4989 gcc_unreachable ();
4990 }
4991 }
4992 (mult (logs { x; }) @0)))))
4993
4994 (for logs (LOG LOG
4995 LOG2 LOG2
4996 LOG10 LOG10)
4997 exps (SQRT CBRT)
4998 (simplify
4999 (logs (exps @0))
5000 (if (SCALAR_FLOAT_TYPE_P (type))
5001 (with {
5002 tree x;
5003 switch (exps)
5004 {
5005 CASE_CFN_SQRT:
5006 /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x). */
5007 x = build_real (type, dconsthalf);
5008 break;
5009 CASE_CFN_CBRT:
5010 /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x). */
5011 x = build_real_truncate (type, dconst_third ());
5012 break;
5013 default:
5014 gcc_unreachable ();
5015 }
5016 }
5017 (mult { x; } (logs @0))))))
5018
5019 /* logN(pow(x,exponent)) -> exponent*logN(x). */
5020 (for logs (LOG LOG2 LOG10)
5021 pows (POW)
5022 (simplify
5023 (logs (pows @0 @1))
5024 (mult @1 (logs @0))))
5025
5026 /* pow(C,x) -> exp(log(C)*x) if C > 0,
5027 or if C is a positive power of 2,
5028 pow(C,x) -> exp2(log2(C)*x). */
5029 #if GIMPLE
5030 (for pows (POW)
5031 exps (EXP)
5032 logs (LOG)
5033 exp2s (EXP2)
5034 log2s (LOG2)
5035 (simplify
5036 (pows REAL_CST@0 @1)
5037 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
5038 && real_isfinite (TREE_REAL_CST_PTR (@0))
5039 /* As libmvec doesn't have a vectorized exp2, defer optimizing
5040 the use_exp2 case until after vectorization. It seems actually
5041 beneficial for all constants to postpone this until later,
5042 because exp(log(C)*x), while faster, will have worse precision
5043 and if x folds into a constant too, that is unnecessary
5044 pessimization. */
5045 && canonicalize_math_after_vectorization_p ())
5046 (with {
5047 const REAL_VALUE_TYPE *const value = TREE_REAL_CST_PTR (@0);
5048 bool use_exp2 = false;
5049 if (targetm.libc_has_function (function_c99_misc)
5050 && value->cl == rvc_normal)
5051 {
5052 REAL_VALUE_TYPE frac_rvt = *value;
5053 SET_REAL_EXP (&frac_rvt, 1);
5054 if (real_equal (&frac_rvt, &dconst1))
5055 use_exp2 = true;
5056 }
5057 }
5058 (if (!use_exp2)
5059 (if (optimize_pow_to_exp (@0, @1))
5060 (exps (mult (logs @0) @1)))
5061 (exp2s (mult (log2s @0) @1)))))))
5062 #endif
5063
5064 /* pow(C,x)*expN(y) -> expN(logN(C)*x+y) if C > 0. */
5065 (for pows (POW)
5066 exps (EXP EXP2 EXP10 POW10)
5067 logs (LOG LOG2 LOG10 LOG10)
5068 (simplify
5069 (mult:c (pows:s REAL_CST@0 @1) (exps:s @2))
5070 (if (real_compare (GT_EXPR, TREE_REAL_CST_PTR (@0), &dconst0)
5071 && real_isfinite (TREE_REAL_CST_PTR (@0)))
5072 (exps (plus (mult (logs @0) @1) @2)))))
5073
5074 (for sqrts (SQRT)
5075 cbrts (CBRT)
5076 pows (POW)
5077 exps (EXP EXP2 EXP10 POW10)
5078 /* sqrt(expN(x)) -> expN(x*0.5). */
5079 (simplify
5080 (sqrts (exps @0))
5081 (exps (mult @0 { build_real (type, dconsthalf); })))
5082 /* cbrt(expN(x)) -> expN(x/3). */
5083 (simplify
5084 (cbrts (exps @0))
5085 (exps (mult @0 { build_real_truncate (type, dconst_third ()); })))
5086 /* pow(expN(x), y) -> expN(x*y). */
5087 (simplify
5088 (pows (exps @0) @1)
5089 (exps (mult @0 @1))))
5090
5091 /* tan(atan(x)) -> x. */
5092 (for tans (TAN)
5093 atans (ATAN)
5094 (simplify
5095 (tans (atans @0))
5096 @0)))
5097
5098 /* Simplify sin(atan(x)) -> x / sqrt(x*x + 1). */
5099 (for sins (SIN)
5100 atans (ATAN)
5101 sqrts (SQRT)
5102 copysigns (COPYSIGN)
5103 (simplify
5104 (sins (atans:s @0))
5105 (with
5106 {
5107 REAL_VALUE_TYPE r_cst;
5108 build_sinatan_real (&r_cst, type);
5109 tree t_cst = build_real (type, r_cst);
5110 tree t_one = build_one_cst (type);
5111 }
5112 (if (SCALAR_FLOAT_TYPE_P (type))
5113 (cond (lt (abs @0) { t_cst; })
5114 (rdiv @0 (sqrts (plus (mult @0 @0) { t_one; })))
5115 (copysigns { t_one; } @0))))))
5116
5117 /* Simplify cos(atan(x)) -> 1 / sqrt(x*x + 1). */
5118 (for coss (COS)
5119 atans (ATAN)
5120 sqrts (SQRT)
5121 copysigns (COPYSIGN)
5122 (simplify
5123 (coss (atans:s @0))
5124 (with
5125 {
5126 REAL_VALUE_TYPE r_cst;
5127 build_sinatan_real (&r_cst, type);
5128 tree t_cst = build_real (type, r_cst);
5129 tree t_one = build_one_cst (type);
5130 tree t_zero = build_zero_cst (type);
5131 }
5132 (if (SCALAR_FLOAT_TYPE_P (type))
5133 (cond (lt (abs @0) { t_cst; })
5134 (rdiv { t_one; } (sqrts (plus (mult @0 @0) { t_one; })))
5135 (copysigns { t_zero; } @0))))))
5136
5137 (if (!flag_errno_math)
5138 /* Simplify sinh(atanh(x)) -> x / sqrt((1 - x)*(1 + x)). */
5139 (for sinhs (SINH)
5140 atanhs (ATANH)
5141 sqrts (SQRT)
5142 (simplify
5143 (sinhs (atanhs:s @0))
5144 (with { tree t_one = build_one_cst (type); }
5145 (rdiv @0 (sqrts (mult (minus { t_one; } @0) (plus { t_one; } @0)))))))
5146
5147 /* Simplify cosh(atanh(x)) -> 1 / sqrt((1 - x)*(1 + x)) */
5148 (for coshs (COSH)
5149 atanhs (ATANH)
5150 sqrts (SQRT)
5151 (simplify
5152 (coshs (atanhs:s @0))
5153 (with { tree t_one = build_one_cst (type); }
5154 (rdiv { t_one; } (sqrts (mult (minus { t_one; } @0) (plus { t_one; } @0))))))))
5155
5156 /* cabs(x+0i) or cabs(0+xi) -> abs(x). */
5157 (simplify
5158 (CABS (complex:C @0 real_zerop@1))
5159 (abs @0))
5160
5161 /* trunc(trunc(x)) -> trunc(x), etc. */
5162 (for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
5163 (simplify
5164 (fns (fns @0))
5165 (fns @0)))
5166 /* f(x) -> x if x is integer valued and f does nothing for such values. */
5167 (for fns (TRUNC_ALL FLOOR_ALL CEIL_ALL ROUND_ALL NEARBYINT_ALL RINT_ALL)
5168 (simplify
5169 (fns integer_valued_real_p@0)
5170 @0))
5171
5172 /* hypot(x,0) and hypot(0,x) -> abs(x). */
5173 (simplify
5174 (HYPOT:c @0 real_zerop@1)
5175 (abs @0))
5176
5177 /* pow(1,x) -> 1. */
5178 (simplify
5179 (POW real_onep@0 @1)
5180 @0)
5181
5182 (simplify
5183 /* copysign(x,x) -> x. */
5184 (COPYSIGN_ALL @0 @0)
5185 @0)
5186
5187 (simplify
5188 /* copysign(x,y) -> fabs(x) if y is nonnegative. */
5189 (COPYSIGN_ALL @0 tree_expr_nonnegative_p@1)
5190 (abs @0))
5191
5192 (for scale (LDEXP SCALBN SCALBLN)
5193 /* ldexp(0, x) -> 0. */
5194 (simplify
5195 (scale real_zerop@0 @1)
5196 @0)
5197 /* ldexp(x, 0) -> x. */
5198 (simplify
5199 (scale @0 integer_zerop@1)
5200 @0)
5201 /* ldexp(x, y) -> x if x is +-Inf or NaN. */
5202 (simplify
5203 (scale REAL_CST@0 @1)
5204 (if (!real_isfinite (TREE_REAL_CST_PTR (@0)))
5205 @0)))
5206
5207 /* Canonicalization of sequences of math builtins. These rules represent
5208 IL simplifications but are not necessarily optimizations.
5209
5210 The sincos pass is responsible for picking "optimal" implementations
5211 of math builtins, which may be more complicated and can sometimes go
5212 the other way, e.g. converting pow into a sequence of sqrts.
5213 We only want to do these canonicalizations before the pass has run. */
5214
5215 (if (flag_unsafe_math_optimizations && canonicalize_math_p ())
5216 /* Simplify tan(x) * cos(x) -> sin(x). */
5217 (simplify
5218 (mult:c (TAN:s @0) (COS:s @0))
5219 (SIN @0))
5220
5221 /* Simplify x * pow(x,c) -> pow(x,c+1). */
5222 (simplify
5223 (mult:c @0 (POW:s @0 REAL_CST@1))
5224 (if (!TREE_OVERFLOW (@1))
5225 (POW @0 (plus @1 { build_one_cst (type); }))))
5226
5227 /* Simplify sin(x) / cos(x) -> tan(x). */
5228 (simplify
5229 (rdiv (SIN:s @0) (COS:s @0))
5230 (TAN @0))
5231
5232 /* Simplify sinh(x) / cosh(x) -> tanh(x). */
5233 (simplify
5234 (rdiv (SINH:s @0) (COSH:s @0))
5235 (TANH @0))
5236
5237 /* Simplify tanh (x) / sinh (x) -> 1.0 / cosh (x). */
5238 (simplify
5239 (rdiv (TANH:s @0) (SINH:s @0))
5240 (rdiv {build_one_cst (type);} (COSH @0)))
5241
5242 /* Simplify cos(x) / sin(x) -> 1 / tan(x). */
5243 (simplify
5244 (rdiv (COS:s @0) (SIN:s @0))
5245 (rdiv { build_one_cst (type); } (TAN @0)))
5246
5247 /* Simplify sin(x) / tan(x) -> cos(x). */
5248 (simplify
5249 (rdiv (SIN:s @0) (TAN:s @0))
5250 (if (! HONOR_NANS (@0)
5251 && ! HONOR_INFINITIES (@0))
5252 (COS @0)))
5253
5254 /* Simplify tan(x) / sin(x) -> 1.0 / cos(x). */
5255 (simplify
5256 (rdiv (TAN:s @0) (SIN:s @0))
5257 (if (! HONOR_NANS (@0)
5258 && ! HONOR_INFINITIES (@0))
5259 (rdiv { build_one_cst (type); } (COS @0))))
5260
5261 /* Simplify pow(x,y) * pow(x,z) -> pow(x,y+z). */
5262 (simplify
5263 (mult (POW:s @0 @1) (POW:s @0 @2))
5264 (POW @0 (plus @1 @2)))
5265
5266 /* Simplify pow(x,y) * pow(z,y) -> pow(x*z,y). */
5267 (simplify
5268 (mult (POW:s @0 @1) (POW:s @2 @1))
5269 (POW (mult @0 @2) @1))
5270
5271 /* Simplify powi(x,y) * powi(z,y) -> powi(x*z,y). */
5272 (simplify
5273 (mult (POWI:s @0 @1) (POWI:s @2 @1))
5274 (POWI (mult @0 @2) @1))
5275
5276 /* Simplify pow(x,c) / x -> pow(x,c-1). */
5277 (simplify
5278 (rdiv (POW:s @0 REAL_CST@1) @0)
5279 (if (!TREE_OVERFLOW (@1))
5280 (POW @0 (minus @1 { build_one_cst (type); }))))
5281
5282 /* Simplify x / pow (y,z) -> x * pow(y,-z). */
5283 (simplify
5284 (rdiv @0 (POW:s @1 @2))
5285 (mult @0 (POW @1 (negate @2))))
5286
5287 (for sqrts (SQRT)
5288 cbrts (CBRT)
5289 pows (POW)
5290 /* sqrt(sqrt(x)) -> pow(x,1/4). */
5291 (simplify
5292 (sqrts (sqrts @0))
5293 (pows @0 { build_real (type, dconst_quarter ()); }))
5294 /* sqrt(cbrt(x)) -> pow(x,1/6). */
5295 (simplify
5296 (sqrts (cbrts @0))
5297 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
5298 /* cbrt(sqrt(x)) -> pow(x,1/6). */
5299 (simplify
5300 (cbrts (sqrts @0))
5301 (pows @0 { build_real_truncate (type, dconst_sixth ()); }))
5302 /* cbrt(cbrt(x)) -> pow(x,1/9), iff x is nonnegative. */
5303 (simplify
5304 (cbrts (cbrts tree_expr_nonnegative_p@0))
5305 (pows @0 { build_real_truncate (type, dconst_ninth ()); }))
5306 /* sqrt(pow(x,y)) -> pow(|x|,y*0.5). */
5307 (simplify
5308 (sqrts (pows @0 @1))
5309 (pows (abs @0) (mult @1 { build_real (type, dconsthalf); })))
5310 /* cbrt(pow(x,y)) -> pow(x,y/3), iff x is nonnegative. */
5311 (simplify
5312 (cbrts (pows tree_expr_nonnegative_p@0 @1))
5313 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
5314 /* pow(sqrt(x),y) -> pow(x,y*0.5). */
5315 (simplify
5316 (pows (sqrts @0) @1)
5317 (pows @0 (mult @1 { build_real (type, dconsthalf); })))
5318 /* pow(cbrt(x),y) -> pow(x,y/3) iff x is nonnegative. */
5319 (simplify
5320 (pows (cbrts tree_expr_nonnegative_p@0) @1)
5321 (pows @0 (mult @1 { build_real_truncate (type, dconst_third ()); })))
5322 /* pow(pow(x,y),z) -> pow(x,y*z) iff x is nonnegative. */
5323 (simplify
5324 (pows (pows tree_expr_nonnegative_p@0 @1) @2)
5325 (pows @0 (mult @1 @2))))
5326
5327 /* cabs(x+xi) -> fabs(x)*sqrt(2). */
5328 (simplify
5329 (CABS (complex @0 @0))
5330 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
5331
5332 /* hypot(x,x) -> fabs(x)*sqrt(2). */
5333 (simplify
5334 (HYPOT @0 @0)
5335 (mult (abs @0) { build_real_truncate (type, dconst_sqrt2 ()); }))
5336
5337 /* cexp(x+yi) -> exp(x)*cexpi(y). */
5338 (for cexps (CEXP)
5339 exps (EXP)
5340 cexpis (CEXPI)
5341 (simplify
5342 (cexps compositional_complex@0)
5343 (if (targetm.libc_has_function (function_c99_math_complex))
5344 (complex
5345 (mult (exps@1 (realpart @0)) (realpart (cexpis:type@2 (imagpart @0))))
5346 (mult @1 (imagpart @2)))))))
5347
5348 (if (canonicalize_math_p ())
5349 /* floor(x) -> trunc(x) if x is nonnegative. */
5350 (for floors (FLOOR_ALL)
5351 truncs (TRUNC_ALL)
5352 (simplify
5353 (floors tree_expr_nonnegative_p@0)
5354 (truncs @0))))
5355
5356 (match double_value_p
5357 @0
5358 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == double_type_node)))
5359 (for froms (BUILT_IN_TRUNCL
5360 BUILT_IN_FLOORL
5361 BUILT_IN_CEILL
5362 BUILT_IN_ROUNDL
5363 BUILT_IN_NEARBYINTL
5364 BUILT_IN_RINTL)
5365 tos (BUILT_IN_TRUNC
5366 BUILT_IN_FLOOR
5367 BUILT_IN_CEIL
5368 BUILT_IN_ROUND
5369 BUILT_IN_NEARBYINT
5370 BUILT_IN_RINT)
5371 /* truncl(extend(x)) -> extend(trunc(x)), etc., if x is a double. */
5372 (if (optimize && canonicalize_math_p ())
5373 (simplify
5374 (froms (convert double_value_p@0))
5375 (convert (tos @0)))))
5376
5377 (match float_value_p
5378 @0
5379 (if (TYPE_MAIN_VARIANT (TREE_TYPE (@0)) == float_type_node)))
5380 (for froms (BUILT_IN_TRUNCL BUILT_IN_TRUNC
5381 BUILT_IN_FLOORL BUILT_IN_FLOOR
5382 BUILT_IN_CEILL BUILT_IN_CEIL
5383 BUILT_IN_ROUNDL BUILT_IN_ROUND
5384 BUILT_IN_NEARBYINTL BUILT_IN_NEARBYINT
5385 BUILT_IN_RINTL BUILT_IN_RINT)
5386 tos (BUILT_IN_TRUNCF BUILT_IN_TRUNCF
5387 BUILT_IN_FLOORF BUILT_IN_FLOORF
5388 BUILT_IN_CEILF BUILT_IN_CEILF
5389 BUILT_IN_ROUNDF BUILT_IN_ROUNDF
5390 BUILT_IN_NEARBYINTF BUILT_IN_NEARBYINTF
5391 BUILT_IN_RINTF BUILT_IN_RINTF)
5392 /* truncl(extend(x)) and trunc(extend(x)) -> extend(truncf(x)), etc.,
5393 if x is a float. */
5394 (if (optimize && canonicalize_math_p ()
5395 && targetm.libc_has_function (function_c99_misc))
5396 (simplify
5397 (froms (convert float_value_p@0))
5398 (convert (tos @0)))))
5399
5400 (for froms (XFLOORL XCEILL XROUNDL XRINTL)
5401 tos (XFLOOR XCEIL XROUND XRINT)
5402 /* llfloorl(extend(x)) -> llfloor(x), etc., if x is a double. */
5403 (if (optimize && canonicalize_math_p ())
5404 (simplify
5405 (froms (convert double_value_p@0))
5406 (tos @0))))
5407
5408 (for froms (XFLOORL XCEILL XROUNDL XRINTL
5409 XFLOOR XCEIL XROUND XRINT)
5410 tos (XFLOORF XCEILF XROUNDF XRINTF)
5411 /* llfloorl(extend(x)) and llfloor(extend(x)) -> llfloorf(x), etc.,
5412 if x is a float. */
5413 (if (optimize && canonicalize_math_p ())
5414 (simplify
5415 (froms (convert float_value_p@0))
5416 (tos @0))))
5417
5418 (if (canonicalize_math_p ())
5419 /* xfloor(x) -> fix_trunc(x) if x is nonnegative. */
5420 (for floors (IFLOOR LFLOOR LLFLOOR)
5421 (simplify
5422 (floors tree_expr_nonnegative_p@0)
5423 (fix_trunc @0))))
5424
5425 (if (canonicalize_math_p ())
5426 /* xfloor(x) -> fix_trunc(x), etc., if x is integer valued. */
5427 (for fns (IFLOOR LFLOOR LLFLOOR
5428 ICEIL LCEIL LLCEIL
5429 IROUND LROUND LLROUND)
5430 (simplify
5431 (fns integer_valued_real_p@0)
5432 (fix_trunc @0)))
5433 (if (!flag_errno_math)
5434 /* xrint(x) -> fix_trunc(x), etc., if x is integer valued. */
5435 (for rints (IRINT LRINT LLRINT)
5436 (simplify
5437 (rints integer_valued_real_p@0)
5438 (fix_trunc @0)))))
5439
5440 (if (canonicalize_math_p ())
5441 (for ifn (IFLOOR ICEIL IROUND IRINT)
5442 lfn (LFLOOR LCEIL LROUND LRINT)
5443 llfn (LLFLOOR LLCEIL LLROUND LLRINT)
5444 /* Canonicalize iround (x) to lround (x) on ILP32 targets where
5445 sizeof (int) == sizeof (long). */
5446 (if (TYPE_PRECISION (integer_type_node)
5447 == TYPE_PRECISION (long_integer_type_node))
5448 (simplify
5449 (ifn @0)
5450 (lfn:long_integer_type_node @0)))
5451 /* Canonicalize llround (x) to lround (x) on LP64 targets where
5452 sizeof (long long) == sizeof (long). */
5453 (if (TYPE_PRECISION (long_long_integer_type_node)
5454 == TYPE_PRECISION (long_integer_type_node))
5455 (simplify
5456 (llfn @0)
5457 (lfn:long_integer_type_node @0)))))
5458
5459 /* cproj(x) -> x if we're ignoring infinities. */
5460 (simplify
5461 (CPROJ @0)
5462 (if (!HONOR_INFINITIES (type))
5463 @0))
5464
5465 /* If the real part is inf and the imag part is known to be
5466 nonnegative, return (inf + 0i). */
5467 (simplify
5468 (CPROJ (complex REAL_CST@0 tree_expr_nonnegative_p@1))
5469 (if (real_isinf (TREE_REAL_CST_PTR (@0)))
5470 { build_complex_inf (type, false); }))
5471
5472 /* If the imag part is inf, return (inf+I*copysign(0,imag)). */
5473 (simplify
5474 (CPROJ (complex @0 REAL_CST@1))
5475 (if (real_isinf (TREE_REAL_CST_PTR (@1)))
5476 { build_complex_inf (type, TREE_REAL_CST_PTR (@1)->sign); }))
5477
5478 (for pows (POW)
5479 sqrts (SQRT)
5480 cbrts (CBRT)
5481 (simplify
5482 (pows @0 REAL_CST@1)
5483 (with {
5484 const REAL_VALUE_TYPE *value = TREE_REAL_CST_PTR (@1);
5485 REAL_VALUE_TYPE tmp;
5486 }
5487 (switch
5488 /* pow(x,0) -> 1. */
5489 (if (real_equal (value, &dconst0))
5490 { build_real (type, dconst1); })
5491 /* pow(x,1) -> x. */
5492 (if (real_equal (value, &dconst1))
5493 @0)
5494 /* pow(x,-1) -> 1/x. */
5495 (if (real_equal (value, &dconstm1))
5496 (rdiv { build_real (type, dconst1); } @0))
5497 /* pow(x,0.5) -> sqrt(x). */
5498 (if (flag_unsafe_math_optimizations
5499 && canonicalize_math_p ()
5500 && real_equal (value, &dconsthalf))
5501 (sqrts @0))
5502 /* pow(x,1/3) -> cbrt(x). */
5503 (if (flag_unsafe_math_optimizations
5504 && canonicalize_math_p ()
5505 && (tmp = real_value_truncate (TYPE_MODE (type), dconst_third ()),
5506 real_equal (value, &tmp)))
5507 (cbrts @0))))))
5508
5509 /* powi(1,x) -> 1. */
5510 (simplify
5511 (POWI real_onep@0 @1)
5512 @0)
5513
5514 (simplify
5515 (POWI @0 INTEGER_CST@1)
5516 (switch
5517 /* powi(x,0) -> 1. */
5518 (if (wi::to_wide (@1) == 0)
5519 { build_real (type, dconst1); })
5520 /* powi(x,1) -> x. */
5521 (if (wi::to_wide (@1) == 1)
5522 @0)
5523 /* powi(x,-1) -> 1/x. */
5524 (if (wi::to_wide (@1) == -1)
5525 (rdiv { build_real (type, dconst1); } @0))))
5526
5527 /* Narrowing of arithmetic and logical operations.
5528
5529 These are conceptually similar to the transformations performed for
5530 the C/C++ front-ends by shorten_binary_op and shorten_compare. Long
5531 term we want to move all that code out of the front-ends into here. */
5532
5533 /* Convert (outertype)((innertype0)a+(innertype1)b)
5534 into ((newtype)a+(newtype)b) where newtype
5535 is the widest mode from all of these. */
5536 (for op (plus minus mult rdiv)
5537 (simplify
5538 (convert (op:s@0 (convert1?@3 @1) (convert2?@4 @2)))
5539 /* If we have a narrowing conversion of an arithmetic operation where
5540 both operands are widening conversions from the same type as the outer
5541 narrowing conversion. Then convert the innermost operands to a
5542 suitable unsigned type (to avoid introducing undefined behavior),
5543 perform the operation and convert the result to the desired type. */
5544 (if (INTEGRAL_TYPE_P (type)
5545 && op != MULT_EXPR
5546 && op != RDIV_EXPR
5547 /* We check for type compatibility between @0 and @1 below,
5548 so there's no need to check that @2/@4 are integral types. */
5549 && INTEGRAL_TYPE_P (TREE_TYPE (@1))
5550 && INTEGRAL_TYPE_P (TREE_TYPE (@3))
5551 /* The precision of the type of each operand must match the
5552 precision of the mode of each operand, similarly for the
5553 result. */
5554 && type_has_mode_precision_p (TREE_TYPE (@1))
5555 && type_has_mode_precision_p (TREE_TYPE (@2))
5556 && type_has_mode_precision_p (type)
5557 /* The inner conversion must be a widening conversion. */
5558 && TYPE_PRECISION (TREE_TYPE (@3)) > TYPE_PRECISION (TREE_TYPE (@1))
5559 && types_match (@1, type)
5560 && (types_match (@1, @2)
5561 /* Or the second operand is const integer or converted const
5562 integer from valueize. */
5563 || TREE_CODE (@2) == INTEGER_CST))
5564 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@1)))
5565 (op @1 (convert @2))
5566 (with { tree utype = unsigned_type_for (TREE_TYPE (@1)); }
5567 (convert (op (convert:utype @1)
5568 (convert:utype @2)))))
5569 (if (FLOAT_TYPE_P (type)
5570 && DECIMAL_FLOAT_TYPE_P (TREE_TYPE (@0))
5571 == DECIMAL_FLOAT_TYPE_P (type))
5572 (with { tree arg0 = strip_float_extensions (@1);
5573 tree arg1 = strip_float_extensions (@2);
5574 tree itype = TREE_TYPE (@0);
5575 tree ty1 = TREE_TYPE (arg0);
5576 tree ty2 = TREE_TYPE (arg1);
5577 enum tree_code code = TREE_CODE (itype); }
5578 (if (FLOAT_TYPE_P (ty1)
5579 && FLOAT_TYPE_P (ty2))
5580 (with { tree newtype = type;
5581 if (TYPE_MODE (ty1) == SDmode
5582 || TYPE_MODE (ty2) == SDmode
5583 || TYPE_MODE (type) == SDmode)
5584 newtype = dfloat32_type_node;
5585 if (TYPE_MODE (ty1) == DDmode
5586 || TYPE_MODE (ty2) == DDmode
5587 || TYPE_MODE (type) == DDmode)
5588 newtype = dfloat64_type_node;
5589 if (TYPE_MODE (ty1) == TDmode
5590 || TYPE_MODE (ty2) == TDmode
5591 || TYPE_MODE (type) == TDmode)
5592 newtype = dfloat128_type_node; }
5593 (if ((newtype == dfloat32_type_node
5594 || newtype == dfloat64_type_node
5595 || newtype == dfloat128_type_node)
5596 && newtype == type
5597 && types_match (newtype, type))
5598 (op (convert:newtype @1) (convert:newtype @2))
5599 (with { if (TYPE_PRECISION (ty1) > TYPE_PRECISION (newtype))
5600 newtype = ty1;
5601 if (TYPE_PRECISION (ty2) > TYPE_PRECISION (newtype))
5602 newtype = ty2; }
5603 /* Sometimes this transformation is safe (cannot
5604 change results through affecting double rounding
5605 cases) and sometimes it is not. If NEWTYPE is
5606 wider than TYPE, e.g. (float)((long double)double
5607 + (long double)double) converted to
5608 (float)(double + double), the transformation is
5609 unsafe regardless of the details of the types
5610 involved; double rounding can arise if the result
5611 of NEWTYPE arithmetic is a NEWTYPE value half way
5612 between two representable TYPE values but the
5613 exact value is sufficiently different (in the
5614 right direction) for this difference to be
5615 visible in ITYPE arithmetic. If NEWTYPE is the
5616 same as TYPE, however, the transformation may be
5617 safe depending on the types involved: it is safe
5618 if the ITYPE has strictly more than twice as many
5619 mantissa bits as TYPE, can represent infinities
5620 and NaNs if the TYPE can, and has sufficient
5621 exponent range for the product or ratio of two
5622 values representable in the TYPE to be within the
5623 range of normal values of ITYPE. */
5624 (if (TYPE_PRECISION (newtype) < TYPE_PRECISION (itype)
5625 && (flag_unsafe_math_optimizations
5626 || (TYPE_PRECISION (newtype) == TYPE_PRECISION (type)
5627 && real_can_shorten_arithmetic (TYPE_MODE (itype),
5628 TYPE_MODE (type))
5629 && !excess_precision_type (newtype)))
5630 && !types_match (itype, newtype))
5631 (convert:type (op (convert:newtype @1)
5632 (convert:newtype @2)))
5633 )))) )
5634 ))
5635 )))
5636
5637 /* This is another case of narrowing, specifically when there's an outer
5638 BIT_AND_EXPR which masks off bits outside the type of the innermost
5639 operands. Like the previous case we have to convert the operands
5640 to unsigned types to avoid introducing undefined behavior for the
5641 arithmetic operation. */
5642 (for op (minus plus)
5643 (simplify
5644 (bit_and (op:s (convert@2 @0) (convert@3 @1)) INTEGER_CST@4)
5645 (if (INTEGRAL_TYPE_P (type)
5646 /* We check for type compatibility between @0 and @1 below,
5647 so there's no need to check that @1/@3 are integral types. */
5648 && INTEGRAL_TYPE_P (TREE_TYPE (@0))
5649 && INTEGRAL_TYPE_P (TREE_TYPE (@2))
5650 /* The precision of the type of each operand must match the
5651 precision of the mode of each operand, similarly for the
5652 result. */
5653 && type_has_mode_precision_p (TREE_TYPE (@0))
5654 && type_has_mode_precision_p (TREE_TYPE (@1))
5655 && type_has_mode_precision_p (type)
5656 /* The inner conversion must be a widening conversion. */
5657 && TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
5658 && types_match (@0, @1)
5659 && (tree_int_cst_min_precision (@4, TYPE_SIGN (TREE_TYPE (@0)))
5660 <= TYPE_PRECISION (TREE_TYPE (@0)))
5661 && (wi::to_wide (@4)
5662 & wi::mask (TYPE_PRECISION (TREE_TYPE (@0)),
5663 true, TYPE_PRECISION (type))) == 0)
5664 (if (TYPE_OVERFLOW_WRAPS (TREE_TYPE (@0)))
5665 (with { tree ntype = TREE_TYPE (@0); }
5666 (convert (bit_and (op @0 @1) (convert:ntype @4))))
5667 (with { tree utype = unsigned_type_for (TREE_TYPE (@0)); }
5668 (convert (bit_and (op (convert:utype @0) (convert:utype @1))
5669 (convert:utype @4))))))))
5670
5671 /* Transform (@0 < @1 and @0 < @2) to use min,
5672 (@0 > @1 and @0 > @2) to use max */
5673 (for logic (bit_and bit_and bit_and bit_and bit_ior bit_ior bit_ior bit_ior)
5674 op (lt le gt ge lt le gt ge )
5675 ext (min min max max max max min min )
5676 (simplify
5677 (logic (op:cs @0 @1) (op:cs @0 @2))
5678 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5679 && TREE_CODE (@0) != INTEGER_CST)
5680 (op @0 (ext @1 @2)))))
5681
5682 (simplify
5683 /* signbit(x) -> 0 if x is nonnegative. */
5684 (SIGNBIT tree_expr_nonnegative_p@0)
5685 { integer_zero_node; })
5686
5687 (simplify
5688 /* signbit(x) -> x<0 if x doesn't have signed zeros. */
5689 (SIGNBIT @0)
5690 (if (!HONOR_SIGNED_ZEROS (@0))
5691 (convert (lt @0 { build_real (TREE_TYPE (@0), dconst0); }))))
5692
5693 /* Transform comparisons of the form X +- C1 CMP C2 to X CMP C2 -+ C1. */
5694 (for cmp (eq ne)
5695 (for op (plus minus)
5696 rop (minus plus)
5697 (simplify
5698 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
5699 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
5700 && !TYPE_OVERFLOW_SANITIZED (TREE_TYPE (@0))
5701 && !TYPE_OVERFLOW_TRAPS (TREE_TYPE (@0))
5702 && !TYPE_SATURATING (TREE_TYPE (@0)))
5703 (with { tree res = int_const_binop (rop, @2, @1); }
5704 (if (TREE_OVERFLOW (res)
5705 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
5706 { constant_boolean_node (cmp == NE_EXPR, type); }
5707 (if (single_use (@3))
5708 (cmp @0 { TREE_OVERFLOW (res)
5709 ? drop_tree_overflow (res) : res; }))))))))
5710 (for cmp (lt le gt ge)
5711 (for op (plus minus)
5712 rop (minus plus)
5713 (simplify
5714 (cmp (op@3 @0 INTEGER_CST@1) INTEGER_CST@2)
5715 (if (!TREE_OVERFLOW (@1) && !TREE_OVERFLOW (@2)
5716 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
5717 (with { tree res = int_const_binop (rop, @2, @1); }
5718 (if (TREE_OVERFLOW (res))
5719 {
5720 fold_overflow_warning (("assuming signed overflow does not occur "
5721 "when simplifying conditional to constant"),
5722 WARN_STRICT_OVERFLOW_CONDITIONAL);
5723 bool less = cmp == LE_EXPR || cmp == LT_EXPR;
5724 /* wi::ges_p (@2, 0) should be sufficient for a signed type. */
5725 bool ovf_high = wi::lt_p (wi::to_wide (@1), 0,
5726 TYPE_SIGN (TREE_TYPE (@1)))
5727 != (op == MINUS_EXPR);
5728 constant_boolean_node (less == ovf_high, type);
5729 }
5730 (if (single_use (@3))
5731 (with
5732 {
5733 fold_overflow_warning (("assuming signed overflow does not occur "
5734 "when changing X +- C1 cmp C2 to "
5735 "X cmp C2 -+ C1"),
5736 WARN_STRICT_OVERFLOW_COMPARISON);
5737 }
5738 (cmp @0 { res; })))))))))
5739
5740 /* Canonicalizations of BIT_FIELD_REFs. */
5741
5742 (simplify
5743 (BIT_FIELD_REF (BIT_FIELD_REF @0 @1 @2) @3 @4)
5744 (BIT_FIELD_REF @0 @3 { const_binop (PLUS_EXPR, bitsizetype, @2, @4); }))
5745
5746 (simplify
5747 (BIT_FIELD_REF (view_convert @0) @1 @2)
5748 (BIT_FIELD_REF @0 @1 @2))
5749
5750 (simplify
5751 (BIT_FIELD_REF @0 @1 integer_zerop)
5752 (if (tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (@0))))
5753 (view_convert @0)))
5754
5755 (simplify
5756 (BIT_FIELD_REF @0 @1 @2)
5757 (switch
5758 (if (TREE_CODE (TREE_TYPE (@0)) == COMPLEX_TYPE
5759 && tree_int_cst_equal (@1, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
5760 (switch
5761 (if (integer_zerop (@2))
5762 (view_convert (realpart @0)))
5763 (if (tree_int_cst_equal (@2, TYPE_SIZE (TREE_TYPE (TREE_TYPE (@0)))))
5764 (view_convert (imagpart @0)))))
5765 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
5766 && INTEGRAL_TYPE_P (type)
5767 /* On GIMPLE this should only apply to register arguments. */
5768 && (! GIMPLE || is_gimple_reg (@0))
5769 /* A bit-field-ref that referenced the full argument can be stripped. */
5770 && ((compare_tree_int (@1, TYPE_PRECISION (TREE_TYPE (@0))) == 0
5771 && integer_zerop (@2))
5772 /* Low-parts can be reduced to integral conversions.
5773 ??? The following doesn't work for PDP endian. */
5774 || (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN
5775 /* Don't even think about BITS_BIG_ENDIAN. */
5776 && TYPE_PRECISION (TREE_TYPE (@0)) % BITS_PER_UNIT == 0
5777 && TYPE_PRECISION (type) % BITS_PER_UNIT == 0
5778 && compare_tree_int (@2, (BYTES_BIG_ENDIAN
5779 ? (TYPE_PRECISION (TREE_TYPE (@0))
5780 - TYPE_PRECISION (type))
5781 : 0)) == 0)))
5782 (convert @0))))
5783
5784 /* Simplify vector extracts. */
5785
5786 (simplify
5787 (BIT_FIELD_REF CONSTRUCTOR@0 @1 @2)
5788 (if (VECTOR_TYPE_P (TREE_TYPE (@0))
5789 && (types_match (type, TREE_TYPE (TREE_TYPE (@0)))
5790 || (VECTOR_TYPE_P (type)
5791 && types_match (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
5792 (with
5793 {
5794 tree ctor = (TREE_CODE (@0) == SSA_NAME
5795 ? gimple_assign_rhs1 (SSA_NAME_DEF_STMT (@0)) : @0);
5796 tree eltype = TREE_TYPE (TREE_TYPE (ctor));
5797 unsigned HOST_WIDE_INT width = tree_to_uhwi (TYPE_SIZE (eltype));
5798 unsigned HOST_WIDE_INT n = tree_to_uhwi (@1);
5799 unsigned HOST_WIDE_INT idx = tree_to_uhwi (@2);
5800 }
5801 (if (n != 0
5802 && (idx % width) == 0
5803 && (n % width) == 0
5804 && known_le ((idx + n) / width,
5805 TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor))))
5806 (with
5807 {
5808 idx = idx / width;
5809 n = n / width;
5810 /* Constructor elements can be subvectors. */
5811 poly_uint64 k = 1;
5812 if (CONSTRUCTOR_NELTS (ctor) != 0)
5813 {
5814 tree cons_elem = TREE_TYPE (CONSTRUCTOR_ELT (ctor, 0)->value);
5815 if (TREE_CODE (cons_elem) == VECTOR_TYPE)
5816 k = TYPE_VECTOR_SUBPARTS (cons_elem);
5817 }
5818 unsigned HOST_WIDE_INT elt, count, const_k;
5819 }
5820 (switch
5821 /* We keep an exact subset of the constructor elements. */
5822 (if (multiple_p (idx, k, &elt) && multiple_p (n, k, &count))
5823 (if (CONSTRUCTOR_NELTS (ctor) == 0)
5824 { build_constructor (type, NULL); }
5825 (if (count == 1)
5826 (if (elt < CONSTRUCTOR_NELTS (ctor))
5827 (view_convert { CONSTRUCTOR_ELT (ctor, elt)->value; })
5828 { build_zero_cst (type); })
5829 /* We don't want to emit new CTORs unless the old one goes away.
5830 ??? Eventually allow this if the CTOR ends up constant or
5831 uniform. */
5832 (if (single_use (@0))
5833 {
5834 vec<constructor_elt, va_gc> *vals;
5835 vec_alloc (vals, count);
5836 for (unsigned i = 0;
5837 i < count && elt + i < CONSTRUCTOR_NELTS (ctor); ++i)
5838 CONSTRUCTOR_APPEND_ELT (vals, NULL_TREE,
5839 CONSTRUCTOR_ELT (ctor, elt + i)->value);
5840 build_constructor (type, vals);
5841 }))))
5842 /* The bitfield references a single constructor element. */
5843 (if (k.is_constant (&const_k)
5844 && idx + n <= (idx / const_k + 1) * const_k)
5845 (switch
5846 (if (CONSTRUCTOR_NELTS (ctor) <= idx / const_k)
5847 { build_zero_cst (type); })
5848 (if (n == const_k)
5849 (view_convert { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }))
5850 (BIT_FIELD_REF { CONSTRUCTOR_ELT (ctor, idx / const_k)->value; }
5851 @1 { bitsize_int ((idx % const_k) * width); })))))))))
5852
5853 /* Simplify a bit extraction from a bit insertion for the cases with
5854 the inserted element fully covering the extraction or the insertion
5855 not touching the extraction. */
5856 (simplify
5857 (BIT_FIELD_REF (bit_insert @0 @1 @ipos) @rsize @rpos)
5858 (with
5859 {
5860 unsigned HOST_WIDE_INT isize;
5861 if (INTEGRAL_TYPE_P (TREE_TYPE (@1)))
5862 isize = TYPE_PRECISION (TREE_TYPE (@1));
5863 else
5864 isize = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (@1)));
5865 }
5866 (switch
5867 (if (wi::leu_p (wi::to_wide (@ipos), wi::to_wide (@rpos))
5868 && wi::leu_p (wi::to_wide (@rpos) + wi::to_wide (@rsize),
5869 wi::to_wide (@ipos) + isize))
5870 (BIT_FIELD_REF @1 @rsize { wide_int_to_tree (bitsizetype,
5871 wi::to_wide (@rpos)
5872 - wi::to_wide (@ipos)); }))
5873 (if (wi::geu_p (wi::to_wide (@ipos),
5874 wi::to_wide (@rpos) + wi::to_wide (@rsize))
5875 || wi::geu_p (wi::to_wide (@rpos),
5876 wi::to_wide (@ipos) + isize))
5877 (BIT_FIELD_REF @0 @rsize @rpos)))))
5878
5879 (if (canonicalize_math_after_vectorization_p ())
5880 (for fmas (FMA)
5881 (simplify
5882 (fmas:c (negate @0) @1 @2)
5883 (IFN_FNMA @0 @1 @2))
5884 (simplify
5885 (fmas @0 @1 (negate @2))
5886 (IFN_FMS @0 @1 @2))
5887 (simplify
5888 (fmas:c (negate @0) @1 (negate @2))
5889 (IFN_FNMS @0 @1 @2))
5890 (simplify
5891 (negate (fmas@3 @0 @1 @2))
5892 (if (single_use (@3))
5893 (IFN_FNMS @0 @1 @2))))
5894
5895 (simplify
5896 (IFN_FMS:c (negate @0) @1 @2)
5897 (IFN_FNMS @0 @1 @2))
5898 (simplify
5899 (IFN_FMS @0 @1 (negate @2))
5900 (IFN_FMA @0 @1 @2))
5901 (simplify
5902 (IFN_FMS:c (negate @0) @1 (negate @2))
5903 (IFN_FNMA @0 @1 @2))
5904 (simplify
5905 (negate (IFN_FMS@3 @0 @1 @2))
5906 (if (single_use (@3))
5907 (IFN_FNMA @0 @1 @2)))
5908
5909 (simplify
5910 (IFN_FNMA:c (negate @0) @1 @2)
5911 (IFN_FMA @0 @1 @2))
5912 (simplify
5913 (IFN_FNMA @0 @1 (negate @2))
5914 (IFN_FNMS @0 @1 @2))
5915 (simplify
5916 (IFN_FNMA:c (negate @0) @1 (negate @2))
5917 (IFN_FMS @0 @1 @2))
5918 (simplify
5919 (negate (IFN_FNMA@3 @0 @1 @2))
5920 (if (single_use (@3))
5921 (IFN_FMS @0 @1 @2)))
5922
5923 (simplify
5924 (IFN_FNMS:c (negate @0) @1 @2)
5925 (IFN_FMS @0 @1 @2))
5926 (simplify
5927 (IFN_FNMS @0 @1 (negate @2))
5928 (IFN_FNMA @0 @1 @2))
5929 (simplify
5930 (IFN_FNMS:c (negate @0) @1 (negate @2))
5931 (IFN_FMA @0 @1 @2))
5932 (simplify
5933 (negate (IFN_FNMS@3 @0 @1 @2))
5934 (if (single_use (@3))
5935 (IFN_FMA @0 @1 @2))))
5936
5937 /* POPCOUNT simplifications. */
5938 (for popcount (BUILT_IN_POPCOUNT BUILT_IN_POPCOUNTL BUILT_IN_POPCOUNTLL
5939 BUILT_IN_POPCOUNTIMAX)
5940 /* popcount(X&1) is nop_expr(X&1). */
5941 (simplify
5942 (popcount @0)
5943 (if (tree_nonzero_bits (@0) == 1)
5944 (convert @0)))
5945 /* popcount(X) + popcount(Y) is popcount(X|Y) when X&Y must be zero. */
5946 (simplify
5947 (plus (popcount:s @0) (popcount:s @1))
5948 (if (wi::bit_and (tree_nonzero_bits (@0), tree_nonzero_bits (@1)) == 0)
5949 (popcount (bit_ior @0 @1))))
5950 /* popcount(X) == 0 is X == 0, and related (in)equalities. */
5951 (for cmp (le eq ne gt)
5952 rep (eq eq ne ne)
5953 (simplify
5954 (cmp (popcount @0) integer_zerop)
5955 (rep @0 { build_zero_cst (TREE_TYPE (@0)); }))))
5956
5957 #if GIMPLE
5958 /* 64- and 32-bits branchless implementations of popcount are detected:
5959
5960 int popcount64c (uint64_t x)
5961 {
5962 x -= (x >> 1) & 0x5555555555555555ULL;
5963 x = (x & 0x3333333333333333ULL) + ((x >> 2) & 0x3333333333333333ULL);
5964 x = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
5965 return (x * 0x0101010101010101ULL) >> 56;
5966 }
5967
5968 int popcount32c (uint32_t x)
5969 {
5970 x -= (x >> 1) & 0x55555555;
5971 x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
5972 x = (x + (x >> 4)) & 0x0f0f0f0f;
5973 return (x * 0x01010101) >> 24;
5974 } */
5975 (simplify
5976 (rshift
5977 (mult
5978 (bit_and
5979 (plus:c
5980 (rshift @8 INTEGER_CST@5)
5981 (plus:c@8
5982 (bit_and @6 INTEGER_CST@7)
5983 (bit_and
5984 (rshift
5985 (minus@6 @0
5986 (bit_and (rshift @0 INTEGER_CST@4) INTEGER_CST@11))
5987 INTEGER_CST@10)
5988 INTEGER_CST@9)))
5989 INTEGER_CST@3)
5990 INTEGER_CST@2)
5991 INTEGER_CST@1)
5992 /* Check constants and optab. */
5993 (with { unsigned prec = TYPE_PRECISION (type);
5994 int shift = (64 - prec) & 63;
5995 unsigned HOST_WIDE_INT c1
5996 = HOST_WIDE_INT_UC (0x0101010101010101) >> shift;
5997 unsigned HOST_WIDE_INT c2
5998 = HOST_WIDE_INT_UC (0x0F0F0F0F0F0F0F0F) >> shift;
5999 unsigned HOST_WIDE_INT c3
6000 = HOST_WIDE_INT_UC (0x3333333333333333) >> shift;
6001 unsigned HOST_WIDE_INT c4
6002 = HOST_WIDE_INT_UC (0x5555555555555555) >> shift;
6003 }
6004 (if (prec >= 16
6005 && prec <= 64
6006 && pow2p_hwi (prec)
6007 && TYPE_UNSIGNED (type)
6008 && integer_onep (@4)
6009 && wi::to_widest (@10) == 2
6010 && wi::to_widest (@5) == 4
6011 && wi::to_widest (@1) == prec - 8
6012 && tree_to_uhwi (@2) == c1
6013 && tree_to_uhwi (@3) == c2
6014 && tree_to_uhwi (@9) == c3
6015 && tree_to_uhwi (@7) == c3
6016 && tree_to_uhwi (@11) == c4
6017 && direct_internal_fn_supported_p (IFN_POPCOUNT, type,
6018 OPTIMIZE_FOR_BOTH))
6019 (convert (IFN_POPCOUNT:type @0)))))
6020
6021 /* __builtin_ffs needs to deal on many targets with the possible zero
6022 argument. If we know the argument is always non-zero, __builtin_ctz + 1
6023 should lead to better code. */
6024 (simplify
6025 (FFS tree_expr_nonzero_p@0)
6026 (if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
6027 && direct_internal_fn_supported_p (IFN_CTZ, TREE_TYPE (@0),
6028 OPTIMIZE_FOR_SPEED))
6029 (plus (CTZ:type @0) { build_one_cst (type); })))
6030 #endif
6031
6032 (for ffs (BUILT_IN_FFS BUILT_IN_FFSL BUILT_IN_FFSLL
6033 BUILT_IN_FFSIMAX)
6034 /* __builtin_ffs (X) == 0 -> X == 0.
6035 __builtin_ffs (X) == 6 -> (X & 63) == 32. */
6036 (for cmp (eq ne)
6037 (simplify
6038 (cmp (ffs@2 @0) INTEGER_CST@1)
6039 (with { int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
6040 (switch
6041 (if (integer_zerop (@1))
6042 (cmp @0 { build_zero_cst (TREE_TYPE (@0)); }))
6043 (if (tree_int_cst_sgn (@1) < 0 || wi::to_widest (@1) > prec)
6044 { constant_boolean_node (cmp == NE_EXPR ? true : false, type); })
6045 (if (single_use (@2))
6046 (cmp (bit_and @0 { wide_int_to_tree (TREE_TYPE (@0),
6047 wi::mask (tree_to_uhwi (@1),
6048 false, prec)); })
6049 { wide_int_to_tree (TREE_TYPE (@0),
6050 wi::shifted_mask (tree_to_uhwi (@1) - 1, 1,
6051 false, prec)); }))))))
6052
6053 /* __builtin_ffs (X) > 6 -> X != 0 && (X & 63) == 0. */
6054 (for cmp (gt le)
6055 cmp2 (ne eq)
6056 cmp3 (eq ne)
6057 bit_op (bit_and bit_ior)
6058 (simplify
6059 (cmp (ffs@2 @0) INTEGER_CST@1)
6060 (with { int prec = TYPE_PRECISION (TREE_TYPE (@0)); }
6061 (switch
6062 (if (integer_zerop (@1))
6063 (cmp2 @0 { build_zero_cst (TREE_TYPE (@0)); }))
6064 (if (tree_int_cst_sgn (@1) < 0)
6065 { constant_boolean_node (cmp == GT_EXPR ? true : false, type); })
6066 (if (wi::to_widest (@1) >= prec)
6067 { constant_boolean_node (cmp == GT_EXPR ? false : true, type); })
6068 (if (wi::to_widest (@1) == prec - 1)
6069 (cmp3 @0 { wide_int_to_tree (TREE_TYPE (@0),
6070 wi::shifted_mask (prec - 1, 1,
6071 false, prec)); }))
6072 (if (single_use (@2))
6073 (bit_op (cmp2 @0 { build_zero_cst (TREE_TYPE (@0)); })
6074 (cmp3 (bit_and @0
6075 { wide_int_to_tree (TREE_TYPE (@0),
6076 wi::mask (tree_to_uhwi (@1),
6077 false, prec)); })
6078 { build_zero_cst (TREE_TYPE (@0)); }))))))))
6079
6080 /* Simplify:
6081
6082 a = a1 op a2
6083 r = c ? a : b;
6084
6085 to:
6086
6087 r = c ? a1 op a2 : b;
6088
6089 if the target can do it in one go. This makes the operation conditional
6090 on c, so could drop potentially-trapping arithmetic, but that's a valid
6091 simplification if the result of the operation isn't needed.
6092
6093 Avoid speculatively generating a stand-alone vector comparison
6094 on targets that might not support them. Any target implementing
6095 conditional internal functions must support the same comparisons
6096 inside and outside a VEC_COND_EXPR. */
6097
6098 #if GIMPLE
6099 (for uncond_op (UNCOND_BINARY)
6100 cond_op (COND_BINARY)
6101 (simplify
6102 (vec_cond @0 (view_convert? (uncond_op@4 @1 @2)) @3)
6103 (with { tree op_type = TREE_TYPE (@4); }
6104 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
6105 && element_precision (type) == element_precision (op_type))
6106 (view_convert (cond_op @0 @1 @2 (view_convert:op_type @3))))))
6107 (simplify
6108 (vec_cond @0 @1 (view_convert? (uncond_op@4 @2 @3)))
6109 (with { tree op_type = TREE_TYPE (@4); }
6110 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
6111 && element_precision (type) == element_precision (op_type))
6112 (view_convert (cond_op (bit_not @0) @2 @3 (view_convert:op_type @1)))))))
6113
6114 /* Same for ternary operations. */
6115 (for uncond_op (UNCOND_TERNARY)
6116 cond_op (COND_TERNARY)
6117 (simplify
6118 (vec_cond @0 (view_convert? (uncond_op@5 @1 @2 @3)) @4)
6119 (with { tree op_type = TREE_TYPE (@5); }
6120 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
6121 && element_precision (type) == element_precision (op_type))
6122 (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @4))))))
6123 (simplify
6124 (vec_cond @0 @1 (view_convert? (uncond_op@5 @2 @3 @4)))
6125 (with { tree op_type = TREE_TYPE (@5); }
6126 (if (vectorized_internal_fn_supported_p (as_internal_fn (cond_op), op_type)
6127 && element_precision (type) == element_precision (op_type))
6128 (view_convert (cond_op (bit_not @0) @2 @3 @4
6129 (view_convert:op_type @1)))))))
6130 #endif
6131
6132 /* Detect cases in which a VEC_COND_EXPR effectively replaces the
6133 "else" value of an IFN_COND_*. */
6134 (for cond_op (COND_BINARY)
6135 (simplify
6136 (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3)) @4)
6137 (with { tree op_type = TREE_TYPE (@3); }
6138 (if (element_precision (type) == element_precision (op_type))
6139 (view_convert (cond_op @0 @1 @2 (view_convert:op_type @4))))))
6140 (simplify
6141 (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5)))
6142 (with { tree op_type = TREE_TYPE (@5); }
6143 (if (inverse_conditions_p (@0, @2)
6144 && element_precision (type) == element_precision (op_type))
6145 (view_convert (cond_op @2 @3 @4 (view_convert:op_type @1)))))))
6146
6147 /* Same for ternary operations. */
6148 (for cond_op (COND_TERNARY)
6149 (simplify
6150 (vec_cond @0 (view_convert? (cond_op @0 @1 @2 @3 @4)) @5)
6151 (with { tree op_type = TREE_TYPE (@4); }
6152 (if (element_precision (type) == element_precision (op_type))
6153 (view_convert (cond_op @0 @1 @2 @3 (view_convert:op_type @5))))))
6154 (simplify
6155 (vec_cond @0 @1 (view_convert? (cond_op @2 @3 @4 @5 @6)))
6156 (with { tree op_type = TREE_TYPE (@6); }
6157 (if (inverse_conditions_p (@0, @2)
6158 && element_precision (type) == element_precision (op_type))
6159 (view_convert (cond_op @2 @3 @4 @5 (view_convert:op_type @1)))))))
6160
6161 /* For pointers @0 and @2 and nonnegative constant offset @1, look for
6162 expressions like:
6163
6164 A: (@0 + @1 < @2) | (@2 + @1 < @0)
6165 B: (@0 + @1 <= @2) | (@2 + @1 <= @0)
6166
6167 If pointers are known not to wrap, B checks whether @1 bytes starting
6168 at @0 and @2 do not overlap, while A tests the same thing for @1 + 1
6169 bytes. A is more efficiently tested as:
6170
6171 A: (sizetype) (@0 + @1 - @2) > @1 * 2
6172
6173 The equivalent expression for B is given by replacing @1 with @1 - 1:
6174
6175 B: (sizetype) (@0 + (@1 - 1) - @2) > (@1 - 1) * 2
6176
6177 @0 and @2 can be swapped in both expressions without changing the result.
6178
6179 The folds rely on sizetype's being unsigned (which is always true)
6180 and on its being the same width as the pointer (which we have to check).
6181
6182 The fold replaces two pointer_plus expressions, two comparisons and
6183 an IOR with a pointer_plus, a pointer_diff, and a comparison, so in
6184 the best case it's a saving of two operations. The A fold retains one
6185 of the original pointer_pluses, so is a win even if both pointer_pluses
6186 are used elsewhere. The B fold is a wash if both pointer_pluses are
6187 used elsewhere, since all we end up doing is replacing a comparison with
6188 a pointer_plus. We do still apply the fold under those circumstances
6189 though, in case applying it to other conditions eventually makes one of the
6190 pointer_pluses dead. */
6191 (for ior (truth_orif truth_or bit_ior)
6192 (for cmp (le lt)
6193 (simplify
6194 (ior (cmp:cs (pointer_plus@3 @0 INTEGER_CST@1) @2)
6195 (cmp:cs (pointer_plus@4 @2 @1) @0))
6196 (if (TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0))
6197 && TYPE_OVERFLOW_WRAPS (sizetype)
6198 && TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (sizetype))
6199 /* Calculate the rhs constant. */
6200 (with { offset_int off = wi::to_offset (@1) - (cmp == LE_EXPR ? 1 : 0);
6201 offset_int rhs = off * 2; }
6202 /* Always fails for negative values. */
6203 (if (wi::min_precision (rhs, UNSIGNED) <= TYPE_PRECISION (sizetype))
6204 /* Since the order of @0 and @2 doesn't matter, let tree_swap_operands_p
6205 pick a canonical order. This increases the chances of using the
6206 same pointer_plus in multiple checks. */
6207 (with { bool swap_p = tree_swap_operands_p (@0, @2);
6208 tree rhs_tree = wide_int_to_tree (sizetype, rhs); }
6209 (if (cmp == LT_EXPR)
6210 (gt (convert:sizetype
6211 (pointer_diff:ssizetype { swap_p ? @4 : @3; }
6212 { swap_p ? @0 : @2; }))
6213 { rhs_tree; })
6214 (gt (convert:sizetype
6215 (pointer_diff:ssizetype
6216 (pointer_plus { swap_p ? @2 : @0; }
6217 { wide_int_to_tree (sizetype, off); })
6218 { swap_p ? @0 : @2; }))
6219 { rhs_tree; })))))))))
6220
6221 /* Fold REDUC (@0 & @1) -> @0[I] & @1[I] if element I is the only nonzero
6222 element of @1. */
6223 (for reduc (IFN_REDUC_PLUS IFN_REDUC_IOR IFN_REDUC_XOR)
6224 (simplify (reduc (view_convert? (bit_and @0 VECTOR_CST@1)))
6225 (with { int i = single_nonzero_element (@1); }
6226 (if (i >= 0)
6227 (with { tree elt = vector_cst_elt (@1, i);
6228 tree elt_type = TREE_TYPE (elt);
6229 unsigned int elt_bits = tree_to_uhwi (TYPE_SIZE (elt_type));
6230 tree size = bitsize_int (elt_bits);
6231 tree pos = bitsize_int (elt_bits * i); }
6232 (view_convert
6233 (bit_and:elt_type
6234 (BIT_FIELD_REF:elt_type @0 { size; } { pos; })
6235 { elt; })))))))
6236
6237 (simplify
6238 (vec_perm @0 @1 VECTOR_CST@2)
6239 (with
6240 {
6241 tree op0 = @0, op1 = @1, op2 = @2;
6242
6243 /* Build a vector of integers from the tree mask. */
6244 vec_perm_builder builder;
6245 if (!tree_to_vec_perm_builder (&builder, op2))
6246 return NULL_TREE;
6247
6248 /* Create a vec_perm_indices for the integer vector. */
6249 poly_uint64 nelts = TYPE_VECTOR_SUBPARTS (type);
6250 bool single_arg = (op0 == op1);
6251 vec_perm_indices sel (builder, single_arg ? 1 : 2, nelts);
6252 }
6253 (if (sel.series_p (0, 1, 0, 1))
6254 { op0; }
6255 (if (sel.series_p (0, 1, nelts, 1))
6256 { op1; }
6257 (with
6258 {
6259 if (!single_arg)
6260 {
6261 if (sel.all_from_input_p (0))
6262 op1 = op0;
6263 else if (sel.all_from_input_p (1))
6264 {
6265 op0 = op1;
6266 sel.rotate_inputs (1);
6267 }
6268 else if (known_ge (poly_uint64 (sel[0]), nelts))
6269 {
6270 std::swap (op0, op1);
6271 sel.rotate_inputs (1);
6272 }
6273 }
6274 gassign *def;
6275 tree cop0 = op0, cop1 = op1;
6276 if (TREE_CODE (op0) == SSA_NAME
6277 && (def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (op0)))
6278 && gimple_assign_rhs_code (def) == CONSTRUCTOR)
6279 cop0 = gimple_assign_rhs1 (def);
6280 if (TREE_CODE (op1) == SSA_NAME
6281 && (def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (op1)))
6282 && gimple_assign_rhs_code (def) == CONSTRUCTOR)
6283 cop1 = gimple_assign_rhs1 (def);
6284
6285 tree t;
6286 }
6287 (if ((TREE_CODE (cop0) == VECTOR_CST
6288 || TREE_CODE (cop0) == CONSTRUCTOR)
6289 && (TREE_CODE (cop1) == VECTOR_CST
6290 || TREE_CODE (cop1) == CONSTRUCTOR)
6291 && (t = fold_vec_perm (type, cop0, cop1, sel)))
6292 { t; }
6293 (with
6294 {
6295 bool changed = (op0 == op1 && !single_arg);
6296 tree ins = NULL_TREE;
6297 unsigned at = 0;
6298
6299 /* See if the permutation is performing a single element
6300 insert from a CONSTRUCTOR or constant and use a BIT_INSERT_EXPR
6301 in that case. But only if the vector mode is supported,
6302 otherwise this is invalid GIMPLE. */
6303 if (TYPE_MODE (type) != BLKmode
6304 && (TREE_CODE (cop0) == VECTOR_CST
6305 || TREE_CODE (cop0) == CONSTRUCTOR
6306 || TREE_CODE (cop1) == VECTOR_CST
6307 || TREE_CODE (cop1) == CONSTRUCTOR))
6308 {
6309 bool insert_first_p = sel.series_p (1, 1, nelts + 1, 1);
6310 if (insert_first_p)
6311 {
6312 /* After canonicalizing the first elt to come from the
6313 first vector we only can insert the first elt from
6314 the first vector. */
6315 at = 0;
6316 if ((ins = fold_read_from_vector (cop0, sel[0])))
6317 op0 = op1;
6318 }
6319 /* The above can fail for two-element vectors which always
6320 appear to insert the first element, so try inserting
6321 into the second lane as well. For more than two
6322 elements that's wasted time. */
6323 if (!insert_first_p || (!ins && maybe_eq (nelts, 2u)))
6324 {
6325 unsigned int encoded_nelts = sel.encoding ().encoded_nelts ();
6326 for (at = 0; at < encoded_nelts; ++at)
6327 if (maybe_ne (sel[at], at))
6328 break;
6329 if (at < encoded_nelts
6330 && (known_eq (at + 1, nelts)
6331 || sel.series_p (at + 1, 1, at + 1, 1)))
6332 {
6333 if (known_lt (poly_uint64 (sel[at]), nelts))
6334 ins = fold_read_from_vector (cop0, sel[at]);
6335 else
6336 ins = fold_read_from_vector (cop1, sel[at] - nelts);
6337 }
6338 }
6339 }
6340
6341 /* Generate a canonical form of the selector. */
6342 if (!ins && sel.encoding () != builder)
6343 {
6344 /* Some targets are deficient and fail to expand a single
6345 argument permutation while still allowing an equivalent
6346 2-argument version. */
6347 tree oldop2 = op2;
6348 if (sel.ninputs () == 2
6349 || can_vec_perm_const_p (TYPE_MODE (type), sel, false))
6350 op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel);
6351 else
6352 {
6353 vec_perm_indices sel2 (builder, 2, nelts);
6354 if (can_vec_perm_const_p (TYPE_MODE (type), sel2, false))
6355 op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel2);
6356 else
6357 /* Not directly supported with either encoding,
6358 so use the preferred form. */
6359 op2 = vec_perm_indices_to_tree (TREE_TYPE (op2), sel);
6360 }
6361 if (!operand_equal_p (op2, oldop2, 0))
6362 changed = true;
6363 }
6364 }
6365 (if (ins)
6366 (bit_insert { op0; } { ins; }
6367 { bitsize_int (at * vector_element_bits (type)); })
6368 (if (changed)
6369 (vec_perm { op0; } { op1; } { op2; }))))))))))
6370
6371 /* VEC_PERM_EXPR (v, v, mask) -> v where v contains same element. */
6372
6373 (match vec_same_elem_p
6374 @0
6375 (if (uniform_vector_p (@0))))
6376
6377 (match vec_same_elem_p
6378 (vec_duplicate @0))
6379
6380 (simplify
6381 (vec_perm vec_same_elem_p@0 @0 @1)
6382 @0)
6383
6384 /* Match count trailing zeroes for simplify_count_trailing_zeroes in fwprop.
6385 The canonical form is array[((x & -x) * C) >> SHIFT] where C is a magic
6386 constant which when multiplied by a power of 2 contains a unique value
6387 in the top 5 or 6 bits. This is then indexed into a table which maps it
6388 to the number of trailing zeroes. */
6389 (match (ctz_table_index @1 @2 @3)
6390 (rshift (mult (bit_and:c (negate @1) @1) INTEGER_CST@2) INTEGER_CST@3))