/* Match-and-simplify patterns for shared GENERIC and GIMPLE folding.
   This file is consumed by genmatch which produces gimple-match.c
   and generic-match.c from it.

   Copyright (C) 2014-2015 Free Software Foundation, Inc.
   Contributed by Richard Biener <rguenther@suse.de>
   and Prathamesh Kulkarni <bilbotheelffriend@gmail.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


/* Generic tree predicates we inherit.  */
(define_predicates
   integer_onep integer_zerop integer_all_onesp integer_minus_onep
   integer_each_onep integer_truep
   real_zerop real_onep real_minus_onep
   CONSTANT_CLASS_P
   tree_expr_nonnegative_p)

/* Operator lists.  */
(define_operator_list tcc_comparison
  lt le eq ne ge gt unordered ordered unlt unle ungt unge uneq ltgt)
(define_operator_list inverted_tcc_comparison
  ge gt ne eq lt le ordered unordered ge gt le lt ltgt uneq)
(define_operator_list inverted_tcc_comparison_with_nans
  unge ungt ne eq unlt unle ordered unordered ge gt le lt ltgt uneq)


/* Simplifications of operations with one constant operand and
   simplifications to constants or single values.  */

(for op (plus pointer_plus minus bit_ior bit_xor)
 (simplify
  (op @0 integer_zerop)
  (non_lvalue @0)))

/* 0 +p index -> (type)index */
(simplify
 (pointer_plus integer_zerop @1)
 (non_lvalue (convert @1)))

/* See if ARG1 is zero and X + ARG1 reduces to X.
   Likewise if the operands are reversed.  */
(simplify
 (plus:c @0 real_zerop@1)
 (if (fold_real_zero_addition_p (type, @1, 0))
  (non_lvalue @0)))

/* See if ARG1 is zero and X - ARG1 reduces to X.  */
(simplify
 (minus @0 real_zerop@1)
 (if (fold_real_zero_addition_p (type, @1, 1))
  (non_lvalue @0)))

/* Simplify x - x.
   This is unsafe for certain floats even in non-IEEE formats.
   In IEEE, it is unsafe because it gives the wrong result for NaNs.
   Also note that operand_equal_p is always false if an operand
   is volatile.  */
(simplify
 (minus @0 @0)
 (if (!FLOAT_TYPE_P (type) || !HONOR_NANS (type))
  { build_zero_cst (type); }))

(simplify
 (mult @0 integer_zerop@1)
 @1)

/* Maybe fold x * 0 to 0.  The expressions aren't the same
   when x is NaN, since x * 0 is also NaN.  Nor are they the
   same in modes with signed zeros, since multiplying a
   negative value by 0 gives -0, not +0.  */
(simplify
 (mult @0 real_zerop@1)
 (if (!HONOR_NANS (type) && !HONOR_SIGNED_ZEROS (element_mode (type)))
  @1))

/* In IEEE floating point, x*1 is not equivalent to x for snans.
   Likewise for complex arithmetic with signed zeros.  */
(simplify
 (mult @0 real_onep)
 (if (!HONOR_SNANS (element_mode (type))
      && (!HONOR_SIGNED_ZEROS (element_mode (type))
          || !COMPLEX_FLOAT_TYPE_P (type)))
  (non_lvalue @0)))

/* Transform x * -1.0 into -x.  */
(simplify
 (mult @0 real_minus_onep)
 (if (!HONOR_SNANS (element_mode (type))
      && (!HONOR_SIGNED_ZEROS (element_mode (type))
          || !COMPLEX_FLOAT_TYPE_P (type)))
  (negate @0)))

/* Make sure to preserve divisions by zero.  This is the reason why
   we don't simplify x / x to 1 or 0 / x to 0.  */
(for op (mult trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (op @0 integer_onep)
  (non_lvalue @0)))

/* X / -1 is -X.  */
(for div (trunc_div ceil_div floor_div round_div exact_div)
 (simplify
  (div @0 integer_minus_onep@1)
  (if (!TYPE_UNSIGNED (type))
   (negate @0))))

/* For unsigned integral types, FLOOR_DIV_EXPR is the same as
   TRUNC_DIV_EXPR.  Rewrite into the latter in this case.  */
(simplify
 (floor_div @0 @1)
 (if ((INTEGRAL_TYPE_P (type) || VECTOR_INTEGER_TYPE_P (type))
      && TYPE_UNSIGNED (type))
  (trunc_div @0 @1)))

/* Combine two successive divisions.  Note that combining ceil_div
   and floor_div is trickier and combining round_div even more so.  */
(for div (trunc_div exact_div)
 (simplify
  (div (div @0 INTEGER_CST@1) INTEGER_CST@2)
  (with {
    bool overflow_p;
    wide_int mul = wi::mul (@1, @2, TYPE_SIGN (type), &overflow_p);
   }
   (if (!overflow_p)
    (div @0 { wide_int_to_tree (type, mul); }))
   (if (overflow_p
        && (TYPE_UNSIGNED (type)
            || mul != wi::min_value (TYPE_PRECISION (type), SIGNED)))
    { build_zero_cst (type); }))))
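
/* Illustrative sketch only (hypothetical C, not part of the pattern
   machinery), assuming a 32-bit unsigned int:

     unsigned f (unsigned x) { return (x / 6) / 7; }         // folds to x / 42
     unsigned g (unsigned x) { return (x / 65536) / 65537; } // folds to 0

   In g the multiplied divisors overflow, so x / 65536 is already
   smaller than 65537 and the unsigned quotient is known to be zero.  */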

/* Optimize A / A to 1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv @0 @0)
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (element_mode (type)))
  { build_one_cst (type); }))

/* Optimize -A / A to -1.0 if we don't care about
   NaNs or Infinities.  */
(simplify
 (rdiv:c @0 (negate @0))
 (if (FLOAT_TYPE_P (type)
      && ! HONOR_NANS (type)
      && ! HONOR_INFINITIES (element_mode (type)))
  { build_minus_one_cst (type); }))

/* In IEEE floating point, x/1 is not equivalent to x for snans.  */
(simplify
 (rdiv @0 real_onep)
 (if (!HONOR_SNANS (element_mode (type)))
  (non_lvalue @0)))

/* In IEEE floating point, x/-1 is not equivalent to -x for snans.  */
(simplify
 (rdiv @0 real_minus_onep)
 (if (!HONOR_SNANS (element_mode (type)))
  (negate @0)))

/* If ARG1 is a constant, we can convert this to a multiply by the
   reciprocal.  This does not have the same rounding properties,
   so do this only when -freciprocal-math is in effect.  We can actually
   always safely do it if ARG1 is a power of two, but it's hard to
   tell if it is or not in a portable manner.  */
(for cst (REAL_CST COMPLEX_CST VECTOR_CST)
 (simplify
  (rdiv @0 cst@1)
  (if (optimize)
   (if (flag_reciprocal_math
        && !real_zerop (@1))
    (with
     { tree tem = const_binop (RDIV_EXPR, type, build_one_cst (type), @1); }
     (if (tem)
      (mult @0 { tem; } ))))
   (if (cst != COMPLEX_CST)
    (with { tree inverse = exact_inverse (type, @1); }
     (if (inverse)
      (mult @0 { inverse; } )))))))
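
/* A hedged illustration (hypothetical C, assuming optimization is on):

     double f (double x) { return x / 10.0; }  // x * 0.1, but only under
                                               // -freciprocal-math
     double g (double x) { return x / 4.0; }   // x * 0.25 in any case, since
                                               // 4.0 has an exact reciprocal

   The first form changes rounding and therefore needs the flag; the
   second goes through exact_inverse and is always safe.  */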

/* The same applies to modulo operations; fold, however, is inconsistent
   here and simplifies 0 % x to 0 unconditionally, preserving only a
   literal 0 % 0.  */
(for mod (ceil_mod floor_mod round_mod trunc_mod)
 /* 0 % X is always zero.  */
 (simplify
  (mod integer_zerop@0 @1)
  /* But not for 0 % 0 so that we can get the proper warnings and errors.  */
  (if (!integer_zerop (@1))
   @0))
 /* X % 1 is always zero.  */
 (simplify
  (mod @0 integer_onep)
  { build_zero_cst (type); })
 /* X % -1 is zero.  */
 (simplify
  (mod @0 integer_minus_onep@1)
  (if (!TYPE_UNSIGNED (type))
   { build_zero_cst (type); })))

/* X % -C is the same as X % C.  */
(simplify
 (trunc_mod @0 INTEGER_CST@1)
 (if (TYPE_SIGN (type) == SIGNED
      && !TREE_OVERFLOW (@1)
      && wi::neg_p (@1)
      && !TYPE_OVERFLOW_TRAPS (type)
      /* Avoid this transformation if C is INT_MIN, i.e. C == -C.  */
      && !sign_bit_p (@1, @1))
  (trunc_mod @0 (negate @1))))
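
/* For example (hypothetical C, signed 32-bit int assumed):

     int f (int x) { return x % -16; }   // folds to x % 16

   The sign_bit_p guard matters because -INT_MIN is not representable,
   so x % INT_MIN is left alone.  */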

/* x | ~0 -> ~0 */
(simplify
 (bit_ior @0 integer_all_onesp@1)
 @1)

/* x & 0 -> 0 */
(simplify
 (bit_and @0 integer_zerop@1)
 @1)

/* x ^ x -> 0 */
(simplify
 (bit_xor @0 @0)
 { build_zero_cst (type); })

/* Canonicalize X ^ ~0 to ~X.  */
(simplify
 (bit_xor @0 integer_all_onesp@1)
 (bit_not @0))

/* x & ~0 -> x */
(simplify
 (bit_and @0 integer_all_onesp)
 (non_lvalue @0))

/* x & x -> x,  x | x -> x */
(for bitop (bit_and bit_ior)
 (simplify
  (bitop @0 @0)
  (non_lvalue @0)))

(simplify
 (abs (negate @0))
 (abs @0))
(simplify
 (abs tree_expr_nonnegative_p@0)
 @0)


/* Try to fold (type) X op CST -> (type) (X op ((type-x) CST))
   when profitable.
   For bitwise binary operations apply operand conversions to the
   binary operation result instead of to the operands.  This allows
   us to combine successive conversions and bitwise binary operations.
   We combine the above two cases by using a conditional convert.  */
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bitop (convert @0) (convert? @1))
  (if (((TREE_CODE (@1) == INTEGER_CST
         && INTEGRAL_TYPE_P (TREE_TYPE (@0))
         && int_fits_type_p (@1, TREE_TYPE (@0)))
        || (GIMPLE && types_compatible_p (TREE_TYPE (@0), TREE_TYPE (@1)))
        || (GENERIC && TREE_TYPE (@0) == TREE_TYPE (@1)))
       /* ??? This transform conflicts with fold-const.c doing
          Convert (T)(x & c) into (T)x & (T)c, if c is an integer
          constant (if x has signed type, the sign bit cannot be set
          in c).  This folds extension into the BIT_AND_EXPR.
          Restrict it to GIMPLE to avoid endless recursions.  */
       && (bitop != BIT_AND_EXPR || GIMPLE)
       && (/* That's a good idea if the conversion widens the operand, thus
              after hoisting the conversion the operation will be narrower.  */
           TYPE_PRECISION (TREE_TYPE (@0)) < TYPE_PRECISION (type)
           /* It's also a good idea if the conversion is to a non-integer
              mode.  */
           || GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
           /* Or if the precision of TO is not the same as the precision
              of its mode.  */
           || TYPE_PRECISION (type) != GET_MODE_PRECISION (TYPE_MODE (type))))
   (convert (bitop @0 (convert @1))))))
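
/* A sketch of the effect on GIMPLE (hypothetical C, assuming 8-bit
   unsigned char and 32-bit int):

     int f (unsigned char a, unsigned char b)
     { return (int) a & (int) b; }   // becomes (int) (a & b)

   The AND is then done in the narrow type and the widening conversion
   is applied once, to the result.  */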

/* Simplify (A & B) OP0 (C & B) to (A OP0 C) & B.  */
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bitop (bit_and:c @0 @1) (bit_and @2 @1))
  (bit_and (bitop @0 @2) @1)))

/* (x | CST1) & CST2 -> (x & CST2) | (CST1 & CST2) */
(simplify
 (bit_and (bit_ior @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
 (bit_ior (bit_and @0 @2) (bit_and @1 @2)))

/* Combine successive equal operations with constants.  */
(for bitop (bit_and bit_ior bit_xor)
 (simplify
  (bitop (bitop @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
  (bitop @0 (bitop @1 @2))))

/* Try simple folding for X op !X and X op X with the help
   of the truth_valued_p and logical_inverted_value predicates.  */
(match truth_valued_p
 @0
 (if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1)))
(for op (tcc_comparison truth_and truth_andif truth_or truth_orif truth_xor)
 (match truth_valued_p
  (op @0 @1)))
(match truth_valued_p
 (truth_not @0))

(match (logical_inverted_value @0)
 (bit_not truth_valued_p@0))
(match (logical_inverted_value @0)
 (eq @0 integer_zerop))
(match (logical_inverted_value @0)
 (ne truth_valued_p@0 integer_truep))
(match (logical_inverted_value @0)
 (bit_xor truth_valued_p@0 integer_truep))

/* X & !X -> 0.  */
(simplify
 (bit_and:c @0 (logical_inverted_value @0))
 { build_zero_cst (type); })
/* X | !X and X ^ !X -> 1, if X is truth-valued.  */
(for op (bit_ior bit_xor)
 (simplify
  (op:c truth_valued_p@0 (logical_inverted_value @0))
  { constant_boolean_node (true, type); }))

(for bitop (bit_and bit_ior)
     rbitop (bit_ior bit_and)
 /* (x | y) & x -> x */
 /* (x & y) | x -> x */
 (simplify
  (bitop:c (rbitop:c @0 @1) @0)
  @0)
 /* (~x | y) & x -> x & y */
 /* (~x & y) | x -> x | y */
 (simplify
  (bitop:c (rbitop:c (bit_not @0) @1) @0)
  (bitop @0 @1)))

/* If arg1 and arg2 are booleans (or any single bit type)
   then try to simplify:

   (~X & Y) -> X < Y
   (X & ~Y) -> Y < X
   (~X | Y) -> X <= Y
   (X | ~Y) -> Y <= X

   But only do this if our result feeds into a comparison as
   this transformation is not always a win, particularly on
   targets with and-not instructions.
   -> simplify_bitwise_binary_boolean */
(simplify
 (ne (bit_and:c (bit_not @0) @1) integer_zerop)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
      && TYPE_PRECISION (TREE_TYPE (@1)) == 1)
  (lt @0 @1)))
(simplify
 (ne (bit_ior:c (bit_not @0) @1) integer_zerop)
 (if (INTEGRAL_TYPE_P (TREE_TYPE (@1))
      && TYPE_PRECISION (TREE_TYPE (@1)) == 1)
  (le @0 @1)))

/* ~~x -> x */
(simplify
 (bit_not (bit_not @0))
 @0)

/* (x & ~m) | (y & m) -> ((x ^ y) & m) ^ x */
(simplify
 (bit_ior:c (bit_and:c@3 @0 (bit_not @2)) (bit_and:c@4 @1 @2))
 (if ((TREE_CODE (@3) != SSA_NAME || has_single_use (@3))
      && (TREE_CODE (@4) != SSA_NAME || has_single_use (@4)))
  (bit_xor (bit_and (bit_xor @0 @1) @2) @0)))
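
/* A worked example of the merge identity (hypothetical C):

     unsigned merge (unsigned x, unsigned y, unsigned m)
     { return (x & ~m) | (y & m); }   // becomes ((x ^ y) & m) ^ x

   Where m is set the xor chain yields y's bits and elsewhere x's bits,
   so the rewrite needs three bitwise operations instead of four.  */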


/* Associate (p +p off1) +p off2 as (p +p (off1 + off2)).  */
(simplify
 (pointer_plus (pointer_plus@2 @0 @1) @3)
 (if (TREE_CODE (@2) != SSA_NAME || has_single_use (@2))
  (pointer_plus @0 (plus @1 @3))))

/* Pattern match
     tem1 = (long) ptr1;
     tem2 = (long) ptr2;
     tem3 = tem2 - tem1;
     tem4 = (unsigned long) tem3;
     tem5 = ptr1 + tem4;
   and produce
     tem5 = ptr2;  */
(simplify
 (pointer_plus @0 (convert?@2 (minus@3 (convert @1) (convert @0))))
 /* Conditionally look through a sign-changing conversion.  */
 (if (TYPE_PRECISION (TREE_TYPE (@2)) == TYPE_PRECISION (TREE_TYPE (@3))
      && ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@1)))
          || (GENERIC && type == TREE_TYPE (@1))))
  @1))

/* Pattern match
     tem = (sizetype) ptr;
     tem = tem & algn;
     tem = -tem;
     ... = ptr p+ tem;
   and produce the simpler and easier to analyze with respect to alignment
     ... = ptr & ~algn;  */
(simplify
 (pointer_plus @0 (negate (bit_and (convert @0) INTEGER_CST@1)))
 (with { tree algn = wide_int_to_tree (TREE_TYPE (@0), wi::bit_not (@1)); }
  (bit_and @0 { algn; })))
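
/* Illustration (hypothetical C, assuming sizetype matches the pointer
   width): aligning a pointer down to a 16-byte boundary,

     p = p + -((size_t) p & 15);

   is rewritten to the equivalent of

     p = (char *) ((size_t) p & ~(size_t) 15);

   which alignment analysis understands directly.  */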


/* We can't reassociate at all for saturating types.  */
(if (!TYPE_SATURATING (type))

 /* Contract negates.  */
 /* A + (-B) -> A - B */
 (simplify
  (plus:c (convert1? @0) (convert2? (negate @1)))
  /* Apply STRIP_NOPS on @0 and the negate.  */
  (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
       && tree_nop_conversion_p (type, TREE_TYPE (@1))
       && !TYPE_OVERFLOW_SANITIZED (type))
   (minus (convert @0) (convert @1))))
 /* A - (-B) -> A + B */
 (simplify
  (minus (convert1? @0) (convert2? (negate @1)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@0))
       && tree_nop_conversion_p (type, TREE_TYPE (@1))
       && !TYPE_OVERFLOW_SANITIZED (type))
   (plus (convert @0) (convert @1))))
 /* -(-A) -> A */
 (simplify
  (negate (convert? (negate @1)))
  (if (tree_nop_conversion_p (type, TREE_TYPE (@1))
       && !TYPE_OVERFLOW_SANITIZED (type))
   (convert @1)))

 /* We can't reassociate floating-point or fixed-point plus or minus
    because of saturation to +-Inf.  */
 (if (!FLOAT_TYPE_P (type) && !FIXED_POINT_TYPE_P (type))

  /* Match patterns that allow contracting a plus-minus pair
     irrespective of overflow issues.  */
  /* (A +- B) - A -> +- B */
  /* (A +- B) -+ B -> A */
  /* A - (A +- B) -> -+ B */
  /* A +- (B -+ A) -> +- B */
  (simplify
   (minus (plus:c @0 @1) @0)
   @1)
  (simplify
   (minus (minus @0 @1) @0)
   (negate @1))
  (simplify
   (plus:c (minus @0 @1) @1)
   @0)
  (simplify
   (minus @0 (plus:c @0 @1))
   (negate @1))
  (simplify
   (minus @0 (minus @0 @1))
   @1)

  /* (A +- CST) +- CST -> A + CST */
  (for outer_op (plus minus)
   (for inner_op (plus minus)
    (simplify
     (outer_op (inner_op @0 CONSTANT_CLASS_P@1) CONSTANT_CLASS_P@2)
     /* If the constant operation overflows we cannot do the transform
        as we would introduce undefined overflow, for example
        with (a - 1) + INT_MIN.  */
     (with { tree cst = fold_binary (outer_op == inner_op
                                     ? PLUS_EXPR : MINUS_EXPR, type, @1, @2); }
      (if (cst && !TREE_OVERFLOW (cst))
       (inner_op @0 { cst; } ))))))
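
  /* For instance (hypothetical C, signed 32-bit int):

       int f (int x) { return (x - 1) + 2; }  // folds to x - (-1), i.e. x + 1

     but (x - 1) + INT_MIN is left alone: folding the constants would
     overflow, which the TREE_OVERFLOW check above detects.  */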

  /* (CST - A) +- CST -> CST - A */
  (for outer_op (plus minus)
   (simplify
    (outer_op (minus CONSTANT_CLASS_P@1 @0) CONSTANT_CLASS_P@2)
    (with { tree cst = fold_binary (outer_op, type, @1, @2); }
     (if (cst && !TREE_OVERFLOW (cst))
      (minus { cst; } @0)))))

  /* ~A + A -> -1 */
  (simplify
   (plus:c (bit_not @0) @0)
   (if (!TYPE_OVERFLOW_TRAPS (type))
    { build_all_ones_cst (type); }))

  /* ~A + 1 -> -A */
  (simplify
   (plus (convert? (bit_not @0)) integer_each_onep)
   (if (tree_nop_conversion_p (type, TREE_TYPE (@0)))
    (negate (convert @0))))

  /* -A - 1 -> ~A */
  (simplify
   (minus (convert? (negate @0)) integer_each_onep)
   (if (!TYPE_OVERFLOW_TRAPS (type)
        && tree_nop_conversion_p (type, TREE_TYPE (@0)))
    (bit_not (convert @0))))

  /* -1 - A -> ~A */
  (simplify
   (minus integer_all_onesp @0)
   (if (TREE_CODE (type) != COMPLEX_TYPE)
    (bit_not @0)))

  /* (T)(P + A) - (T)P -> (T) A */
  (for add (plus pointer_plus)
   (simplify
    (minus (convert (add @0 @1))
     (convert @0))
    (if (element_precision (type) <= element_precision (TREE_TYPE (@1))
         /* For integer types, if A has a smaller type
            than T the result depends on the possible
            overflow in P + A.
            E.g. T=size_t, A=(unsigned)4294967295, P>0.
            However, if an overflow in P + A would cause
            undefined behavior, we can assume that there
            is no overflow.  */
         || (INTEGRAL_TYPE_P (TREE_TYPE (@0))
             && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (@0)))
         /* For pointer types, if the conversion of A to the
            final type requires a sign- or zero-extension,
            then we have to punt - it is not defined which
            one is correct.  */
         || (POINTER_TYPE_P (TREE_TYPE (@0))
             && TREE_CODE (@1) == INTEGER_CST
             && tree_int_cst_sign_bit (@1) == 0))
     (convert @1)))))
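
/* Example of the pointer case (hypothetical C): with the constant's
   sign bit clear,

     size_t f (char *p) { return (size_t) (p + 4) - (size_t) p; }

   folds to (size_t) 4.  For the integer variant the transform instead
   relies on overflow in P + A being undefined.  */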


/* Simplifications of MIN_EXPR and MAX_EXPR.  */

(for minmax (min max)
 (simplify
  (minmax @0 @0)
  @0))
(simplify
 (min @0 @1)
 (if (INTEGRAL_TYPE_P (type)
      && TYPE_MIN_VALUE (type)
      && operand_equal_p (@1, TYPE_MIN_VALUE (type), OEP_ONLY_CONST))
  @1))
(simplify
 (max @0 @1)
 (if (INTEGRAL_TYPE_P (type)
      && TYPE_MAX_VALUE (type)
      && operand_equal_p (@1, TYPE_MAX_VALUE (type), OEP_ONLY_CONST))
  @1))


/* Simplifications of shift and rotates.  */

(for rotate (lrotate rrotate)
 (simplify
  (rotate integer_all_onesp@0 @1)
  @0))

/* Optimize -1 >> x for arithmetic right shifts.  */
(simplify
 (rshift integer_all_onesp@0 @1)
 (if (!TYPE_UNSIGNED (type)
      && tree_expr_nonnegative_p (@1))
  @0))

(for shiftrotate (lrotate rrotate lshift rshift)
 (simplify
  (shiftrotate @0 integer_zerop)
  (non_lvalue @0))
 (simplify
  (shiftrotate integer_zerop@0 @1)
  @0)
 /* Prefer vector1 << scalar to vector1 << vector2
    if vector2 is uniform.  */
 (for vec (VECTOR_CST CONSTRUCTOR)
  (simplify
   (shiftrotate @0 vec@1)
   (with { tree tem = uniform_vector_p (@1); }
    (if (tem)
     (shiftrotate @0 { tem; }))))))

/* Rewrite an LROTATE_EXPR by a constant into an
   RROTATE_EXPR by a new constant.  */
(simplify
 (lrotate @0 INTEGER_CST@1)
 (rrotate @0 { fold_binary (MINUS_EXPR, TREE_TYPE (@1),
                            build_int_cst (TREE_TYPE (@1),
                                           element_precision (type)), @1); }))
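
/* For example (hypothetical C, 32-bit unsigned int): the rotate-left
   idiom

     unsigned rol8 (unsigned x) { return (x << 8) | (x >> 24); }

   once recognized as an LROTATE_EXPR by 8 is canonicalized to an
   RROTATE_EXPR by 32 - 8 = 24.  */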

/* ((1 << A) & 1) != 0 -> A == 0
   ((1 << A) & 1) == 0 -> A != 0 */
(for cmp (ne eq)
     icmp (eq ne)
 (simplify
  (cmp (bit_and (lshift integer_onep @0) integer_onep) integer_zerop)
  (icmp @0 { build_zero_cst (TREE_TYPE (@0)); })))
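
/* Illustration (hypothetical C): the mask keeps only bit zero, which
   survives the shift exactly when the shift count is zero:

     int f (int a) { return ((1 << a) & 1) != 0; }   // folds to a == 0
     int g (int a) { return ((1 << a) & 1) == 0; }   // folds to a != 0  */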

/* Simplifications of conversions.  */

/* Basic strip-useless-type-conversions / strip_nops.  */
(for cvt (convert view_convert float fix_trunc)
 (simplify
  (cvt @0)
  (if ((GIMPLE && useless_type_conversion_p (type, TREE_TYPE (@0)))
       || (GENERIC && type == TREE_TYPE (@0)))
   @0)))

/* Contract view-conversions.  */
(simplify
 (view_convert (view_convert @0))
 (view_convert @0))

/* For integral conversions with the same precision or pointer
   conversions use a NOP_EXPR instead.  */
(simplify
 (view_convert @0)
 (if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
      && (INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
      && TYPE_PRECISION (type) == TYPE_PRECISION (TREE_TYPE (@0)))
  (convert @0)))

/* Strip inner integral conversions that do not change precision or size.  */
(simplify
 (view_convert (convert@0 @1))
 (if ((INTEGRAL_TYPE_P (TREE_TYPE (@0)) || POINTER_TYPE_P (TREE_TYPE (@0)))
      && (INTEGRAL_TYPE_P (TREE_TYPE (@1)) || POINTER_TYPE_P (TREE_TYPE (@1)))
      && (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (TREE_TYPE (@1)))
      && (TYPE_SIZE (TREE_TYPE (@0)) == TYPE_SIZE (TREE_TYPE (@1))))
  (view_convert @1)))

/* Re-association barriers around constants and other re-association
   barriers can be removed.  */
(simplify
 (paren CONSTANT_CLASS_P@0)
 @0)
(simplify
 (paren (paren@1 @0))
 @1)

/* Handle cases of two conversions in a row.  */
(for ocvt (convert float fix_trunc)
 (for icvt (convert float)
  (simplify
   (ocvt (icvt@1 @0))
   (with
    {
      tree inside_type = TREE_TYPE (@0);
      tree inter_type = TREE_TYPE (@1);
      int inside_int = INTEGRAL_TYPE_P (inside_type);
      int inside_ptr = POINTER_TYPE_P (inside_type);
      int inside_float = FLOAT_TYPE_P (inside_type);
      int inside_vec = VECTOR_TYPE_P (inside_type);
      unsigned int inside_prec = TYPE_PRECISION (inside_type);
      int inside_unsignedp = TYPE_UNSIGNED (inside_type);
      int inter_int = INTEGRAL_TYPE_P (inter_type);
      int inter_ptr = POINTER_TYPE_P (inter_type);
      int inter_float = FLOAT_TYPE_P (inter_type);
      int inter_vec = VECTOR_TYPE_P (inter_type);
      unsigned int inter_prec = TYPE_PRECISION (inter_type);
      int inter_unsignedp = TYPE_UNSIGNED (inter_type);
      int final_int = INTEGRAL_TYPE_P (type);
      int final_ptr = POINTER_TYPE_P (type);
      int final_float = FLOAT_TYPE_P (type);
      int final_vec = VECTOR_TYPE_P (type);
      unsigned int final_prec = TYPE_PRECISION (type);
      int final_unsignedp = TYPE_UNSIGNED (type);
    }
   /* In addition to the cases of two conversions in a row
      handled below, if we are converting something to its own
      type via an object of identical or wider precision, neither
      conversion is needed.  */
   (if (((GIMPLE && useless_type_conversion_p (type, inside_type))
         || (GENERIC
             && TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (inside_type)))
        && (((inter_int || inter_ptr) && final_int)
            || (inter_float && final_float))
        && inter_prec >= final_prec)
    (ocvt @0))

   /* Likewise, if the intermediate and initial types are either both
      float or both integer, we don't need the middle conversion if the
      former is wider than the latter and doesn't change the signedness
      (for integers).  Avoid this if the final type is a pointer since
      then we sometimes need the middle conversion.  Likewise if the
      final type has a precision not equal to the size of its mode.  */
   (if (((inter_int && inside_int)
         || (inter_float && inside_float)
         || (inter_vec && inside_vec))
        && inter_prec >= inside_prec
        && (inter_float || inter_vec
            || inter_unsignedp == inside_unsignedp)
        && ! (final_prec != GET_MODE_PRECISION (element_mode (type))
              && element_mode (type) == element_mode (inter_type))
        && ! final_ptr
        && (! final_vec || inter_prec == inside_prec))
    (ocvt @0))

   /* If we have a sign-extension of a zero-extended value, we can
      replace that by a single zero-extension.  Likewise if the
      final conversion does not change precision we can drop the
      intermediate conversion.  */
   (if (inside_int && inter_int && final_int
        && ((inside_prec < inter_prec && inter_prec < final_prec
             && inside_unsignedp && !inter_unsignedp)
            || final_prec == inter_prec))
    (ocvt @0))

   /* Two conversions in a row are not needed unless:
        - some conversion is floating-point (overstrict for now), or
        - some conversion is a vector (overstrict for now), or
        - the intermediate type is narrower than both initial and
          final, or
        - the intermediate type and innermost type differ in signedness,
          and the outermost type is wider than the intermediate, or
        - the initial type is a pointer type and the precisions of the
          intermediate and final types differ, or
        - the final type is a pointer type and the precisions of the
          initial and intermediate types differ.  */
   (if (! inside_float && ! inter_float && ! final_float
        && ! inside_vec && ! inter_vec && ! final_vec
        && (inter_prec >= inside_prec || inter_prec >= final_prec)
        && ! (inside_int && inter_int
              && inter_unsignedp != inside_unsignedp
              && inter_prec < final_prec)
        && ((inter_unsignedp && inter_prec > inside_prec)
            == (final_unsignedp && final_prec > inter_prec))
        && ! (inside_ptr && inter_prec != final_prec)
        && ! (final_ptr && inside_prec != inter_prec)
        && ! (final_prec != GET_MODE_PRECISION (TYPE_MODE (type))
              && TYPE_MODE (type) == TYPE_MODE (inter_type)))
    (ocvt @0))

   /* A truncation to an unsigned type (a zero-extension) should be
      canonicalized as bitwise and of a mask.  */
   (if (final_int && inter_int && inside_int
        && final_prec == inside_prec
        && final_prec > inter_prec
        && inter_unsignedp)
    (convert (bit_and @0 { wide_int_to_tree
                             (inside_type,
                              wi::mask (inter_prec, false,
                                        TYPE_PRECISION (inside_type))); })))

   /* If we are converting an integer to a floating-point that can
      represent it exactly and back to an integer, we can skip the
      floating-point conversion.  */
   (if (inside_int && inter_float && final_int
        && (unsigned) significand_size (TYPE_MODE (inter_type))
           >= inside_prec - !inside_unsignedp)
    (convert @0))))))
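
/* Two sketches of cases handled above (hypothetical C, assuming 8-bit
   char and 32-bit int):

     int f (int x) { return (int) (unsigned char) x; }   // x & 255
     int g (int x) { return (int) (double) x; }          // folds to x

   f is the zero-extension canonicalized as a mask; g skips the
   floating-point round trip because double represents every 32-bit
   int exactly.  */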

/* If we have a narrowing conversion to an integral type that is fed by a
   BIT_AND_EXPR, we might be able to remove the BIT_AND_EXPR if it merely
   masks off bits outside the final type (and nothing else).  */
(simplify
 (convert (bit_and @0 INTEGER_CST@1))
 (if (INTEGRAL_TYPE_P (type)
      && INTEGRAL_TYPE_P (TREE_TYPE (@0))
      && TYPE_PRECISION (type) <= TYPE_PRECISION (TREE_TYPE (@0))
      && operand_equal_p (@1, build_low_bits_mask (TREE_TYPE (@1),
                                                   TYPE_PRECISION (type)), 0))
  (convert @0)))
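
/* E.g. (hypothetical C, assuming 16-bit short and 32-bit int):

     short f (int x) { return (short) (x & 0xffff); }   // (short) x

   The mask only clears bits the narrowing conversion discards anyway,
   so it can be dropped.  */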


/* (X /[ex] A) * A -> X.  */
(simplify
 (mult (convert? (exact_div @0 @1)) @1)
 /* Look through a sign-changing conversion.  */
 (if (TYPE_PRECISION (TREE_TYPE (@0)) == TYPE_PRECISION (type))
  (convert @0)))

/* Canonicalization of binary operations.  */

/* Convert X + -C into X - C.  */
(simplify
 (plus @0 REAL_CST@1)
 (if (REAL_VALUE_NEGATIVE (TREE_REAL_CST (@1)))
  (with { tree tem = fold_unary (NEGATE_EXPR, type, @1); }
   (if (!TREE_OVERFLOW (tem) || !flag_trapping_math)
    (minus @0 { tem; })))))

/* Convert x+x into x*2.0.  */
(simplify
 (plus @0 @0)
 (if (SCALAR_FLOAT_TYPE_P (type))
  (mult @0 { build_real (type, dconst2); })))

(simplify
 (minus integer_zerop @1)
 (negate @1))

/* (ARG0 - ARG1) is the same as (-ARG1 + ARG0).  So check whether
   ARG0 is zero and X + ARG0 reduces to X, since that would mean
   (-ARG1 + ARG0) reduces to -ARG1.  */
(simplify
 (minus real_zerop@0 @1)
 (if (fold_real_zero_addition_p (type, @0, 0))
  (negate @1)))

/* Transform x * -1 into -x.  */
(simplify
 (mult @0 integer_minus_onep)
 (negate @0))

/* COMPLEX_EXPR and REALPART/IMAGPART_EXPR cancellations.  */
(simplify
 (complex (realpart @0) (imagpart @0))
 @0)
(simplify
 (realpart (complex @0 @1))
 @0)
(simplify
 (imagpart (complex @0 @1))
 @1)


/* BSWAP simplifications, transforms checked by gcc.dg/builtin-bswap-8.c.  */
(for bswap (BUILT_IN_BSWAP16 BUILT_IN_BSWAP32 BUILT_IN_BSWAP64)
 (simplify
  (bswap (bswap @0))
  @0)
 (simplify
  (bswap (bit_not (bswap @0)))
  (bit_not @0))
 (for bitop (bit_xor bit_ior bit_and)
  (simplify
   (bswap (bitop:c (bswap @0) @1))
   (bitop @0 (bswap @1)))))
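
/* Sketch (hypothetical C, using GCC's byte-swap builtins):

     unsigned f (unsigned x)
     { return __builtin_bswap32 (__builtin_bswap32 (x)); }  // folds to x

     unsigned g (unsigned x, unsigned y)
     { return __builtin_bswap32 (__builtin_bswap32 (x) ^ y); }
     // folds to x ^ __builtin_bswap32 (y)

   Byte-swapping commutes with bitwise operations, so paired swaps
   cancel and a single swap moves to the other operand.  */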


/* Combine COND_EXPRs and VEC_COND_EXPRs.  */

/* Simplify constant conditions.
   Only optimize constant conditions when the selected branch
   has the same type as the COND_EXPR.  This avoids optimizing
   away "c ? x : throw", where the throw has a void type.
   Note that we cannot throw away the fold-const.c variant nor
   this one as we depend on doing this transform before possibly
   A ? B : B -> B triggers, and the fold-const.c one can optimize
   0 ? A : B to B even if A has side-effects, something
   genmatch cannot handle.  */
(simplify
 (cond INTEGER_CST@0 @1 @2)
 (if (integer_zerop (@0)
      && (!VOID_TYPE_P (TREE_TYPE (@2))
          || VOID_TYPE_P (type)))
  @2)
 (if (!integer_zerop (@0)
      && (!VOID_TYPE_P (TREE_TYPE (@1))
          || VOID_TYPE_P (type)))
  @1))
(simplify
 (vec_cond VECTOR_CST@0 @1 @2)
 (if (integer_all_onesp (@0))
  @1)
 (if (integer_zerop (@0))
  @2))

(for cnd (cond vec_cond)
 /* A ? B : (A ? X : C) -> A ? B : C.  */
 (simplify
  (cnd @0 (cnd @0 @1 @2) @3)
  (cnd @0 @1 @3))
 (simplify
  (cnd @0 @1 (cnd @0 @2 @3))
  (cnd @0 @1 @3))

 /* A ? B : B -> B.  */
 (simplify
  (cnd @0 @1 @1)
  @1)

 /* !A ? B : C -> A ? C : B.  */
 (simplify
  (cnd (logical_inverted_value truth_valued_p@0) @1 @2)
  (cnd @0 @2 @1)))


/* Simplifications of comparisons.  */

/* We can simplify a logical negation of a comparison to the
   inverted comparison.  As we cannot compute an expression
   operator using invert_tree_comparison we have to simulate
   that with expression code iteration.  */
(for cmp (tcc_comparison)
     icmp (inverted_tcc_comparison)
     ncmp (inverted_tcc_comparison_with_nans)
 /* Ideally we'd like to combine the following two patterns
    and handle some more cases by using
      (logical_inverted_value (cmp @0 @1))
    here but for that genmatch would need to "inline" that.
    For now implement what forward_propagate_comparison did.  */
 (simplify
  (bit_not (cmp @0 @1))
  (if (VECTOR_TYPE_P (type)
       || (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) == 1))
   /* Comparison inversion may be impossible for trapping math,
      invert_tree_comparison will tell us.  But we can't use
      a computed operator in the replacement tree thus we have
      to play the trick below.  */
   (with { enum tree_code ic = invert_tree_comparison
             (cmp, HONOR_NANS (@0)); }
    (if (ic == icmp)
     (icmp @0 @1))
    (if (ic == ncmp)
     (ncmp @0 @1)))))
 (simplify
  (bit_xor (cmp @0 @1) integer_truep)
  (with { enum tree_code ic = invert_tree_comparison
            (cmp, HONOR_NANS (@0)); }
   (if (ic == icmp)
    (icmp @0 @1))
   (if (ic == ncmp)
    (ncmp @0 @1)))))


/* Simplification of math builtins.  */

(define_operator_list LOG BUILT_IN_LOGF BUILT_IN_LOG BUILT_IN_LOGL)
(define_operator_list EXP BUILT_IN_EXPF BUILT_IN_EXP BUILT_IN_EXPL)
(define_operator_list LOG2 BUILT_IN_LOG2F BUILT_IN_LOG2 BUILT_IN_LOG2L)
(define_operator_list EXP2 BUILT_IN_EXP2F BUILT_IN_EXP2 BUILT_IN_EXP2L)
(define_operator_list LOG10 BUILT_IN_LOG10F BUILT_IN_LOG10 BUILT_IN_LOG10L)
(define_operator_list EXP10 BUILT_IN_EXP10F BUILT_IN_EXP10 BUILT_IN_EXP10L)
(define_operator_list POW BUILT_IN_POWF BUILT_IN_POW BUILT_IN_POWL)
(define_operator_list POW10 BUILT_IN_POW10F BUILT_IN_POW10 BUILT_IN_POW10L)
(define_operator_list SQRT BUILT_IN_SQRTF BUILT_IN_SQRT BUILT_IN_SQRTL)
(define_operator_list CBRT BUILT_IN_CBRTF BUILT_IN_CBRT BUILT_IN_CBRTL)


/* fold_builtin_logarithm */
(if (flag_unsafe_math_optimizations)
 /* Special case, optimize logN(expN(x)) = x.  */
 (for logs (LOG LOG2 LOG10)
      exps (EXP EXP2 EXP10)
  (simplify
   (logs (exps @0))
   @0))
 /* Optimize logN(func()) for various exponential functions.  We
    want to determine the value "x" and the power "exponent" in
    order to transform logN(x**exponent) into exponent*logN(x).  */
 (for logs (LOG LOG LOG LOG
            LOG2 LOG2 LOG2 LOG2
            LOG10 LOG10 LOG10 LOG10)
      exps (EXP EXP2 EXP10 POW10)
  (simplify
   (logs (exps @0))
   (with {
     tree x;
     switch (exps)
       {
       CASE_FLT_FN (BUILT_IN_EXP):
         /* Prepare to do logN(exp(exponent)) -> exponent*logN(e).  */
         x = build_real (type, real_value_truncate (TYPE_MODE (type),
                                                    dconst_e ()));
         break;
       CASE_FLT_FN (BUILT_IN_EXP2):
         /* Prepare to do logN(exp2(exponent)) -> exponent*logN(2).  */
         x = build_real (type, dconst2);
         break;
       CASE_FLT_FN (BUILT_IN_EXP10):
       CASE_FLT_FN (BUILT_IN_POW10):
         /* Prepare to do logN(exp10(exponent)) -> exponent*logN(10).  */
         {
           REAL_VALUE_TYPE dconst10;
           real_from_integer (&dconst10, VOIDmode, 10, SIGNED);
           x = build_real (type, dconst10);
         }
         break;
       }
    }
    (mult (logs { x; }) @0))))
 (for logs (LOG LOG
            LOG2 LOG2
            LOG10 LOG10)
      exps (SQRT CBRT)
  (simplify
   (logs (exps @0))
   (with {
     tree x;
     switch (exps)
       {
       CASE_FLT_FN (BUILT_IN_SQRT):
         /* Prepare to do logN(sqrt(x)) -> 0.5*logN(x).  */
         x = build_real (type, dconsthalf);
         break;
       CASE_FLT_FN (BUILT_IN_CBRT):
         /* Prepare to do logN(cbrt(x)) -> (1/3)*logN(x).  */
         x = build_real (type, real_value_truncate (TYPE_MODE (type),
                                                    dconst_third ()));
         break;
       }
    }
    (mult { x; } (logs @0)))))
 /* logN(pow(x,exponent)) -> exponent*logN(x).  */
 (for logs (LOG LOG2 LOG10)
      pows (POW)
  (simplify
   (logs (pows @0 @1))
   (mult @1 (logs @0)))))
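
/* Examples of the transforms above (hypothetical C, assuming <math.h>
   and -funsafe-math-optimizations):

     double f (double x) { return log (exp (x)); }    // folds to x
     double g (double x, double y)
     { return log (pow (x, y)); }                     // y * log (x)
     double h (double x) { return log (sqrt (x)); }   // 0.5 * log (x)
   */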