/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */

#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
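/* For example, a pair holding -2 stores a low word with all bits set
   except bit 0; HWI_SIGN_EXTEND sees the low word's sign bit set and
   supplies the all-ones high word (-1), while a small positive low
   word yields a high word of 0, just as a signed widening copy
   would.  */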
static rtx neg_const_int (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
                                rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
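/* The truncation matters for the most negative value of a mode: in
   QImode, negating -128 gives +128, which the mode cannot represent;
   gen_int_mode wraps the result back into QImode rather than leaving
   an out-of-range CONST_INT.  */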
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */
  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
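/* Usage sketch: a call such as simplify_gen_binary (PLUS, SImode,
   const1_rtx, x), with X some register rtx, is first canonicalized so
   the constant comes second, then folded if possible, and otherwise
   emitted as (plus:SI x (const_int 1)).  */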
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = (*targetm.delegitimize_address) (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
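/* For instance, an SImode MEM of a pool entry that was stored in
   DImode takes the CMODE != GET_MODE (x) path: simplify_subreg tries
   to rewrite the DImode constant as the SImode piece at byte offset 0,
   and if it cannot, the original MEM is returned untouched.  */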
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies the mode in which the comparison is done.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  if (cmp_mode != VOIDmode)
    {
      tem = simplify_relational_operation (code, cmp_mode, op0, op1);

      if (tem)
        {
#ifdef FLOAT_STORE_FLAG_VALUE
          if (GET_MODE_CLASS (mode) == MODE_FLOAT)
            {
              REAL_VALUE_TYPE val;

              if (tem == const0_rtx)
                return CONST0_RTX (mode);
              if (tem != const_true_rtx)
                abort ();
              val = FLOAT_STORE_FLAG_VALUE (mode);
              return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
            }
#endif
          return tem;
        }
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  /* If op0 is a comparison, extract the comparison arguments from it.  */
  if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && op1 == const0_rtx)
    {
      if (code == NE)
        {
          if (GET_MODE (op0) == mode)
            return op0;
          return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
                                          XEXP (op0, 0), XEXP (op0, 1));
        }
      else if (code == EQ)
        {
          enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
          if (new != UNKNOWN)
            return simplify_gen_relational (new, mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old, rtx new)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case '2':
    case 'c':
      op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case '<':
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (op1, old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case '3':
    case 'b':
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case 'o':
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (REG_P (old) && REGNO (x) == REGNO (old))
            return new;
        }
      break;

    default:
      break;
    }
  return x;
}
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && !VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE (trueop))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
        abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
          || GET_CODE (trueop) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (trueop) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = trueop;
          else
            {
              enum machine_mode inmode = GET_MODE (trueop);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              if (in_n_elts >= n_elts || n_elts % in_n_elts)
                abort ();
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  else if (GET_CODE (op) == CONST)
    return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);

  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (trueop, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;
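          /* ARG0 & -ARG0 isolates the lowest set bit in two's
             complement: e.g. for 0b10100 it yields 0b00100, whose
             exact_log2 is 2, so FFS is 3.  For ARG0 == 0 the AND is 0,
             exact_log2 returns -1, and FFS comes out 0 as promised.  */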
        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          if (op_mode == VOIDmode)
            abort ();
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        default:
          return 0;
        }

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
        l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          if (op_mode == VOIDmode)
            abort ();

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (trueop));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
          }
          break;
        default:
          abort ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2 * HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2 * HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          abort ();
        }
      return immed_double_const (xl, xh, mode);
    }
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
         aren't constant.  */
      switch (code)
        {
        case NOT:
          /* (not (not X)) == X.  */
          if (GET_CODE (op) == NOT)
            return XEXP (op, 0);

          /* (not (eq X Y)) == (ne X Y), etc.  */
          if (GET_RTX_CLASS (GET_CODE (op)) == '<'
              && (mode == BImode || STORE_FLAG_VALUE == -1)
              && ((reversed = reversed_comparison_code (op, NULL_RTX))
                  != UNKNOWN))
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (plus X -1)) can become (neg X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == constm1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

          /* Similarly, (not (neg X)) is (plus X -1).  */
          if (GET_CODE (op) == NOT)
            return plus_constant (XEXP (op, 0), -1);
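          /* Both rewrites above are the two's complement identity
             ~Y == -Y - 1 read in each direction: with Y == X - 1 it
             gives ~(X - 1) == -X, and with Y == -X it gives
             ~(-X) == X - 1.  */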
          /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == XOR
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1),
                                                   mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

          /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
             operands other than 1, but that is not valid.  We could do a
             similar simplification for (not (lshiftrt C X)) where C is
             just the sign bit, but this doesn't seem common enough to
             bother with.  */
          if (GET_CODE (op) == ASHIFT
              && XEXP (op, 0) == const1_rtx)
            {
              temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
              return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
            }

          /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
             by reversing the comparison code if valid.  */
          if (STORE_FLAG_VALUE == -1
              && GET_RTX_CLASS (GET_CODE (op)) == '<'
              && (reversed = reversed_comparison_code (op, NULL_RTX))
                 != UNKNOWN)
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (ashiftrt foo C)) where C is the number of bits in FOO
             minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
             so we can perform the above simplification.  */

          if (STORE_FLAG_VALUE == -1
              && GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_relational (GE, mode, VOIDmode,
                                            XEXP (op, 0), const0_rtx);
          break;
        case NEG:
          /* (neg (neg X)) == X.  */
          if (GET_CODE (op) == NEG)
            return XEXP (op, 0);

          /* (neg (plus X 1)) can become (not X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == const1_rtx)
            return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

          /* Similarly, (neg (not X)) is (plus X 1).  */
          if (GET_CODE (op) == NOT)
            return plus_constant (XEXP (op, 0), 1);

          /* (neg (minus X Y)) can become (minus Y X).  This transformation
             isn't safe for modes with signed zeros, since if X and Y are
             both +0, (minus Y X) is the same as (minus X Y).  If the
             rounding mode is towards +infinity (or -infinity) then the two
             expressions will be rounded differently.  */
          if (GET_CODE (op) == MINUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
                                        XEXP (op, 0));

          if (GET_CODE (op) == PLUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              /* (neg (plus A C)) is simplified to (minus -C A).  */
              if (GET_CODE (XEXP (op, 1)) == CONST_INT
                  || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
                {
                  temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
                                                   mode);
                  if (temp)
                    return simplify_gen_binary (MINUS, mode, temp,
                                                XEXP (op, 0));
                }

              /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
            }

          /* (neg (mult A B)) becomes (mult (neg A) B).
             This works even for floating-point values.  */
          if (GET_CODE (op) == MULT
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
            }

          /* NEG commutes with ASHIFT since it is multiplication.  Only do
             this if we can then eliminate the NEG (e.g., if the operand
             is a constant).  */
          if (GET_CODE (op) == ASHIFT)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
                                               mode);
              if (temp)
                return simplify_gen_binary (ASHIFT, mode, temp,
                                            XEXP (op, 1));
            }
          break;
        case SIGN_EXTEND:
          /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
             becomes just the MINUS if its mode is MODE.  This allows
             folding switch statements on machines using casesi (such as
             the VAX).  */
          if (GET_CODE (op) == TRUNCATE
              && GET_MODE (XEXP (op, 0)) == mode
              && GET_CODE (XEXP (op, 0)) == MINUS
              && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
            return XEXP (op, 0);

          /* Check for a sign extension of a subreg of a promoted
             variable, where the promotion is sign-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && ! SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (! POINTERS_EXTEND_UNSIGNED
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        case ZERO_EXTEND:
          /* Check for a zero extension of a subreg of a promoted
             variable, where the promotion is zero-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (POINTERS_EXTEND_UNSIGNED > 0
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        default:
          break;
        }

      return 0;
    }
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
            : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
            : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0, trueop1;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  if (VECTOR_MODE_P (mode)
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op0_n_elts != n_elts || op1_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (trueop0, i),
                                             CONST_VECTOR_ELT (trueop1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND || code == IOR || code == XOR)
        {
          long tmp0[4];
          long tmp1[4];
          REAL_VALUE_TYPE r;
          int i;

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)
            {
              if (code == AND)
                tmp0[i] &= tmp1[i];
              else if (code == IOR)
                tmp0[i] |= tmp1[i];
              else if (code == XOR)
                tmp0[i] ^= tmp1[i];
            }
          real_from_target (&r, tmp0, mode);
          return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
        }
      else
        {
          REAL_VALUE_TYPE f0, f1, value;

          REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
          REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
          f0 = real_value_truncate (mode, f0);
          f1 = real_value_truncate (mode, f1);

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
            return 0;

          if (code == DIV
              && REAL_VALUES_EQUAL (f1, dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

          value = real_value_truncate (mode, value);
          return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
        }
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
        l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
        l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* Fall through....  */
        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:  case MOD:   case UDIV:  case UMOD:
          /* We'd need to include tree.h to do this and it doesn't seem worth
             it.  */
          return 0;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
         there are some cases worth simplifying.  */

      switch (code)
        {
        case PLUS:
          /* Maybe simplify x + 0 to x.  The two expressions are equivalent
             when x is NaN, infinite, or finite and nonzero.  They aren't
             when x is -0 and the rounding mode is not towards -infinity,
             since (-0) + 0 is then 0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
            return op0;

          /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
             transformations are safe even for IEEE.  */
          if (GET_CODE (op0) == NEG)
            return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
          else if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

          /* (~a) + 1 -> -a */
          if (INTEGRAL_MODE_P (mode)
              && GET_CODE (op0) == NOT
              && trueop1 == const1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

          /* Handle both-operands-constant cases.  We can only add
             CONST_INTs to constants since the sum of relocatable symbols
             can't be handled by most assemblers.  Don't add CONST_INT
             to CONST_INT since overflow won't be computed properly if wider
             than HOST_BITS_PER_WIDE_INT.  */

          if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
              && GET_CODE (op1) == CONST_INT)
            return plus_constant (op0, INTVAL (op1));
          else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
                   && GET_CODE (op0) == CONST_INT)
            return plus_constant (op1, INTVAL (op0));

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 + coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }
          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Reassociate floating point addition only when the user
             specifies unsafe math optimizations.  */
          if (FLOAT_MODE_P (mode)
              && flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;
        case COMPARE:
#ifdef HAVE_cc0
          /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
             using cc0, in which case we want to leave it as a COMPARE
             so we can distinguish it from a register-register-copy.

             In IEEE floating point, x-0 is not the same as x.  */

          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && trueop1 == CONST0_RTX (mode))
            return op0;
#endif

          /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
          if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
               || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
              && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
            {
              rtx xop00 = XEXP (op0, 0);
              rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
              if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
              if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
                  && GET_MODE (xop00) == GET_MODE (xop10)
                  && REGNO (xop00) == REGNO (xop10)
                  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
                  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
                return xop00;
            }
          break;
        case MINUS:
          /* We can't assume x-x is 0 even with non-IEEE floating point,
             but since it is zero except in very strange circumstances, we
             will treat it as zero with -funsafe-math-optimizations.  */
          if (rtx_equal_p (trueop0, trueop1)
              && ! side_effects_p (op0)
              && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
            return CONST0_RTX (mode);

          /* Change subtraction from zero into negation.  (0 - x) is the
             same as -x when x is NaN, infinite, or finite and nonzero.
             But if the mode has signed zeros, and does not round towards
             -infinity, then 0 - 0 is 0, not -0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
            return simplify_gen_unary (NEG, mode, op1, mode);

          /* (-1 - a) is ~a.  */
          if (trueop0 == constm1_rtx)
            return simplify_gen_unary (NOT, mode, op1, mode);

          /* Subtracting 0 has no effect unless the mode has signed zeros
             and supports rounding towards -infinity.  In such a case,
             0 - 0 is -0.  */
          if (!(HONOR_SIGNED_ZEROS (mode)
                && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
              && trueop1 == CONST0_RTX (mode))
            return op0;

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 - coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }

          /* (a - (-b)) -> (a + b).  True even for IEEE.  */
          if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

          /* (-x - c) may be simplified as (-c - x).  */
          if (GET_CODE (op0) == NEG
              && (GET_CODE (op1) == CONST_INT
                  || GET_CODE (op1) == CONST_DOUBLE))
            {
              tem = simplify_unary_operation (NEG, mode, op1, mode);
              if (tem)
                return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
            }

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Don't let a relocatable value get a negative coeff.  */
          if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
            return simplify_gen_binary (PLUS, mode,
                                        op0,
                                        neg_const_int (mode, op1));

          /* (x - (x & y)) -> (x & ~y) */
          if (GET_CODE (op1) == AND)
            {
              if (rtx_equal_p (op0, XEXP (op1, 0)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                            GET_MODE (XEXP (op1, 1)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
              if (rtx_equal_p (op0, XEXP (op1, 1)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                            GET_MODE (XEXP (op1, 0)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
            }
          break;
        case MULT:
          if (trueop1 == constm1_rtx)
            return simplify_gen_unary (NEG, mode, op0, mode);

          /* Maybe simplify x * 0 to 0.  The reduction is not valid if
             x is NaN, since x * 0 is then also NaN.  Nor is it valid
             when the mode has signed zeros, since multiplying a negative
             number by 0 will give -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop1 == CONST0_RTX (mode)
              && ! side_effects_p (op0))
            return op1;

          /* In IEEE floating point, x*1 is not equivalent to x for
             signalling NaNs.  */
          if (!HONOR_SNANS (mode)
              && trueop1 == CONST1_RTX (mode))
            return op0;

          /* Convert multiply by constant power of two into shift unless
             we are still generating RTL.  This test is a kludge.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (val = exact_log2 (INTVAL (trueop1))) >= 0
              /* If the mode is larger than the host word size, and the
                 uppermost bit is set, then this isn't a power of two due
                 to implicit sign extension.  */
              && (width <= HOST_BITS_PER_WIDE_INT
                  || val != HOST_BITS_PER_WIDE_INT - 1)
              && ! rtx_equal_function_value_matters)
            return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
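          /* exact_log2 returns the exponent only for exact powers of
             two, e.g. exact_log2 (8) == 3, so x * 8 becomes
             (ashift x 3); for a value such as 6 it returns a negative
             result and the multiplication is left alone.  */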
          /* x*2 is x+x and x*(-1) is -x */
          if (GET_CODE (trueop1) == CONST_DOUBLE
              && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
              && GET_MODE (op0) == mode)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (REAL_VALUES_EQUAL (d, dconst2))
                return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

              if (REAL_VALUES_EQUAL (d, dconstm1))
                return simplify_gen_unary (NEG, mode, op0, mode);
            }

          /* Reassociate multiplication, but for floating point MULTs
             only when the user specifies unsafe math optimizations.  */
          if (! FLOAT_MODE_P (mode)
              || flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;
        case IOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          /* A | (~A) -> -1 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return constm1_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;
        case XOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return simplify_gen_unary (NOT, mode, op0, mode);
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;
        case AND:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return const0_rtx;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op0;
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return op0;
          /* A & (~A) -> 0 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;
        case UDIV:
          /* Convert divide by power of two into shift (divide by 1 handled
             below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
            return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));
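          /* This shift form is only valid for unsigned division:
             (udiv x 16) == (lshiftrt x 4), whereas a signed divide by a
             power of two rounds towards zero for negative operands and
             so cannot become a plain arithmetic shift.  */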
          /* Fall through....  */

        case DIV:
          if (trueop1 == CONST1_RTX (mode))
            {
              /* On some platforms DIV uses narrower mode than its
                 operands.  */
              rtx x = gen_lowpart_common (mode, op0);
              if (x)
                return x;
              else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                return gen_lowpart_SUBREG (mode, op0);
              else
                return op0;
            }

          /* Maybe change 0 / x to 0.  This transformation isn't safe for
             modes with NaNs, since 0 / 0 will then be NaN rather than 0.
             Nor is it safe for modes with signed zeros, since dividing
             0 by a negative number gives -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop0 == CONST0_RTX (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Change division by a constant into multiplication.  Only do
             this with -funsafe-math-optimizations.  */
          else if (GET_CODE (trueop1) == CONST_DOUBLE
                   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
                   && trueop1 != CONST0_RTX (mode)
                   && flag_unsafe_math_optimizations)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (! REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
          break;
        case UMOD:
          /* Handle modulus by power of two (mod with 1 handled below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && exact_log2 (INTVAL (trueop1)) > 0)
            return simplify_gen_binary (AND, mode, op0,
                                        GEN_INT (INTVAL (op1) - 1));
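          /* For a power of two 2**K the unsigned remainder is just the
             low K bits, e.g. (umod x 8) == (and x 7); as with UDIV this
             holds only in the unsigned case.  */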
          /* Fall through....  */

        case MOD:
          if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
              && ! side_effects_p (op0) && ! side_effects_p (op1))
            return const0_rtx;
          break;
        case ROTATERT:
        case ROTATE:
        case ASHIFTRT:
          /* Rotating ~0 always results in ~0.  */
          if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Fall through....  */

        case ASHIFT:
        case LSHIFTRT:
          if (trueop1 == const0_rtx)
            return op0;
          if (trueop0 == const0_rtx && ! side_effects_p (op1))
            return op0;
          break;
        case SMIN:
          if (width <= HOST_BITS_PER_WIDE_INT
              && GET_CODE (trueop1) == CONST_INT
              && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
              && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case SMAX:
          if (width <= HOST_BITS_PER_WIDE_INT
              && GET_CODE (trueop1) == CONST_INT
              && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
                  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
              && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UMIN:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UMAX:
          if (trueop1 == constm1_rtx && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;
        case SS_PLUS:
        case US_PLUS:
        case SS_MINUS:
        case US_MINUS:
          /* ??? There are simplifications that can be done.  */
          return 0;

        case VEC_SELECT:
          if (!VECTOR_MODE_P (mode))
            {
              if (!VECTOR_MODE_P (GET_MODE (trueop0))
                  || (mode
                      != GET_MODE_INNER (GET_MODE (trueop0)))
                  || GET_CODE (trueop1) != PARALLEL
                  || XVECLEN (trueop1, 0) != 1
                  || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
                abort ();

              if (GET_CODE (trueop0) == CONST_VECTOR)
                return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
            }
          else
            {
              if (!VECTOR_MODE_P (GET_MODE (trueop0))
                  || (GET_MODE_INNER (mode)
                      != GET_MODE_INNER (GET_MODE (trueop0)))
                  || GET_CODE (trueop1) != PARALLEL)
                abort ();

              if (GET_CODE (trueop0) == CONST_VECTOR)
                {
                  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                  rtvec v = rtvec_alloc (n_elts);
                  unsigned int i;

                  if (XVECLEN (trueop1, 0) != (int) n_elts)
                    abort ();
                  for (i = 0; i < n_elts; i++)
                    {
                      rtx x = XVECEXP (trueop1, 0, i);

                      if (GET_CODE (x) != CONST_INT)
                        abort ();
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
                    }

                  return gen_rtx_CONST_VECTOR (mode, v);
                }
            }
          return 0;

        case VEC_CONCAT:
          {
            enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                          ? GET_MODE (trueop0)
                                          : GET_MODE_INNER (mode));
            enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                          ? GET_MODE (trueop1)
                                          : GET_MODE_INNER (mode));

            if (!VECTOR_MODE_P (mode)
                || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                    != GET_MODE_SIZE (mode)))
              abort ();

            if ((VECTOR_MODE_P (op0_mode)
                 && (GET_MODE_INNER (mode)
                     != GET_MODE_INNER (op0_mode)))
                || (!VECTOR_MODE_P (op0_mode)
                    && GET_MODE_INNER (mode) != op0_mode))
              abort ();

            if ((VECTOR_MODE_P (op1_mode)
                 && (GET_MODE_INNER (mode)
                     != GET_MODE_INNER (op1_mode)))
                || (!VECTOR_MODE_P (op1_mode)
                    && GET_MODE_INNER (mode) != op1_mode))
              abort ();

            if ((GET_CODE (trueop0) == CONST_VECTOR
                 || GET_CODE (trueop0) == CONST_INT
                 || GET_CODE (trueop0) == CONST_DOUBLE)
                && (GET_CODE (trueop1) == CONST_VECTOR
                    || GET_CODE (trueop1) == CONST_INT
                    || GET_CODE (trueop1) == CONST_DOUBLE))
              {
                int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                rtvec v = rtvec_alloc (n_elts);
                unsigned int i;
                unsigned in_n_elts = 1;

                if (VECTOR_MODE_P (op0_mode))
                  in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
                for (i = 0; i < n_elts; i++)
                  {
                    if (i < in_n_elts)
                      {
                        if (!VECTOR_MODE_P (op0_mode))
                          RTVEC_ELT (v, i) = trueop0;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                      }
                    else
                      {
                        if (!VECTOR_MODE_P (op1_mode))
                          RTVEC_ELT (v, i) = trueop1;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                               i - in_n_elts);
                      }
                  }

                return gen_rtx_CONST_VECTOR (mode, v);
              }
          }
          return 0;

        default:
          break;
        }

      return 0;
    }
  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }

  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case AND:
      val = arg0 & arg1;
      break;

    case IOR:
      val = arg0 | arg1;
      break;

    case XOR:
      val = arg0 ^ arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
         what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
        return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
         Manually extend the sign to ensure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
        val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
             | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
             | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
             <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
             > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
      /* ??? There are simplifications that can be done.  */
      return 0;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
          - commutative_operand_precedence (d1->op));
}
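/* The comparator subtracts precedences, so qsort orders the array by
   decreasing commutative_operand_precedence; constants have the lowest
   precedence and therefore end up last, which the CONST-creation code
   in simplify_plus_minus below depends on.  */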
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1, int force)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, changed;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
        {
          rtx this_op = ops[i].op;
          int this_neg = ops[i].neg;
          enum rtx_code this_code = GET_CODE (this_op);

          switch (this_code)
            {
            case PLUS:
            case MINUS:
              if (n_ops == 7)
                return NULL_RTX;

              ops[n_ops].op = XEXP (this_op, 1);
              ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
              n_ops++;

              ops[i].op = XEXP (this_op, 0);
              input_ops++;
              changed = 1;
              break;

            case NEG:
              ops[i].op = XEXP (this_op, 0);
              ops[i].neg = ! this_neg;
              changed = 1;
              break;

            case CONST:
              if (n_ops < 7
                  && GET_CODE (XEXP (this_op, 0)) == PLUS
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
                {
                  ops[i].op = XEXP (XEXP (this_op, 0), 0);
                  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
                  ops[n_ops].neg = this_neg;
                  n_ops++;
                  input_consts++;
                  changed = 1;
                }
              break;

            case NOT:
              /* ~a -> (-a - 1) */
              if (n_ops != 7)
                {
                  ops[n_ops].op = constm1_rtx;
                  ops[n_ops++].neg = this_neg;
                  ops[i].op = XEXP (this_op, 0);
                  ops[i].neg = !this_neg;
                  changed = 1;
                }
              break;
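              /* By the identity ~A == -A - 1, a term +(not A) expands
                 to the pair -A and -1, while -(not A) expands to
                 +A and +1.  */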
2360 ops
[i
].op
= neg_const_int (mode
, this_op
);
  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2 && !force)
    return NULL_RTX;

  /* Count the number of CONSTs we didn't split above.  */
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      input_consts++;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  first = 1;
  do
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
	for (j = i + 1; j < n_ops; j++)
	  {
	    rtx lhs = ops[i].op, rhs = ops[j].op;
	    int lneg = ops[i].neg, rneg = ops[j].neg;

	    if (lhs != 0 && rhs != 0
		&& (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		/* Reject "simplifications" that just wrap the two
		   arguments in a CONST.  Failure to do so can result
		   in infinite recursion with simplify_binary_operation
		   when it calls us to simplify CONST operations.  */
		if (tem
		    && ! (GET_CODE (tem) == CONST
			  && GET_CODE (XEXP (tem, 0)) == ncode
			  && XEXP (XEXP (tem, 0), 0) == lhs
			  && XEXP (XEXP (tem, 0), 1) == rhs)
		    /* Don't allow -x + -1 -> ~x simplifications in the
		       first pass.  This allows us the chance to combine
		       the -1 with other constants.  */
		    && ! (first
			  && GET_CODE (tem) == NOT
			  && XEXP (tem, 0) == rhs))
		  {
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (GET_CODE (tem) == CONST_INT && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem, ops[i].neg = lneg;
		    ops[j].op = NULL_RTX, ops[j].neg = 0;
		    changed = 1;
		  }
	      }
	  }

      first = 0;
    }
  while (changed);
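/* Because only constant pairs are combined on the first pass, an input
   like a + 3 + b + 4 first folds 3 and 4 into 7 before any combination
   involving a or b is attempted.  */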
  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && GET_CODE (ops[1].op) == CONST_INT
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }
  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (!force
      && (n_ops + n_consts > input_ops
	  || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
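/* Taken together, these passes turn something like
   (plus (plus (reg A) (const_int 2)) (minus (const_int 5) (reg A)))
   into (const_int 7): the leaves are flattened, A and -A cancel, and
   the remaining constants fold against each other.  */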
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
			       rtx op0, rtx op1)
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
	  || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases cannot be ignored.  */
  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
	    && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this for == or != if tem is a nonzero address.  */
      && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
					  mode, tem, const0_rtx);

  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;
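/* The A - B rewrite above turns, say, (gt (plus (reg X) (const_int 4))
   (reg X)) into a comparison of the folded difference (const_int 4)
   against zero, which the code below can then decide.  */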
  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  */
  if (! HONOR_NANS (GET_MODE (trueop0))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
	   && GET_CODE (trueop1) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      equal = REAL_VALUES_EQUAL (d0, d1);
      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
    }
  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
	   && (GET_CODE (trueop0) == CONST_DOUBLE
	       || GET_CODE (trueop0) == CONST_INT)
	   && (GET_CODE (trueop1) == CONST_DOUBLE
	       || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
	{
	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
	}
      else
	{
	  l0u = l0s = INTVAL (trueop0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	{
	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
	}
      else
	{
	  l1u = l1s = INTVAL (trueop1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l0s |= ((HOST_WIDE_INT) (-1) << width);

	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }
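/* The extension step means that, e.g., in QImode (width 8) the constants
   255 and -1 end up with identical masked low words and therefore compare
   as equal, whatever the host word size the values are kept in.  */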
  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
	{
	case EQ:
	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
	    return const0_rtx;
	  break;

	case NE:
	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
	    return const_true_rtx;
	  break;

	case GEU:
	  /* Unsigned values are never negative.  */
	  if (trueop1 == const0_rtx)
	    return const_true_rtx;
	  break;

	case LTU:
	  if (trueop1 == const0_rtx)
	    return const0_rtx;
	  break;

	case LEU:
	  /* Unsigned values are never greater than the largest
	     unsigned value.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const_true_rtx;
	  break;

	case GTU:
	  if (GET_CODE (trueop1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const0_rtx;
	  break;

	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const_true_rtx;
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  if (trueop1 == CONST0_RTX (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const_true_rtx;
	    }
	  break;

	default:
	  break;
	}

      return 0;
    }
  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}
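/* For instance, simplify_relational_operation (GT, VOIDmode, GEN_INT (4),
   GEN_INT (3)) compares the two constants in "infinite precision" and
   returns const_true_rtx.  */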
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
			    enum machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;
  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  HOST_WIDE_INT val = INTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return GEN_INT (val);
	}
      break;
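      /* For example, with BITS_BIG_ENDIAN clear, extracting four bits at
	 position 4 from (const_int 0x96) shifts out the low nibble and
	 masks to 0x9; ZERO_EXTRACT returns 9, while SIGN_EXTRACT sees the
	 top bit of the field set and sign-extends the result to -7.  */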
    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
						XEXP (op0, 0), XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp == const0_rtx)
	    return op2;
	  else if (temp == const_true_rtx)
	    return op1;
	  else if (temp)
	    abort ();

	  /* Look for happy constants in op1 and op2.  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
      break;
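      /* So on a target where STORE_FLAG_VALUE is 1,
	 (if_then_else (lt (reg A) (reg B)) (const_int 1) (const_int 0))
	 collapses to the comparison (lt (reg A) (reg B)) itself.  */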
    case VEC_MERGE:
      if (GET_MODE (op0) != mode
	  || GET_MODE (op1) != mode
	  || !VECTOR_MODE_P (mode))
	abort ();
      op2 = avoid_constant_pool_reference (op2);
      if (GET_CODE (op2) == CONST_INT)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  int mask = (1 << n_elts) - 1;

	  if (!(INTVAL (op2) & mask))
	    return op1;
	  if ((INTVAL (op2) & mask) == mask)
	    return op0;

	  op0 = avoid_constant_pool_reference (op0);
	  op1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (op0) == CONST_VECTOR
	      && GET_CODE (op1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
				    ? CONST_VECTOR_ELT (op0, i)
				    : CONST_VECTOR_ELT (op1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}
      break;

    default:
      abort ();
    }

  return 0;
}
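/* As an illustration, merging the constant vectors {1, 2} and {3, 4}
   under a (const_int 1) mask selects element 0 from the first operand
   and element 1 from the second, yielding {1, 4}.  */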
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
   returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;
  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
    return op;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }

  if (BITS_PER_UNIT % value_bit != 0)
    abort ();  /* Too complicated; reducing value_bit may help.  */
  if (elem_bitsize % BITS_PER_UNIT != 0)
    abort ();  /* I don't know how to handle endianness of sub-units.  */
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
	    {
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
		abort ();

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}
	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < max_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
	    {
	      long tmp[max_bitsize / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      if (bitsize > elem_bitsize)
		abort ();
	      if (bitsize % value_bit != 0)
		abort ();

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  else
	    abort ();
	  break;

	default:
	  abort ();
	}
    }
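/* Because CONST_INTs are logically sign-extended, unpacking
   (const_int -1) fills every chunk of VALUE for that element with 0xff,
   regardless of how wide the host word actually is.  */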
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  if (byte >= GET_MODE_SIZE (innermode))
    abort ();

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);
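/* On a fully big-endian target, for instance, byte 4 of a DImode
   constant is its low SImode word in memory order; the renumbering above
   maps it back to offset 0 in the little-endian VALUE array.  */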
  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);
  if (elem_bitsize % value_bit != 0)
    abort ();
  if (elem_bitsize + value_start * value_bit > max_bitsize)
    abort ();

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }
      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
		     << (i - HOST_BITS_PER_WIDE_INT));

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	  }
	  break;

	case MODE_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	default:
	  abort ();
	}
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
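/* For example, on a little-endian target,
   simplify_immed_subreg (QImode, GEN_INT (0x1234), SImode, 0) unpacks the
   constant into the bytes 0x34 0x12 0x00 0x00 and repacks byte 0, giving
   (const_int 0x34).  */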
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (outermode == innermode && !byte)
    return op;

  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to op's starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx new;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      new = simplify_subreg (outermode, SUBREG_REG (op),
			     GET_MODE (SUBREG_REG (op)),
			     final_offset);
      if (new)
	return new;
      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
    }
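/* A simple instance of the above: (subreg:QI (subreg:HI (reg:SI R) 0) 0)
   recurses on the inner SUBREG_REG and becomes (subreg:QI (reg:SI R) 0),
   a single mode change instead of two.  */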
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (GET_CODE (op) == REG
      && (! REG_FUNCTION_VALUE_P (op)
	  || ! rtx_equal_function_value_matters)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
	  || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
	      ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
					byte, outermode))
    {
      rtx tem = gen_rtx_SUBREG (outermode, op, byte);
      int final_regno = subreg_hard_regno (tem, 0);

      /* ??? We do allow it if the current REG is not valid for
	 its mode.  This is a kludge to work around how float/complex
	 arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
	  || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
	{
	  rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that cannot
	     grok partial registers anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (GET_CODE (op) == MEM
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
      unsigned int final_offset;
      rtx res;

      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      /* We can at least simplify it by referring directly to the
	 relevant part.  */
      return gen_rtx_SUBREG (outermode, part, final_offset);
    }
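/* So with 4-byte SFmode units, (subreg:SF (concat:SC A B) 4) selects the
   imaginary part B directly, and no SUBREG is left behind when B already
   has the requested mode.  */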
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
	 there are three possibilities.  If the outermode is the same
	 as the origmode, we can omit both the extension and the subreg.
	 If the outermode is not larger than the origmode, we can apply
	 the truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, but both are integer modes, we
	 can just extend to the appropriate mode.  */
      if (bitpos == 0)
	{
	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
	  if (outermode == origmode)
	    return XEXP (op, 0);
	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,
							       origmode));
	  if (SCALAR_INT_MODE_P (outermode))
	    return simplify_gen_unary (GET_CODE (op), outermode,
				       XEXP (op, 0), origmode);
	}

      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source has.  */
      if (GET_CODE (op) == ZERO_EXTEND
	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }

  return NULL_RTX;
}
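/* The first possibility in action: the lowpart
   (subreg:QI (zero_extend:SI (reg:QI R)) 0) is just (reg:QI R), since
   outermode and origmode coincide.  (On a big-endian target the lowpart
   byte offset would be 3 rather than 0.)  */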
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx new;

  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (GET_CODE (op) == QUEUED)
    return NULL_RTX;

  new = simplify_subreg (outermode, op, innermode, byte);
  if (new)
    return new;

  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  return gen_rtx_SUBREG (outermode, op, byte);
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  rtx temp;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case 'c':
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */
    case '2':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case '<':
      temp = simplify_relational_operation (code,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0), XEXP (x, 1));
#ifdef FLOAT_STORE_FLAG_VALUE
      if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
	{
	  if (temp == const0_rtx)
	    temp = CONST0_RTX (mode);
	  else
	    temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
						 mode);
	}
#endif
      return temp;

    case 'x':
      if (code == SUBREG)
	return simplify_gen_subreg (mode, SUBREG_REG (x),
				    GET_MODE (SUBREG_REG (x)),
				    SUBREG_BYTE (x));
      if (code == CONSTANT_P_RTX)
	{
	  if (CONSTANT_P (XEXP (x, 0)))
	    return const1_rtx;
	}
      break;

    case 'o':
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
	}
      break;

    default:
      break;
    }

  return NULL;
}