/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
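/* For example, with a 64-bit HOST_WIDE_INT, the 128-bit value -2 is
   represented by the pair (low = 0xfffffffffffffffe, high = -1);
   HWI_SIGN_EXTEND (low) recovers the -1 needed for the high word
   because the sign bit of LOW is set.  */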
static rtx neg_const_int (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
                                rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
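/* For example, negating (const_int -128) in QImode overflows the 8-bit
   range, since +128 is not representable; gen_int_mode truncates the
   result back into QImode, yielding (const_int -128) again.  */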
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */
  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  if (cmp_mode != VOIDmode)
    {
      tem = simplify_relational_operation (code, mode, cmp_mode, op0, op1);
      if (tem)
        return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  /* If op0 is a comparison, extract the comparison arguments from it.  */
  if (COMPARISON_P (op0) && op1 == const0_rtx)
    {
      if (code == NE)
        {
          if (GET_MODE (op0) == mode)
            return op0;
          return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
                                          XEXP (op0, 0), XEXP (op0, 1));
        }
      else if (code == EQ)
        {
          enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
          if (new != UNKNOWN)
            return simplify_gen_relational (new, mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old, rtx new)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (op1, old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (REG_P (old) && REGNO (x) == REGNO (old))
            return new;
        }
      break;

    default:
      break;
    }
  return x;
}
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);
  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && !VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE (trueop))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
        abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
          || GET_CODE (trueop) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (trueop) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = trueop;
          else
            {
              enum machine_mode inmode = GET_MODE (trueop);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              if (in_n_elts >= n_elts || n_elts % in_n_elts)
                abort ();
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  else if (GET_CODE (op) == CONST)
    return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);
  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (trueop, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;
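          /* For example, arg0 = 0b1100 gives arg0 & -arg0 = 0b100 (the
             lowest set bit), exact_log2 of that is 2, so FFS folds to 3,
             matching the 1-based bit numbering of ffs(3).  */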
        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;
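          /* For example, arg0 = 0b1011 clears one set bit per iteration
             (0b1011 -> 0b1010 -> 0b1000 -> 0), so POPCOUNT folds to 3
             and PARITY to 3 & 1 = 1.  */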
        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          if (op_mode == VOIDmode)
            abort ();
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
          return 0;

        default:
          abort ();
        }

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
        l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          if (op_mode == VOIDmode)
            abort ();

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (trueop));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          abort ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          abort ();
        }
      return immed_double_const (xl, xh, mode);
    }
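/* For example, folding (fix:SI (const_double:DF 3.0e9)) saturates to
   0x7fffffff, the largest signed 32-bit value, since 3.0e9 exceeds the
   signed upper bound of SImode.  */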
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
         aren't constant.  */
      switch (code)
        {
        case NOT:
          /* (not (not X)) == X.  */
          if (GET_CODE (op) == NOT)
            return XEXP (op, 0);

          /* (not (eq X Y)) == (ne X Y), etc.  */
          if (COMPARISON_P (op)
              && (mode == BImode || STORE_FLAG_VALUE == -1)
              && ((reversed = reversed_comparison_code (op, NULL_RTX))
                  != UNKNOWN))
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (plus X -1)) can become (neg X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == constm1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

          /* Similarly, (not (neg X)) is (plus X -1).  */
          if (GET_CODE (op) == NEG)
            return plus_constant (XEXP (op, 0), -1);

          /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == XOR
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1),
                                                   mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

          /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
             operands other than 1, but that is not valid.  We could do a
             similar simplification for (not (lshiftrt C X)) where C is
             just the sign bit, but this doesn't seem common enough to
             bother with.  */
          if (GET_CODE (op) == ASHIFT
              && XEXP (op, 0) == const1_rtx)
            {
              temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
              return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
            }

          /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
             by reversing the comparison code if valid.  */
          if (STORE_FLAG_VALUE == -1
              && COMPARISON_P (op)
              && (reversed = reversed_comparison_code (op, NULL_RTX))
                 != UNKNOWN)
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (ashiftrt foo C)) where C is the number of bits in FOO
             minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
             so we can perform the above simplification.  */

          if (STORE_FLAG_VALUE == -1
              && GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_relational (GE, mode, VOIDmode,
                                            XEXP (op, 0), const0_rtx);

          break;
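          /* For instance, under the (not (xor X C)) rule above,
             (not (xor X (const_int 5))) in SImode becomes
             (xor X (const_int -6)), since ~5 == -6.  */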
        case NEG:
          /* (neg (neg X)) == X.  */
          if (GET_CODE (op) == NEG)
            return XEXP (op, 0);

          /* (neg (plus X 1)) can become (not X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == const1_rtx)
            return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

          /* Similarly, (neg (not X)) is (plus X 1).  */
          if (GET_CODE (op) == NOT)
            return plus_constant (XEXP (op, 0), 1);

          /* (neg (minus X Y)) can become (minus Y X).  This transformation
             isn't safe for modes with signed zeros, since if X and Y are
             both +0, (minus Y X) is the same as (minus X Y).  If the
             rounding mode is towards +infinity (or -infinity) then the two
             expressions will be rounded differently.  */
          if (GET_CODE (op) == MINUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
                                        XEXP (op, 0));

          if (GET_CODE (op) == PLUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              /* (neg (plus A C)) is simplified to (minus -C A).  */
              if (GET_CODE (XEXP (op, 1)) == CONST_INT
                  || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
                {
                  temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
                                                   mode);
                  if (temp)
                    return simplify_gen_binary (MINUS, mode, temp,
                                                XEXP (op, 0));
                }

              /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
            }

          /* (neg (mult A B)) becomes (mult (neg A) B).
             This works even for floating-point values.  */
          if (GET_CODE (op) == MULT
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
            }

          /* NEG commutes with ASHIFT since it is multiplication.  Only do
             this if we can then eliminate the NEG (e.g., if the operand
             is a constant).  */
          if (GET_CODE (op) == ASHIFT)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
                                               mode);
              if (temp)
                return simplify_gen_binary (ASHIFT, mode, temp,
                                            XEXP (op, 1));
            }

          /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
             C is equal to the width of MODE minus 1.  */
          if (GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_binary (LSHIFTRT, mode,
                                        XEXP (op, 0), XEXP (op, 1));

          /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
             C is equal to the width of MODE minus 1.  */
          if (GET_CODE (op) == LSHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        XEXP (op, 0), XEXP (op, 1));

          break;
        case SIGN_EXTEND:
          /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
             becomes just the MINUS if its mode is MODE.  This allows
             folding switch statements on machines using casesi (such as
             the VAX).  */
          if (GET_CODE (op) == TRUNCATE
              && GET_MODE (XEXP (op, 0)) == mode
              && GET_CODE (XEXP (op, 0)) == MINUS
              && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
            return XEXP (op, 0);

          /* Check for a sign extension of a subreg of a promoted
             variable, where the promotion is sign-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && ! SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (! POINTERS_EXTEND_UNSIGNED
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        case ZERO_EXTEND:
          /* Check for a zero extension of a subreg of a promoted
             variable, where the promotion is zero-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (POINTERS_EXTEND_UNSIGNED > 0
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        default:
          break;
        }

      return 0;
    }
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
            : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
            : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
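/* As an illustration of the linearization above:
   (plus A (plus B C)) is first rewritten as (plus (plus B C) A), after
   which the "(a op b) op c" rules can try to fold A against B or C,
   e.g. combining two constants that started in different subtrees.  */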
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop0, trueop1;
  rtx tem;

#ifdef ENABLE_CHECKING
  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == RTX_COMPARE
      || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
    abort ();
#endif

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  if (VECTOR_MODE_P (mode)
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op0_n_elts != n_elts || op1_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (trueop0, i),
                                             CONST_VECTOR_ELT (trueop1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
          || code == IOR
          || code == XOR)
        {
          long tmp0[4];
          long tmp1[4];
          REAL_VALUE_TYPE r;
          int i;

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)
            {
              if (code == AND)
                tmp0[i] &= tmp1[i];
              else if (code == IOR)
                tmp0[i] |= tmp1[i];
              else if (code == XOR)
                tmp0[i] ^= tmp1[i];
            }
          real_from_target (&r, tmp0, mode);
          return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
        }
      else
        {
          REAL_VALUE_TYPE f0, f1, value;

          REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
          REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
          f0 = real_value_truncate (mode, f0);
          f1 = real_value_truncate (mode, f1);

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
            return 0;

          if (code == DIV
              && REAL_VALUES_EQUAL (f1, dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

          value = real_value_truncate (mode, value);
          return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
        }
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv, lt;
      HOST_WIDE_INT h1, h2, hv, ht;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
        l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
        l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* Fall through....  */

        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case MOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case UDIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case UMOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
         there are some cases worth simplifying.  */

      switch (code)
        {
        case PLUS:
          /* Maybe simplify x + 0 to x.  The two expressions are equivalent
             when x is NaN, infinite, or finite and nonzero.  They aren't
             when x is -0 and the rounding mode is not towards -infinity,
             since (-0) + 0 is then 0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
            return op0;

          /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
             transformations are safe even for IEEE.  */
          if (GET_CODE (op0) == NEG)
            return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
          else if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

          /* (~a) + 1 -> -a */
          if (INTEGRAL_MODE_P (mode)
              && GET_CODE (op0) == NOT
              && trueop1 == const1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

          /* Handle both-operands-constant cases.  We can only add
             CONST_INTs to constants since the sum of relocatable symbols
             can't be handled by most assemblers.  Don't add CONST_INT
             to CONST_INT since overflow won't be computed properly if wider
             than HOST_BITS_PER_WIDE_INT.  */

          if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
              && GET_CODE (op1) == CONST_INT)
            return plus_constant (op0, INTVAL (op1));
          else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
                   && GET_CODE (op0) == CONST_INT)
            return plus_constant (op1, INTVAL (op0));

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 + coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }
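          /* Example: (plus (mult X (const_int 3)) X) has coeff0 = 3 and
             coeff1 = 1, so it folds to (mult X (const_int 4)); a MULT
             result is kept only because one operand already contained a
             multiply.  */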
          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Reassociate floating point addition only when the user
             specifies unsafe math optimizations.  */
          if (FLOAT_MODE_P (mode)
              && flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;
        case COMPARE:
#ifdef HAVE_cc0
          /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
             using cc0, in which case we want to leave it as a COMPARE
             so we can distinguish it from a register-register-copy.

             In IEEE floating point, x-0 is not the same as x.  */

          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && trueop1 == CONST0_RTX (mode))
            return op0;
#endif

          /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
          if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
               || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
              && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
            {
              rtx xop00 = XEXP (op0, 0);
              rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
              if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
              if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
                  && GET_MODE (xop00) == GET_MODE (xop10)
                  && REGNO (xop00) == REGNO (xop10)
                  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
                  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
                return xop00;
            }
          break;
        case MINUS:
          /* We can't assume x-x is 0 even with non-IEEE floating point,
             but since it is zero except in very strange circumstances, we
             will treat it as zero with -funsafe-math-optimizations.  */
          if (rtx_equal_p (trueop0, trueop1)
              && ! side_effects_p (op0)
              && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
            return CONST0_RTX (mode);

          /* Change subtraction from zero into negation.  (0 - x) is the
             same as -x when x is NaN, infinite, or finite and nonzero.
             But if the mode has signed zeros, and does not round towards
             -infinity, then 0 - 0 is 0, not -0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
            return simplify_gen_unary (NEG, mode, op1, mode);

          /* (-1 - a) is ~a.  */
          if (trueop0 == constm1_rtx)
            return simplify_gen_unary (NOT, mode, op1, mode);

          /* Subtracting 0 has no effect unless the mode has signed zeros
             and supports rounding towards -infinity.  In such a case,
             0 - 0 is -0.  */
          if (!(HONOR_SIGNED_ZEROS (mode)
                && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
              && trueop1 == CONST0_RTX (mode))
            return op0;

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = - 1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 - coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }

          /* (a - (-b)) -> (a + b).  True even for IEEE.  */
          if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

          /* (-x - c) may be simplified as (-c - x).  */
          if (GET_CODE (op0) == NEG
              && (GET_CODE (op1) == CONST_INT
                  || GET_CODE (op1) == CONST_DOUBLE))
            {
              tem = simplify_unary_operation (NEG, mode, op1, mode);
              if (tem)
                return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
            }

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Don't let a relocatable value get a negative coeff.  */
          if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
            return simplify_gen_binary (PLUS, mode,
                                        op0,
                                        neg_const_int (mode, op1));

          /* (x - (x & y)) -> (x & ~y) */
          if (GET_CODE (op1) == AND)
            {
              if (rtx_equal_p (op0, XEXP (op1, 0)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                            GET_MODE (XEXP (op1, 1)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
              if (rtx_equal_p (op0, XEXP (op1, 1)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                            GET_MODE (XEXP (op1, 0)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
            }
          break;
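          /* Example for the (x - (x & y)) rule above: with x = 0b1100
             and y = 0b1010, x - (x & y) = 12 - 8 = 4, which equals
             x & ~y = 0b1100 & 0b0101 = 4.  */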
        case MULT:
          if (trueop1 == constm1_rtx)
            return simplify_gen_unary (NEG, mode, op0, mode);

          /* Maybe simplify x * 0 to 0.  The reduction is not valid if
             x is NaN, since x * 0 is then also NaN.  Nor is it valid
             when the mode has signed zeros, since multiplying a negative
             number by 0 will give -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop1 == CONST0_RTX (mode)
              && ! side_effects_p (op0))
            return op1;

          /* In IEEE floating point, x*1 is not equivalent to x for
             signalling NaNs.  */
          if (!HONOR_SNANS (mode)
              && trueop1 == CONST1_RTX (mode))
            return op0;

          /* Convert multiply by constant power of two into shift unless
             we are still generating RTL.  This test is a kludge.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (val = exact_log2 (INTVAL (trueop1))) >= 0
              /* If the mode is larger than the host word size, and the
                 uppermost bit is set, then this isn't a power of two due
                 to implicit sign extension.  */
              && (width <= HOST_BITS_PER_WIDE_INT
                  || val != HOST_BITS_PER_WIDE_INT - 1)
              && ! rtx_equal_function_value_matters)
            return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

          /* x*2 is x+x and x*(-1) is -x */
          if (GET_CODE (trueop1) == CONST_DOUBLE
              && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
              && GET_MODE (op0) == mode)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (REAL_VALUES_EQUAL (d, dconst2))
                return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

              if (REAL_VALUES_EQUAL (d, dconstm1))
                return simplify_gen_unary (NEG, mode, op0, mode);
            }

          /* Reassociate multiplication, but for floating point MULTs
             only when the user specifies unsafe math optimizations.  */
          if (! FLOAT_MODE_P (mode)
              || flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;
== const0_rtx
)
1776 if (GET_CODE (trueop1
) == CONST_INT
1777 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1778 == GET_MODE_MASK (mode
)))
1780 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
1782 /* A | (~A) -> -1 */
1783 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1784 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1785 && ! side_effects_p (op0
)
1786 && GET_MODE_CLASS (mode
) != MODE_CC
)
1788 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1794 if (trueop1
== const0_rtx
)
1796 if (GET_CODE (trueop1
) == CONST_INT
1797 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1798 == GET_MODE_MASK (mode
)))
1799 return simplify_gen_unary (NOT
, mode
, op0
, mode
);
1800 if (trueop0
== trueop1
&& ! side_effects_p (op0
)
1801 && GET_MODE_CLASS (mode
) != MODE_CC
)
1803 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
1809 if (trueop1
== const0_rtx
&& ! side_effects_p (op0
))
1811 if (GET_CODE (trueop1
) == CONST_INT
1812 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
1813 == GET_MODE_MASK (mode
)))
1815 if (trueop0
== trueop1
&& ! side_effects_p (op0
)
1816 && GET_MODE_CLASS (mode
) != MODE_CC
)
1819 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
1820 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
1821 && ! side_effects_p (op0
)
1822 && GET_MODE_CLASS (mode
) != MODE_CC
)
1824 tem
= simplify_associative_operation (code
, mode
, op0
, op1
);
        case UDIV:
          /* Convert divide by power of two into shift (divide by 1 handled
             below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
            return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));

          /* Fall through....  */

        case DIV:
          if (trueop1 == CONST1_RTX (mode))
            {
              /* On some platforms DIV uses narrower mode than its
                 operands.  */
              rtx x = gen_lowpart_common (mode, op0);
              if (x)
                return x;
              else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                return gen_lowpart_SUBREG (mode, op0);
              else
                return op0;
            }

          /* Maybe change 0 / x to 0.  This transformation isn't safe for
             modes with NaNs, since 0 / 0 will then be NaN rather than 0.
             Nor is it safe for modes with signed zeros, since dividing
             0 by a negative number gives -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop0 == CONST0_RTX (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Change division by a constant into multiplication.  Only do
             this with -funsafe-math-optimizations.  */
          else if (GET_CODE (trueop1) == CONST_DOUBLE
                   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
                   && trueop1 != CONST0_RTX (mode)
                   && flag_unsafe_math_optimizations)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (! REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
          break;

        case UMOD:
          /* Handle modulus by power of two (mod with 1 handled below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && exact_log2 (INTVAL (trueop1)) > 0)
            return simplify_gen_binary (AND, mode, op0,
                                        GEN_INT (INTVAL (op1) - 1));
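          /* Example: (umod:SI X (const_int 8)) becomes
             (and:SI X (const_int 7)), since x % 8 equals x & 7 for
             unsigned x.  */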
          /* Fall through....  */

        case MOD:
          if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
              && ! side_effects_p (op0) && ! side_effects_p (op1))
            return const0_rtx;
          break;

        case ROTATERT:
        case ROTATE:
        case ASHIFTRT:
          /* Rotating ~0 always results in ~0.  */
          if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Fall through....  */

        case ASHIFT:
        case LSHIFTRT:
          if (trueop1 == const0_rtx)
            return op0;
          if (trueop0 == const0_rtx && ! side_effects_p (op1))
            return op0;
          break;

        case SMIN:
          if (width <= HOST_BITS_PER_WIDE_INT
              && GET_CODE (trueop1) == CONST_INT
              && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
              && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case SMAX:
          if (width <= HOST_BITS_PER_WIDE_INT
              && GET_CODE (trueop1) == CONST_INT
              && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
                  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
              && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UMIN:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UMAX:
          if (trueop1 == constm1_rtx && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case SS_PLUS:
        case US_PLUS:
        case SS_MINUS:
        case US_MINUS:
          /* ??? There are simplifications that can be done.  */
          return 0;
        case VEC_SELECT:
          if (!VECTOR_MODE_P (mode))
            {
              if (!VECTOR_MODE_P (GET_MODE (trueop0))
                  || (mode
                      != GET_MODE_INNER (GET_MODE (trueop0)))
                  || GET_CODE (trueop1) != PARALLEL
                  || XVECLEN (trueop1, 0) != 1
                  || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
                abort ();

              if (GET_CODE (trueop0) == CONST_VECTOR)
                return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
            }
          else
            {
              if (!VECTOR_MODE_P (GET_MODE (trueop0))
                  || (GET_MODE_INNER (mode)
                      != GET_MODE_INNER (GET_MODE (trueop0)))
                  || GET_CODE (trueop1) != PARALLEL)
                abort ();

              if (GET_CODE (trueop0) == CONST_VECTOR)
                {
                  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                  rtvec v = rtvec_alloc (n_elts);
                  unsigned int i;

                  if (XVECLEN (trueop1, 0) != (int) n_elts)
                    abort ();
                  for (i = 0; i < n_elts; i++)
                    {
                      rtx x = XVECEXP (trueop1, 0, i);

                      if (GET_CODE (x) != CONST_INT)
                        abort ();
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
                    }

                  return gen_rtx_CONST_VECTOR (mode, v);
                }
            }
          return 0;

        case VEC_CONCAT:
          {
            enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                          ? GET_MODE (trueop0)
                                          : GET_MODE_INNER (mode));
            enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                          ? GET_MODE (trueop1)
                                          : GET_MODE_INNER (mode));

            if (!VECTOR_MODE_P (mode)
                || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                    != GET_MODE_SIZE (mode)))
              abort ();

            if ((VECTOR_MODE_P (op0_mode)
                 && (GET_MODE_INNER (mode)
                     != GET_MODE_INNER (op0_mode)))
                || (!VECTOR_MODE_P (op0_mode)
                    && GET_MODE_INNER (mode) != op0_mode))
              abort ();

            if ((VECTOR_MODE_P (op1_mode)
                 && (GET_MODE_INNER (mode)
                     != GET_MODE_INNER (op1_mode)))
                || (!VECTOR_MODE_P (op1_mode)
                    && GET_MODE_INNER (mode) != op1_mode))
              abort ();

            if ((GET_CODE (trueop0) == CONST_VECTOR
                 || GET_CODE (trueop0) == CONST_INT
                 || GET_CODE (trueop0) == CONST_DOUBLE)
                && (GET_CODE (trueop1) == CONST_VECTOR
                    || GET_CODE (trueop1) == CONST_INT
                    || GET_CODE (trueop1) == CONST_DOUBLE))
              {
                int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                rtvec v = rtvec_alloc (n_elts);
                unsigned int i;
                unsigned in_n_elts = 1;

                if (VECTOR_MODE_P (op0_mode))
                  in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
                for (i = 0; i < n_elts; i++)
                  {
                    if (i < in_n_elts)
                      {
                        if (!VECTOR_MODE_P (op0_mode))
                          RTVEC_ELT (v, i) = trueop0;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                      }
                    else
                      {
                        if (!VECTOR_MODE_P (op1_mode))
                          RTVEC_ELT (v, i) = trueop1;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                               i - in_n_elts);
                      }
                  }

                return gen_rtx_CONST_VECTOR (mode, v);
              }
          }
          return 0;

        default:
          abort ();
        }

      return 0;
    }
  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }

  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case AND:
      val = arg0 & arg1;
      break;

    case IOR:
      val = arg0 | arg1;
      break;

    case XOR:
      val = arg0 ^ arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
         what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
        return 0;

      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
        return 0;

      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
        return 0;

      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
         Manually extend the sign to insure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
        val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
             | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
             | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
             <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
             > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
      /* ??? There are simplifications that can be done.  */
      return 0;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
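/* As a concrete example of the CONST_INT arithmetic above: folding
   (rotate:QI (const_int 0x81) (const_int 1)) computes
   (0x81 << 1) | (0x81 >> 7) = 0x103, which trunc_int_for_mode reduces
   to the QImode value 3.  */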
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
          - commutative_operand_precedence (d1->op));
}
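/* Note that d2 and d1 are deliberately reversed, so this comparison
   sorts operands with *higher* commutative_operand_precedence first;
   constants, which have the lowest precedence, end up at the end of
   the array after the qsort in simplify_plus_minus below.  */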
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1, int force)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, changed;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
        {
          rtx this_op = ops[i].op;
          int this_neg = ops[i].neg;
          enum rtx_code this_code = GET_CODE (this_op);

          switch (this_code)
            {
            case PLUS:
            case MINUS:
              if (n_ops == 7)
                return NULL_RTX;

              ops[n_ops].op = XEXP (this_op, 1);
              ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
              n_ops++;

              ops[i].op = XEXP (this_op, 0);
              input_ops++;
              changed = 1;
              break;

            case NEG:
              ops[i].op = XEXP (this_op, 0);
              ops[i].neg = ! this_neg;
              changed = 1;
              break;

            case CONST:
              if (n_ops < 7
                  && GET_CODE (XEXP (this_op, 0)) == PLUS
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
                {
                  ops[i].op = XEXP (XEXP (this_op, 0), 0);
                  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
                  ops[n_ops].neg = this_neg;
                  n_ops++;
                  input_consts++;
                  changed = 1;
                }
              break;

            case NOT:
              /* ~a -> (-a - 1) */
              if (n_ops != 7)
                {
                  ops[n_ops].op = constm1_rtx;
                  ops[n_ops++].neg = this_neg;
                  ops[i].op = XEXP (this_op, 0);
                  ops[i].neg = !this_neg;
                  changed = 1;
                }
              break;

            case CONST_INT:
              if (this_neg)
                {
                  ops[i].op = neg_const_int (mode, this_op);
                  ops[i].neg = 0;
                  changed = 1;
                }
              break;

            default:
              break;
            }
        }
    }
  while (changed);

  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2 && !force)
    return NULL_RTX;

  /* Count the number of CONSTs we didn't split above.  */
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      input_consts++;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  first = 1;
  do
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
        for (j = i + 1; j < n_ops; j++)
          {
            rtx lhs = ops[i].op, rhs = ops[j].op;
            int lneg = ops[i].neg, rneg = ops[j].neg;

            if (lhs != 0 && rhs != 0
                && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
              {
                enum rtx_code ncode = PLUS;

                if (lneg != rneg)
                  {
                    ncode = MINUS;
                    if (lneg)
                      tem = lhs, lhs = rhs, rhs = tem;
                  }
                else if (swap_commutative_operands_p (lhs, rhs))
                  tem = lhs, lhs = rhs, rhs = tem;

                tem = simplify_binary_operation (ncode, mode, lhs, rhs);

                /* Reject "simplifications" that just wrap the two
                   arguments in a CONST.  Failure to do so can result
                   in infinite recursion with simplify_binary_operation
                   when it calls us to simplify CONST operations.  */
                if (tem
                    && ! (GET_CODE (tem) == CONST
                          && GET_CODE (XEXP (tem, 0)) == ncode
                          && XEXP (XEXP (tem, 0), 0) == lhs
                          && XEXP (XEXP (tem, 0), 1) == rhs)
                    /* Don't allow -x + -1 -> ~x simplifications in the
                       first pass.  This allows us the chance to combine
                       the -1 with other constants.  */
                    && ! (first
                          && GET_CODE (tem) == NOT
                          && XEXP (tem, 0) == rhs))
                  {
                    lneg &= rneg;
                    if (GET_CODE (tem) == NEG)
                      tem = XEXP (tem, 0), lneg = !lneg;
                    if (GET_CODE (tem) == CONST_INT && lneg)
                      tem = neg_const_int (mode, tem), lneg = 0;

                    ops[i].op = tem;
                    ops[i].neg = lneg;
                    ops[j].op = NULL_RTX;
                    changed = 1;
                  }
              }
          }

      first = 0;
    }
  while (changed);

  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
2472 /* Create (minus -C X) instead of (neg (const (plus X C))). */
2474 && GET_CODE (ops
[1].op
) == CONST_INT
2475 && CONSTANT_P (ops
[0].op
)
2477 return gen_rtx_fmt_ee (MINUS
, mode
, ops
[1].op
, ops
[0].op
);
2479 /* We suppressed creation of trivial CONST expressions in the
2480 combination loop to avoid recursion. Create one manually now.
2481 The combination loop should have ensured that there is exactly
2482 one CONST_INT, and the sort will have ensured that it is last
2483 in the array and that any other constant will be next-to-last. */
2486 && GET_CODE (ops
[n_ops
- 1].op
) == CONST_INT
2487 && CONSTANT_P (ops
[n_ops
- 2].op
))
2489 rtx value
= ops
[n_ops
- 1].op
;
2490 if (ops
[n_ops
- 1].neg
^ ops
[n_ops
- 2].neg
)
2491 value
= neg_const_int (mode
, value
);
2492 ops
[n_ops
- 2].op
= plus_constant (ops
[n_ops
- 2].op
, INTVAL (value
));
2496 /* Count the number of CONSTs that we generated. */
2498 for (i
= 0; i
< n_ops
; i
++)
2499 if (GET_CODE (ops
[i
].op
) == CONST
)
2502 /* Give up if we didn't reduce the number of operands we had. Make
2503 sure we count a CONST as two operands. If we have the same
2504 number of operands, but have made more CONSTs than before, this
2505 is also an improvement, so accept it. */
2507 && (n_ops
+ n_consts
> input_ops
2508 || (n_ops
+ n_consts
== input_ops
&& n_consts
<= input_consts
)))
2511 /* Put a non-negated operand first, if possible. */
2513 for (i
= 0; i
< n_ops
&& ops
[i
].neg
; i
++)
2516 ops
[0].op
= gen_rtx_NEG (mode
, ops
[0].op
);
2525 /* Now make the result by performing the requested operations. */
2527 for (i
= 1; i
< n_ops
; i
++)
2528 result
= gen_rtx_fmt_ee (ops
[i
].neg
? MINUS
: PLUS
,
2529 mode
, result
, ops
[i
].op
);
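
/* A sketch (not compiled) of what the routine above does to a nested
   PLUS/MINUS tree when reached through simplify_gen_binary.  The
   register numbers are invented.  */
#if 0
{
  rtx a = gen_rtx_REG (SImode, 100);
  rtx b = gen_rtx_REG (SImode, 101);

  /* (A + 4) - (B - 2): the expansion loop flattens this into the
     ops[] array as { +A, +4, -B, +2 }, the combination loop folds the
     two constants into +6, and the result loop rebuilds something
     equivalent to (plus (minus (reg 100) (reg 101)) (const_int 6)),
     modulo the operand order imposed by the qsort above.  */
  rtx x = simplify_gen_binary (MINUS, SImode,
			       gen_rtx_PLUS (SImode, a, GEN_INT (4)),
			       gen_rtx_MINUS (SImode, b, GEN_INT (2)));
}
#endif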
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.  */
rtx
simplify_const_relational_operation (enum rtx_code code,
				     enum machine_mode mode,
				     rtx op0, rtx op1)
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem, trueop0, trueop1;

  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
	  || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases can not be ignored; but we cannot do it even for
     signed comparisons for languages such as Java, so test flag_wrapv.  */

  if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
	    && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this for == or != if tem is a nonzero address.  */
      && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);
  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  */
  if (! HONOR_NANS (GET_MODE (trueop0))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
	   && GET_CODE (trueop1) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      equal = REAL_VALUES_EQUAL (d0, d1);
      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
    }
  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
	   && (GET_CODE (trueop0) == CONST_DOUBLE
	       || GET_CODE (trueop0) == CONST_INT)
	   && (GET_CODE (trueop1) == CONST_DOUBLE
	       || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
	{
	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
	}
      else
	{
	  l0u = l0s = INTVAL (trueop0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	{
	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
	}
      else
	{
	  l1u = l1s = INTVAL (trueop1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l0s |= ((HOST_WIDE_INT) (-1) << width);

	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }
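
      /* Worked example (values invented): on a host with a 32-bit
	 HOST_WIDE_INT, comparing the DImode constants 2^32 and
	 0xffffffff gives (h0,l0) = (1,0) and (h1,l1) = (0,0xffffffff),
	 so equal = 0, op0lt = op0ltu = 0 and op1lt = op1ltu = 1; both
	 GT and GTU then fold to const_true_rtx in the switch at the
	 end of the function.  */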
  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
	{
	case EQ:
	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
	    return const0_rtx;
	  break;

	case NE:
	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
	    return const_true_rtx;
	  break;

	case GEU:
	  /* Unsigned values are never negative.  */
	  if (trueop1 == const0_rtx)
	    return const_true_rtx;
	  break;

	case LTU:
	  if (trueop1 == const0_rtx)
	    return const0_rtx;
	  break;

	case LEU:
	  /* Unsigned values are never greater than the largest
	     unsigned value.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const_true_rtx;
	  break;

	case GTU:
	  if (GET_CODE (trueop1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const0_rtx;
	  break;

	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const_true_rtx;
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  if (trueop1 == CONST0_RTX (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const_true_rtx;
	    }
	  break;

	default:
	  break;
	}

      return 0;
    }
  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}
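
/* A usage sketch (not compiled) for the routine above; the mode and
   constants are arbitrary examples.  */
#if 0
{
  rtx t = simplify_const_relational_operation (LTU, SImode,
					       GEN_INT (1), GEN_INT (-1));
  /* In SImode, -1 is zero-extended to 0xffffffff for the unsigned
     test, so 1 <u 0xffffffff holds and t == const_true_rtx.  */
}
#endif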
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result, and CMP_MODE is the mode of the operands.
   If CMP_MODE is VOIDmode, both operands must also be VOIDmode and we
   compare the operands in "infinite precision".  */

rtx
simplify_relational_operation (enum rtx_code code,
			       enum machine_mode mode ATTRIBUTE_UNUSED,
			       enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tmp;

  tmp = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tmp == 0)
    return 0;

#ifdef FLOAT_STORE_FLAG_VALUE
  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      if (tmp == const0_rtx)
	return CONST0_RTX (mode);
      return CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
					   mode);
    }
#endif

  return tmp;
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */
rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
			    enum machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant.  */
	  HOST_WIDE_INT val = INTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return GEN_INT (val);
	}
      break;
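
      /* A sketch (not compiled) of the bit-field fold above, assuming
	 BITS_BIG_ENDIAN == 0; the constants are invented.  */
#if 0
      {
	rtx t = simplify_ternary_operation (SIGN_EXTRACT, SImode, SImode,
					    GEN_INT (0xb4), GEN_INT (4),
					    GEN_INT (4));
	/* val = 0xb4 >> 4 = 0xb; masking to the 4-bit field keeps 0xb,
	   whose top bit is set, so the sign is propagated and
	   t == GEN_INT (-5).  */
      }
#endif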
    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;

      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_const_relational_operation (GET_CODE (op0),
						      cmp_mode,
						      XEXP (op0, 0),
						      XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp == const0_rtx)
	    return op2;
	  else if (temp == const_true_rtx)
	    return op1;
	  else if (temp)
	    abort ();

	  /* Look for happy constants in op1 and op2.  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
      break;
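
      /* A sketch (not compiled) of the "happy constants" case above,
	 assuming a target where STORE_FLAG_VALUE is 1; the registers
	 are invented.  */
#if 0
      {
	rtx x = gen_rtx_REG (SImode, 102), y = gen_rtx_REG (SImode, 103);
	rtx t = simplify_ternary_operation (IF_THEN_ELSE, SImode, SImode,
					    gen_rtx_LTU (SImode, x, y),
					    const1_rtx, const0_rtx);
	/* t is the bare comparison (ltu:SI (reg 102) (reg 103)); with
	   the constants swapped, the comparison code would instead be
	   reversed via reversed_comparison_code.  */
      }
#endif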
    case VEC_MERGE:
      if (GET_MODE (op0) != mode
	  || GET_MODE (op1) != mode
	  || !VECTOR_MODE_P (mode))
	abort ();
      op2 = avoid_constant_pool_reference (op2);
      if (GET_CODE (op2) == CONST_INT)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  int mask = (1 << n_elts) - 1;

	  if (!(INTVAL (op2) & mask))
	    return op1;
	  if ((INTVAL (op2) & mask) == mask)
	    return op0;

	  op0 = avoid_constant_pool_reference (op0);
	  op1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (op0) == CONST_VECTOR
	      && GET_CODE (op1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
				    ? CONST_VECTOR_ELT (op0, i)
				    : CONST_VECTOR_ELT (op1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}
      break;

    default:
      abort ();
    }

  return 0;
}
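
/* Worked example for the VEC_MERGE case above (mode and values
   invented): merging two V4SI CONST_VECTORs under the mask
   (const_int 5) selects elements 0 and 2 from OP0 and elements 1 and
   3 from OP1, since bits 0 and 2 of the mask are set.  Masks of 0 and
   0xf short-circuit to OP1 and OP0 respectively, without allocating a
   new vector.  */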
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
   returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */
static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
    return op;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }

  if (BITS_PER_UNIT % value_bit != 0)
    abort ();  /* Too complicated; reducing value_bit may help.  */
  if (elem_bitsize % BITS_PER_UNIT != 0)
    abort ();  /* I don't know how to handle endianness of sub-units.  */
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
	    {
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
		abort ();

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}
	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < max_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
	    {
	      long tmp[max_bitsize / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      if (bitsize > elem_bitsize)
		abort ();
	      if (bitsize % value_bit != 0)
		abort ();

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  else
	    abort ();
	  break;

	default:
	  abort ();
	}
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  if (byte >= GET_MODE_SIZE (innermode))
    abort ();

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);
  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  if (elem_bitsize % value_bit != 0)
    abort ();
  if (elem_bitsize + value_start * value_bit > max_bitsize)
    abort ();
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
		     << (i - HOST_BITS_PER_WIDE_INT));

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	  }
	  break;

	case MODE_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	default:
	  abort ();
	}
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return elems[0];
}
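
/* A sketch (not compiled) of the unpack/repack machinery above,
   assuming a little-endian host and target with a 64-bit
   HOST_WIDE_INT; the constant is invented.  */
#if 0
{
  /* Reading the low SImode word of a DImode constant: value[] holds
     the bytes 88 77 66 55 44 33 22 11 (least significant first), the
     adjusted BYTE is 0, and repacking the first four bytes yields
     (const_int 0x55667788).  */
  rtx lo = simplify_subreg (SImode, GEN_INT (0x1122334455667788LL),
			    DImode, 0);
}
#endif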
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (outermode == innermode && !byte)
    return op;

  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx new;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  An irritating exception is the paradoxical subreg,
	 where we define SUBREG_BYTE to be 0.  On big endian machines,
	 this value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      new = simplify_subreg (outermode, SUBREG_REG (op),
			     GET_MODE (SUBREG_REG (op)),
			     final_offset);
      if (new)
	return new;
      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
    }
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (GET_CODE (op) == REG
      && (! REG_FUNCTION_VALUE_P (op)
	  || ! rtx_equal_function_value_matters)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
	  || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
	      ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
					byte, outermode))
    {
      rtx tem = gen_rtx_SUBREG (outermode, op, byte);
      int final_regno = subreg_hard_regno (tem, 0);

      /* ??? We do allow it if the current REG is not valid for
	 its mode.  This is a kludge to work around how float/complex
	 arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
	  || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
	{
	  rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis,
	     which cannot grok partial registers anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (GET_CODE (op) == MEM
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
      unsigned int final_offset;
      rtx res;

      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      /* We can at least simplify it by referring directly to the
	 relevant part.  */
      return gen_rtx_SUBREG (outermode, part, final_offset);
    }
3480 if ((GET_CODE (op
) == ZERO_EXTEND
3481 || GET_CODE (op
) == SIGN_EXTEND
)
3482 && GET_MODE_BITSIZE (outermode
) < GET_MODE_BITSIZE (innermode
))
3484 unsigned int bitpos
= subreg_lsb_1 (outermode
, innermode
, byte
);
3486 /* If we're requesting the lowpart of a zero or sign extension,
3487 there are three possibilities. If the outermode is the same
3488 as the origmode, we can omit both the extension and the subreg.
3489 If the outermode is not larger than the origmode, we can apply
3490 the truncation without the extension. Finally, if the outermode
3491 is larger than the origmode, but both are integer modes, we
3492 can just extend to the appropriate mode. */
3495 enum machine_mode origmode
= GET_MODE (XEXP (op
, 0));
3496 if (outermode
== origmode
)
3497 return XEXP (op
, 0);
3498 if (GET_MODE_BITSIZE (outermode
) <= GET_MODE_BITSIZE (origmode
))
3499 return simplify_gen_subreg (outermode
, XEXP (op
, 0), origmode
,
3500 subreg_lowpart_offset (outermode
,
3502 if (SCALAR_INT_MODE_P (outermode
))
3503 return simplify_gen_unary (GET_CODE (op
), outermode
,
3504 XEXP (op
, 0), origmode
);
3507 /* A SUBREG resulting from a zero extension may fold to zero if
3508 it extracts higher bits that the ZERO_EXTEND's source bits. */
3509 if (GET_CODE (op
) == ZERO_EXTEND
3510 && bitpos
>= GET_MODE_BITSIZE (GET_MODE (XEXP (op
, 0))))
3511 return CONST0_RTX (outermode
);
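
/* A sketch (not compiled) of the extension-truncation rules above,
   assuming a little-endian target (so byte 0 is the lowpart); the
   register number is invented.  */
#if 0
{
  rtx ext = gen_rtx_ZERO_EXTEND (SImode, gen_rtx_REG (QImode, 120));

  /* Same mode as the extension's source: drop both.  */
  rtx a = simplify_subreg (QImode, ext, SImode, 0);	/* (reg:QI 120) */

  /* Wider than the source but narrower than the extension:
     re-extend to the narrower mode instead.  */
  rtx b = simplify_subreg (HImode, ext, SImode, 0);
  /* b == (zero_extend:HI (reg:QI 120)).  */
}
#endif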
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx new;

  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (GET_CODE (op) == QUEUED)
    return NULL_RTX;

  new = simplify_subreg (outermode, op, innermode, byte);
  if (new)
    return new;

  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  return gen_rtx_SUBREG (outermode, op, byte);
}
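
/* A usage sketch (not compiled) contrasting the wrapper above with
   simplify_subreg: for a pseudo register nothing simplifies, so it
   falls through to gen_rtx_SUBREG.  The register number is invented.  */
#if 0
{
  rtx lo = simplify_gen_subreg (SImode, gen_rtx_REG (DImode, 130),
				DImode, 0);
  /* simplify_subreg returns 0 for a pseudo, so lo ends up as
     (subreg:SI (reg:DI 130) 0).  */
}
#endif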
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  rtx temp;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      temp = simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0), XEXP (x, 1));
      return temp;

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_gen_subreg (mode, SUBREG_REG (x),
				    GET_MODE (SUBREG_REG (x)),
				    SUBREG_BYTE (x));
      if (code == CONSTANT_P_RTX)
	{
	  if (CONSTANT_P (XEXP (x, 0)))
	    return const1_rtx;
	}
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
	    return XEXP (x, 1);
	}
      break;

    default:
      break;
    }

  return NULL;
}