/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
25 #include "coretypes.h"
31 #include "hard-reg-set.h"
34 #include "insn-config.h"

/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
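
/* For example (illustrative, not part of the original source): with a
   64-bit HOST_WIDE_INT, the pair view of the value -2 is
   low == 0xfffffffffffffffe and HWI_SIGN_EXTEND (low) == -1, while for
   2 the high word is 0.  */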

static rtx neg_const_int (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
                                rtx, int);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */
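/* Illustrative case: in QImode, (const_int -128) is the maximally
   negative value; mathematical negation yields +128, which QImode
   cannot represent, so gen_int_mode truncates the result back into
   the mode.  */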
static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */
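/* Usage sketch (illustrative): simplify_gen_binary (PLUS, SImode,
   GEN_INT (2), reg) first swaps the operands so the constant comes
   second, then returns (plus:SI (reg) (const_int 2)) unless the whole
   expression folds to a constant.  */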
rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */
  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
        return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
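/* Illustrative case: on targets that load floating-point constants
   from memory, X may be (mem (symbol_ref ...)) pointing into the
   constant pool; this function returns the CONST_DOUBLE stored in
   that pool entry instead of the MEM.  */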
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */
rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */
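/* Illustrative case: simplify_gen_relational (NE, SImode, VOIDmode,
   (eq:SI x y), const0_rtx) strips the redundant NE-against-zero via
   the COMPARISON_P case below and returns (eq:SI x y) unchanged.  */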
rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  if (cmp_mode != VOIDmode)
    {
      tem = simplify_relational_operation (code, mode, cmp_mode, op0, op1);
      if (tem)
        return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  /* If op0 is a comparison, extract the comparison arguments from it.  */
  if (COMPARISON_P (op0) && op1 == const0_rtx)
    {
      if (code == NE)
        {
          if (GET_MODE (op0) == mode)
            return op0;
          return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
                                          XEXP (op0, 0), XEXP (op0, 1));
        }
      else if (code == EQ)
        {
          enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
          if (new != UNKNOWN)
            return simplify_gen_relational (new, mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}

/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */
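/* Illustrative case: with X = (plus (reg 1) (reg 1)), OLD = (reg 1)
   and NEW = (const_int 2), the recursive substitution produces
   (plus (const_int 2) (const_int 2)), which simplify_gen_binary then
   folds to (const_int 4).  */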
rtx
simplify_replace_rtx (rtx x, rtx old, rtx new)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (op1, old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old, new);
      op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
      op2 = simplify_replace_rtx (XEXP (x, 2), old, new);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old, new);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
          op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      else if (code == REG)
        {
          if (REG_P (old) && REGNO (x) == REGNO (old))
            return new;
        }
      break;

    default:
      break;
    }
  return x;
}

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && !VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE (trueop))
        abort ();
      if (GET_MODE (trueop) != VOIDmode
          && VECTOR_MODE_P (GET_MODE (trueop))
          && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
        abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
          || GET_CODE (trueop) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (trueop) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = trueop;
          else
            {
              enum machine_mode inmode = GET_MODE (trueop);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              if (in_n_elts >= n_elts || n_elts % in_n_elts)
                abort ();
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  else if (GET_CODE (op) == CONST)
    return simplify_unary_operation (code, mode, XEXP (op, 0), op_mode);

  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (trueop, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
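
  /* Illustrative case of the element-wise fold above:
     (neg:V4SI (const_vector [1 2 3 4])) becomes
     (const_vector [-1 -2 -3 -4]); if any element fails to simplify,
     the whole vector fold is abandoned.  */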

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
        lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
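          /* Worked example (illustrative): for arg0 == 0x58 (binary
             1011000), arg0 & -arg0 isolates the lowest set bit, 0x8;
             exact_log2 gives 3, so FFS yields 4, the 1-based bit
             position.  For arg0 == 0, exact_log2 returns -1 and the
             result is 0.  */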
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          if (op_mode == VOIDmode)
            abort ();
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              if (width != GET_MODE_BITSIZE (op_mode))
                abort ();
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        default:
          return 0;
        }

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }

  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (trueop) == CONST_DOUBLE
               || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
        l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          if (op_mode == VOIDmode)
            abort ();

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (trueop));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
          }
          break;
        default:
          abort ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2*HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, trueop);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2*HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          abort ();
        }
      return immed_double_const (xl, xh, mode);
    }
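
  /* Bounds illustration (not in the original source): for SImode
     (width == 32) on a 64-bit host, the FIX case computes th == 0 and
     tl == 0x7fffffff, so a constant like 3e9 saturates to INT_MAX
     instead of wrapping.  */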

  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
         aren't constant.  */
      switch (code)
        {
        case NOT:
          /* (not (not X)) == X.  */
          if (GET_CODE (op) == NOT)
            return XEXP (op, 0);

          /* (not (eq X Y)) == (ne X Y), etc.  */
          if (COMPARISON_P (op)
              && (mode == BImode || STORE_FLAG_VALUE == -1)
              && ((reversed = reversed_comparison_code (op, NULL_RTX))
                  != UNKNOWN))
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (plus X -1)) can become (neg X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == constm1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

          /* Similarly, (not (neg X)) is (plus X -1).  */
          if (GET_CODE (op) == NEG)
            return plus_constant (XEXP (op, 0), -1);

          /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
          if (GET_CODE (op) == XOR
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && (temp = simplify_unary_operation (NOT, mode,
                                                   XEXP (op, 1),
                                                   mode)) != 0)
            return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

          /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
             operands other than 1, but that is not valid.  We could do a
             similar simplification for (not (lshiftrt C X)) where C is
             just the sign bit, but this doesn't seem common enough to
             bother with.  */
          if (GET_CODE (op) == ASHIFT
              && XEXP (op, 0) == const1_rtx)
            {
              temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
              return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
            }

          /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
             by reversing the comparison code if valid.  */
          if (STORE_FLAG_VALUE == -1
              && COMPARISON_P (op)
              && (reversed = reversed_comparison_code (op, NULL_RTX))
                 != UNKNOWN)
            return simplify_gen_relational (reversed, mode, VOIDmode,
                                            XEXP (op, 0), XEXP (op, 1));

          /* (not (ashiftrt foo C)) where C is the number of bits in FOO
             minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
             so we can perform the above simplification.  */

          if (STORE_FLAG_VALUE == -1
              && GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_relational (GE, mode, VOIDmode,
                                            XEXP (op, 0), const0_rtx);
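
          /* Concrete instance (illustrative): in SImode with
             STORE_FLAG_VALUE == -1, (not (ashiftrt x 31)) becomes
             (ge x (const_int 0)), since the arithmetic shift smears
             the sign bit across the whole word.  */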
          break;

        case NEG:
          /* (neg (neg X)) == X.  */
          if (GET_CODE (op) == NEG)
            return XEXP (op, 0);

          /* (neg (plus X 1)) can become (not X).  */
          if (GET_CODE (op) == PLUS
              && XEXP (op, 1) == const1_rtx)
            return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

          /* Similarly, (neg (not X)) is (plus X 1).  */
          if (GET_CODE (op) == NOT)
            return plus_constant (XEXP (op, 0), 1);

          /* (neg (minus X Y)) can become (minus Y X).  This transformation
             isn't safe for modes with signed zeros, since if X and Y are
             both +0, (minus Y X) is the same as (minus X Y).  If the
             rounding mode is towards +infinity (or -infinity) then the two
             expressions will be rounded differently.  */
          if (GET_CODE (op) == MINUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
                                        XEXP (op, 0));

          if (GET_CODE (op) == PLUS
              && !HONOR_SIGNED_ZEROS (mode)
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              /* (neg (plus A C)) is simplified to (minus -C A).  */
              if (GET_CODE (XEXP (op, 1)) == CONST_INT
                  || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
                {
                  temp = simplify_unary_operation (NEG, mode, XEXP (op, 1),
                                                   mode);
                  if (temp)
                    return simplify_gen_binary (MINUS, mode, temp,
                                                XEXP (op, 0));
                }

              /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
            }

          /* (neg (mult A B)) becomes (mult (neg A) B).
             This works even for floating-point values.  */
          if (GET_CODE (op) == MULT
              && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
            {
              temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
              return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
            }

          /* NEG commutes with ASHIFT since it is multiplication.  Only do
             this if we can then eliminate the NEG (e.g., if the operand
             is a constant).  */
          if (GET_CODE (op) == ASHIFT)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
                                               mode);
              if (temp)
                return simplify_gen_binary (ASHIFT, mode, temp,
                                            XEXP (op, 1));
            }

          /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
             C is equal to the width of MODE minus 1.  */
          if (GET_CODE (op) == ASHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_binary (LSHIFTRT, mode,
                                        XEXP (op, 0), XEXP (op, 1));

          /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
             C is equal to the width of MODE minus 1.  */
          if (GET_CODE (op) == LSHIFTRT
              && GET_CODE (XEXP (op, 1)) == CONST_INT
              && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        XEXP (op, 0), XEXP (op, 1));

          break;

        case SIGN_EXTEND:
          /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
             becomes just the MINUS if its mode is MODE.  This allows
             folding switch statements on machines using casesi (such as
             the VAX).  */
          if (GET_CODE (op) == TRUNCATE
              && GET_MODE (XEXP (op, 0)) == mode
              && GET_CODE (XEXP (op, 0)) == MINUS
              && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
              && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
            return XEXP (op, 0);

          /* Check for a sign extension of a subreg of a promoted
             variable, where the promotion is sign-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && ! SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (! POINTERS_EXTEND_UNSIGNED
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        case ZERO_EXTEND:
          /* Check for a zero extension of a subreg of a promoted
             variable, where the promotion is zero-extended, and the
             target mode is the same as the variable's promotion.  */
          if (GET_CODE (op) == SUBREG
              && SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op)
              && GET_MODE (XEXP (op, 0)) == mode)
            return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
          if (POINTERS_EXTEND_UNSIGNED > 0
              && mode == Pmode && GET_MODE (op) == ptr_mode
              && (CONSTANT_P (op)
                  || (GET_CODE (op) == SUBREG
                      && GET_CODE (SUBREG_REG (op)) == REG
                      && REG_POINTER (SUBREG_REG (op))
                      && GET_MODE (SUBREG_REG (op)) == Pmode)))
            return convert_memory_address (Pmode, op);
#endif
          break;

        default:
          break;
        }

      return 0;
    }
}

/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */
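/* Illustrative trace: for PLUS, ((x + 1) + (y + 2)) is linearized to
   (((x + 1) + y) + 2); the canonicalization below then lets nested
   constants migrate outward where later folding can combine them.  */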
static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = swap_commutative_operands_p (XEXP (op0, 1), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 1))
            : simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = swap_commutative_operands_p (XEXP (op0, 0), op1)
            ? simplify_binary_operation (code, mode, op1, XEXP (op0, 0))
            : simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}

/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop0, trueop1;
  rtx tem;

#ifdef ENABLE_CHECKING
  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == RTX_COMPARE
      || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
    abort ();
#endif

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  if (VECTOR_MODE_P (mode)
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op0_n_elts != n_elts || op1_n_elts != n_elts)
        abort ();

      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (trueop0, i),
                                             CONST_VECTOR_ELT (trueop1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND || code == IOR || code == XOR)
        {
          long tmp0[4];
          long tmp1[4];
          REAL_VALUE_TYPE r;
          int i;

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)
            {
              if (code == AND)
                tmp0[i] &= tmp1[i];
              else if (code == IOR)
                tmp0[i] |= tmp1[i];
              else if (code == XOR)
                tmp0[i] ^= tmp1[i];
            }
          real_from_target (&r, tmp0, mode);
          return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
        }
      else
        {
          REAL_VALUE_TYPE f0, f1, value;

          REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
          REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
          f0 = real_value_truncate (mode, f0);
          f1 = real_value_truncate (mode, f1);

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
            return 0;

          if (code == DIV
              && REAL_VALUES_EQUAL (f1, dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

          value = real_value_truncate (mode, value);
          return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
        }
    }

  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
        l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
        l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* Fall through....  */

        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:  case MOD:   case UDIV:  case UMOD:
          /* We'd need to include tree.h to do this and it doesn't seem worth
             it.  */
          return 0;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }

  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
         there are some cases worth simplifying.  */

      switch (code)
        {
        case PLUS:
          /* Maybe simplify x + 0 to x.  The two expressions are equivalent
             when x is NaN, infinite, or finite and nonzero.  They aren't
             when x is -0 and the rounding mode is not towards -infinity,
             since (-0) + 0 is then 0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
            return op0;

          /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
             transformations are safe even for IEEE.  */
          if (GET_CODE (op0) == NEG)
            return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
          else if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

          /* (~a) + 1 -> -a */
          if (INTEGRAL_MODE_P (mode)
              && GET_CODE (op0) == NOT
              && trueop1 == const1_rtx)
            return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

          /* Handle both-operands-constant cases.  We can only add
             CONST_INTs to constants since the sum of relocatable symbols
             can't be handled by most assemblers.  Don't add CONST_INT
             to CONST_INT since overflow won't be computed properly if wider
             than HOST_BITS_PER_WIDE_INT.  */

          if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
              && GET_CODE (op1) == CONST_INT)
            return plus_constant (op0, INTVAL (op1));
          else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
                   && GET_CODE (op0) == CONST_INT)
            return plus_constant (op1, INTVAL (op0));

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = -1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 + coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }
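
          /* Illustrative instance: (plus (mult x 3) x) yields
             coeff0 == 3 and coeff1 == 1 over equal operands, so the
             sum becomes (mult x 4); had_mult is set, so the MULT
             result is kept.  */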

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Reassociate floating point addition only when the user
             specifies unsafe math optimizations.  */
          if (FLOAT_MODE_P (mode)
              && flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;

        case COMPARE:
#ifdef HAVE_cc0
          /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
             using cc0, in which case we want to leave it as a COMPARE
             so we can distinguish it from a register-register-copy.

             In IEEE floating point, x-0 is not the same as x.  */

          if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
               || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
              && trueop1 == CONST0_RTX (mode))
            return op0;
#endif

          /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
          if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
               || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
              && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
            {
              rtx xop00 = XEXP (op0, 0);
              rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
              if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
              if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
                  && GET_MODE (xop00) == GET_MODE (xop10)
                  && REGNO (xop00) == REGNO (xop10)
                  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
                  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
                return xop00;
            }
          break;

        case MINUS:
          /* We can't assume x-x is 0 even with non-IEEE floating point,
             but since it is zero except in very strange circumstances, we
             will treat it as zero with -funsafe-math-optimizations.  */
          if (rtx_equal_p (trueop0, trueop1)
              && ! side_effects_p (op0)
              && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
            return CONST0_RTX (mode);

          /* Change subtraction from zero into negation.  (0 - x) is the
             same as -x when x is NaN, infinite, or finite and nonzero.
             But if the mode has signed zeros, and does not round towards
             -infinity, then 0 - 0 is 0, not -0.  */
          if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
            return simplify_gen_unary (NEG, mode, op1, mode);

          /* (-1 - a) is ~a.  */
          if (trueop0 == constm1_rtx)
            return simplify_gen_unary (NOT, mode, op1, mode);

          /* Subtracting 0 has no effect unless the mode has signed zeros
             and supports rounding towards -infinity.  In such a case,
             0 - 0 is -0.  */
          if (!(HONOR_SIGNED_ZEROS (mode)
                && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
              && trueop1 == CONST0_RTX (mode))
            return op0;

          /* See if this is something like X * C - X or vice versa or
             if the multiplication is written as a shift.  If so, we can
             distribute and make a new multiply, shift, or maybe just
             have X (if C is 2 in the example above).  But don't make
             real multiply if we didn't have one before.  */

          if (! FLOAT_MODE_P (mode))
            {
              HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
              rtx lhs = op0, rhs = op1;
              int had_mult = 0;

              if (GET_CODE (lhs) == NEG)
                coeff0 = -1, lhs = XEXP (lhs, 0);
              else if (GET_CODE (lhs) == MULT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
                {
                  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (lhs) == ASHIFT
                       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
                       && INTVAL (XEXP (lhs, 1)) >= 0
                       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
                  lhs = XEXP (lhs, 0);
                }

              if (GET_CODE (rhs) == NEG)
                coeff1 = - 1, rhs = XEXP (rhs, 0);
              else if (GET_CODE (rhs) == MULT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
                {
                  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
                  had_mult = 1;
                }
              else if (GET_CODE (rhs) == ASHIFT
                       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
                       && INTVAL (XEXP (rhs, 1)) >= 0
                       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
                {
                  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
                  rhs = XEXP (rhs, 0);
                }

              if (rtx_equal_p (lhs, rhs))
                {
                  tem = simplify_gen_binary (MULT, mode, lhs,
                                             GEN_INT (coeff0 - coeff1));
                  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
                }
            }

          /* (a - (-b)) -> (a + b).  True even for IEEE.  */
          if (GET_CODE (op1) == NEG)
            return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

          /* (-x - c) may be simplified as (-c - x).  */
          if (GET_CODE (op0) == NEG
              && (GET_CODE (op1) == CONST_INT
                  || GET_CODE (op1) == CONST_DOUBLE))
            {
              tem = simplify_unary_operation (NEG, mode, op1, mode);
              if (tem)
                return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
            }

          /* If one of the operands is a PLUS or a MINUS, see if we can
             simplify this by the associative law.
             Don't use the associative law for floating point.
             The inaccuracy makes it nonassociative,
             and subtle programs can break if operations are associated.  */

          if (INTEGRAL_MODE_P (mode)
              && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
                  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
                  || (GET_CODE (op0) == CONST
                      && GET_CODE (XEXP (op0, 0)) == PLUS)
                  || (GET_CODE (op1) == CONST
                      && GET_CODE (XEXP (op1, 0)) == PLUS))
              && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
            return tem;

          /* Don't let a relocatable value get a negative coeff.  */
          if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
            return simplify_gen_binary (PLUS, mode,
                                        op0,
                                        neg_const_int (mode, op1));

          /* (x - (x & y)) -> (x & ~y) */
          if (GET_CODE (op1) == AND)
            {
              if (rtx_equal_p (op0, XEXP (op1, 0)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                            GET_MODE (XEXP (op1, 1)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
              if (rtx_equal_p (op0, XEXP (op1, 1)))
                {
                  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                            GET_MODE (XEXP (op1, 0)));
                  return simplify_gen_binary (AND, mode, op0, tem);
                }
            }
          break;

        case MULT:
          if (trueop1 == constm1_rtx)
            return simplify_gen_unary (NEG, mode, op0, mode);

          /* Maybe simplify x * 0 to 0.  The reduction is not valid if
             x is NaN, since x * 0 is then also NaN.  Nor is it valid
             when the mode has signed zeros, since multiplying a negative
             number by 0 will give -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop1 == CONST0_RTX (mode)
              && ! side_effects_p (op0))
            return op1;

          /* In IEEE floating point, x*1 is not equivalent to x for
             signalling NaNs.  */
          if (!HONOR_SNANS (mode)
              && trueop1 == CONST1_RTX (mode))
            return op0;

          /* Convert multiply by constant power of two into shift unless
             we are still generating RTL.  This test is a kludge.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (val = exact_log2 (INTVAL (trueop1))) >= 0
              /* If the mode is larger than the host word size, and the
                 uppermost bit is set, then this isn't a power of two due
                 to implicit sign extension.  */
              && (width <= HOST_BITS_PER_WIDE_INT
                  || val != HOST_BITS_PER_WIDE_INT - 1)
              && ! rtx_equal_function_value_matters)
            return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

          /* x*2 is x+x and x*(-1) is -x */
          if (GET_CODE (trueop1) == CONST_DOUBLE
              && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
              && GET_MODE (op0) == mode)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (REAL_VALUES_EQUAL (d, dconst2))
                return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

              if (REAL_VALUES_EQUAL (d, dconstm1))
                return simplify_gen_unary (NEG, mode, op0, mode);
            }

          /* Reassociate multiplication, but for floating point MULTs
             only when the user specifies unsafe math optimizations.  */
          if (! FLOAT_MODE_P (mode)
              || flag_unsafe_math_optimizations)
            {
              tem = simplify_associative_operation (code, mode, op0, op1);
              if (tem)
                return tem;
            }
          break;

        case IOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          /* A | (~A) -> -1 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return constm1_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case XOR:
          if (trueop1 == const0_rtx)
            return op0;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return simplify_gen_unary (NOT, mode, op0, mode);
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case AND:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return const0_rtx;
          if (GET_CODE (trueop1) == CONST_INT
              && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
                  == GET_MODE_MASK (mode)))
            return op0;
          if (trueop0 == trueop1 && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return op0;
          /* A & (~A) -> 0 */
          if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
               || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
              && ! side_effects_p (op0)
              && GET_MODE_CLASS (mode) != MODE_CC)
            return const0_rtx;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UDIV:
          /* Convert divide by power of two into shift (divide by 1 handled
             below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
            return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));

          /* Fall through....  */

        case DIV:
          if (trueop1 == CONST1_RTX (mode))
            {
              /* On some platforms DIV uses narrower mode than its
                 operands.  */
              rtx x = gen_lowpart_common (mode, op0);
              if (x)
                return x;
              else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
                return gen_lowpart_SUBREG (mode, op0);
              else
                return op0;
            }

          /* Maybe change 0 / x to 0.  This transformation isn't safe for
             modes with NaNs, since 0 / 0 will then be NaN rather than 0.
             Nor is it safe for modes with signed zeros, since dividing
             0 by a negative number gives -0, not 0.  */
          if (!HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && trueop0 == CONST0_RTX (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Change division by a constant into multiplication.  Only do
             this with -funsafe-math-optimizations.  */
          else if (GET_CODE (trueop1) == CONST_DOUBLE
                   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
                   && trueop1 != CONST0_RTX (mode)
                   && flag_unsafe_math_optimizations)
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              if (! REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
          break;

        case UMOD:
          /* Handle modulus by power of two (mod with 1 handled below).  */
          if (GET_CODE (trueop1) == CONST_INT
              && exact_log2 (INTVAL (trueop1)) > 0)
            return simplify_gen_binary (AND, mode, op0,
                                        GEN_INT (INTVAL (op1) - 1));

          /* Fall through....  */

        case MOD:
          if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
              && ! side_effects_p (op0) && ! side_effects_p (op1))
            return const0_rtx;
          break;

        case ROTATERT:
        case ROTATE:
        case ASHIFTRT:
          /* Rotating ~0 always results in ~0.  */
          if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
              && ! side_effects_p (op1))
            return op0;

          /* Fall through....  */

        case ASHIFT:
        case LSHIFTRT:
          if (trueop1 == const0_rtx)
            return op0;
          if (trueop0 == const0_rtx && ! side_effects_p (op1))
            return op0;
          break;

        case SMIN:
          if (width <= HOST_BITS_PER_WIDE_INT
              && GET_CODE (trueop1) == CONST_INT
              && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
              && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case SMAX:
          if (width <= HOST_BITS_PER_WIDE_INT
              && GET_CODE (trueop1) == CONST_INT
              && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
                  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
              && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UMIN:
          if (trueop1 == const0_rtx && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case UMAX:
          if (trueop1 == constm1_rtx && ! side_effects_p (op0))
            return op1;
          if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
            return op0;
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
          break;

        case SS_PLUS:
        case US_PLUS:
        case SS_MINUS:
        case US_MINUS:
          /* ??? There are simplifications that can be done.  */
          return 0;

        case VEC_SELECT:
          if (!VECTOR_MODE_P (mode))
            {
              if (!VECTOR_MODE_P (GET_MODE (trueop0))
                  || (mode
                      != GET_MODE_INNER (GET_MODE (trueop0)))
                  || GET_CODE (trueop1) != PARALLEL
                  || XVECLEN (trueop1, 0) != 1
                  || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
                abort ();

              if (GET_CODE (trueop0) == CONST_VECTOR)
                return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
            }
          else
            {
              if (!VECTOR_MODE_P (GET_MODE (trueop0))
                  || (GET_MODE_INNER (mode)
                      != GET_MODE_INNER (GET_MODE (trueop0)))
                  || GET_CODE (trueop1) != PARALLEL)
                abort ();

              if (GET_CODE (trueop0) == CONST_VECTOR)
                {
                  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                  rtvec v = rtvec_alloc (n_elts);
                  unsigned int i;

                  if (XVECLEN (trueop1, 0) != (int) n_elts)
                    abort ();
                  for (i = 0; i < n_elts; i++)
                    {
                      rtx x = XVECEXP (trueop1, 0, i);

                      if (GET_CODE (x) != CONST_INT)
                        abort ();
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
                    }

                  return gen_rtx_CONST_VECTOR (mode, v);
                }
            }
          return 0;

        case VEC_CONCAT:
          {
            enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                          ? GET_MODE (trueop0)
                                          : GET_MODE_INNER (mode));
            enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                          ? GET_MODE (trueop1)
                                          : GET_MODE_INNER (mode));

            if (!VECTOR_MODE_P (mode)
                || (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                    != GET_MODE_SIZE (mode)))
              abort ();

            if ((VECTOR_MODE_P (op0_mode)
                 && (GET_MODE_INNER (mode)
                     != GET_MODE_INNER (op0_mode)))
                || (!VECTOR_MODE_P (op0_mode)
                    && GET_MODE_INNER (mode) != op0_mode))
              abort ();

            if ((VECTOR_MODE_P (op1_mode)
                 && (GET_MODE_INNER (mode)
                     != GET_MODE_INNER (op1_mode)))
                || (!VECTOR_MODE_P (op1_mode)
                    && GET_MODE_INNER (mode) != op1_mode))
              abort ();

            if ((GET_CODE (trueop0) == CONST_VECTOR
                 || GET_CODE (trueop0) == CONST_INT
                 || GET_CODE (trueop0) == CONST_DOUBLE)
                && (GET_CODE (trueop1) == CONST_VECTOR
                    || GET_CODE (trueop1) == CONST_INT
                    || GET_CODE (trueop1) == CONST_DOUBLE))
              {
                int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
                unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
                rtvec v = rtvec_alloc (n_elts);
                unsigned int i;
                unsigned in_n_elts = 1;

                if (VECTOR_MODE_P (op0_mode))
                  in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
                for (i = 0; i < n_elts; i++)
                  {
                    if (i < in_n_elts)
                      {
                        if (!VECTOR_MODE_P (op0_mode))
                          RTVEC_ELT (v, i) = trueop0;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                      }
                    else
                      {
                        if (!VECTOR_MODE_P (op1_mode))
                          RTVEC_ELT (v, i) = trueop1;
                        else
                          RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                               i - in_n_elts);
                      }
                  }

                return gen_rtx_CONST_VECTOR (mode, v);
              }
            return 0;
          }

        default:
          break;
        }

      return 0;
    }

  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */
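  /* Illustration (not in the original source): in QImode (width == 8)
     the CONST_INT -1 gives arg1 == 0xff after masking, while arg1s is
     sign-extended back to -1 so that signed operators such as DIV and
     ASHIFTRT see the intended value.  */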

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
        arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }

  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
          || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
              && arg1s == -1))
        return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case AND:
      val = arg0 & arg1;
      break;

    case IOR:
      val = arg0 | arg1;
      break;

    case XOR:
      val = arg0 ^ arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
         what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
        return 0;

      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
        return 0;

      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
        return 0;

      if (SHIFT_COUNT_TRUNCATED)
        arg1 %= width;

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
         Manually extend the sign to insure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
        val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
             | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
        return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
             | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
             <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
             > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
      /* ??? There are simplifications that can be done.  */
      return 0;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}

/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */
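/* Illustrative walk-through: for (plus (plus x (const_int 1))
   (minus y x)), the expansion loop flattens everything into the ops[]
   array as { x, +1, y, -x }; the pairwise pass cancels x against -x,
   and the result is rebuilt as (plus y (const_int 1)).  */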

struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
          - commutative_operand_precedence (d1->op));
}
2276 simplify_plus_minus (enum rtx_code code
, enum machine_mode mode
, rtx op0
,
2279 struct simplify_plus_minus_op_data ops
[8];
2281 int n_ops
= 2, input_ops
= 2, input_consts
= 0, n_consts
;
2285 memset (ops
, 0, sizeof ops
);
2287 /* Set up the two operands and then expand them until nothing has been
2288 changed. If we run out of room in our array, give up; this should
2289 almost never happen. */
2294 ops
[1].neg
= (code
== MINUS
);
2300 for (i
= 0; i
< n_ops
; i
++)
2302 rtx this_op
= ops
[i
].op
;
2303 int this_neg
= ops
[i
].neg
;
2304 enum rtx_code this_code
= GET_CODE (this_op
);
2313 ops
[n_ops
].op
= XEXP (this_op
, 1);
2314 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
2317 ops
[i
].op
= XEXP (this_op
, 0);
2323 ops
[i
].op
= XEXP (this_op
, 0);
2324 ops
[i
].neg
= ! this_neg
;
2330 && GET_CODE (XEXP (this_op
, 0)) == PLUS
2331 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
2332 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
2334 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
2335 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
2336 ops
[n_ops
].neg
= this_neg
;
2344 /* ~a -> (-a - 1) */
2347 ops
[n_ops
].op
= constm1_rtx
;
2348 ops
[n_ops
++].neg
= this_neg
;
2349 ops
[i
].op
= XEXP (this_op
, 0);
2350 ops
[i
].neg
= !this_neg
;
2358 ops
[i
].op
= neg_const_int (mode
, this_op
);
2371 /* If we only have two operands, we can't do anything. */
2372 if (n_ops
<= 2 && !force
)
2375 /* Count the number of CONSTs we didn't split above. */
2376 for (i
= 0; i
< n_ops
; i
++)
2377 if (GET_CODE (ops
[i
].op
) == CONST
)
  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  first = 1;
  do
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
        for (j = i + 1; j < n_ops; j++)
          {
            rtx lhs = ops[i].op, rhs = ops[j].op;
            int lneg = ops[i].neg, rneg = ops[j].neg;

            if (lhs != 0 && rhs != 0
                && (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
              {
                enum rtx_code ncode = PLUS;

                if (lneg != rneg)
                  {
                    ncode = MINUS;
                    if (lneg)
                      tem = lhs, lhs = rhs, rhs = tem;
                  }
                else if (swap_commutative_operands_p (lhs, rhs))
                  tem = lhs, lhs = rhs, rhs = tem;

                tem = simplify_binary_operation (ncode, mode, lhs, rhs);
                /* Reject "simplifications" that just wrap the two
                   arguments in a CONST.  Failure to do so can result
                   in infinite recursion with simplify_binary_operation
                   when it calls us to simplify CONST operations.  */
                if (tem
                    && ! (GET_CODE (tem) == CONST
                          && GET_CODE (XEXP (tem, 0)) == ncode
                          && XEXP (XEXP (tem, 0), 0) == lhs
                          && XEXP (XEXP (tem, 0), 1) == rhs)
                    /* Don't allow -x + -1 -> ~x simplifications in the
                       first pass.  This allows us the chance to combine
                       the -1 with other constants.  */
                    && ! (first
                          && GET_CODE (tem) == NOT
                          && XEXP (tem, 0) == rhs))
                  {
                    lneg &= rneg;
                    if (GET_CODE (tem) == NEG)
                      tem = XEXP (tem, 0), lneg = !lneg;
                    if (GET_CODE (tem) == CONST_INT && lneg)
                      tem = neg_const_int (mode, tem), lneg = 0;

                    ops[i].op = tem;
                    ops[i].neg = lneg;
                    ops[j].op = NULL_RTX;
                    changed = 1;
                  }
              }
          }

      first = 0;
    }
  while (changed);
  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);
  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && GET_CODE (ops[1].op) == CONST_INT
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
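  /* Illustrative note (added, not part of the original source): by this
     point a negated constant has already been folded by neg_const_int, so
     an input such as (neg (const (plus X 4))) has become the pair
     { X (negated), -4 }, and the test above rebuilds it as
     (minus (const_int -4) X).  */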
  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
        value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }
  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (!force
      && (n_ops + n_consts > input_ops
          || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;
  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }
  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
                             mode, result, ops[i].op);

  return result;
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
                                     enum machine_mode mode,
                                     rtx op0, rtx op1)
{
  int equal, op0lt, op0ltu, op1lt, op1ltu;
  rtx tem;
  rtx trueop0;
  rtx trueop1;
  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
          || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases cannot be ignored; but we cannot do it even for
     signed comparisons for languages such as Java, so test flag_wrapv.  */

  if (!flag_wrapv && INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
            && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this for == or != if tem is a nonzero address.  */
      && ((code != EQ && code != NE) || ! nonzero_address_p (tem))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_const_relational_operation (signed_condition (code),
                                                mode, tem, const0_rtx);
  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;
  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  */
  if (! HONOR_NANS (GET_MODE (trueop0))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;
  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
           && GET_CODE (trueop1) == CONST_DOUBLE
           && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
        switch (code)
          {
          case UNEQ:
          case UNLT:
          case UNGT:
          case UNLE:
          case UNGE:
          case NE:
          case UNORDERED:
            return const_true_rtx;
          case EQ:
          case LT:
          case GT:
          case LE:
          case GE:
          case LTGT:
          case ORDERED:
            return const0_rtx;
          default:
            return 0;
          }

      equal = REAL_VALUES_EQUAL (d0, d1);
      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
    }
  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
           && (GET_CODE (trueop0) == CONST_DOUBLE
               || GET_CODE (trueop0) == CONST_INT)
           && (GET_CODE (trueop1) == CONST_DOUBLE
               || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;
      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
        {
          l0u = l0s = CONST_DOUBLE_LOW (trueop0);
          h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
        }
      else
        {
          l0u = l0s = INTVAL (trueop0);
          h0u = h0s = HWI_SIGN_EXTEND (l0s);
        }

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        {
          l1u = l1s = CONST_DOUBLE_LOW (trueop1);
          h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
        }
      else
        {
          l1u = l1s = INTVAL (trueop1);
          h1u = h1s = HWI_SIGN_EXTEND (l1s);
        }
      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
         we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
        {
          l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
          l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

          if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l0s |= ((HOST_WIDE_INT) (-1) << width);

          if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l1s |= ((HOST_WIDE_INT) (-1) << width);
        }
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
        h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);
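      /* Worked example (added note): for width == 8 and a low word of
         0x80, the zero-extended l0u is 128 while the sign-extended l0s
         becomes -128, so the signed and unsigned orderings computed
         below can differ.  */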
      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }
  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
        {
        case EQ:
          if (trueop1 == const0_rtx && nonzero_address_p (op0))
            return const0_rtx;
          break;

        case NE:
          if (trueop1 == const0_rtx && nonzero_address_p (op0))
            return const_true_rtx;
          break;
        case GEU:
          /* Unsigned values are never negative.  */
          if (trueop1 == const0_rtx)
            return const_true_rtx;
          break;

        case LTU:
          if (trueop1 == const0_rtx)
            return const0_rtx;
          break;
        case LEU:
          /* Unsigned values are never greater than the largest
             unsigned value.  */
          if (GET_CODE (trueop1) == CONST_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
              && INTEGRAL_MODE_P (mode))
            return const_true_rtx;
          break;

        case GTU:
          if (GET_CODE (trueop1) == CONST_INT
              && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
              && INTEGRAL_MODE_P (mode))
            return const0_rtx;
          break;
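          /* Worked example (added note): in QImode, GET_MODE_MASK is
             0xff, so (leu x (const_int 255)) always holds and
             (gtu x (const_int 255)) never does, whatever X is.  */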
        case LT:
          /* Optimize abs(x) < 0.0.  */
          if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const0_rtx;
            }
          break;
        case GE:
          /* Optimize abs(x) >= 0.0.  */
          if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const_true_rtx;
            }
          break;
        case UNGE:
          /* Optimize ! (abs(x) < 0.0).  */
          if (trueop1 == CONST0_RTX (mode))
            {
              tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
                                                       : trueop0;
              if (GET_CODE (tem) == ABS)
                return const_true_rtx;
            }
          break;

        default:
          break;
        }

      return 0;
    }
  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result, and CMP_MODE is the mode of the operands.
   If CMP_MODE is VOIDmode, both operands must also be VOIDmode and we
   compare the operands in "infinite precision".  */

rtx
simplify_relational_operation (enum rtx_code code,
                               enum machine_mode mode ATTRIBUTE_UNUSED,
                               enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tmp;

  tmp = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tmp)
    {
#ifdef FLOAT_STORE_FLAG_VALUE
      if (GET_MODE_CLASS (mode) == MODE_FLOAT)
        {
          if (tmp == const0_rtx)
            return CONST0_RTX (mode);
          return CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
                                               mode);
        }
#endif
      return tmp;
    }

  return NULL_RTX;
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
                            enum machine_mode op0_mode, rtx op0, rtx op1,
                            rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;
  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
          && GET_CODE (op1) == CONST_INT
          && GET_CODE (op2) == CONST_INT
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
        {
          /* Extracting a bit-field from a constant.  */
          HOST_WIDE_INT val = INTVAL (op0);

          if (BITS_BIG_ENDIAN)
            val >>= (GET_MODE_BITSIZE (op0_mode)
                     - INTVAL (op2) - INTVAL (op1));
          else
            val >>= INTVAL (op2);

          if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
            {
              /* First zero-extend.  */
              val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
              /* If desired, propagate sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
                val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
            }

          /* Clear the bits that don't belong in our mode,
             unless they and our sign bit are all one.
             So we get either a reasonable negative value or a reasonable
             unsigned value for this mode.  */
          if (width < HOST_BITS_PER_WIDE_INT
              && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
                  != ((HOST_WIDE_INT) (-1) << (width - 1))))
            val &= ((HOST_WIDE_INT) 1 << width) - 1;

          return GEN_INT (val);
        }
      break;
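      /* Worked example (added note): extracting a 4-bit field at bit 4
         of the constant 0xab with little-endian bit numbering shifts
         right by 4 (giving 0xa) and masks to 4 bits; ZERO_EXTRACT yields
         10, while SIGN_EXTRACT sees bit 3 set and sign-extends to -6.  */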
    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
        return op0 != const0_rtx ? op1 : op2;
      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
        return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op2;
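      /* Illustrative note (added): the HONOR_SIGNED_ZEROS guard matters
         because for a == 0.0 and b == -0.0 the test a == b is true, so
         "a == b ? a : b" evaluates to 0.0, whereas the reduced form "b"
         would yield -0.0.  */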
      if (COMPARISON_P (op0) && ! side_effects_p (op0))
        {
          enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
                                        ? GET_MODE (XEXP (op0, 1))
                                        : GET_MODE (XEXP (op0, 0)));
          rtx temp;

          if (cmp_mode == VOIDmode)
            cmp_mode = op0_mode;
          temp = simplify_const_relational_operation (GET_CODE (op0),
                                                      cmp_mode,
                                                      XEXP (op0, 0),
                                                      XEXP (op0, 1));

          /* See if any simplifications were possible.  */
          if (temp == const0_rtx)
            return op2;
          else if (temp == const_true_rtx)
            return op1;
          else if (temp)
            abort ();

          /* Look for happy constants in op1 and op2.  */
          if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
            {
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                {
                  enum rtx_code tmp;
                  tmp = reversed_comparison_code (op0, NULL_RTX);
                  if (tmp == UNKNOWN)
                    break;
                  code = tmp;
                }
              else
                break;

              return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
            }
        }
      break;
    case VEC_MERGE:
      if (GET_MODE (op0) != mode
          || GET_MODE (op1) != mode
          || !VECTOR_MODE_P (mode))
        abort ();
      op2 = avoid_constant_pool_reference (op2);
      if (GET_CODE (op2) == CONST_INT)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          int mask = (1 << n_elts) - 1;

          if (!(INTVAL (op2) & mask))
            return op1;
          if ((INTVAL (op2) & mask) == mask)
            return op0;
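          /* Worked example (added note): for a four-element vector and
             op2 == 0b0101, neither mask shortcut above applies, and the
             merge below takes elements 0 and 2 from op0 and elements 1
             and 3 from op1.  */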
          op0 = avoid_constant_pool_reference (op0);
          op1 = avoid_constant_pool_reference (op1);
          if (GET_CODE (op0) == CONST_VECTOR
              && GET_CODE (op1) == CONST_VECTOR)
            {
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
                                    ? CONST_VECTOR_ELT (op0, i)
                                    : CONST_VECTOR_ELT (op1, i));
              return gen_rtx_CONST_VECTOR (mode, v);
            }
        }
      break;

    default:
      abort ();
    }

  return 0;
}
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_VECTOR,
   returning another CONST_INT or CONST_DOUBLE or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
                       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;
  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && GET_CODE (op) == CONST_INT)
    return op;
  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }

  if (BITS_PER_UNIT % value_bit != 0)
    abort ();  /* Too complicated; reducing value_bit may help.  */
  if (elem_bitsize % BITS_PER_UNIT != 0)
    abort ();  /* I don't know how to handle endianness of sub-units.  */
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }
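      /* Illustrative note (added): on a little-endian target word_byte
         and subword_byte both equal byte, so the computed bytele is an
         identity mapping and element 0 unpacks at offset 0 of VALUE; the
         ibyte variants only matter for big-endian layouts.  */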
      switch (GET_CODE (el))
        {
        case CONST_INT:
          for (i = 0;
               i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
               i += value_bit)
            *vp++ = INTVAL (el) >> i;
          /* CONST_INTs are always logically sign-extended.  */
          for (; i < elem_bitsize; i += value_bit)
            *vp++ = INTVAL (el) < 0 ? -1 : 0;
          break;
        case CONST_DOUBLE:
          if (GET_MODE (el) == VOIDmode)
            {
              /* If this triggers, someone should have generated a
                 CONST_INT instead.  */
              if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
                abort ();

              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_DOUBLE_LOW (el) >> i;
              while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
                {
                  *vp++
                    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
                  i += value_bit;
                }
              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < max_bitsize; i += value_bit)
                *vp++ = 0;
            }
          else if (GET_MODE_CLASS (GET_MODE (el)) == MODE_FLOAT)
            {
              long tmp[max_bitsize / 32];
              int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

              if (bitsize > elem_bitsize)
                abort ();
              if (bitsize % value_bit != 0)
                abort ();

              real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
                              GET_MODE (el));

              /* real_to_target produces its result in words affected by
                 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
                 and use WORDS_BIG_ENDIAN instead; see the documentation
                 of SUBREG in rtl.texi.  */
              for (i = 0; i < bitsize; i += value_bit)
                {
                  int ibase;
                  if (WORDS_BIG_ENDIAN)
                    ibase = bitsize - 1 - i;
                  else
                    ibase = i;
                  *vp++ = tmp[ibase / 32] >> i % 32;
                }

              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          else
            abort ();
          break;

        default:
          abort ();
        }
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
                        - byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
              + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  if (byte >= GET_MODE_SIZE (innermode))
    abort ();

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);
  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  if (elem_bitsize % value_bit != 0)
    abort ();
  if (elem_bitsize + value_start * value_bit > max_bitsize)
    abort ();
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }
      switch (outer_class)
        {
        case MODE_INT:
        case MODE_PARTIAL_INT:
          {
            unsigned HOST_WIDE_INT hi = 0, lo = 0;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
                     << (i - HOST_BITS_PER_WIDE_INT));

            /* immed_double_const doesn't call trunc_int_for_mode.  I don't
               know why.  */
            if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
              elems[elem] = gen_int_mode (lo, outer_submode);
            else
              elems[elem] = immed_double_const (lo, hi, outer_submode);
          }
          break;
        case MODE_FLOAT:
          {
            REAL_VALUE_TYPE r;
            long tmp[max_bitsize / 32];

            /* real_from_target wants its input in words affected by
               FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
               and use WORDS_BIG_ENDIAN instead; see the documentation
               of SUBREG in rtl.texi.  */
            for (i = 0; i < max_bitsize / 32; i++)
              tmp[i] = 0;
            for (i = 0; i < elem_bitsize; i += value_bit)
              {
                int ibase;
                if (WORDS_BIG_ENDIAN)
                  ibase = elem_bitsize - 1 - i;
                else
                  ibase = i;
                tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
              }

            real_from_target (&r, tmp, outer_submode);
            elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
          }
          break;

        default:
          abort ();
        }
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return elems[0];
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */

rtx
simplify_subreg (enum machine_mode outermode, rtx op,
                 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (outermode == innermode && !byte)
    return op;

  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode of OP.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx new;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
         in memory.  Irritating exception is paradoxical subreg, where
         we define SUBREG_BYTE to be 0.  On big endian machines, this
         value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }
      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode) - GET_MODE_SIZE (outermode));

          /* In a paradoxical subreg, see if we are still looking at the
             lower part.  If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }

      /* Recurse for further possible simplifications.  */
      new = simplify_subreg (outermode, SUBREG_REG (op),
                             GET_MODE (SUBREG_REG (op)),
                             final_offset);
      if (new)
        return new;
      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
    }
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (GET_CODE (op) == REG
      && (! REG_FUNCTION_VALUE_P (op)
          || ! rtx_equal_function_value_matters)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
            && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
          || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
              && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
              ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
                                        byte, outermode))
    {
      rtx tem = gen_rtx_SUBREG (outermode, op, byte);
      int final_regno = subreg_hard_regno (tem, 0);

      /* ??? We do allow it if the current REG is not valid for
         its mode.  This is a kludge to work around how float/complex
         arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
          || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
        {
          rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);

          /* Propagate original regno.  We don't have any way to specify
             the offset inside original regno, so do so only for lowpart.
             The information is used only by alias analysis that cannot
             grok partial registers anyway.  */

          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (GET_CODE (op) == MEM
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      int is_realpart = byte < (unsigned int) GET_MODE_UNIT_SIZE (innermode);
      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
      unsigned int final_offset;
      rtx res;

      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      /* We can at least simplify it by referring directly to the
         relevant part.  */
      return gen_rtx_SUBREG (outermode, part, final_offset);
    }
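  /* Worked example for the CONCAT handling above (added note): with
     SCmode built from two 4-byte SFmode parts, a request for byte 4
     selects the imaginary part and recurses with final_offset == 0,
     i.e. (subreg:SF (concat:SC re im) 4) simplifies to im.  */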
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
         there are three possibilities.  If the outermode is the same
         as the origmode, we can omit both the extension and the subreg.
         If the outermode is not larger than the origmode, we can apply
         the truncation without the extension.  Finally, if the outermode
         is larger than the origmode, but both are integer modes, we
         can just extend to the appropriate mode.  */
      if (bitpos == 0)
        {
          enum machine_mode origmode = GET_MODE (XEXP (op, 0));
          if (outermode == origmode)
            return XEXP (op, 0);
          if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
            return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
                                        subreg_lowpart_offset (outermode,
                                                               origmode));
          if (SCALAR_INT_MODE_P (outermode))
            return simplify_gen_unary (GET_CODE (op), outermode,
                                       XEXP (op, 0), origmode);
        }
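      /* Worked example for the lowpart cases above (added note): for
         (subreg:QI (zero_extend:SI (reg:QI x)) 0) on a little-endian
         target, bitpos is 0 and outermode equals origmode, so the whole
         expression folds to (reg:QI x).  */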
      /* A SUBREG resulting from a zero extension may fold to zero if
         it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
          && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx new;

  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (GET_CODE (op) == QUEUED)
    return NULL_RTX;

  new = simplify_subreg (outermode, op, innermode, byte);
  if (new)
    return new;

  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  return gen_rtx_SUBREG (outermode, op, byte);
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to four places (three for RTL
   simplification and one for tree simplification).  */
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  rtx temp;

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      temp = simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0), XEXP (x, 1));
      return temp;

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_gen_subreg (mode, SUBREG_REG (x),
                                    GET_MODE (SUBREG_REG (x)),
                                    SUBREG_BYTE (x));
      if (code == CONSTANT_P_RTX)
        {
          if (CONSTANT_P (XEXP (x, 0)))
            return const1_rtx;
        }
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))