/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
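
/* A small worked example of this representation: with a 32-bit
   HOST_WIDE_INT, the double-width value -2 is the pair
   low = 0xfffffffe, high = HWI_SIGN_EXTEND (0xfffffffe) == -1,
   while 2 is low = 2, high = HWI_SIGN_EXTEND (2) == 0.  The macro
   looks only at the sign bit of LOW.  */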
static rtx neg_const_int (enum machine_mode, rtx);
static int simplify_plus_minus_op_data_cmp (const void *, const void *);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx,
				rtx, int);
static bool associative_constant_p (rtx);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
					   rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
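
/* For instance, negating the QImode constant -128 would mathematically
   give +128, which QImode cannot represent; gen_int_mode truncates the
   result back into the mode rather than producing an out-of-range
   CONST_INT.  */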
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1)
{
  rtx tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Handle addition and subtraction specially.  Otherwise, just form
     the operation.  */

  if (code == PLUS || code == MINUS)
    {
      tem = simplify_plus_minus (code, mode, op0, op1, 1);
      if (tem)
	return tem;
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
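
/* Sketch of typical behavior (the operand rtxes here are illustrative
   only):

     simplify_gen_binary (PLUS, SImode, GEN_INT (2), GEN_INT (3))
       => (const_int 5)                    -- folded outright
     simplify_gen_binary (PLUS, SImode, GEN_INT (2), some_reg)
       => (plus:SI (reg) (const_int 2))    -- constant ordered second  */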
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */

rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
	{
	  REAL_VALUE_TYPE d;

	  REAL_VALUE_FROM_CONST_DOUBLE (d, c);
	  return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
	}
      return x;

    default:
      return x;
    }

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = (*targetm.delegitimize_address) (addr);

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  if (GET_CODE (addr) != SYMBOL_REF
      || ! CONSTANT_POOL_ADDRESS_P (addr))
    return x;

  c = get_pool_constant (addr);
  cmode = get_pool_mode (addr);

  /* If we're accessing the constant in a different mode than it was
     originally stored, attempt to fix that up via subreg simplifications.
     If that fails we have no choice but to return the original memory.  */
  if (cmode != GET_MODE (x))
    {
      c = simplify_subreg (GET_MODE (x), c, cmode, 0);
      return c ? c : x;
    }

  return c;
}
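
/* For example, a DFmode x = (mem (symbol_ref)) whose address satisfies
   CONSTANT_POOL_ADDRESS_P comes back as the pooled CONST_DOUBLE itself,
   so callers can fold through constant pool loads.  */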
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
		    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
		      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
					      op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies the mode in which the comparison is done.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
			 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  if (cmp_mode != VOIDmode)
    {
      tem = simplify_relational_operation (code, cmp_mode, op0, op1);

      if (tem)
	{
#ifdef FLOAT_STORE_FLAG_VALUE
	  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	    {
	      REAL_VALUE_TYPE val;
	      if (tem == const0_rtx)
		return CONST0_RTX (mode);
	      if (tem != const_true_rtx)
		abort ();
	      val = FLOAT_STORE_FLAG_VALUE (mode);
	      return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
	    }
#endif
	  return tem;
	}
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* If op0 is a comparison, extract the comparison arguments from it.  */
  if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && op1 == const0_rtx)
    {
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return op0;
	  return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					  XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new = reversed_comparison_code (op0, NULL_RTX);
	  if (new != UNKNOWN)
	    return simplify_gen_relational (new, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD in X with NEW and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, rtx old, rtx new)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);

  /* If X is OLD, return NEW.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (x == old)
    return new;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      {
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op = (XEXP (x, 0) == old
		  ? new : simplify_replace_rtx (XEXP (x, 0), old, new));

	return simplify_gen_unary (code, mode, op, op_mode);
      }

    case '2':
    case 'c':
      return
	simplify_gen_binary (code, mode,
			     simplify_replace_rtx (XEXP (x, 0), old, new),
			     simplify_replace_rtx (XEXP (x, 1), old, new));

    case '<':
      {
	enum machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
				     ? GET_MODE (XEXP (x, 0))
				     : GET_MODE (XEXP (x, 1)));
	rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);
	return simplify_gen_relational (code, mode, op_mode, op0, op1);
      }

    case '3':
    case 'b':
      {
	enum machine_mode op_mode = GET_MODE (XEXP (x, 0));
	rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);

	return
	  simplify_gen_ternary (code, mode,
				(op_mode != VOIDmode
				 ? op_mode
				 : GET_MODE (op0)),
				op0,
				simplify_replace_rtx (XEXP (x, 1), old, new),
				simplify_replace_rtx (XEXP (x, 2), old, new));
      }

    case 'x':
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
	{
	  rtx exp;
	  exp = simplify_gen_subreg (GET_MODE (x),
				     simplify_replace_rtx (SUBREG_REG (x),
							   old, new),
				     GET_MODE (SUBREG_REG (x)),
				     SUBREG_BYTE (x));
	  if (exp)
	    x = exp;
	}
      return x;

    case 'o':
      if (code == MEM)
	return replace_equiv_address_nv (x,
					 simplify_replace_rtx (XEXP (x, 0),
							       old, new));
      else if (code == LO_SUM)
	{
	  rtx op0 = simplify_replace_rtx (XEXP (x, 0), old, new);
	  rtx op1 = simplify_replace_rtx (XEXP (x, 1), old, new);

	  /* (lo_sum (high x) x) -> x  */
	  if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
	    return op1;

	  return gen_rtx_LO_SUM (mode, op0, op1);
	}
      else if (code == REG)
	{
	  if (REG_P (old) && REGNO (x) == REGNO (old))
	    return new;
	}

      return x;

    default:
      return x;
    }
  return x;
}
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */

rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
			  rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx trueop = avoid_constant_pool_reference (op);

  if (code == VEC_DUPLICATE)
    {
      if (!VECTOR_MODE_P (mode))
	abort ();
      if (GET_MODE (trueop) != VOIDmode
	  && !VECTOR_MODE_P (GET_MODE (trueop))
	  && GET_MODE_INNER (mode) != GET_MODE (trueop))
	abort ();
      if (GET_MODE (trueop) != VOIDmode
	  && VECTOR_MODE_P (GET_MODE (trueop))
	  && GET_MODE_INNER (mode) != GET_MODE_INNER (GET_MODE (trueop)))
	abort ();
      if (GET_CODE (trueop) == CONST_INT || GET_CODE (trueop) == CONST_DOUBLE
	  || GET_CODE (trueop) == CONST_VECTOR)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  rtvec v = rtvec_alloc (n_elts);
	  unsigned int i;

	  if (GET_CODE (trueop) != CONST_VECTOR)
	    for (i = 0; i < n_elts; i++)
	      RTVEC_ELT (v, i) = trueop;
	  else
	    {
	      enum machine_mode inmode = GET_MODE (trueop);
	      int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
	      unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

	      if (in_n_elts >= n_elts || n_elts % in_n_elts)
		abort ();
	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop, i % in_n_elts);
	    }
	  return gen_rtx_CONST_VECTOR (mode, v);
	}
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (trueop) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (trueop);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op_n_elts != n_elts)
	abort ();

      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
					    CONST_VECTOR_ELT (trueop, i),
					    GET_MODE_INNER (opmode));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (trueop) == VOIDmode
      && (GET_CODE (trueop) == CONST_DOUBLE || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (trueop) == VOIDmode
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (GET_CODE (trueop) == CONST_INT)
	lv = INTVAL (trueop), hv = HWI_SIGN_EXTEND (lv);
      else
	lv = CONST_DOUBLE_LOW (trueop), hv = CONST_DOUBLE_HIGH (trueop);

      if (op_mode == VOIDmode)
	{
	  /* We don't know how to interpret negative-looking numbers in
	     this case, so don't try to fold those.  */
	  if (hv < 0)
	    return 0;
	}
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
	;
      else
	hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (GET_CODE (trueop) == CONST_INT
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (trueop);
      HOST_WIDE_INT val;

      switch (code)
	{
	case NOT:
	  val = ~ arg0;
	  break;

	case NEG:
	  val = - arg0;
	  break;

	case ABS:
	  val = (arg0 >= 0 ? arg0 : - arg0);
	  break;

	case FFS:
	  /* Don't use ffs here.  Instead, get low order bit and then its
	     number.  If arg0 is zero, this will return 0, as desired.  */
	  arg0 &= GET_MODE_MASK (mode);
	  val = exact_log2 (arg0 & (- arg0)) + 1;
	  break;

	case CLZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
	    ;
	  else
	    val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
	  break;

	case CTZ:
	  arg0 &= GET_MODE_MASK (mode);
	  if (arg0 == 0)
	    {
	      /* Even if the value at zero is undefined, we have to come
		 up with some replacement.  Seems good enough.  */
	      if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
		val = GET_MODE_BITSIZE (mode);
	    }
	  else
	    val = exact_log2 (arg0 & -arg0);
	  break;

	case POPCOUNT:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  break;

	case PARITY:
	  arg0 &= GET_MODE_MASK (mode);
	  val = 0;
	  while (arg0)
	    val++, arg0 &= arg0 - 1;
	  val &= 1;
	  break;

	case TRUNCATE:
	  val = arg0;
	  break;

	case ZERO_EXTEND:
	  /* When zero-extending a CONST_INT, we need to know its
	     original mode.  */
	  if (op_mode == VOIDmode)
	    abort ();
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	  else
	    return 0;
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode)
	    op_mode = mode;
	  if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
	    {
	      /* If we were really extending the mode,
		 we would have to distinguish between zero-extension
		 and sign-extension.  */
	      if (width != GET_MODE_BITSIZE (op_mode))
		abort ();
	      val = arg0;
	    }
	  else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
	    {
	      val
		= arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
	      if (val
		  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
		val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
	    }
	  else
	    return 0;
	  break;

	default:
	  return 0;
	}

      val = trunc_int_for_mode (val, mode);

      return GEN_INT (val);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (trueop) == VOIDmode
	   && width <= HOST_BITS_PER_WIDE_INT * 2
	   && (GET_CODE (trueop) == CONST_DOUBLE
	       || GET_CODE (trueop) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (trueop) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop), h1 = CONST_DOUBLE_HIGH (trueop);
      else
	l1 = INTVAL (trueop), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
	{
	case NOT:
	  lv = ~ l1, hv = ~ h1;
	  break;

	case NEG:
	  neg_double (l1, h1, &lv, &hv);
	  break;

	case ABS:
	  if (h1 < 0)
	    neg_double (l1, h1, &lv, &hv);
	  else
	    lv = l1, hv = h1;
	  break;

	case FFS:
	  hv = 0;
	  if (l1 == 0)
	    {
	      if (h1 == 0)
		lv = 0;
	      else
		lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
	    }
	  else
	    lv = exact_log2 (l1 & -l1) + 1;
	  break;

	case CLZ:
	  hv = 0;
	  if (h1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
	      - HOST_BITS_PER_WIDE_INT;
	  else if (l1 != 0)
	    lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
	  else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case CTZ:
	  hv = 0;
	  if (l1 != 0)
	    lv = exact_log2 (l1 & -l1);
	  else if (h1 != 0)
	    lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
	  else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
	    lv = GET_MODE_BITSIZE (mode);
	  break;

	case TRUNCATE:
	  /* This is just a change-of-mode, so do nothing.  */
	  lv = l1, hv = h1;
	  break;

	case ZERO_EXTEND:
	  if (op_mode == VOIDmode)
	    abort ();

	  if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;

	  hv = 0;
	  lv = l1 & GET_MODE_MASK (op_mode);
	  break;

	case SIGN_EXTEND:
	  if (op_mode == VOIDmode
	      || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
	    return 0;
	  else
	    {
	      lv = l1 & GET_MODE_MASK (op_mode);
	      if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
		  && (lv & ((HOST_WIDE_INT) 1
			    << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
		lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

	      hv = HWI_SIGN_EXTEND (lv);
	    }
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);

      switch (code)
	{
	case SQRT:
	  if (HONOR_SNANS (mode) && real_isnan (&d))
	    return 0;
	  real_sqrt (&t, mode, &d);
	  d = t;
	  break;
	case ABS:
	  d = REAL_VALUE_ABS (d);
	  break;
	case NEG:
	  d = REAL_VALUE_NEGATE (d);
	  break;
	case FLOAT_TRUNCATE:
	  d = real_value_truncate (mode, d);
	  break;
	case FLOAT_EXTEND:
	  /* All this does is change the mode.  */
	  break;
	case FIX:
	  real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
	  break;
	default:
	  abort ();
	}
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }

  else if (GET_CODE (trueop) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop)) == MODE_FLOAT
	   && GET_MODE_CLASS (mode) == MODE_INT
	   && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT i;
      REAL_VALUE_TYPE d;
      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop);
      switch (code)
	{
	case FIX:		i = REAL_VALUE_FIX (d);		  break;
	case UNSIGNED_FIX:	i = REAL_VALUE_UNSIGNED_FIX (d);  break;
	default:
	  abort ();
	}
      return gen_int_mode (i, mode);
    }
  /* This was formerly used only for non-IEEE float.
     eggert@twinsun.com says it is safe for IEEE also.  */
  else
    {
      enum rtx_code reversed;
      rtx temp;

      /* There are some simplifications we can do even if the operands
	 aren't constant.  */
      switch (code)
	{
	case NOT:
	  /* (not (not X)) == X.  */
	  if (GET_CODE (op) == NOT)
	    return XEXP (op, 0);

	  /* (not (eq X Y)) == (ne X Y), etc.  */
	  if (GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && ((reversed = reversed_comparison_code (op, NULL_RTX))
		  != UNKNOWN))
	    return simplify_gen_relational (reversed, mode, VOIDmode,
					    XEXP (op, 0), XEXP (op, 1));

	  /* (not (plus X -1)) can become (neg X).  */
	  if (GET_CODE (op) == PLUS
	      && XEXP (op, 1) == constm1_rtx)
	    return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

	  /* Similarly, (not (neg X)) is (plus X -1).  */
	  if (GET_CODE (op) == NEG)
	    return plus_constant (XEXP (op, 0), -1);

	  /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
	  if (GET_CODE (op) == XOR
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && (temp = simplify_unary_operation (NOT, mode,
						   XEXP (op, 1), mode)) != 0)
	    return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

	  /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
	     operands other than 1, but that is not valid.  We could do a
	     similar simplification for (not (lshiftrt C X)) where C is
	     just the sign bit, but this doesn't seem common enough to
	     bother with.  */
	  if (GET_CODE (op) == ASHIFT
	      && XEXP (op, 0) == const1_rtx)
	    {
	      temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
	      return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
	    }

	  /* If STORE_FLAG_VALUE is -1, (not (comparison X Y)) can be done
	     by reversing the comparison code if valid.  */
	  if (STORE_FLAG_VALUE == -1
	      && GET_RTX_CLASS (GET_CODE (op)) == '<'
	      && (reversed = reversed_comparison_code (op, NULL_RTX))
		 != UNKNOWN)
	    return simplify_gen_relational (reversed, mode, VOIDmode,
					    XEXP (op, 0), XEXP (op, 1));

	  /* (not (ashiftrt foo C)) where C is the number of bits in FOO
	     minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
	     so we can perform the above simplification.  */

	  if (STORE_FLAG_VALUE == -1
	      && GET_CODE (op) == ASHIFTRT
	      && GET_CODE (XEXP (op, 1)) == CONST_INT
	      && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
	    return simplify_gen_relational (GE, mode, VOIDmode,
					    XEXP (op, 0), const0_rtx);
	  break;
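
	  /* As an example of the XOR rule above, (not (xor x (const_int 5)))
	     becomes (xor x (const_int -6)), since ~5 == -6.  */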
	case NEG:
	  /* (neg (neg X)) == X.  */
	  if (GET_CODE (op) == NEG)
	    return XEXP (op, 0);

	  /* (neg (plus X 1)) can become (not X).  */
	  if (GET_CODE (op) == PLUS
	      && XEXP (op, 1) == const1_rtx)
	    return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

	  /* Similarly, (neg (not X)) is (plus X 1).  */
	  if (GET_CODE (op) == NOT)
	    return plus_constant (XEXP (op, 0), 1);

	  /* (neg (minus X Y)) can become (minus Y X).  This transformation
	     isn't safe for modes with signed zeros, since if X and Y are
	     both +0, (minus Y X) is the same as (minus X Y).  If the
	     rounding mode is towards +infinity (or -infinity) then the two
	     expressions will be rounded differently.  */
	  if (GET_CODE (op) == MINUS
	      && !HONOR_SIGNED_ZEROS (mode)
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    return simplify_gen_binary (MINUS, mode, XEXP (op, 1),
					XEXP (op, 0));

	  /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
	  if (GET_CODE (op) == PLUS
	      && !HONOR_SIGNED_ZEROS (mode)
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    {
	      temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	      return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
	    }

	  /* (neg (mult A B)) becomes (mult (neg A) B).
	     This works even for floating-point values.  */
	  if (GET_CODE (op) == MULT
	      && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	    {
	      temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
	      return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
	    }

	  /* NEG commutes with ASHIFT since it is multiplication.  Only do
	     this if we can then eliminate the NEG (e.g., if the operand
	     is a constant).  */
	  if (GET_CODE (op) == ASHIFT)
	    {
	      temp = simplify_unary_operation (NEG, mode, XEXP (op, 0),
					       mode);
	      if (temp)
		return simplify_gen_binary (ASHIFT, mode, temp,
					    XEXP (op, 1));
	    }
	  break;
	case SIGN_EXTEND:
	  /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
	     becomes just the MINUS if its mode is MODE.  This allows
	     folding switch statements on machines using casesi (such as
	     the VAX).  */
	  if (GET_CODE (op) == TRUNCATE
	      && GET_MODE (XEXP (op, 0)) == mode
	      && GET_CODE (XEXP (op, 0)) == MINUS
	      && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
	      && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
	    return XEXP (op, 0);

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	  if (! POINTERS_EXTEND_UNSIGNED
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
#endif
	  break;

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
	case ZERO_EXTEND:
	  if (POINTERS_EXTEND_UNSIGNED > 0
	      && mode == Pmode && GET_MODE (op) == ptr_mode
	      && (CONSTANT_P (op)
		  || (GET_CODE (op) == SUBREG
		      && GET_CODE (SUBREG_REG (op)) == REG
		      && REG_POINTER (SUBREG_REG (op))
		      && GET_MODE (SUBREG_REG (op)) == Pmode)))
	    return convert_memory_address (Pmode, op);
	  break;
#endif

	default:
	  break;
	}

      return 0;
    }
}
/* Subroutine of simplify_associative_operation.  Return true if rtx OP
   is a suitable integer or floating point immediate constant.  */

static bool
associative_constant_p (rtx op)
{
  if (GET_CODE (op) == CONST_INT
      || GET_CODE (op) == CONST_DOUBLE)
    return true;
  op = avoid_constant_pool_reference (op);
  return GET_CODE (op) == CONST_INT
	 || GET_CODE (op) == CONST_DOUBLE;
}
/* Subroutine of simplify_binary_operation to simplify an associative
   binary operation CODE with result mode MODE, operating on OP0 and OP1.
   Return 0 if no simplification is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
				rtx op0, rtx op1)
{
  rtx tem;

  /* Simplify (x op c1) op c2 as x op (c1 op c2).  */
  if (GET_CODE (op0) == code
      && associative_constant_p (op1)
      && associative_constant_p (XEXP (op0, 1)))
    {
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (! tem)
	return tem;
      return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
    }

  /* Simplify (x op c1) op (y op c2) as (x op y) op (c1 op c2).  */
  if (GET_CODE (op0) == code
      && GET_CODE (op1) == code
      && associative_constant_p (XEXP (op0, 1))
      && associative_constant_p (XEXP (op1, 1)))
    {
      rtx c = simplify_binary_operation (code, mode,
					 XEXP (op0, 1), XEXP (op1, 1));
      if (! c)
	return 0;
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_binary (code, mode, tem, c);
    }

  /* Canonicalize (x op c) op y as (x op y) op c.  */
  if (GET_CODE (op0) == code
      && associative_constant_p (XEXP (op0, 1)))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
      return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  /* Canonicalize x op (y op c) as (x op y) op c.  */
  if (GET_CODE (op1) == code
      && associative_constant_p (XEXP (op1, 1)))
    {
      tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
      return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
    }

  return 0;
}
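
/* Example of the first rule: (plus (plus x (const_int 1)) (const_int 2))
   becomes (plus x (const_int 3)) -- the two constants fold, and the
   result is re-wrapped around the variable operand.  */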
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
			   rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);
  rtx tem;
  rtx trueop0 = avoid_constant_pool_reference (op0);
  rtx trueop1 = avoid_constant_pool_reference (op1);

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */

  if (GET_RTX_CLASS (code) == '<')
    abort ();

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == 'c'
      && swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
    }
  if (VECTOR_MODE_P (mode)
      && GET_CODE (trueop0) == CONST_VECTOR
      && GET_CODE (trueop1) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode op0mode = GET_MODE (trueop0);
      int op0_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op0mode));
      unsigned op0_n_elts = (GET_MODE_SIZE (op0mode) / op0_elt_size);
      enum machine_mode op1mode = GET_MODE (trueop1);
      int op1_elt_size = GET_MODE_SIZE (GET_MODE_INNER (op1mode));
      unsigned op1_n_elts = (GET_MODE_SIZE (op1mode) / op1_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      if (op0_n_elts != n_elts || op1_n_elts != n_elts)
	abort ();

      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (trueop0, i),
					     CONST_VECTOR_ELT (trueop1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (GET_MODE_CLASS (mode) == MODE_FLOAT
      && GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      REAL_VALUE_TYPE f0, f1, value;

      REAL_VALUE_FROM_CONST_DOUBLE (f0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (f1, trueop1);
      f0 = real_value_truncate (mode, f0);
      f1 = real_value_truncate (mode, f1);

      if (HONOR_SNANS (mode)
	  && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	return 0;

      if (code == DIV
	  && REAL_VALUES_EQUAL (f1, dconst0)
	  && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	return 0;

      REAL_ARITHMETIC (value, rtx_to_tree_code (code), f0, f1);

      value = real_value_truncate (mode, value);
      return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (trueop0) == CONST_DOUBLE
	  || GET_CODE (trueop0) == CONST_INT)
      && (GET_CODE (trueop1) == CONST_DOUBLE
	  || GET_CODE (trueop1) == CONST_INT))
    {
      unsigned HOST_WIDE_INT l1, l2, lv;
      HOST_WIDE_INT h1, h2, hv;

      if (GET_CODE (trueop0) == CONST_DOUBLE)
	l1 = CONST_DOUBLE_LOW (trueop0), h1 = CONST_DOUBLE_HIGH (trueop0);
      else
	l1 = INTVAL (trueop0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	l2 = CONST_DOUBLE_LOW (trueop1), h2 = CONST_DOUBLE_HIGH (trueop1);
      else
	l2 = INTVAL (trueop1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
	{
	case MINUS:
	  /* A - B == A + (-B).  */
	  neg_double (l2, h2, &lv, &hv);
	  l2 = lv, h2 = hv;

	  /* Fall through....  */

	case PLUS:
	  add_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case MULT:
	  mul_double (l1, h1, l2, h2, &lv, &hv);
	  break;

	case DIV:  case MOD:   case UDIV:  case UMOD:
	  /* We'd need to include tree.h to do this and it doesn't seem worth
	     it.  */
	  return 0;

	case AND:
	  lv = l1 & l2, hv = h1 & h2;
	  break;

	case IOR:
	  lv = l1 | l2, hv = h1 | h2;
	  break;

	case XOR:
	  lv = l1 ^ l2, hv = h1 ^ h2;
	  break;

	case SMIN:
	  if (h1 < h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case SMAX:
	  if (h1 > h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMIN:
	  if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      < (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case UMAX:
	  if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
	      || (h1 == h2
		  && ((unsigned HOST_WIDE_INT) l1
		      > (unsigned HOST_WIDE_INT) l2)))
	    lv = l1, hv = h1;
	  else
	    lv = l2, hv = h2;
	  break;

	case LSHIFTRT:   case ASHIFTRT:
	case ASHIFT:
	case ROTATE:     case ROTATERT:
#ifdef SHIFT_COUNT_TRUNCATED
	  if (SHIFT_COUNT_TRUNCATED)
	    l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;
#endif

	  if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
	    return 0;

	  if (code == LSHIFTRT || code == ASHIFTRT)
	    rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
			   code == ASHIFTRT);
	  else if (code == ASHIFT)
	    lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
	  else if (code == ROTATE)
	    lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  else /* code == ROTATERT */
	    rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
	  break;

	default:
	  return 0;
	}

      return immed_double_const (lv, hv, mode);
    }
  if (GET_CODE (op0) != CONST_INT || GET_CODE (op1) != CONST_INT
      || width > HOST_BITS_PER_WIDE_INT || width == 0)
    {
      /* Even if we can't compute a constant result,
	 there are some cases worth simplifying.  */

      switch (code)
	{
	case PLUS:
	  /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	     when x is NaN, infinite, or finite and nonzero.  They aren't
	     when x is -0 and the rounding mode is not towards -infinity,
	     since (-0) + 0 is then 0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	     transformations are safe even for IEEE.  */
	  if (GET_CODE (op0) == NEG)
	    return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
	  else if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

	  /* (~a) + 1 -> -a */
	  if (INTEGRAL_MODE_P (mode)
	      && GET_CODE (op0) == NOT
	      && trueop1 == const1_rtx)
	    return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

	  /* Handle both-operands-constant cases.  We can only add
	     CONST_INTs to constants since the sum of relocatable symbols
	     can't be handled by most assemblers.  Don't add CONST_INT
	     to CONST_INT since overflow won't be computed properly if wider
	     than HOST_BITS_PER_WIDE_INT.  */

	  if (CONSTANT_P (op0) && GET_MODE (op0) != VOIDmode
	      && GET_CODE (op1) == CONST_INT)
	    return plus_constant (op0, INTVAL (op1));
	  else if (CONSTANT_P (op1) && GET_MODE (op1) != VOIDmode
		   && GET_CODE (op0) == CONST_INT)
	    return plus_constant (op1, INTVAL (op0));

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = -1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 + coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;

	  /* Reassociate floating point addition only when the user
	     specifies unsafe math optimizations.  */
	  if (FLOAT_MODE_P (mode)
	      && flag_unsafe_math_optimizations)
	    {
	      tem = simplify_associative_operation (code, mode, op0, op1);
	      if (tem)
		return tem;
	    }
	  break;
	case COMPARE:
#ifdef HAVE_cc0
	  /* Convert (compare FOO (const_int 0)) to FOO unless we aren't
	     using cc0, in which case we want to leave it as a COMPARE
	     so we can distinguish it from a register-register-copy.

	     In IEEE floating point, x-0 is not the same as x.  */

	  if ((TARGET_FLOAT_FORMAT != IEEE_FLOAT_FORMAT
	       || ! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations)
	      && trueop1 == CONST0_RTX (mode))
	    return op0;
#endif

	  /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
	  if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	       || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	      && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	    {
	      rtx xop00 = XEXP (op0, 0);
	      rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
	      if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
	      if (GET_CODE (xop00) == REG && GET_CODE (xop10) == REG
		  && GET_MODE (xop00) == GET_MODE (xop10)
		  && REGNO (xop00) == REGNO (xop10)
		  && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
		  && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
		return xop00;
	    }
	  break;
	case MINUS:
	  /* We can't assume x-x is 0 even with non-IEEE floating point,
	     but since it is zero except in very strange circumstances, we
	     will treat it as zero with -funsafe-math-optimizations.  */
	  if (rtx_equal_p (trueop0, trueop1)
	      && ! side_effects_p (op0)
	      && (! FLOAT_MODE_P (mode) || flag_unsafe_math_optimizations))
	    return CONST0_RTX (mode);

	  /* Change subtraction from zero into negation.  (0 - x) is the
	     same as -x when x is NaN, infinite, or finite and nonzero.
	     But if the mode has signed zeros, and does not round towards
	     -infinity, then 0 - 0 is 0, not -0.  */
	  if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	    return simplify_gen_unary (NEG, mode, op1, mode);

	  /* (-1 - a) is ~a.  */
	  if (trueop0 == constm1_rtx)
	    return simplify_gen_unary (NOT, mode, op1, mode);

	  /* Subtracting 0 has no effect unless the mode has signed zeros
	     and supports rounding towards -infinity.  In such a case,
	     0 - 0 is -0.  */
	  if (!(HONOR_SIGNED_ZEROS (mode)
		&& HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	      && trueop1 == CONST0_RTX (mode))
	    return op0;

	  /* See if this is something like X * C - X or vice versa or
	     if the multiplication is written as a shift.  If so, we can
	     distribute and make a new multiply, shift, or maybe just
	     have X (if C is 2 in the example above).  But don't make
	     real multiply if we didn't have one before.  */

	  if (! FLOAT_MODE_P (mode))
	    {
	      HOST_WIDE_INT coeff0 = 1, coeff1 = 1;
	      rtx lhs = op0, rhs = op1;
	      int had_mult = 0;

	      if (GET_CODE (lhs) == NEG)
		coeff0 = -1, lhs = XEXP (lhs, 0);
	      else if (GET_CODE (lhs) == MULT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT)
		{
		  coeff0 = INTVAL (XEXP (lhs, 1)), lhs = XEXP (lhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (lhs) == ASHIFT
		       && GET_CODE (XEXP (lhs, 1)) == CONST_INT
		       && INTVAL (XEXP (lhs, 1)) >= 0
		       && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff0 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
		  lhs = XEXP (lhs, 0);
		}

	      if (GET_CODE (rhs) == NEG)
		coeff1 = - 1, rhs = XEXP (rhs, 0);
	      else if (GET_CODE (rhs) == MULT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT)
		{
		  coeff1 = INTVAL (XEXP (rhs, 1)), rhs = XEXP (rhs, 0);
		  had_mult = 1;
		}
	      else if (GET_CODE (rhs) == ASHIFT
		       && GET_CODE (XEXP (rhs, 1)) == CONST_INT
		       && INTVAL (XEXP (rhs, 1)) >= 0
		       && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
		{
		  coeff1 = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
		  rhs = XEXP (rhs, 0);
		}

	      if (rtx_equal_p (lhs, rhs))
		{
		  tem = simplify_gen_binary (MULT, mode, lhs,
					     GEN_INT (coeff0 - coeff1));
		  return (GET_CODE (tem) == MULT && ! had_mult) ? 0 : tem;
		}
	    }

	  /* (a - (-b)) -> (a + b).  True even for IEEE.  */
	  if (GET_CODE (op1) == NEG)
	    return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

	  /* If one of the operands is a PLUS or a MINUS, see if we can
	     simplify this by the associative law.
	     Don't use the associative law for floating point.
	     The inaccuracy makes it nonassociative,
	     and subtle programs can break if operations are associated.  */

	  if (INTEGRAL_MODE_P (mode)
	      && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS
		  || GET_CODE (op1) == PLUS || GET_CODE (op1) == MINUS
		  || (GET_CODE (op0) == CONST
		      && GET_CODE (XEXP (op0, 0)) == PLUS)
		  || (GET_CODE (op1) == CONST
		      && GET_CODE (XEXP (op1, 0)) == PLUS))
	      && (tem = simplify_plus_minus (code, mode, op0, op1, 0)) != 0)
	    return tem;

	  /* Don't let a relocatable value get a negative coeff.  */
	  if (GET_CODE (op1) == CONST_INT && GET_MODE (op0) != VOIDmode)
	    return simplify_gen_binary (PLUS, mode,
					op0,
					neg_const_int (mode, op1));

	  /* (x - (x & y)) -> (x & ~y) */
	  if (GET_CODE (op1) == AND)
	    {
	      if (rtx_equal_p (op0, XEXP (op1, 0)))
		{
		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					    GET_MODE (XEXP (op1, 1)));
		  return simplify_gen_binary (AND, mode, op0, tem);
		}
	      if (rtx_equal_p (op0, XEXP (op1, 1)))
		{
		  tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					    GET_MODE (XEXP (op1, 0)));
		  return simplify_gen_binary (AND, mode, op0, tem);
		}
	    }
	  break;
	case MULT:
	  if (trueop1 == constm1_rtx)
	    return simplify_gen_unary (NEG, mode, op0, mode);

	  /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	     x is NaN, since x * 0 is then also NaN.  Nor is it valid
	     when the mode has signed zeros, since multiplying a negative
	     number by 0 will give -0, not 0.  */
	  if (!HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && trueop1 == CONST0_RTX (mode)
	      && ! side_effects_p (op0))
	    return op1;

	  /* In IEEE floating point, x*1 is not equivalent to x for
	     signaling NaNs.  */
	  if (!HONOR_SNANS (mode)
	      && trueop1 == CONST1_RTX (mode))
	    return op0;

	  /* Convert multiply by constant power of two into shift unless
	     we are still generating RTL.  This test is a kludge.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (val = exact_log2 (INTVAL (trueop1))) >= 0
	      /* If the mode is larger than the host word size, and the
		 uppermost bit is set, then this isn't a power of two due
		 to implicit sign extension.  */
	      && (width <= HOST_BITS_PER_WIDE_INT
		  || val != HOST_BITS_PER_WIDE_INT - 1)
	      && ! rtx_equal_function_value_matters)
	    return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));

	  /* x*2 is x+x and x*(-1) is -x */
	  if (GET_CODE (trueop1) == CONST_DOUBLE
	      && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
	      && GET_MODE (op0) == mode)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (REAL_VALUES_EQUAL (d, dconst2))
		return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	      if (REAL_VALUES_EQUAL (d, dconstm1))
		return simplify_gen_unary (NEG, mode, op0, mode);
	    }

	  /* Reassociate multiplication, but for floating point MULTs
	     only when the user specifies unsafe math optimizations.  */
	  if (! FLOAT_MODE_P (mode)
	      || flag_unsafe_math_optimizations)
	    {
	      tem = simplify_associative_operation (code, mode, op0, op1);
	      if (tem)
		return tem;
	    }
	  break;
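
	  /* E.g. the power-of-two rule above turns (mult x (const_int 8))
	     into (ashift x (const_int 3)) once RTL generation has
	     finished.  */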
	case IOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  /* A | (~A) -> -1 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return constm1_rtx;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case XOR:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return simplify_gen_unary (NOT, mode, op0, mode);
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case AND:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return const0_rtx;
	  if (GET_CODE (trueop1) == CONST_INT
	      && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
		  == GET_MODE_MASK (mode)))
	    return op0;
	  if (trueop0 == trueop1 && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return op0;
	  /* A & (~A) -> 0 */
	  if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	       || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	      && ! side_effects_p (op0)
	      && GET_MODE_CLASS (mode) != MODE_CC)
	    return const0_rtx;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case UDIV:
	  /* Convert divide by power of two into shift (divide by 1 handled
	     below).  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (arg1 = exact_log2 (INTVAL (trueop1))) > 0)
	    return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (arg1));

	  /* Fall through....  */

	case DIV:
	  if (trueop1 == CONST1_RTX (mode))
	    {
	      /* On some platforms DIV uses narrower mode than its
		 operands.  */
	      rtx x = gen_lowpart_common (mode, op0);
	      if (x)
		return x;
	      else if (mode != GET_MODE (op0) && GET_MODE (op0) != VOIDmode)
		return gen_lowpart_SUBREG (mode, op0);
	      else
		return op0;
	    }

	  /* Maybe change 0 / x to 0.  This transformation isn't safe for
	     modes with NaNs, since 0 / 0 will then be NaN rather than 0.
	     Nor is it safe for modes with signed zeros, since dividing
	     0 by a negative number gives -0, not 0.  */
	  if (!HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && trueop0 == CONST0_RTX (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* Change division by a constant into multiplication.  Only do
	     this with -funsafe-math-optimizations.  */
	  else if (GET_CODE (trueop1) == CONST_DOUBLE
		   && GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_FLOAT
		   && trueop1 != CONST0_RTX (mode)
		   && flag_unsafe_math_optimizations)
	    {
	      REAL_VALUE_TYPE d;
	      REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

	      if (! REAL_VALUES_EQUAL (d, dconst0))
		{
		  REAL_ARITHMETIC (d, rtx_to_tree_code (DIV), dconst1, d);
		  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
		  return simplify_gen_binary (MULT, mode, op0, tem);
		}
	    }
	  break;

	case UMOD:
	  /* Handle modulus by power of two (mod with 1 handled below).  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && exact_log2 (INTVAL (trueop1)) > 0)
	    return simplify_gen_binary (AND, mode, op0,
					GEN_INT (INTVAL (op1) - 1));

	  /* Fall through....  */

	case MOD:
	  if ((trueop0 == const0_rtx || trueop1 == const1_rtx)
	      && ! side_effects_p (op0) && ! side_effects_p (op1))
	    return const0_rtx;
	  break;
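
	  /* The UMOD rule relies on x % 2**n == x & (2**n - 1) for
	     unsigned x; e.g. a umod by (const_int 8) becomes an AND with
	     (const_int 7).  */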
	case ROTATERT:
	case ROTATE:
	case ASHIFTRT:
	  /* Rotating ~0 always results in ~0.  */
	  if (GET_CODE (trueop0) == CONST_INT && width <= HOST_BITS_PER_WIDE_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop0) == GET_MODE_MASK (mode)
	      && ! side_effects_p (op1))
	    return op0;

	  /* Fall through....  */

	case ASHIFT:
	case LSHIFTRT:
	  if (trueop1 == const0_rtx)
	    return op0;
	  if (trueop0 == const0_rtx && ! side_effects_p (op1))
	    return op0;
	  break;

	case SMIN:
	  if (width <= HOST_BITS_PER_WIDE_INT
	      && GET_CODE (trueop1) == CONST_INT
	      && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width -1)
	      && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case SMAX:
	  if (width <= HOST_BITS_PER_WIDE_INT
	      && GET_CODE (trueop1) == CONST_INT
	      && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
		  == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
	      && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case UMIN:
	  if (trueop1 == const0_rtx && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;

	case UMAX:
	  if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	    return op1;
	  if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	    return op0;
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	  break;
	case SS_PLUS:
	case US_PLUS:
	case SS_MINUS:
	case US_MINUS:
	  /* ??? There are simplifications that can be done.  */
	  return 0;

	case VEC_SELECT:
	  if (!VECTOR_MODE_P (mode))
	    {
	      if (!VECTOR_MODE_P (GET_MODE (trueop0))
		  || (mode
		      != GET_MODE_INNER (GET_MODE (trueop0)))
		  || GET_CODE (trueop1) != PARALLEL
		  || XVECLEN (trueop1, 0) != 1
		  || GET_CODE (XVECEXP (trueop1, 0, 0)) != CONST_INT)
		abort ();

	      if (GET_CODE (trueop0) == CONST_VECTOR)
		return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP (trueop1, 0, 0)));
	    }
	  else
	    {
	      if (!VECTOR_MODE_P (GET_MODE (trueop0))
		  || (GET_MODE_INNER (mode)
		      != GET_MODE_INNER (GET_MODE (trueop0)))
		  || GET_CODE (trueop1) != PARALLEL)
		abort ();

	      if (GET_CODE (trueop0) == CONST_VECTOR)
		{
		  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
		  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
		  rtvec v = rtvec_alloc (n_elts);
		  unsigned int i;

		  if (XVECLEN (trueop1, 0) != (int) n_elts)
		    abort ();
		  for (i = 0; i < n_elts; i++)
		    {
		      rtx x = XVECEXP (trueop1, 0, i);

		      if (GET_CODE (x) != CONST_INT)
			abort ();
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, INTVAL (x));
		    }

		  return gen_rtx_CONST_VECTOR (mode, v);
		}
	    }
	  return 0;
	case VEC_CONCAT:
	  {
	    enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
					  ? GET_MODE (trueop0)
					  : GET_MODE_INNER (mode));
	    enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
					  ? GET_MODE (trueop1)
					  : GET_MODE_INNER (mode));

	    if (!VECTOR_MODE_P (mode)
		|| (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
		    != GET_MODE_SIZE (mode)))
	      abort ();

	    if ((VECTOR_MODE_P (op0_mode)
		 && (GET_MODE_INNER (mode)
		     != GET_MODE_INNER (op0_mode)))
		|| (!VECTOR_MODE_P (op0_mode)
		    && GET_MODE_INNER (mode) != op0_mode))
	      abort ();

	    if ((VECTOR_MODE_P (op1_mode)
		 && (GET_MODE_INNER (mode)
		     != GET_MODE_INNER (op1_mode)))
		|| (!VECTOR_MODE_P (op1_mode)
		    && GET_MODE_INNER (mode) != op1_mode))
	      abort ();

	    if ((GET_CODE (trueop0) == CONST_VECTOR
		 || GET_CODE (trueop0) == CONST_INT
		 || GET_CODE (trueop0) == CONST_DOUBLE)
		&& (GET_CODE (trueop1) == CONST_VECTOR
		    || GET_CODE (trueop1) == CONST_INT
		    || GET_CODE (trueop1) == CONST_DOUBLE))
	      {
		int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
		unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
		rtvec v = rtvec_alloc (n_elts);
		unsigned int i;
		unsigned in_n_elts = 1;

		if (VECTOR_MODE_P (op0_mode))
		  in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
		for (i = 0; i < n_elts; i++)
		  {
		    if (i < in_n_elts)
		      {
			if (!VECTOR_MODE_P (op0_mode))
			  RTVEC_ELT (v, i) = trueop0;
			else
			  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
		      }
		    else
		      {
			if (!VECTOR_MODE_P (op1_mode))
			  RTVEC_ELT (v, i) = trueop1;
			else
			  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							       i - in_n_elts);
		      }
		  }

		return gen_rtx_CONST_VECTOR (mode, v);
	      }
	  }
	  return 0;

	default:
	  break;
	}

      return 0;
    }
  /* Get the integer argument values in two forms:
     zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

  arg0 = INTVAL (trueop0);
  arg1 = INTVAL (trueop1);

  if (width < HOST_BITS_PER_WIDE_INT)
    {
      arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
      arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

      arg0s = arg0;
      if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg0s |= ((HOST_WIDE_INT) (-1) << width);

      arg1s = arg1;
      if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	arg1s |= ((HOST_WIDE_INT) (-1) << width);
    }
  else
    {
      arg0s = arg0;
      arg1s = arg1;
    }

  /* Compute the value of the arithmetic.  */

  switch (code)
    {
    case PLUS:
      val = arg0s + arg1s;
      break;

    case MINUS:
      val = arg0s - arg1s;
      break;

    case MULT:
      val = arg0s * arg1s;
      break;

    case DIV:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s / arg1s;
      break;

    case MOD:
      if (arg1s == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = arg0s % arg1s;
      break;

    case UDIV:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 / arg1;
      break;

    case UMOD:
      if (arg1 == 0
	  || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
	      && arg1s == -1))
	return 0;
      val = (unsigned HOST_WIDE_INT) arg0 % arg1;
      break;

    case AND:
      val = arg0 & arg1;
      break;

    case IOR:
      val = arg0 | arg1;
      break;

    case XOR:
      val = arg0 ^ arg1;
      break;

    case LSHIFTRT:
      /* If shift count is undefined, don't fold it; let the machine do
	 what it wants.  But truncate it if the machine will do that.  */
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) >> arg1;
      break;

    case ASHIFT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = ((unsigned HOST_WIDE_INT) arg0) << arg1;
      break;

    case ASHIFTRT:
      if (arg1 < 0)
	return 0;

#ifdef SHIFT_COUNT_TRUNCATED
      if (SHIFT_COUNT_TRUNCATED)
	arg1 %= width;
#endif

      val = arg0s >> arg1;

      /* Bootstrap compiler may not have sign extended the right shift.
	 Manually extend the sign to ensure bootstrap cc matches gcc.  */
      if (arg0s < 0 && arg1 > 0)
	val |= ((HOST_WIDE_INT) -1) << (HOST_BITS_PER_WIDE_INT - arg1);

      break;

    case ROTATERT:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
	     | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
      break;

    case ROTATE:
      if (arg1 < 0)
	return 0;

      arg1 %= width;
      val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
	     | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
      break;

    case COMPARE:
      /* Do nothing here.  */
      return 0;

    case SMIN:
      val = arg0s <= arg1s ? arg0s : arg1s;
      break;

    case UMIN:
      val = ((unsigned HOST_WIDE_INT) arg0
	     <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SMAX:
      val = arg0s > arg1s ? arg0s : arg1s;
      break;

    case UMAX:
      val = ((unsigned HOST_WIDE_INT) arg0
	     > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
      /* ??? There are simplifications that can be done.  */
      return 0;

    default:
      abort ();
    }

  val = trunc_int_for_mode (val, mode);

  return GEN_INT (val);
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   If FORCE is true, then always generate the rtx.  This is used to
   canonicalize stuff emitted from simplify_gen_binary.  Note that this
   can still fail if the rtx is too complex.  It won't fail just because
   the result is not 'simpler' than the input, however.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  int neg;
};

static int
simplify_plus_minus_op_data_cmp (const void *p1, const void *p2)
{
  const struct simplify_plus_minus_op_data *d1 = p1;
  const struct simplify_plus_minus_op_data *d2 = p2;

  return (commutative_operand_precedence (d2->op)
	  - commutative_operand_precedence (d1->op));
}
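
/* The comparator sorts by decreasing precedence, so qsort moves complex
   operands to the front and constants to the back; e.g. an ops array
   holding {(const_int 4), (reg)} sorts to {(reg), (const_int 4)}.  */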
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
		     rtx op1, int force)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2, input_consts = 0, n_consts;
  int first, negate, changed;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == 7)
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      input_ops++;
	      changed = 1;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      break;

	    case CONST:
	      if (n_ops < 7
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  input_consts++;
		  changed = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != 7)
		{
		  ops[n_ops].op = constm1_rtx;
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		}
	      break;

	    case CONST_INT:
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  /* If we only have two operands, we can't do anything.  */
  if (n_ops <= 2 && !force)
    return NULL_RTX;

  /* Count the number of CONSTs we didn't split above.  */
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      input_consts++;

  /* Now simplify each pair of operands until nothing changes.  The first
     time through just simplify constants against each other.  */

  first = 1;
  do
    {
      changed = first;

      for (i = 0; i < n_ops - 1; i++)
	for (j = i + 1; j < n_ops; j++)
	  {
	    rtx lhs = ops[i].op, rhs = ops[j].op;
	    int lneg = ops[i].neg, rneg = ops[j].neg;

	    if (lhs != 0 && rhs != 0
		&& (! first || (CONSTANT_P (lhs) && CONSTANT_P (rhs))))
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      tem = lhs, lhs = rhs, rhs = tem;
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  tem = lhs, lhs = rhs, rhs = tem;

		tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		/* Reject "simplifications" that just wrap the two
		   arguments in a CONST.  Failure to do so can result
		   in infinite recursion with simplify_binary_operation
		   when it calls us to simplify CONST operations.  */
		if (tem
		    && ! (GET_CODE (tem) == CONST
			  && GET_CODE (XEXP (tem, 0)) == ncode
			  && XEXP (XEXP (tem, 0), 0) == lhs
			  && XEXP (XEXP (tem, 0), 1) == rhs)
		    /* Don't allow -x + -1 -> ~x simplifications in the
		       first pass.  This allows us the chance to combine
		       the -1 with other constants.  */
		    && ! (first
			  && GET_CODE (tem) == NOT
			  && XEXP (tem, 0) == rhs))
		  {
		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (GET_CODE (tem) == CONST_INT && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		  }
	      }
	  }

      first = 0;
    }
  while (changed);

  /* Pack all the operands to the lower-numbered entries.  */
  for (i = 0, j = 0; j < n_ops; j++)
    if (ops[j].op)
      ops[i++] = ops[j];
  n_ops = i;

  /* Sort the operations based on swap_commutative_operands_p.  */
  qsort (ops, n_ops, sizeof (*ops), simplify_plus_minus_op_data_cmp);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && GET_CODE (ops[n_ops - 1].op) == CONST_INT
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Count the number of CONSTs that we generated.  */
  n_consts = 0;
  for (i = 0; i < n_ops; i++)
    if (GET_CODE (ops[i].op) == CONST)
      n_consts++;

  /* Give up if we didn't reduce the number of operands we had.  Make
     sure we count a CONST as two operands.  If we have the same
     number of operands, but have made more CONSTs than before, this
     is also an improvement, so accept it.  */
  if (!force
      && (n_ops + n_consts > input_ops
	  || (n_ops + n_consts == input_ops && n_consts <= input_consts)))
    return NULL_RTX;

  /* Put a non-negated operand first.  If there aren't any, make all
     operands positive and negate the whole thing later.  */

  negate = 0;
  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;

  if (i == n_ops)
    {
      for (i = 0; i < n_ops; i++)
	ops[i].neg = 0;
      negate = 1;
    }
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = !ops[0].neg;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return negate ? gen_rtx_NEG (mode, result) : result;
}
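
/* A worked example, sketched: for (minus (plus x (const_int 3))
   (plus y (const_int 1))), the expansion loop yields the operand list
   {+x, +3, -y, -1}, the combination loop folds the constants to +2, and
   the rebuild emits (plus (minus x y) (const_int 2)), modulo the final
   canonical ordering.  */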
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the operands, not that of the result.  If MODE
   is VOIDmode, both operands must also be VOIDmode and we compare the
   operands in "infinite precision".

   If no simplification is possible, this function returns zero.  Otherwise,
   it returns either const_true_rtx or const0_rtx.  */
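
/* For example, a comparison of 128 with -128 must not be done by
   assuming a full word: in QImode both operands denote the same bit
   pattern 0x80 and compare equal, while in VOIDmode's "infinite
   precision" they differ.  */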
2367 simplify_relational_operation (enum rtx_code code
, enum machine_mode mode
,
2370 int equal
, op0lt
, op0ltu
, op1lt
, op1ltu
;
  if (mode == VOIDmode
      && (GET_MODE (op0) != VOIDmode
	  || GET_MODE (op1) != VOIDmode))
    abort ();

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    op1 = XEXP (op0, 1), op0 = XEXP (op0, 0);
  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;
  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (trueop0, trueop1))
    {
      tem = op0, op0 = op1, op1 = tem;
      tem = trueop0, trueop0 = trueop1, trueop1 = tem;
      code = swap_condition (code);
    }
  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     If CODE is an unsigned comparison, then we can never do this optimization,
     because it gives an incorrect result if the subtraction wraps around zero.
     ANSI C defines unsigned operations such that they never overflow, and
     thus such cases cannot be ignored.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && ! ((GET_CODE (op0) == REG || GET_CODE (trueop0) == CONST_INT)
	    && (GET_CODE (op1) == REG || GET_CODE (trueop1) == CONST_INT))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      && code != GTU && code != GEU && code != LTU && code != LEU)
    return simplify_relational_operation (signed_condition (code),
					  mode, tem, const0_rtx);

  if (flag_unsafe_math_optimizations && code == ORDERED)
    return const_true_rtx;

  if (flag_unsafe_math_optimizations && code == UNORDERED)
    return const0_rtx;
  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  */
  if (! HONOR_NANS (GET_MODE (trueop0))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    equal = 1, op0lt = 0, op0ltu = 0, op1lt = 0, op1ltu = 0;

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  else if (GET_CODE (trueop0) == CONST_DOUBLE
	   && GET_CODE (trueop1) == CONST_DOUBLE
	   && GET_MODE_CLASS (GET_MODE (trueop0)) == MODE_FLOAT)
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      equal = REAL_VALUES_EQUAL (d0, d1);
      op0lt = op0ltu = REAL_VALUES_LESS (d0, d1);
      op1lt = op1ltu = REAL_VALUES_LESS (d1, d0);
    }
  /* Otherwise, see if the operands are both integers.  */
  else if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
	   && (GET_CODE (trueop0) == CONST_DOUBLE
	       || GET_CODE (trueop0) == CONST_INT)
	   && (GET_CODE (trueop1) == CONST_DOUBLE
	       || GET_CODE (trueop1) == CONST_INT))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
	{
	  l0u = l0s = CONST_DOUBLE_LOW (trueop0);
	  h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
	}
      else
	{
	  l0u = l0s = INTVAL (trueop0);
	  h0u = h0s = HWI_SIGN_EXTEND (l0s);
	}

      if (GET_CODE (trueop1) == CONST_DOUBLE)
	{
	  l1u = l1s = CONST_DOUBLE_LOW (trueop1);
	  h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
	}
      else
	{
	  l1u = l1s = INTVAL (trueop1);
	  h1u = h1s = HWI_SIGN_EXTEND (l1s);
	}

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
	 we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
	{
	  l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
	  l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

	  if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l0s |= ((HOST_WIDE_INT) (-1) << width);

	  if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
	    l1s |= ((HOST_WIDE_INT) (-1) << width);
	}
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
	h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      equal = (h0u == h1u && l0u == l1u);
      op0lt = (h0s < h1s || (h0s == h1s && l0u < l1u));
      op1lt = (h1s < h0s || (h1s == h0s && l1u < l0u));
      op0ltu = (h0u < h1u || (h0u == h1u && l0u < l1u));
      op1ltu = (h1u < h0u || (h1u == h0u && l1u < l0u));
    }
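
      /* Worked example (assuming HOST_BITS_PER_WIDE_INT == 32): the
	 64-bit constant 0x100000000 is held as (high, low) = (1, 0) and
	 0xffffffff as (0, 0xffffffff).  The high words differ, so the
	 comparisons above are decided there; only when the high words
	 tie do the unsigned low words break the tie.  */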
  /* Otherwise, there are some code-specific tests we can make.  */
  else
    {
      switch (code)
	{
	case EQ:
	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
	    return const0_rtx;
	  break;

	case NE:
	  if (trueop1 == const0_rtx && nonzero_address_p (op0))
	    return const_true_rtx;
	  break;

	case GEU:
	  /* Unsigned values are never negative.  */
	  if (trueop1 == const0_rtx)
	    return const_true_rtx;
	  break;

	case LTU:
	  if (trueop1 == const0_rtx)
	    return const0_rtx;
	  break;

	case LEU:
	  /* Unsigned values are never greater than the largest
	     unsigned value.  */
	  if (GET_CODE (trueop1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const_true_rtx;
	  break;

	case GTU:
	  if (GET_CODE (trueop1) == CONST_INT
	      && (unsigned HOST_WIDE_INT) INTVAL (trueop1) == GET_MODE_MASK (mode)
	      && INTEGRAL_MODE_P (mode))
	    return const0_rtx;
	  break;

	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (trueop1 == CONST0_RTX (mode) && !HONOR_SNANS (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (trueop1 == CONST0_RTX (mode) && !HONOR_NANS (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const_true_rtx;
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  if (trueop1 == CONST0_RTX (mode))
	    {
	      tem = GET_CODE (trueop0) == FLOAT_EXTEND ? XEXP (trueop0, 0)
						       : trueop0;
	      if (GET_CODE (tem) == ABS)
		return const_true_rtx;
	    }
	  break;

	default:
	  break;
	}

      return 0;
    }
  /* If we reach here, EQUAL, OP0LT, OP0LTU, OP1LT, and OP1LTU are set
     as appropriate.  */
  switch (code)
    {
    case EQ:
    case UNEQ:
      return equal ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return ! equal ? const_true_rtx : const0_rtx;
    case LT:
    case UNLT:
      return op0lt ? const_true_rtx : const0_rtx;
    case GT:
    case UNGT:
      return op1lt ? const_true_rtx : const0_rtx;
    case LTU:
      return op0ltu ? const_true_rtx : const0_rtx;
    case GTU:
      return op1ltu ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return equal || op0lt ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return equal || op1lt ? const_true_rtx : const0_rtx;
    case LEU:
      return equal || op0ltu ? const_true_rtx : const0_rtx;
    case GEU:
      return equal || op1ltu ? const_true_rtx : const0_rtx;
    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      abort ();
    }
}
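
/* Illustrative sketch, not part of the compiler (disabled with #if 0).
   Comparing two CONST_INTs in VOIDmode exercises the "infinite
   precision" constant-folding path above.  */
#if 0
static void
example_relational (void)
{
  /* 3 < 5 signed folds to const_true_rtx.  */
  rtx t = simplify_relational_operation (LT, VOIDmode,
					 GEN_INT (3), GEN_INT (5));

  /* (unsigned) -1 is the largest unsigned value, so -1 <u 1
     folds to const0_rtx.  */
  rtx f = simplify_relational_operation (LTU, VOIDmode,
					 constm1_rtx, const1_rtx);
}
#endif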
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
			    enum machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (GET_CODE (op0) == CONST_INT
	  && GET_CODE (op1) == CONST_INT
	  && GET_CODE (op2) == CONST_INT
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant.  */
	  HOST_WIDE_INT val = INTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return GEN_INT (val);
	}
      break;
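
      /* Worked example: (zero_extract:SI (const_int 0x6c) (const_int 4)
	 (const_int 2)) on a !BITS_BIG_ENDIAN target shifts 0x6c right by
	 2 and masks with 0xf, yielding (const_int 11).  With SIGN_EXTRACT
	 the 4-bit field 0xb has its top bit set, so it is sign-extended
	 to (const_int -5).  */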
    case IF_THEN_ELSE:
      if (GET_CODE (op0) == CONST_INT)
	return op0 != const0_rtx ? op1 : op2;

      /* Convert a == b ? b : a to "a".  */
      if (GET_CODE (op0) == NE && ! side_effects_p (op0)
	  && !HONOR_NANS (mode)
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && rtx_equal_p (XEXP (op0, 1), op2))
	return op1;
      else if (GET_CODE (op0) == EQ && ! side_effects_p (op0)
	       && !HONOR_NANS (mode)
	       && rtx_equal_p (XEXP (op0, 1), op1)
	       && rtx_equal_p (XEXP (op0, 0), op2))
	return op2;
      else if (GET_RTX_CLASS (GET_CODE (op0)) == '<' && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), cmp_mode,
						XEXP (op0, 0), XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp == const0_rtx)
	    return op2;
	  else if (temp == const_true_rtx)
	    return op1;
	  else if (temp)
	    abort ();

	  /* Look for happy constants in op1 and op2.  */
	  if (GET_CODE (op1) == CONST_INT && GET_CODE (op2) == CONST_INT)
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return gen_rtx_fmt_ee (code, mode, XEXP (op0, 0), XEXP (op0, 1));
	    }
	}
      break;
    case VEC_MERGE:
      if (GET_MODE (op0) != mode
	  || GET_MODE (op1) != mode
	  || !VECTOR_MODE_P (mode))
	abort ();
      op2 = avoid_constant_pool_reference (op2);
      if (GET_CODE (op2) == CONST_INT)
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  int mask = (1 << n_elts) - 1;

	  if (!(INTVAL (op2) & mask))
	    return op1;
	  if ((INTVAL (op2) & mask) == mask)
	    return op0;

	  op0 = avoid_constant_pool_reference (op0);
	  op1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (op0) == CONST_VECTOR
	      && GET_CODE (op1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
				    ? CONST_VECTOR_ELT (op0, i)
				    : CONST_VECTOR_ELT (op1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}
      break;

    default:
      abort ();
    }

  return 0;
}
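
/* Illustrative sketch, not part of the compiler (disabled with #if 0).
   A constant IF_THEN_ELSE condition selects one arm outright, per the
   CONST_INT test at the top of the IF_THEN_ELSE case above.  */
#if 0
static void
example_ternary (void)
{
  /* The condition is the nonzero constant 1, so the result is
     expected to be the "then" arm, (const_int 10).  */
  rtx r = simplify_ternary_operation (IF_THEN_ELSE, SImode, SImode,
				      const1_rtx, GEN_INT (10),
				      GEN_INT (20));
}
#endif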
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplification is possible.  */

rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (outermode == innermode && !byte)
    return op;
  /* Simplify subregs of vector constants.  */
  if (GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (innermode));
      const unsigned int offset = byte / elt_size;
      rtx elt;

      if (GET_MODE_INNER (innermode) == outermode)
	{
	  elt = CONST_VECTOR_ELT (op, offset);

	  /* ?? We probably don't need this copy_rtx because constants
	     can be shared.  ?? */

	  return copy_rtx (elt);
	}
      else if (GET_MODE_INNER (innermode) == GET_MODE_INNER (outermode)
	       && GET_MODE_SIZE (innermode) > GET_MODE_SIZE (outermode))
	{
	  return (gen_rtx_CONST_VECTOR
		  (outermode,
		   gen_rtvec_v (GET_MODE_NUNITS (outermode),
				&CONST_VECTOR_ELT (op, offset))));
	}
      else if (GET_MODE_CLASS (outermode) == MODE_INT
	       && (GET_MODE_SIZE (outermode) % elt_size == 0))
	{
	  /* This happens when the target register size is smaller than
	     the vector mode, and we synthesize operations with vectors
	     of elements that are smaller than the register size.  */
	  HOST_WIDE_INT sum = 0, high = 0;
	  unsigned n_elts = (GET_MODE_SIZE (outermode) / elt_size);
	  unsigned i = BYTES_BIG_ENDIAN ? offset : offset + n_elts - 1;
	  unsigned step = BYTES_BIG_ENDIAN ? 1 : -1;
	  int shift = BITS_PER_UNIT * elt_size;
	  unsigned HOST_WIDE_INT unit_mask;

	  unit_mask = (unsigned HOST_WIDE_INT) -1
		      >> (sizeof (HOST_WIDE_INT) * BITS_PER_UNIT - shift);

	  for (; n_elts--; i += step)
	    {
	      elt = CONST_VECTOR_ELT (op, i);
	      if (GET_CODE (elt) == CONST_DOUBLE
		  && GET_MODE_CLASS (GET_MODE (elt)) == MODE_FLOAT)
		{
		  elt = gen_lowpart_common (int_mode_for_mode (GET_MODE (elt)),
					    elt);
		  if (! elt)
		    return NULL_RTX;
		}
	      if (GET_CODE (elt) != CONST_INT)
		return NULL_RTX;
	      /* Avoid overflow.  */
	      if (high >> (HOST_BITS_PER_WIDE_INT - shift))
		return NULL_RTX;
	      high = high << shift | sum >> (HOST_BITS_PER_WIDE_INT - shift);
	      sum = (sum << shift) + (INTVAL (elt) & unit_mask);
	    }
	  if (GET_MODE_BITSIZE (outermode) <= HOST_BITS_PER_WIDE_INT)
	    return GEN_INT (trunc_int_for_mode (sum, outermode));
	  else if (GET_MODE_BITSIZE (outermode) == 2 * HOST_BITS_PER_WIDE_INT)
	    return immed_double_const (sum, high, outermode);
	  else
	    return NULL_RTX;
	}
      else if (GET_MODE_CLASS (outermode) == MODE_INT
	       && (elt_size % GET_MODE_SIZE (outermode) == 0))
	{
	  enum machine_mode new_mode
	    = int_mode_for_mode (GET_MODE_INNER (innermode));
	  int subbyte = byte % elt_size;

	  op = simplify_subreg (new_mode, op, innermode, byte - subbyte);
	  if (! op)
	    return NULL_RTX;
	  return simplify_subreg (outermode, op, new_mode, subbyte);
	}
      else if (GET_MODE_CLASS (outermode) == MODE_INT)
	/* This shouldn't happen, but let's not do anything stupid.  */
	return NULL_RTX;
    }
  /* Attempt to simplify constant to non-SUBREG expression.  */
  if (CONSTANT_P (op))
    {
      int offset, part;
      unsigned HOST_WIDE_INT val = 0;

      if (VECTOR_MODE_P (outermode))
	{
	  /* Construct a CONST_VECTOR from individual subregs.  */
	  enum machine_mode submode = GET_MODE_INNER (outermode);
	  int subsize = GET_MODE_UNIT_SIZE (outermode);
	  int i, elts = GET_MODE_NUNITS (outermode);
	  rtvec v = rtvec_alloc (elts);
	  rtx elt;

	  for (i = 0; i < elts; i++, byte += subsize)
	    {
	      /* This might fail, e.g. if taking a subreg from a SYMBOL_REF.  */
	      /* ??? It would be nice if we could actually make such subregs
		 on targets that allow such relocations.  */
	      if (byte >= GET_MODE_SIZE (innermode))
		elt = CONST0_RTX (submode);
	      else
		elt = simplify_subreg (submode, op, innermode, byte);
	      if (! elt)
		return NULL_RTX;
	      RTVEC_ELT (v, i) = elt;
	    }
	  return gen_rtx_CONST_VECTOR (outermode, v);
	}

      /* ??? This code is partly redundant with code below, but can handle
	 the subregs of floats and similar corner cases.
	 Later we should move all simplification code here and rewrite
	 GEN_LOWPART_IF_POSSIBLE, GEN_HIGHPART, OPERAND_SUBWORD and friends
	 using SIMPLIFY_SUBREG.  */
      if (subreg_lowpart_offset (outermode, innermode) == byte
	  && GET_CODE (op) != CONST_VECTOR)
	{
	  rtx new = gen_lowpart_if_possible (outermode, op);
	  if (new)
	    return new;
	}

      /* The same comment as above applies here.  */
      if (GET_MODE_SIZE (outermode) == UNITS_PER_WORD
	  && GET_MODE_SIZE (innermode) > UNITS_PER_WORD
	  && GET_MODE_CLASS (outermode) == MODE_INT)
	{
	  rtx new = constant_subword (op,
				      (byte / UNITS_PER_WORD),
				      innermode);
	  if (new)
	    return new;
	}
      if (GET_MODE_CLASS (outermode) != MODE_INT
	  && GET_MODE_CLASS (outermode) != MODE_CC)
	{
	  enum machine_mode new_mode = int_mode_for_mode (outermode);

	  if (new_mode != innermode || byte != 0)
	    {
	      op = simplify_subreg (new_mode, op, innermode, byte);
	      if (! op)
		return NULL_RTX;
	      return simplify_subreg (outermode, op, new_mode, 0);
	    }
	}
      offset = byte * BITS_PER_UNIT;

      switch (GET_CODE (op))
	{
	case CONST_DOUBLE:
	  if (GET_MODE (op) != VOIDmode)
	    break;

	  /* We can't handle this case yet.  */
	  if (GET_MODE_BITSIZE (outermode) >= HOST_BITS_PER_WIDE_INT)
	    return NULL_RTX;

	  part = offset >= HOST_BITS_PER_WIDE_INT;
	  if ((BITS_PER_WORD > HOST_BITS_PER_WIDE_INT
	       && BYTES_BIG_ENDIAN)
	      || (BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
		  && WORDS_BIG_ENDIAN))
	    part = ! part;
	  val = part ? CONST_DOUBLE_HIGH (op) : CONST_DOUBLE_LOW (op);
	  offset %= HOST_BITS_PER_WIDE_INT;

	  /* We've already picked the word we want from a double, so
	     pretend this is actually an integer.  */
	  innermode = mode_for_size (HOST_BITS_PER_WIDE_INT, MODE_INT, 0);

	  /* FALLTHROUGH */
	case CONST_INT:
	  if (GET_CODE (op) == CONST_INT)
	    val = INTVAL (op);

	  /* We don't handle synthesizing of non-integral constants yet.  */
	  if (GET_MODE_CLASS (outermode) != MODE_INT)
	    return NULL_RTX;

	  if (BYTES_BIG_ENDIAN || WORDS_BIG_ENDIAN)
	    {
	      if (WORDS_BIG_ENDIAN)
		offset = (GET_MODE_BITSIZE (innermode)
			  - GET_MODE_BITSIZE (outermode) - offset);
	      if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN
		  && GET_MODE_SIZE (outermode) < UNITS_PER_WORD)
		offset = (offset + BITS_PER_WORD - GET_MODE_BITSIZE (outermode)
			  - 2 * (offset % BITS_PER_WORD));
	    }

	  if (offset >= HOST_BITS_PER_WIDE_INT)
	    return ((HOST_WIDE_INT) val < 0) ? constm1_rtx : const0_rtx;

	  val >>= offset;
	  if (GET_MODE_BITSIZE (outermode) < HOST_BITS_PER_WIDE_INT)
	    val = trunc_int_for_mode (val, outermode);
	  return GEN_INT (val);

	default:
	  break;
	}
    }
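
  /* Worked example: on a little-endian target,
     (subreg:QI (const_int 0x1234) 1) with HImode as INNERMODE reaches
     the CONST_INT case with offset = 8; val >>= 8 leaves 0x12, so the
     result is (const_int 0x12).  Byte 0 would yield (const_int 0x34).  */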
  /* Changing the mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx new;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents the offset, as if the value were stored
	 in memory.  The irritating exception is the paradoxical subreg,
	 where we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether the resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case the resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In a paradoxical subreg, see if we are still looking at the
	     lower part.  If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      new = simplify_subreg (outermode, SUBREG_REG (op),
			     GET_MODE (SUBREG_REG (op)),
			     final_offset);
      if (new)
	return new;
      return gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
    }
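
  /* Worked example: (subreg:SI (subreg:DI (reg:TI R) 8) 0) arrives here
     with final_offset = 8; the recursion cannot simplify a pseudo any
     further, so the two subregs are folded into the single
     (subreg:SI (reg:TI R) 8).  */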
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (GET_CODE (op) == REG
      && (! REG_FUNCTION_VALUE_P (op)
	  || ! rtx_equal_function_value_matters)
      && REGNO (op) < FIRST_PSEUDO_REGISTER
#ifdef CANNOT_CHANGE_MODE_CLASS
      && ! (REG_CANNOT_CHANGE_MODE_P (REGNO (op), innermode, outermode)
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_INT
	    && GET_MODE_CLASS (innermode) != MODE_COMPLEX_FLOAT)
#endif
      && ((reload_completed && !frame_pointer_needed)
	  || (REGNO (op) != FRAME_POINTER_REGNUM
#if HARD_FRAME_POINTER_REGNUM != FRAME_POINTER_REGNUM
	      && REGNO (op) != HARD_FRAME_POINTER_REGNUM
#endif
	     ))
#if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
      && REGNO (op) != ARG_POINTER_REGNUM
#endif
      && REGNO (op) != STACK_POINTER_REGNUM
      && subreg_offset_representable_p (REGNO (op), innermode,
					byte, outermode))
    {
      rtx tem = gen_rtx_SUBREG (outermode, op, byte);
      int final_regno = subreg_hard_regno (tem, 0);

      /* ??? We do allow it if the current REG is not valid for
	 its mode.  This is a kludge to work around how float/complex
	 arguments are passed on 32-bit SPARC and should be fixed.  */
      if (HARD_REGNO_MODE_OK (final_regno, outermode)
	  || ! HARD_REGNO_MODE_OK (REGNO (op), innermode))
	{
	  rtx x = gen_rtx_REG_offset (op, outermode, final_regno, byte);

	  /* Propagate the original regno.  We don't have any way to specify
	     the offset inside the original regno, so do so only for the
	     lowpart.  The information is used only by alias analysis, which
	     cannot grok partial registers anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (GET_CODE (op) == MEM
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
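
  /* Worked example: narrowing a memory reference merely readjusts the
     address, so (subreg:SI (mem:DI (reg A)) 4) becomes
     (mem:SI (plus (reg A) (const_int 4))), provided the address is not
     mode-dependent.  */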
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      int is_realpart = byte < GET_MODE_UNIT_SIZE (innermode);
      rtx part = is_realpart ? XEXP (op, 0) : XEXP (op, 1);
      unsigned int final_offset;
      rtx res;

      final_offset = byte % (GET_MODE_UNIT_SIZE (innermode));
      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      /* We can at least simplify it by referring directly to the
	 relevant part.  */
      return gen_rtx_SUBREG (outermode, part, final_offset);
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx new;

  /* Little bit of sanity checking.  */
  if (innermode == VOIDmode || outermode == VOIDmode
      || innermode == BLKmode || outermode == BLKmode)
    abort ();

  if (GET_MODE (op) != innermode
      && GET_MODE (op) != VOIDmode)
    abort ();

  if (byte % GET_MODE_SIZE (outermode)
      || byte >= GET_MODE_SIZE (innermode))
    abort ();

  if (GET_CODE (op) == QUEUED)
    return NULL_RTX;

  new = simplify_subreg (outermode, op, innermode, byte);
  if (new)
    return new;

  if (GET_CODE (op) == SUBREG || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  return gen_rtx_SUBREG (outermode, op, byte);
}
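
/* Illustrative sketch, not part of the compiler (disabled with #if 0).
   simplify_gen_subreg folds subregs of constants outright: the low
   SImode word of a DImode zero comes back as (const_int 0) rather than
   as a SUBREG expression.  */
#if 0
static void
example_gen_subreg (void)
{
  rtx lo = simplify_gen_subreg (SImode, const0_rtx, DImode, 0);
  /* lo is expected to be const0_rtx; no SUBREG is built.  */
}
#endif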
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which do not depend on pass-specific state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (rtx x)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  rtx temp;

  switch (GET_RTX_CLASS (code))
    {
    case '1':
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case 'c':
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */
    case '2':
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case '3':
    case 'b':
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case '<':
      temp = simplify_relational_operation (code,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0), XEXP (x, 1));
#ifdef FLOAT_STORE_FLAG_VALUE
      if (temp != 0 && GET_MODE_CLASS (mode) == MODE_FLOAT)
	{
	  if (temp == const0_rtx)
	    temp = CONST0_RTX (mode);
	  else
	    temp = CONST_DOUBLE_FROM_REAL_VALUE (FLOAT_STORE_FLAG_VALUE (mode),
						 mode);
	}
#endif
      return temp;

    case 'x':
      if (code == SUBREG)
	return simplify_gen_subreg (mode, SUBREG_REG (x),
				    GET_MODE (SUBREG_REG (x)),
				    SUBREG_BYTE (x));
      if (code == CONSTANT_P_RTX)
	{
	  if (CONSTANT_P (XEXP (x, 0)))
	    return const1_rtx;
	}
      break;

    case 'o':
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))