/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "diagnostic-core.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
 ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
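/* Illustrative example (added commentary, not from the original sources):
   with a 64-bit HOST_WIDE_INT, HWI_SIGN_EXTEND ((HOST_WIDE_INT) -5) yields
   (HOST_WIDE_INT) -1 while HWI_SIGN_EXTEND (5) yields 0, i.e. the high half
   a (low, high) pair would need if LOW were interpreted as signed.  */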
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    /* FIXME: We don't yet have a representation for wider modes.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}
/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (enum machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
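/* Worked example (illustrative comment, not from the original sources):
   for QImode, GET_MODE_PRECISION is 8, so the sign bit is 0x80.  Thus
   val_signbit_p (QImode, 0x80) and val_signbit_known_set_p (QImode, 0xff)
   both hold, while val_signbit_known_clear_p (QImode, 0x7f) holds.  */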
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
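/* Usage example (illustrative comment, not from the original sources):
   simplify_gen_binary (PLUS, SImode, x, const0_rtx) folds straight to X,
   and with the constant given first the commutative-operand swap above
   ensures the constant ends up second in any rtx that is generated.  */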
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
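/* Example (illustrative comment, not from the original sources): a load
   from a SYMBOL_REF address marked CONSTANT_POOL_ADDRESS_P whose pool slot
   holds a DFmode constant is returned as the CONST_DOUBLE itself, so later
   folding can see through the memory reference; any MEM that does not
   reference the pool comes back unchanged.  */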
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp, volatilep = 0;

            decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
                                        &mode, &unsignedp, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !host_integerp (toffset, 0)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += TREE_INT_CST_LOW (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X; if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
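/* Example (illustrative comment, not from the original sources): calling
   simplify_replace_rtx on (plus:SI (reg:SI 60) (const_int 4)), replacing
   (reg:SI 60) with (const_int 8), folds the rebuilt expression and returns
   (const_int 12) rather than (plus:SI (const_int 8) (const_int 4)).  */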
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */

static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (mode, XEXP (op, 0), -1);

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
      break;
    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (mode, XEXP (op, 0), 1);

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_INT_P (XEXP (op, 1))
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult A (neg B)).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
          return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_PRECISION (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;
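      /* Worked example (illustrative comment, not from the original
         sources): with STORE_FLAG_VALUE == 1 and SImode operands,
         (neg (lt x 0)) becomes (ashiftrt x 31); the arithmetic shift
         replicates the sign bit, giving -1 when x < 0 and 0 otherwise,
         which is exactly the negated flag value.  */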
    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes; we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
           ? (num_sign_bit_copies (op, GET_MODE (op))
              > (unsigned int) (GET_MODE_PRECISION (GET_MODE (op))
                                - GET_MODE_PRECISION (mode)))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && COMPARISON_P (op)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;
    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op,
                                                            0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x)  */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
                  && ((unsigned)significand_size (GET_MODE (op))
                      >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
                          - num_sign_bit_copies (XEXP (op, 0),
                                                 GET_MODE (XEXP (op, 0))))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;
    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /*  (float_extend (float_extend x)) is (float_extend x)

          (float_extend (float x)) is (float x) assuming that double
          rounding can't happen.
          */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && SCALAR_FLOAT_MODE_P (GET_MODE (op))
              && ((unsigned)significand_size (GET_MODE (op))
                  >= (GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || val_signbit_known_clear_p (GET_MODE (op),
                                        nonzero_bits (op, GET_MODE (op))))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
        return gen_rtx_NEG (mode, op);

      break;
    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;
    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;
    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;
    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;
    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == SIGN_EXTEND
               || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == SIGN_EXTEND
                  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              enum machine_mode lmode = GET_MODE (lhs);
              enum machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == ASHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == SIGN_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == ASHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == SIGN_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
            }
        }
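      /* Worked example (illustrative comment, not from the original
         sources): extending (mult:SI (sign_extend:SI x:HI)
         (sign_extend:SI y:HI)) to DImode counts 16 + 16 = 32 significant
         bits, which fits the 32-bit product, so the result is rewritten as
         (mult:DI (sign_extend:DI x:HI) (sign_extend:DI y:HI)).  */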
      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
         (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
        {
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));
        }

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.
         (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          enum machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
                                         ? SIGN_EXTEND : ZERO_EXTEND,
                                         mode, inner, tmode);
            }
        }

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;
    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == ZERO_EXTEND
               || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == ZERO_EXTEND
                  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              enum machine_mode lmode = GET_MODE (lhs);
              enum machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == LSHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == ZERO_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == LSHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == ZERO_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
            }
        }

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (zero_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          enum machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
            }
        }

#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  unsigned int op_width = GET_MODE_PRECISION (op_mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode
          || GET_MODE_PRECISION (op_mode) > 2 * HOST_BITS_PER_WIDE_INT)
        /* We should never get a negative number.  */
        gcc_assert (hv >= 0);
      else if (GET_MODE_PRECISION (op_mode) <= HOST_BITS_PER_WIDE_INT)
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (CONST_INT_P (op)
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          arg0 &= GET_MODE_MASK (mode);
          val = ffs_hwi (arg0);
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 1;
          break;

        case CLRSB:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            val = GET_MODE_PRECISION (mode) - 1;
          else if (arg0 >= 0)
            val = GET_MODE_PRECISION (mode) - floor_log2 (arg0) - 2;
          else
            val = GET_MODE_PRECISION (mode) - floor_log2 (~arg0) - 2;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_PRECISION (mode);
            }
          else
            val = ctz_hwi (arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            val = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;
                byte = (arg0 >> s) & 0xff;
                val |= byte << d;
              }
          }
          break;
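          /* Worked example (illustrative comment, not from the original
             sources): byte-swapping the SImode constant 0x12345678 walks
             s = 0, 8, 16, 24 and reassembles the bytes in reverse order,
             yielding 0x78563412.  */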
        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (op_width == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == op_width);
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & GET_MODE_MASK (op_mode);
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          op_width = GET_MODE_PRECISION (op_mode);
          if (op_width == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == op_width);
              val = arg0;
            }
          else if (op_width < HOST_BITS_PER_WIDE_INT)
            {
              val = arg0 & GET_MODE_MASK (op_mode);
              if (val_signbit_known_set_p (op_mode, val))
                val |= ~GET_MODE_MASK (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
        case SS_TRUNCATE:
        case US_TRUNCATE:
        case SS_NEG:
        case US_NEG:
        case SS_ABS:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || CONST_INT_P (op)))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 != 0)
            lv = ffs_hwi (l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + ffs_hwi (h1);
          else
            lv = 0;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_PRECISION (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_PRECISION (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_PRECISION (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = ctz_hwi (l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + ctz_hwi (h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_PRECISION (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            hv = 0;
            lv = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;

                if (s < HOST_BITS_PER_WIDE_INT)
                  byte = (l1 >> s) & 0xff;
                else
                  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

                if (d < HOST_BITS_PER_WIDE_INT)
                  lv |= byte << d;
                else
                  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
              }
          }
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (op_width > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || op_width > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (val_signbit_known_set_p (op_mode, lv))
                lv |= ~GET_MODE_MASK (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (mode)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = real_value_abs (&d);
          break;
        case NEG:
          d = real_value_negate (&d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode, unless changing
             mode class.  */
          if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
            real_convert (&d, mode, &d);
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2 * HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (unsigned HOST_WIDE_INT) (-1)
                   << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (unsigned HOST_WIDE_INT) (-1) << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2 * HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
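/* Worked example for the FIX clamping above (illustrative comment, not
   from the original sources): folding (fix:SI (const_double 3.0e10))
   clamps to the signed upper bound 0x7fffffff, since 3.0e10 exceeds the
   largest SImode value, and a NaN operand folds to const0_rtx, matching
   the middle-end's constant-folding semantics.  */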
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
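/* Worked example (illustrative comment, not from the original sources):
   for (plus (plus x (const_int 1)) (const_int 2)), the
   "(a op b) op c" -> "a op (b op c)" attempt folds the two constants,
   so the whole expression becomes (plus x (const_int 3)).  */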
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
           || GET_CODE (op0) == SYMBOL_REF
           || GET_CODE (op0) == LABEL_REF)
          && CONST_INT_P (op1))
        return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
                || GET_CODE (op1) == SYMBOL_REF
                || GET_CODE (op1) == LABEL_REF)
               && CONST_INT_P (op0))
        return plus_constant (mode, op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          double_int coeff0, coeff1;
          rtx lhs = op0, rhs = op1;

          coeff0 = double_int_one;
          coeff1 = double_int_one;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = double_int_minus_one;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0 = double_int_setbit (double_int_zero,
                                          INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1 = double_int_minus_one;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              coeff1 = shwi_to_double_int (INTVAL (XEXP (rhs, 1)));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1 = double_int_setbit (double_int_zero,
                                          INTVAL (XEXP (rhs, 1)));
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              double_int val;
              bool speed = optimize_function_for_speed_p (cfun);

              val = double_int_add (coeff0, coeff1);
              coeff = immed_double_int_const (val, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
                ? tem : orig;
            }
        }
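      /* Worked example (illustrative comment, not from the original
         sources): for (plus (mult x (const_int 3)) x), both sides reduce
         to X with coefficients 3 and 1, so the candidate replacement is
         (mult x (const_int 4)); it is kept only if set_src_cost says it
         is no more expensive than the original.  */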
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((CONST_INT_P (op1)
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == XOR
          && (CONST_INT_P (XEXP (op0, 1))
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
          && flag_associative_math)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;

    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;

    case MINUS:
2117 /* We can't assume x-x is 0 even with non-IEEE floating point,
2118 but since it is zero except in very strange circumstances, we
2119 will treat it as zero with -ffinite-math-only. */
2120 if (rtx_equal_p (trueop0
, trueop1
)
2121 && ! side_effects_p (op0
)
2122 && (!FLOAT_MODE_P (mode
) || !HONOR_NANS (mode
)))
2123 return CONST0_RTX (mode
);
2125 /* Change subtraction from zero into negation. (0 - x) is the
2126 same as -x when x is NaN, infinite, or finite and nonzero.
2127 But if the mode has signed zeros, and does not round towards
2128 -infinity, then 0 - 0 is 0, not -0. */
2129 if (!HONOR_SIGNED_ZEROS (mode
) && trueop0
== CONST0_RTX (mode
))
2130 return simplify_gen_unary (NEG
, mode
, op1
, mode
);
2132 /* (-1 - a) is ~a. */
2133 if (trueop0
== constm1_rtx
)
2134 return simplify_gen_unary (NOT
, mode
, op1
, mode
);
2136 /* Subtracting 0 has no effect unless the mode has signed zeros
2137 and supports rounding towards -infinity. In such a case,
2139 if (!(HONOR_SIGNED_ZEROS (mode
)
2140 && HONOR_SIGN_DEPENDENT_ROUNDING (mode
))
2141 && trueop1
== CONST0_RTX (mode
))
      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          double_int coeff0, negcoeff1;
          rtx lhs = op0, rhs = op1;

          coeff0 = double_int_one;
          negcoeff1 = double_int_minus_one;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0 = double_int_minus_one;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0 = shwi_to_double_int (INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0 = double_int_setbit (double_int_zero,
                                          INTVAL (XEXP (lhs, 1)));
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1 = double_int_one;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              negcoeff1 = shwi_to_double_int (-INTVAL (XEXP (rhs, 1)));
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              negcoeff1 = double_int_setbit (double_int_zero,
                                             INTVAL (XEXP (rhs, 1)));
              negcoeff1 = double_int_neg (negcoeff1);
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              double_int val;
              bool speed = optimize_function_for_speed_p (cfun);

              val = double_int_add (coeff0, negcoeff1);
              coeff = immed_double_int_const (val, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return set_src_cost (tem, speed) <= set_src_cost (orig, speed)
                     ? tem : orig;
            }
        }
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (CONST_INT_P (op1)
              || GET_CODE (op1) == CONST_DOUBLE))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
        return simplify_gen_binary (PLUS, mode,
                                    op0,
                                    neg_const_int (mode, op1));
      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }
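      /* Worked example (added for illustration, 4-bit values) for the
         rewrite above: x = 0b1011, y = 0b0110.  x & y = 0b0010, so
         x - (x & y) = 0b1001, and x & ~y = 0b1011 & 0b1001 = 0b1001,
         the same value; since x & y is a submask of x, the
         subtraction never borrows.  */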
      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }

      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }

      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
      break;

    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);
      if (GET_CODE (op0) == NEG)
        {
          rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
          /* If op1 is a MULT as well and simplify_unary_operation
             just moved the NEG to the second operand, simplify_gen_binary
             below could through simplify_associative_operation move
             the NEG around again and recurse endlessly.  */
          if (temp
              && GET_CODE (op1) == MULT
              && GET_CODE (temp) == MULT
              && XEXP (op1, 0) == XEXP (temp, 0)
              && GET_CODE (XEXP (temp, 1)) == NEG
              && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
            temp = NULL_RTX;
          if (temp)
            return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
        }
      if (GET_CODE (op1) == NEG)
        {
          rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
          /* If op0 is a MULT as well and simplify_unary_operation
             just moved the NEG to the second operand, simplify_gen_binary
             below could through simplify_associative_operation move
             the NEG around again and recurse endlessly.  */
          if (temp
              && GET_CODE (op0) == MULT
              && GET_CODE (temp) == MULT
              && XEXP (op0, 0) == XEXP (temp, 0)
              && GET_CODE (XEXP (temp, 1)) == NEG
              && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
            temp = NULL_RTX;
          if (temp)
            return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
        }
      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;
      /* Convert multiply by constant power of two into shift unless
         we are still generating RTL.  This test is a kludge.  */
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (UINTVAL (trueop1))) >= 0
          /* If the mode is larger than the host word size, and the
             uppermost bit is set, then this isn't a power of two due
             to implicit sign extension.  */
          && (width <= HOST_BITS_PER_WIDE_INT
              || val != HOST_BITS_PER_WIDE_INT - 1))
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
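      /* Worked example (added for illustration): trueop1 == 8 gives
         exact_log2 (8) == 3, so (mult x 8) becomes (ashift x 3).  The
         guard above rejects a value whose only set bit is the host
         word's sign bit, since sign extension makes it look like a
         power of two when it is not.  */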
      /* Likewise for multipliers wider than a word.  */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && (GET_MODE (trueop1) == VOIDmode
              || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
          && GET_MODE (op0) == mode
          && CONST_DOUBLE_LOW (trueop1) == 0
          && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0
          && (val < 2 * HOST_BITS_PER_WIDE_INT - 1
              || GET_MODE_BITSIZE (mode) <= 2 * HOST_BITS_PER_WIDE_INT))
        return simplify_gen_binary (ASHIFT, mode, op0,
                                    GEN_INT (val + HOST_BITS_PER_WIDE_INT));

      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
          && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
          && GET_MODE (op0) == mode)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (!HONOR_SNANS (mode)
              && REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }
      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
          && GET_CODE (op0) == NEG
          && GET_CODE (op1) == NEG
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
          && GET_CODE (op0) == ABS
          && GET_CODE (op1) == ABS
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;
    case IOR:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && SCALAR_INT_MODE_P (mode))
        return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0)
        return op1;
      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
          && CONST_INT_P (trueop1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          HOST_WIDE_INT mask = GET_MODE_MASK (mode);
          HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT c2 = INTVAL (trueop1);

          /* If (C1&C2) == C1, then (X&C1)|C2 becomes X.  */
          if ((c1 & c2) == c1
              && !side_effects_p (XEXP (op0, 0)))
            return trueop1;

          /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
          if (((c1|c2) & mask) == mask)
            return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

          /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
          if (((c1 & ~c2) & mask) != (c1 & mask))
            {
              tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
                                         gen_int_mode (c1 & ~c2, mode));
              return simplify_gen_binary (IOR, mode, tem, op1);
            }
        }
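      /* Worked example (added for illustration, 8-bit values) for the
         last rewrite above: C1 = 0x0f, C2 = 0x0c gives c1 & ~c2 = 0x03,
         so (X & 0x0f) | 0x0c becomes (X & 0x03) | 0x0c; the bits
         already forced on by C2 are dropped from C1.  */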
      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
         mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
          || GET_CODE (op1) == SUBREG)
        {
          opleft = op1;
          opright = op0;
        }
      else
        {
          opright = op1;
          opleft = op0;
        }

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
          && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
          && CONST_INT_P (XEXP (opleft, 1))
          && CONST_INT_P (XEXP (opright, 1))
          && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_PRECISION (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
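      /* Worked example (added for illustration): in SImode,
         (ior (ashift x 24) (lshiftrt x 8)) has 24 + 8 == 32
         == GET_MODE_PRECISION (SImode), so it becomes
         (rotate x 24).  */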
      /* Same, but for ashift that has been "simplified" to a wider mode
         by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
          && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
          && GET_CODE (opright) == LSHIFTRT
          && GET_CODE (XEXP (opright, 0)) == SUBREG
          && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
          && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
          && (GET_MODE_SIZE (GET_MODE (opleft))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
          && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
                          SUBREG_REG (XEXP (opright, 0)))
          && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
          && CONST_INT_P (XEXP (opright, 1))
          && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
              == GET_MODE_PRECISION (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0),
                               XEXP (SUBREG_REG (opleft), 1));
      /* If we have (ior (and (X C1) C2)), simplify this by making
         C1 as small as possible if C1 actually changes.  */
      if (CONST_INT_P (op1)
          && (HWI_COMPUTABLE_MODE_P (mode)
              || INTVAL (op1) > 0)
          && GET_CODE (op0) == AND
          && CONST_INT_P (XEXP (op0, 1))
          && CONST_INT_P (op1)
          && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
        return simplify_gen_binary (IOR, mode,
                                    simplify_gen_binary
                                      (AND, mode, XEXP (op0, 0),
                                       GEN_INT (UINTVAL (XEXP (op0, 1))
                                                & ~UINTVAL (op1))),
                                    op1);

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
         a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
         the PLUS does not affect any of the bits in OP1: then we can do
         the IOR as a PLUS and we can associate.  This is valid if OP1
         can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
          && GET_CODE (XEXP (op0, 0)) == PLUS
          && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
          && CONST_INT_P (XEXP (op0, 1))
          && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
        {
          int count = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT mask = INTVAL (trueop1) << count;

          if (mask >> count == INTVAL (trueop1)
              && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        plus_constant (mode, XEXP (op0, 0),
                                                       mask),
                                        XEXP (op0, 1));
        }

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case XOR:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
        return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((CONST_INT_P (op1)
           || GET_CODE (op1) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((CONST_INT_P (op1)
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == PLUS
          && (CONST_INT_P (XEXP (op0, 1))
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, XEXP (op0, 1)))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
         convert them into an IOR.  This helps to detect rotation encoded
         using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
          && (nonzero_bits (op0, mode)
              & nonzero_bits (op1, mode)) == 0)
        return (simplify_gen_binary (IOR, mode, op0, op1));
      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
         Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
         (NOT y).  */
      {
        int num_negated = 0;

        if (GET_CODE (op0) == NOT)
          num_negated++, op0 = XEXP (op0, 0);
        if (GET_CODE (op1) == NOT)
          num_negated++, op1 = XEXP (op1, 0);

        if (num_negated == 2)
          return simplify_gen_binary (XOR, mode, op0, op1);
        else if (num_negated == 1)
          return simplify_gen_unary (NOT, mode,
                                     simplify_gen_binary (XOR, mode, op0, op1),
                                     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
         correspond to a machine insn or result in further simplifications
         if B is a constant.  */

      if (GET_CODE (op0) == AND
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      else if (GET_CODE (op0) == AND
               && rtx_equal_p (XEXP (op0, 0), op1)
               && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);
      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
         we can transform like this:
            (A&B)^C == ~(A&B)&C | ~C&(A&B)
                    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
                    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
         Attempt a few simplifications when B and C are both constants.  */
      if (GET_CODE (op0) == AND
          && CONST_INT_P (op1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          rtx a = XEXP (op0, 0);
          rtx b = XEXP (op0, 1);
          rtx c = op1;
          HOST_WIDE_INT bval = INTVAL (b);
          HOST_WIDE_INT cval = INTVAL (c);

          rtx na_c
            = simplify_binary_operation (AND, mode,
                                         simplify_gen_unary (NOT, mode, a, mode),
                                         c);
          if ((~cval & bval) == 0)
            {
              /* Try to simplify ~A&C | ~B&C.  */
              if (na_c != NULL_RTX)
                return simplify_gen_binary (IOR, mode, na_c,
                                            GEN_INT (~bval & cval));
            }
          else
            {
              /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
              if (na_c == const0_rtx)
                {
                  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
                                                    GEN_INT (~cval & bval));
                  return simplify_gen_binary (IOR, mode, a_nc_b,
                                              GEN_INT (~bval & cval));
                }
            }
        }
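      /* Worked example (added for illustration, 8-bit values) for the
         block above: B = 0x0f, C = 0xf0 gives ~C & B = 0x0f, nonzero,
         so the second branch applies; if ~A & 0xf0 simplified to zero,
         the result is (A & 0x0f) | (~0x0f & 0xf0) = (A & 0x0f) | 0xf0,
         equal to (A & 0x0f) ^ 0xf0 because the two halves share no
         bits.  */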
      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
         comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
         is (lt foo (const_int 0)), so we can perform the above
         simplification if STORE_FLAG_VALUE is 1.  */

      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && GET_CODE (op0) == LSHIFTRT
          && CONST_INT_P (XEXP (op0, 1))
          && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
        return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
         when STORE_FLAG_VALUE is the sign bit.  */
      if (val_signbit_p (mode, STORE_FLAG_VALUE)
          && trueop1 == const_true_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return trueop1;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
        return op0;
      if (HWI_COMPUTABLE_MODE_P (mode))
        {
          HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
          HOST_WIDE_INT nzop1;
          if (CONST_INT_P (trueop1))
            {
              HOST_WIDE_INT val1 = INTVAL (trueop1);
              /* If we are turning off bits already known off in OP0, we need
                 not do an AND.  */
              if ((nzop0 & ~val1) == 0)
                return op0;
            }
          nzop1 = nonzero_bits (trueop1, mode);
          /* If we are clearing all the nonzero bits, the result is zero.  */
          if ((nzop1 & nzop0) == 0
              && !side_effects_p (op0) && !side_effects_p (op1))
            return CONST0_RTX (mode);
        }
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);
      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
         there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
           || GET_CODE (op0) == ZERO_EXTEND)
          && CONST_INT_P (trueop1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
              & UINTVAL (trueop1)) == 0)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
                                     gen_int_mode (INTVAL (trueop1),
                                                   imode));
          return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
        }

      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
         we might be able to further simplify the AND with X and potentially
         remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
        {
          rtx x = XEXP (op0, 0);
          enum machine_mode xmode = GET_MODE (x);
          tem = simplify_gen_binary (AND, xmode, x,
                                     gen_int_mode (INTVAL (trueop1), xmode));
          return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
        }

      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
          && CONST_INT_P (trueop1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
          return simplify_gen_binary (IOR, mode,
                                      simplify_gen_binary (AND, mode,
                                                           XEXP (op0, 0), op1),
                                      gen_int_mode (tmp, mode));
        }
      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
         insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;
      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
         ((A & N) + B) & M -> (A + B) & M
         Similarly if (N & M) == 0,
         ((A | N) + B) & M -> (A + B) & M
         and for - instead of + and/or ^ instead of |.
         Also, if (N & M) == 0, then
         (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
          && HWI_COMPUTABLE_MODE_P (mode)
          && ~UINTVAL (trueop1)
          && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
          && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
        {
          rtx pmop[2];
          int which;

          pmop[0] = XEXP (op0, 0);
          pmop[1] = XEXP (op0, 1);

          if (CONST_INT_P (pmop[1])
              && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
            return simplify_gen_binary (AND, mode, pmop[0], op1);

          for (which = 0; which < 2; which++)
            {
              tem = pmop[which];
              switch (GET_CODE (tem))
                {
                case AND:
                  if (CONST_INT_P (XEXP (tem, 1))
                      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
                         == UINTVAL (trueop1))
                    pmop[which] = XEXP (tem, 0);
                  break;
                case IOR:
                case XOR:
                  if (CONST_INT_P (XEXP (tem, 1))
                      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
                    pmop[which] = XEXP (tem, 0);
                  break;
                default:
                  break;
                }
            }

          if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
            {
              tem = simplify_gen_binary (GET_CODE (op0), mode,
                                         pmop[0], pmop[1]);
              return simplify_gen_binary (code, mode, tem, op1);
            }
        }
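      /* Worked example (added for illustration) for the block above:
         with M = 7 (0b111) and op0 = (plus (ior A 8) B), 8 & 7 == 0
         means the IOR cannot affect the low three bits, so the
         expression is rewritten as (and (plus A B) 7).  */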
      /* (and X (ior (not X) Y) -> (and X Y) */
      if (GET_CODE (op1) == IOR
          && GET_CODE (XEXP (op1, 0)) == NOT
          && op0 == XEXP (XEXP (op1, 0), 0))
        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
          && GET_CODE (XEXP (op0, 0)) == NOT
          && op1 == XEXP (XEXP (op0, 0), 0))
        return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
        return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (UINTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;

    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          /* Maybe change 0.0 / x to 0.0.  This transformation isn't
             safe for modes with NaNs, since 0.0 / 0.0 will then be
             NaN rather than 0.0.  Nor is it safe for modes with signed
             zeros, since dividing 0 by a negative number gives -0.0  */
          if (trueop0 == CONST0_RTX (mode)
              && !HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && ! side_effects_p (op1))
            return op0;
          /* x/1.0 is x.  */
          if (trueop1 == CONST1_RTX (mode)
              && !HONOR_SNANS (mode))
            return op0;

          if (GET_CODE (trueop1) == CONST_DOUBLE
              && trueop1 != CONST0_RTX (mode))
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              /* x/-1.0 is -x.  */
              if (REAL_VALUES_EQUAL (d, dconstm1)
                  && !HONOR_SNANS (mode))
                return simplify_gen_unary (NEG, mode, op0, mode);

              /* Change FP division by a constant into multiplication.
                 Only do this with -freciprocal-math.  */
              if (flag_reciprocal_math
                  && !REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
            }
        }
      else if (SCALAR_INT_MODE_P (mode))
        {
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == CONST0_RTX (mode)
              && !cfun->can_throw_non_call_exceptions)
            {
              if (side_effects_p (op1))
                return simplify_gen_binary (AND, mode, op1, trueop0);
              return trueop0;
            }
          /* x/1 is x.  */
          if (trueop1 == CONST1_RTX (mode))
            return rtl_hooks.gen_lowpart_no_emit (mode, op0);
          /* x/-1 is -x.  */
          if (trueop1 == constm1_rtx)
            {
              rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              return simplify_gen_unary (NEG, mode, x, mode);
            }
        }
      break;
    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
          && exact_log2 (UINTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    GEN_INT (INTVAL (op1) - 1));
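      /* Worked example (added for illustration): with trueop1 == 8,
         exact_log2 (8) == 3 > 0, so the unsigned x % 8 is rewritten
         above as (and x 7).  */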
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      break;

    case ROTATERT:
    case ROTATE:
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
          && UINTVAL (trueop0) == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;
    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
        {
          val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
          if (val != INTVAL (op1))
            return simplify_gen_binary (code, mode, op0, GEN_INT (val));
        }
      break;

    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      goto canonicalize_shift;
    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
          && CONST_INT_P (trueop1)
          && STORE_FLAG_VALUE == 1
          && INTVAL (trueop1) < (HOST_WIDE_INT)width)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          unsigned HOST_WIDE_INT zero_val = 0;

          if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
              && zero_val == GET_MODE_PRECISION (imode)
              && INTVAL (trueop1) == exact_log2 (zero_val))
            return simplify_gen_relational (EQ, mode, imode,
                                            XEXP (op0, 0), const0_rtx);
        }
      goto canonicalize_shift;
    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
          && mode_signbit_p (mode, trueop1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
          && CONST_INT_P (trueop1)
          && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
    case SS_MULT:
    case US_MULT:
    case SS_DIV:
    case US_DIV:
      /* ??? There are simplifications that can be done.  */
      return 0;

    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);
          gcc_assert (XVECLEN (trueop1, 0) == 1);
          gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));

          if (GET_CODE (trueop0) == CONST_VECTOR)
            return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
                                                      (trueop1, 0, 0)));

          /* Extract a scalar element from a nested VEC_SELECT expression
             (with optional nested VEC_CONCAT expression).  Some targets
             (i386) extract scalar element from a vector using chain of
             nested VEC_SELECT expressions.  When input operand is a memory
             operand, this operation can be simplified to a simple scalar
             load from an offseted memory address.  */
          if (GET_CODE (trueop0) == VEC_SELECT)
            {
              rtx op0 = XEXP (trueop0, 0);
              rtx op1 = XEXP (trueop0, 1);

              enum machine_mode opmode = GET_MODE (op0);
              int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
              int n_elts = GET_MODE_SIZE (opmode) / elt_size;

              int i = INTVAL (XVECEXP (trueop1, 0, 0));
              int elem;

              rtvec vec;
              rtx tmp_op, tmp;

              gcc_assert (GET_CODE (op1) == PARALLEL);
              gcc_assert (i < n_elts);

              /* Select element, pointed by nested selector.  */
              elem = INTVAL (XVECEXP (op1, 0, i));

              /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
              if (GET_CODE (op0) == VEC_CONCAT)
                {
                  rtx op00 = XEXP (op0, 0);
                  rtx op01 = XEXP (op0, 1);

                  enum machine_mode mode00, mode01;
                  int n_elts00, n_elts01;

                  mode00 = GET_MODE (op00);
                  mode01 = GET_MODE (op01);

                  /* Find out number of elements of each operand.  */
                  if (VECTOR_MODE_P (mode00))
                    {
                      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
                      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
                    }
                  else
                    n_elts00 = 1;

                  if (VECTOR_MODE_P (mode01))
                    {
                      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
                      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
                    }
                  else
                    n_elts01 = 1;

                  gcc_assert (n_elts == n_elts00 + n_elts01);

                  /* Select correct operand of VEC_CONCAT
                     and adjust selector.  */
                  if (elem < n_elts01)
                    tmp_op = op00;
                  else
                    {
                      tmp_op = op01;
                      elem -= n_elts00;
                    }
                }
              else
                tmp_op = op0;

              vec = rtvec_alloc (1);
              RTVEC_ELT (vec, 0) = GEN_INT (elem);

              tmp = gen_rtx_fmt_ee (code, mode,
                                    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
              return tmp;
            }

          if (GET_CODE (trueop0) == VEC_DUPLICATE
              && GET_MODE (XEXP (trueop0, 0)) == mode)
            return XEXP (trueop0, 0);
        }
      else
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);

          if (GET_CODE (trueop0) == CONST_VECTOR)
            {
              int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
              unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
              for (i = 0; i < n_elts; i++)
                {
                  rtx x = XVECEXP (trueop1, 0, i);

                  gcc_assert (CONST_INT_P (x));
                  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
                                                       INTVAL (x));
                }

              return gen_rtx_CONST_VECTOR (mode, v);
            }

          if (XVECLEN (trueop1, 0) == 1
              && CONST_INT_P (XVECEXP (trueop1, 0, 0))
              && GET_CODE (trueop0) == VEC_CONCAT)
            {
              rtx vec = trueop0;
              int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

              /* Try to find the element in the VEC_CONCAT.  */
              while (GET_MODE (vec) != mode
                     && GET_CODE (vec) == VEC_CONCAT)
                {
                  HOST_WIDE_INT vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
                  if (offset < vec_size)
                    vec = XEXP (vec, 0);
                  else
                    {
                      offset -= vec_size;
                      vec = XEXP (vec, 1);
                    }
                  vec = avoid_constant_pool_reference (vec);
                }

              if (GET_MODE (vec) == mode)
                return vec;
            }
        }
      return 0;
    case VEC_CONCAT:
      {
        enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                      ? GET_MODE (trueop0)
                                      : GET_MODE_INNER (mode));
        enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                      ? GET_MODE (trueop1)
                                      : GET_MODE_INNER (mode));

        gcc_assert (VECTOR_MODE_P (mode));
        gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                    == GET_MODE_SIZE (mode));

        if (VECTOR_MODE_P (op0_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op0_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op0_mode);

        if (VECTOR_MODE_P (op1_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op1_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op1_mode);

        if ((GET_CODE (trueop0) == CONST_VECTOR
             || CONST_INT_P (trueop0)
             || GET_CODE (trueop0) == CONST_DOUBLE)
            && (GET_CODE (trueop1) == CONST_VECTOR
                || CONST_INT_P (trueop1)
                || GET_CODE (trueop1) == CONST_DOUBLE))
          {
            int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
            unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
            rtvec v = rtvec_alloc (n_elts);
            unsigned int i;
            unsigned in_n_elts = 1;

            if (VECTOR_MODE_P (op0_mode))
              in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
            for (i = 0; i < n_elts; i++)
              {
                if (i < in_n_elts)
                  {
                    if (!VECTOR_MODE_P (op0_mode))
                      RTVEC_ELT (v, i) = trueop0;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                  }
                else
                  {
                    if (!VECTOR_MODE_P (op1_mode))
                      RTVEC_ELT (v, i) = trueop1;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                           i - in_n_elts);
                  }
              }

            return gen_rtx_CONST_VECTOR (mode, v);
          }
      }
      return 0;

    default:
      gcc_unreachable ();
    }

  return 0;
}
rtx
simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
                                 rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      enum machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      enum machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (op0, i),
                                             CONST_VECTOR_ELT (op1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_INT_P (op0)
          || GET_CODE (op0) == CONST_DOUBLE
          || GET_CODE (op0) == CONST_FIXED)
      && (CONST_INT_P (op1)
          || GET_CODE (op1) == CONST_DOUBLE
          || GET_CODE (op1) == CONST_FIXED))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
        {
          gcc_assert (GET_CODE (op0) != CONST_VECTOR);
          gcc_assert (GET_CODE (op1) != CONST_VECTOR);

          RTVEC_ELT (v, 0) = op0;
          RTVEC_ELT (v, 1) = op1;
        }
      else
        {
          unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
          unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
          unsigned i;

          gcc_assert (GET_CODE (op0) == CONST_VECTOR);
          gcc_assert (GET_CODE (op1) == CONST_VECTOR);
          gcc_assert (op0_n_elts + op1_n_elts == n_elts);

          for (i = 0; i < op0_n_elts; ++i)
            RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
          for (i = 0; i < op1_n_elts; ++i)
            RTVEC_ELT (v, op0_n_elts+i) = XVECEXP (op1, 0, i);
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (SCALAR_FLOAT_MODE_P (mode)
      && GET_CODE (op0) == CONST_DOUBLE
      && GET_CODE (op1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
          || code == IOR
          || code == XOR)
        {
          long tmp0[4];
          long tmp1[4];
          REAL_VALUE_TYPE r;
          int i;

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)
            {
              switch (code)
                {
                case AND:
                  tmp0[i] &= tmp1[i];
                  break;
                case IOR:
                  tmp0[i] |= tmp1[i];
                  break;
                case XOR:
                  tmp0[i] ^= tmp1[i];
                  break;
                default:
                  gcc_unreachable ();
                }
            }
          real_from_target (&r, tmp0, mode);
          return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
        }
      else
        {
          REAL_VALUE_TYPE f0, f1, value, result;
          bool inexact;

          REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
          REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
          real_convert (&f0, mode, &f0);
          real_convert (&f1, mode, &f1);

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
            return 0;

          if (code == DIV
              && REAL_VALUES_EQUAL (f1, dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
            {
              int s0 = REAL_VALUE_NEGATIVE (f0);
              int s1 = REAL_VALUE_NEGATIVE (f1);

              switch (code)
                {
                case PLUS:
                  /* Inf + -Inf = NaN plus exception.  */
                  if (s0 != s1)
                    return 0;
                  break;
                case MINUS:
                  /* Inf - Inf = NaN plus exception.  */
                  if (s0 == s1)
                    return 0;
                  break;
                case DIV:
                  /* Inf / Inf = NaN plus exception.  */
                  return 0;
                default:
                  break;
                }
            }

          if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
                  || (REAL_VALUE_ISINF (f1)
                      && REAL_VALUES_EQUAL (f0, dconst0))))
            /* Inf * 0 = NaN plus exception.  */
            return 0;

          inexact = real_arithmetic (&value, rtx_to_tree_code (code),
                                     &f0, &f1);
          real_convert (&result, mode, &value);

          /* Don't constant fold this floating point operation if
             the result has overflowed and flag_trapping_math.  */

          if (flag_trapping_math
              && MODE_HAS_INFINITIES (mode)
              && REAL_VALUE_ISINF (result)
              && !REAL_VALUE_ISINF (f0)
              && !REAL_VALUE_ISINF (f1))
            /* Overflow plus exception.  */
            return 0;

          /* Don't constant fold this floating point operation if the
             result may depend upon the run-time rounding mode and
             flag_rounding_math is set, or if GCC's software emulation
             is unable to accurately represent the result.  */

          if ((flag_rounding_math
               || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
              && (inexact || !real_identical (&result, &value)))
            return 0;

          return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
        }
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_DOUBLE_INT
      && (CONST_DOUBLE_P (op0) || CONST_INT_P (op0))
      && (CONST_DOUBLE_P (op1) || CONST_INT_P (op1)))
    {
      double_int o0, o1, res, tmp;

      o0 = rtx_to_double_int (op0);
      o1 = rtx_to_double_int (op1);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          o1 = double_int_neg (o1);

          /* Fall through....  */

        case PLUS:
          res = double_int_add (o0, o1);
          break;

        case MULT:
          res = double_int_mul (o0, o1);
          break;

        case DIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0,
                                    o0.low, o0.high, o1.low, o1.high,
                                    &res.low, &res.high,
                                    &tmp.low, &tmp.high))
            return 0;
          break;

        case MOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0,
                                    o0.low, o0.high, o1.low, o1.high,
                                    &tmp.low, &tmp.high,
                                    &res.low, &res.high))
            return 0;
          break;

        case UDIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1,
                                    o0.low, o0.high, o1.low, o1.high,
                                    &res.low, &res.high,
                                    &tmp.low, &tmp.high))
            return 0;
          break;

        case UMOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1,
                                    o0.low, o0.high, o1.low, o1.high,
                                    &tmp.low, &tmp.high,
                                    &res.low, &res.high))
            return 0;
          break;

        case AND:
          res = double_int_and (o0, o1);
          break;

        case IOR:
          res = double_int_ior (o0, o1);
          break;

        case XOR:
          res = double_int_xor (o0, o1);
          break;

        case SMIN:
          res = double_int_smin (o0, o1);
          break;

        case SMAX:
          res = double_int_smax (o0, o1);
          break;

        case UMIN:
          res = double_int_umin (o0, o1);
          break;

        case UMAX:
          res = double_int_umax (o0, o1);
          break;

        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
          {
            unsigned HOST_WIDE_INT cnt;

            if (SHIFT_COUNT_TRUNCATED)
              o1 = double_int_zext (o1, GET_MODE_PRECISION (mode));

            if (!double_int_fits_in_uhwi_p (o1)
                || double_int_to_uhwi (o1) >= GET_MODE_PRECISION (mode))
              return 0;

            cnt = double_int_to_uhwi (o1);

            if (code == LSHIFTRT || code == ASHIFTRT)
              res = double_int_rshift (o0, cnt, GET_MODE_PRECISION (mode),
                                       code == ASHIFTRT);
            else if (code == ASHIFT)
              res = double_int_lshift (o0, cnt, GET_MODE_PRECISION (mode),
                                       true);
            else if (code == ROTATE)
              res = double_int_lrotate (o0, cnt, GET_MODE_PRECISION (mode));
            else /* code == ROTATERT */
              res = double_int_rrotate (o0, cnt, GET_MODE_PRECISION (mode));
          }
          break;

        default:
          return 0;
        }

      return immed_double_int_const (res, mode);
    }
  if (CONST_INT_P (op0) && CONST_INT_P (op1)
      && width <= HOST_BITS_PER_WIDE_INT && width != 0)
    {
      /* Get the integer argument values in two forms:
         zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

      arg0 = INTVAL (op0);
      arg1 = INTVAL (op1);

      if (width < HOST_BITS_PER_WIDE_INT)
        {
          arg0 &= GET_MODE_MASK (mode);
          arg1 &= GET_MODE_MASK (mode);

          arg0s = arg0;
          if (val_signbit_known_set_p (mode, arg0s))
            arg0s |= ~GET_MODE_MASK (mode);

          arg1s = arg1;
          if (val_signbit_known_set_p (mode, arg1s))
            arg1s |= ~GET_MODE_MASK (mode);
        }
      else
        {
          arg0s = arg0;
          arg1s = arg1;
        }
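      /* Worked example (added for illustration): in an 8-bit mode,
         op0 == 0xff yields arg0 == 255 (zero-extended) and
         arg0s == -1 (sign-extended); the signed cases below use
         ARG0S/ARG1S, the unsigned ones ARG0/ARG1.  */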
      /* Compute the value of the arithmetic.  */

      switch (code)
        {
        case PLUS:
          val = arg0s + arg1s;
          break;

        case MINUS:
          val = arg0s - arg1s;
          break;

        case MULT:
          val = arg0s * arg1s;
          break;

        case DIV:
          if (arg1s == 0
              || ((unsigned HOST_WIDE_INT) arg0s
                  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = arg0s / arg1s;
          break;

        case MOD:
          if (arg1s == 0
              || ((unsigned HOST_WIDE_INT) arg0s
                  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = arg0s % arg1s;
          break;

        case UDIV:
          if (arg1 == 0
              || ((unsigned HOST_WIDE_INT) arg0s
                  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = (unsigned HOST_WIDE_INT) arg0 / arg1;
          break;

        case UMOD:
          if (arg1 == 0
              || ((unsigned HOST_WIDE_INT) arg0s
                  == (unsigned HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = (unsigned HOST_WIDE_INT) arg0 % arg1;
          break;

        case AND:
          val = arg0 & arg1;
          break;

        case IOR:
          val = arg0 | arg1;
          break;

        case XOR:
          val = arg0 ^ arg1;
          break;

        case LSHIFTRT:
        case ASHIFT:
        case ASHIFTRT:
          /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
             the value is in range.  We can't return any old value for
             out-of-range arguments because either the middle-end (via
             shift_truncation_mask) or the back-end might be relying on
             target-specific knowledge.  Nor can we rely on
             shift_truncation_mask, since the shift might not be part of an
             ashlM3, lshrM3 or ashrM3 instruction.  */
          if (SHIFT_COUNT_TRUNCATED)
            arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
          else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
            return 0;

          val = (code == ASHIFT
                 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
                 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);

          /* Sign-extend the result for arithmetic right shifts.  */
          if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
            val |= ((unsigned HOST_WIDE_INT) (-1)) << (width - arg1);
          break;

        case ROTATERT:
          if (arg1 < 0)
            return 0;

          arg1 %= width;
          val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
                 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
          break;

        case ROTATE:
          if (arg1 < 0)
            return 0;

          arg1 %= width;
          val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
                 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
          break;

        case COMPARE:
          /* Do nothing here.  */
          return 0;

        case SMIN:
          val = arg0s <= arg1s ? arg0s : arg1s;
          break;

        case UMIN:
          val = ((unsigned HOST_WIDE_INT) arg0
                 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
          break;

        case SMAX:
          val = arg0s > arg1s ? arg0s : arg1s;
          break;

        case UMAX:
          val = ((unsigned HOST_WIDE_INT) arg0
                 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
          break;

        case SS_PLUS:
        case US_PLUS:
        case SS_MINUS:
        case US_MINUS:
        case SS_MULT:
        case US_MULT:
        case SS_DIV:
        case US_DIV:
        case SS_ASHIFT:
        case US_ASHIFT:
          /* ??? There are simplifications that can be done.  */
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }

  return NULL_RTX;
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static bool
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
            - commutative_operand_precedence (x));
  if (result)
    return result > 0;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);
  else
    return false;
}
static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2;
  int changed, n_constants = 0, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;

      for (i = 0; i < n_ops; i++)
        {
          rtx this_op = ops[i].op;
          int this_neg = ops[i].neg;
          enum rtx_code this_code = GET_CODE (this_op);

          switch (this_code)
            {
            case PLUS:
            case MINUS:
              if (n_ops == 7)
                return NULL_RTX;

              ops[n_ops].op = XEXP (this_op, 1);
              ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
              n_ops++;

              ops[i].op = XEXP (this_op, 0);
              input_ops++;
              changed = 1;
              canonicalized |= this_neg;
              break;

            case NEG:
              ops[i].op = XEXP (this_op, 0);
              ops[i].neg = ! this_neg;
              changed = 1;
              canonicalized = 1;
              break;

            case CONST:
              if (n_ops < 7
                  && GET_CODE (XEXP (this_op, 0)) == PLUS
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
                  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
                {
                  ops[i].op = XEXP (XEXP (this_op, 0), 0);
                  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
                  ops[n_ops].neg = this_neg;
                  n_ops++;
                  changed = 1;
                  canonicalized = 1;
                }
              break;

            case NOT:
              /* ~a -> (-a - 1) */
              if (n_ops != 7)
                {
                  ops[n_ops].op = CONSTM1_RTX (mode);
                  ops[n_ops++].neg = this_neg;
                  ops[i].op = XEXP (this_op, 0);
                  ops[i].neg = !this_neg;
                  changed = 1;
                  canonicalized = 1;
                }
              break;

            case CONST_INT:
              n_constants++;
              if (this_neg)
                {
                  ops[i].op = neg_const_int (mode, this_op);
                  ops[i].neg = 0;
                  changed = 1;
                  canonicalized = 1;
                }
              break;

            default:
              break;
            }
        }
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
         the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
        {
          lhs = gen_rtx_NEG (mode, ops[0].op);
          rhs = ops[1].op;
        }
      else if (ops[0].neg)
        {
          lhs = ops[1].op;
          rhs = ops[0].op;
        }
      else
        {
          lhs = ops[0].op;
          rhs = ops[1].op;
        }

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }
  /* Now simplify each pair of operands until nothing changes.  */
  do
    {
      /* Insertion sort is good enough for an eight-element array.  */
      for (i = 1; i < n_ops; i++)
        {
          struct simplify_plus_minus_op_data save;
          j = i - 1;
          if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
            continue;

          canonicalized = 1;
          save = ops[i];
          do
            ops[j + 1] = ops[j];
          while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
          ops[j + 1] = save;
        }

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
        for (j = i - 1; j >= 0; j--)
          {
            rtx lhs = ops[j].op, rhs = ops[i].op;
            int lneg = ops[j].neg, rneg = ops[i].neg;

            if (lhs != 0 && rhs != 0)
              {
                enum rtx_code ncode = PLUS;

                if (lneg != rneg)
                  {
                    ncode = MINUS;
                    if (lneg)
                      tem = lhs, lhs = rhs, rhs = tem;
                  }
                else if (swap_commutative_operands_p (lhs, rhs))
                  tem = lhs, lhs = rhs, rhs = tem;

                if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
                    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
                  {
                    rtx tem_lhs, tem_rhs;

                    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
                    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
                    tem = simplify_binary_operation (ncode, mode, tem_lhs, tem_rhs);

                    if (tem && !CONSTANT_P (tem))
                      tem = gen_rtx_CONST (GET_MODE (tem), tem);
                  }
                else
                  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

                /* Reject "simplifications" that just wrap the two
                   arguments in a CONST.  Failure to do so can result
                   in infinite recursion with simplify_binary_operation
                   when it calls us to simplify CONST operations.  */
                if (tem
                    && ! (GET_CODE (tem) == CONST
                          && GET_CODE (XEXP (tem, 0)) == ncode
                          && XEXP (XEXP (tem, 0), 0) == lhs
                          && XEXP (XEXP (tem, 0), 1) == rhs))
                  {
                    lneg &= rneg;
                    if (GET_CODE (tem) == NEG)
                      tem = XEXP (tem, 0), lneg = !lneg;
                    if (CONST_INT_P (tem) && lneg)
                      tem = neg_const_int (mode, tem), lneg = 0;

                    ops[i].op = tem;
                    ops[i].neg = lneg;
                    ops[j].op = NULL_RTX;
                    changed = 1;
                    canonicalized = 1;
                  }
              }
          }
      /* If nothing changed, fail.  */
      if (!canonicalized)
        return NULL_RTX;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
        if (ops[j].op)
          {
            ops[i] = ops[j];
            i++;
          }
      n_ops = i;
    }
  while (changed);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
        value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
                                         INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
                             mode, result, ops[i].op);

  return result;
}
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
         || GET_CODE (x) == MINUS
         || (GET_CODE (x) == CONST
             && GET_CODE (XEXP (x, 0)) == PLUS
             && CONSTANT_P (XEXP (XEXP (x, 0), 0))
             && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
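/* Illustrative note (added): simplify_plus_minus flattens a nested
   sum/difference into a signed multiset of operands, so
   (minus a (minus b c)) becomes {+a, -b, +c}; the operands are then
   sorted, simplified pairwise, and the expression is rebuilt, e.g. as
   (plus (minus a b) c).  */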
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, the operands
   must not both be VOIDmode.

   CMP_MODE specifies in which mode the comparison is done in, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
                               enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          if (tem == const0_rtx)
            return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
          {
            REAL_VALUE_TYPE val;
            val = FLOAT_STORE_FLAG_VALUE (mode);
            return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
          }
#else
          return NULL_RTX;
#endif
        }
      if (VECTOR_MODE_P (mode))
        {
          if (tem == const0_rtx)
            return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
          {
            int i, units;
            rtvec v;

            rtx val = VECTOR_STORE_FLAG_VALUE (mode);
            if (val == NULL_RTX)
              return NULL_RTX;
            if (val == const1_rtx)
              return CONST1_RTX (mode);

            units = GET_MODE_NUNITS (mode);
            v = rtvec_alloc (units);
            for (i = 0; i < units; i++)
              RTVEC_ELT (v, i) = val;
            return gen_rtx_raw_CONST_VECTOR (mode, v);
          }
#else
          return NULL_RTX;
#endif
        }

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
                                          trueop0, trueop1);
}

/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies in which
   mode the comparison is done in, so it is the mode of the operands.  */
static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
                                 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
         from it.  */
      if (code == NE)
        {
          if (GET_MODE (op0) == mode)
            return simplify_rtx (op0);
          else
            return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
      else if (code == EQ)
        {
          enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
          if (new_code != UNKNOWN)
            return simplify_gen_relational (new_code, mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
    }

  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
          || rtx_equal_p (op1, XEXP (op0, 1))))
    {
      rtx new_cmp
        = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
                                      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
                                    copy_rtx (XEXP (op0, 0)));

  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
        return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
        return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
        {
        case GE:
          /* Canonicalize (GE x 1) as (GT x 0).  */
          return simplify_gen_relational (GT, mode, cmp_mode,
                                          op0, const0_rtx);
        case GEU:
          /* Canonicalize (GEU x 1) as (NE x 0).  */
          return simplify_gen_relational (NE, mode, cmp_mode,
                                          op0, const0_rtx);
        case LT:
          /* Canonicalize (LT x 1) as (LE x 0).  */
          return simplify_gen_relational (LE, mode, cmp_mode,
                                          op0, const0_rtx);
        case LTU:
          /* Canonicalize (LTU x 1) as (EQ x 0).  */
          return simplify_gen_relational (EQ, mode, cmp_mode,
                                          op0, const0_rtx);
        default:
          break;
        }
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
        return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
        return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }
  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
         simplification case between:
            A + B == C  <--->  C - B == A,
         where A, B, and C are all constants with non-simplifiable expressions,
         usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
          && CONSTANT_P (x)
          && rtx_equal_p (c, XEXP (tem, 1)))
        return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }
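  /* Worked example (added for illustration) for the block above:
     (eq (plus x 3) 10) becomes (eq x 7), since invcode == MINUS
     gives tem = 10 - 3 = 7.  */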
4371 /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
4372 the same as (zero_extract:SI FOO (const_int 1) BAR). */
4374 && op1
== const0_rtx
4375 && GET_MODE_CLASS (mode
) == MODE_INT
4376 && cmp_mode
!= VOIDmode
4377 /* ??? Work-around BImode bugs in the ia64 backend. */
4379 && cmp_mode
!= BImode
4380 && nonzero_bits (op0
, cmp_mode
) == 1
4381 && STORE_FLAG_VALUE
== 1)
4382 return GET_MODE_SIZE (mode
) > GET_MODE_SIZE (cmp_mode
)
4383 ? simplify_gen_unary (ZERO_EXTEND
, mode
, op0
, cmp_mode
)
4384 : lowpart_subreg (mode
, op0
, cmp_mode
);
  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 1), const0_rtx);

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), const0_rtx);

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && (CONST_INT_P (op1)
          || GET_CODE (op1) == CONST_DOUBLE)
      && (CONST_INT_P (XEXP (op0, 1))
          || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, cmp_mode,
                                                         XEXP (op0, 1), op1));
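  /* The last rule folds the two constants together: for example,
     (eq (xor x (const_int 5)) (const_int 3)) becomes
     (eq x (const_int 6)).  */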
  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
        /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
        return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
                                        XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
        /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
        return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
                                        XEXP (op0, 0), const0_rtx);

      default:
        break;
      }

  return NULL_RTX;
}
enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};

/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
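/* A typical KNOWN_RESULTS value mixes one signed and one unsigned
   verdict: comparing -1 against 0 gives CMP_LT | CMP_GTU, since -1 is
   less than 0 as a signed value but is the largest representable value
   when viewed as unsigned.  */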
/* Check if the given comparison (done in the given MODE) is actually a
   tautology or a contradiction.
   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
                                     enum machine_mode mode,
                                     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
              || (GET_MODE (op0) == VOIDmode
                  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
        mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
        mode = GET_MODE (op1);
      else
        return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
            && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
                                                mode, tem, const0_rtx);
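  /* For example, (eq (plus x (const_int 1)) x): the subtraction folds
     to (const_int 1), and the recursive call compares 1 against 0,
     yielding const0_rtx.  */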
  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;

  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (GET_MODE (trueop0))
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
           && ! HONOR_SNANS (GET_MODE (trueop0))))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
        switch (code)
          {
          case UNEQ:
          case UNLT:
          case UNGT:
          case UNLE:
          case UNGE:
          case NE:
          case UNORDERED:
            return const_true_rtx;
          case EQ:
          case LT:
          case GT:
          case LE:
          case GE:
          case LTGT:
          case ORDERED:
            return const0_rtx;
          default:
            return 0;
          }

      return comparison_result (code,
                                (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
                                 REAL_VALUES_LESS (d0, d1) ? CMP_LT : CMP_GT));
    }
  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || CONST_INT_P (trueop0))
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || CONST_INT_P (trueop1)))
    {
      int width = GET_MODE_PRECISION (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
        {
          l0u = l0s = CONST_DOUBLE_LOW (trueop0);
          h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
        }
      else
        {
          l0u = l0s = INTVAL (trueop0);
          h0u = h0s = HWI_SIGN_EXTEND (l0s);
        }

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        {
          l1u = l1s = CONST_DOUBLE_LOW (trueop1);
          h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
        }
      else
        {
          l1u = l1s = INTVAL (trueop1);
          h1u = h1s = HWI_SIGN_EXTEND (l1s);
        }

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
         we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
        {
          l0u &= GET_MODE_MASK (mode);
          l1u &= GET_MODE_MASK (mode);

          if (val_signbit_known_set_p (mode, l0s))
            l0s |= ~GET_MODE_MASK (mode);

          if (val_signbit_known_set_p (mode, l1s))
            l1s |= ~GET_MODE_MASK (mode);
        }
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
        h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s), h1s = HWI_SIGN_EXTEND (l1s);

      if (h0u == h1u && l0u == l1u)
        return comparison_result (code, CMP_EQ);
      else
        {
          int cr;
          cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
          cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
          return comparison_result (code, cr);
        }
    }
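  /* Note that the signed and unsigned verdicts above can differ: in
     QImode, 0xff is -1 (CMP_LT) against 0 when viewed as signed, but
     255 (CMP_GTU) when viewed as unsigned.  */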
  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
          || code == LEU
          || code == GTU
          || code == LTU)
        sign = 0;
      else
        sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
        {
          mmin = 0;
          mmax = nonzero;
        }
      else
        {
          rtx mmin_rtx, mmax_rtx;
          get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

          mmin = INTVAL (mmin_rtx);
          mmax = INTVAL (mmax_rtx);
          if (sign)
            {
              unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

              mmin >>= (sign_copies - 1);
              mmax >>= (sign_copies - 1);
            }
        }

      switch (code)
        {
        /* x >= y is always true for y <= mmin, always false for y > mmax.  */
        case GEU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          break;
        case GE:
          if (val <= mmin)
            return const_true_rtx;
          if (val > mmax)
            return const0_rtx;
          break;

        /* x <= y is always true for y >= mmax, always false for y < mmin.  */
        case LEU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          break;
        case LE:
          if (val >= mmax)
            return const_true_rtx;
          if (val < mmin)
            return const0_rtx;
          break;

        case EQ:
          /* x == y is always false for y out of range.  */
          if (val < mmin || val > mmax)
            return const0_rtx;
          break;

        /* x > y is always false for y >= mmax, always true for y < mmin.  */
        case GTU:
          if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          break;
        case GT:
          if (val >= mmax)
            return const0_rtx;
          if (val < mmin)
            return const_true_rtx;
          break;

        /* x < y is always false for y <= mmin, always true for y > mmax.  */
        case LTU:
          if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          break;
        case LT:
          if (val <= mmin)
            return const0_rtx;
          if (val > mmax)
            return const_true_rtx;
          break;

        case NE:
          /* x != y is always true for y out of range.  */
          if (val < mmin || val > mmax)
            return const_true_rtx;
          break;

        default:
          break;
        }
    }
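  /* E.g. for unsigned QImode the bounds are [0, 255], so
     (gtu:QI x (const_int 255)) hits the "y >= mmax" test in the GTU
     case above and folds to const0_rtx.  */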
  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx)
    {
      /* Some addresses are known to be nonzero.  We don't know
         their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
        {
          if (code == EQ || code == LEU)
            return const0_rtx;
          if (code == NE || code == GTU)
            return const_true_rtx;
        }

      /* See if the first operand is an IOR with a constant.  If so, we
         may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
        {
          rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
          if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
            {
              int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
              int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
                              && (UINTVAL (inner_const)
                                  & ((unsigned HOST_WIDE_INT) 1
                                     << sign_bitnum)));

              switch (code)
                {
                case EQ:
                case LEU:
                  return const0_rtx;
                case NE:
                case GTU:
                  return const_true_rtx;
                case LT:
                case LE:
                  if (has_sign)
                    return const_true_rtx;
                  break;
                case GT:
                case GE:
                  if (has_sign)
                    return const0_rtx;
                  break;
                default:
                  break;
                }
            }
        }
    }
  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
          || (GET_CODE (trueop0) == FLOAT_EXTEND
              && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
        {
        case LT:
          /* Optimize abs(x) < 0.0.  */
          if (!HONOR_SNANS (mode)
              && (!INTEGRAL_MODE_P (mode)
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
            {
              if (INTEGRAL_MODE_P (mode)
                  && (issue_strict_overflow_warning
                      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
                warning (OPT_Wstrict_overflow,
                         ("assuming signed overflow does not occur when "
                          "assuming abs (x) < 0 is false"));
              return const0_rtx;
            }
          break;

        case GE:
          /* Optimize abs(x) >= 0.0.  */
          if (!HONOR_NANS (mode)
              && (!INTEGRAL_MODE_P (mode)
                  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
            {
              if (INTEGRAL_MODE_P (mode)
                  && (issue_strict_overflow_warning
                      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
                warning (OPT_Wstrict_overflow,
                         ("assuming signed overflow does not occur when "
                          "assuming abs (x) >= 0 is true"));
              return const_true_rtx;
            }
          break;

        case UNGE:
          /* Optimize ! (abs(x) < 0.0).  */
          return const_true_rtx;

        default:
          break;
        }
    }

  return 0;
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
                            enum machine_mode op0_mode, rtx op0, rtx op1,
                            rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            op1 = tem, op0 = XEXP (op0, 0), any_change = true;
        }
      else if (GET_CODE (op1) == NEG)
        {
          tem = simplify_unary_operation (NEG, mode, op0, mode);
          if (tem)
            op0 = tem, op1 = XEXP (op1, 0), any_change = true;
        }

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
        tem = op0, op0 = op1, op1 = tem, any_change = true;

      if (any_change)
        return gen_rtx_FMA (mode, op0, op1, op2);
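      /* Removing the double negation relies on simplify_unary_operation
         collapsing (neg (neg b)) to b; a single remaining negation
         cannot be removed and is merely swapped onto the first
         operand.  */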
      break;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
          && CONST_INT_P (op1)
          && CONST_INT_P (op2)
          && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
          && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
        {
          /* Extracting a bit-field from a constant.  */
          unsigned HOST_WIDE_INT val = UINTVAL (op0);
          HOST_WIDE_INT op1val = INTVAL (op1);
          HOST_WIDE_INT op2val = INTVAL (op2);
          if (BITS_BIG_ENDIAN)
            val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
          else
            val >>= op2val;

          if (HOST_BITS_PER_WIDE_INT != op1val)
            {
              /* First zero-extend.  */
              val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
              /* If desired, propagate sign bit.  */
              if (code == SIGN_EXTRACT
                  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
                     != 0)
                val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
            }

          return gen_int_mode (val, mode);
        }
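      /* E.g. (zero_extract (const_int 0x6c) (const_int 3) (const_int 2))
         with little-endian bit numbering shifts 0x6c right by 2 and
         masks with 7, producing (const_int 3).  */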
      break;

    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
        return op0 != const0_rtx ? op1 : op2;

      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
        return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
          && ! side_effects_p (op0)
          && ! HONOR_NANS (mode)
          && ! HONOR_SIGNED_ZEROS (mode)
          && ((rtx_equal_p (XEXP (op0, 0), op1)
               && rtx_equal_p (XEXP (op0, 1), op2))
              || (rtx_equal_p (XEXP (op0, 0), op2)
                  && rtx_equal_p (XEXP (op0, 1), op1))))
        return op2;
      if (COMPARISON_P (op0) && ! side_effects_p (op0))
        {
          enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
                                        ? GET_MODE (XEXP (op0, 1))
                                        : GET_MODE (XEXP (op0, 0)));
          rtx temp;

          /* Look for happy constants in op1 and op2.  */
          if (CONST_INT_P (op1) && CONST_INT_P (op2))
            {
              HOST_WIDE_INT t = INTVAL (op1);
              HOST_WIDE_INT f = INTVAL (op2);

              if (t == STORE_FLAG_VALUE && f == 0)
                code = GET_CODE (op0);
              else if (t == 0 && f == STORE_FLAG_VALUE)
                {
                  enum rtx_code tmp;
                  tmp = reversed_comparison_code (op0, NULL_RTX);
                  if (tmp == UNKNOWN)
                    break;
                  code = tmp;
                }
              else
                break;

              return simplify_gen_relational (code, mode, cmp_mode,
                                              XEXP (op0, 0), XEXP (op0, 1));
            }

          if (cmp_mode == VOIDmode)
            cmp_mode = op0_mode;
          temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
                                                cmp_mode, XEXP (op0, 0),
                                                XEXP (op0, 1));

          /* See if any simplifications were possible.  */
          if (temp)
            {
              if (CONST_INT_P (temp))
                return temp == const0_rtx ? op2 : op1;
              else
                return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
            }
        }
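      /* With STORE_FLAG_VALUE of 1, (if_then_else (lt a b) (const_int 1)
         (const_int 0)) therefore collapses to (lt a b) itself, while the
         inverted constant pair uses the reversed comparison code.  */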
      break;

    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      op2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (op2))
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          int mask = (1 << n_elts) - 1;

          if (!(INTVAL (op2) & mask))
            return op1;
          if ((INTVAL (op2) & mask) == mask)
            return op0;
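          /* E.g. in V4SImode a mask of 0x5 takes elements 0 and 2 from
             op0 and elements 1 and 3 from op1; masks of 0 and 0xf were
             already resolved above without inspecting the operands.  */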
          op0 = avoid_constant_pool_reference (op0);
          op1 = avoid_constant_pool_reference (op1);
          if (GET_CODE (op0) == CONST_VECTOR
              && GET_CODE (op1) == CONST_VECTOR)
            {
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
                                    ? CONST_VECTOR_ELT (op0, i)
                                    : CONST_VECTOR_ELT (op1, i));
              return gen_rtx_CONST_VECTOR (mode, v);
            }
        }
      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR,
   returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
                       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx * elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char * vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
        {
        case CONST_INT:
          for (i = 0;
               i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
               i += value_bit)
            *vp++ = INTVAL (el) >> i;
          /* CONST_INTs are always logically sign-extended.  */
          for (; i < elem_bitsize; i += value_bit)
            *vp++ = INTVAL (el) < 0 ? -1 : 0;
          break;

        case CONST_DOUBLE:
          if (GET_MODE (el) == VOIDmode)
            {
              unsigned char extend = 0;
              /* If this triggers, someone should have generated a
                 CONST_INT instead.  */
              gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_DOUBLE_LOW (el) >> i;
              while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
                {
                  *vp++
                    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
                  i += value_bit;
                }

              if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
                extend = -1;
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = extend;
            }
          else
            {
              long tmp[max_bitsize / 32];
              int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

              gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
              gcc_assert (bitsize <= elem_bitsize);
              gcc_assert (bitsize % value_bit == 0);

              real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
                              GET_MODE (el));

              /* real_to_target produces its result in words affected by
                 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
                 and use WORDS_BIG_ENDIAN instead; see the documentation
                 of SUBREG in rtl.texi.  */
              for (i = 0; i < bitsize; i += value_bit)
                {
                  int ibase;
                  if (WORDS_BIG_ENDIAN)
                    ibase = bitsize - 1 - i;
                  else
                    ibase = i;
                  *vp++ = tmp[ibase / 32] >> i % 32;
                }

              /* It shouldn't matter what's done here, so fill it with
                 zero.  */
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        case CONST_FIXED:
          if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
            {
              for (i = 0; i < elem_bitsize; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
            }
          else
            {
              for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
                *vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
              for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                   i += value_bit)
                *vp++ = CONST_FIXED_VALUE_HIGH (el)
                        >> (i - HOST_BITS_PER_WIDE_INT);
              for (; i < elem_bitsize; i += value_bit)
                *vp++ = 0;
            }
          break;

        default:
          gcc_unreachable ();
        }
    }
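  /* VALUE now holds the constant as a little-endian array of
     value_bit-sized chunks; e.g. a CONST_INT of -2 unpacks to 0xfe
     followed by 0xff chunks, courtesy of the logical sign extension
     above.  */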
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
                        - byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
              + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);
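  /* For instance, with a DImode inner value and an SImode subreg at
     byte 4 on a big-endian target, ibyte is 8 - 4 - 4 = 0, so
     VALUE_START ends up addressing the least-significant chunks;
     byte offset 4 of a big-endian DImode is its low word.  */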
  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
         a mistake.)  */
      {
        unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
        unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
                          / BITS_PER_UNIT);
        unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
        unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
        unsigned bytele = (subword_byte % UNITS_PER_WORD
                           + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
        vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
        {
        case MODE_INT:
        case MODE_PARTIAL_INT:
          {
            unsigned HOST_WIDE_INT hi = 0, lo = 0;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              lo |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              hi |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask)
                    << (i - HOST_BITS_PER_WIDE_INT);

            /* immed_double_const doesn't call trunc_int_for_mode.  I don't
               know why.  */
            if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
              elems[elem] = gen_int_mode (lo, outer_submode);
            else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
              elems[elem] = immed_double_const (lo, hi, outer_submode);
            else
              return NULL_RTX;
          }
          break;

        case MODE_FLOAT:
        case MODE_DECIMAL_FLOAT:
          {
            REAL_VALUE_TYPE r;
            long tmp[max_bitsize / 32];

            /* real_from_target wants its input in words affected by
               FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
               and use WORDS_BIG_ENDIAN instead; see the documentation
               of SUBREG in rtl.texi.  */
            for (i = 0; i < max_bitsize / 32; i++)
              tmp[i] = 0;
            for (i = 0; i < elem_bitsize; i += value_bit)
              {
                int ibase;
                if (WORDS_BIG_ENDIAN)
                  ibase = elem_bitsize - 1 - i;
                else
                  ibase = i;
                tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
              }

            real_from_target (&r, tmp, outer_submode);
            elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
          }
          break;

        case MODE_FRACT:
        case MODE_UFRACT:
        case MODE_ACCUM:
        case MODE_UACCUM:
          {
            FIXED_VALUE_TYPE f;
            f.data.low = 0;
            f.data.high = 0;
            f.mode = outer_submode;

            for (i = 0;
                 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
                 i += value_bit)
              f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
            for (; i < elem_bitsize; i += value_bit)
              f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
                              << (i - HOST_BITS_PER_WIDE_INT));

            elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
          }
          break;

        default:
          gcc_unreachable ();
        }
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return elems[0];
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (enum machine_mode outermode, rtx op,
                 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
              || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;

  if (CONST_INT_P (op)
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
          && byte == 0 && SUBREG_BYTE (op) == 0)
        return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
         in memory.  Irritating exception is paradoxical subreg, where
         we define SUBREG_BYTE to be 0.  On big endian machines, this
         value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
        {
          int difference = (GET_MODE_SIZE (innermode)
                            - GET_MODE_SIZE (outermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      if (SUBREG_BYTE (op) == 0
          && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
        {
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (innermode));
          if (WORDS_BIG_ENDIAN)
            final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            final_offset += difference % UNITS_PER_WORD;
        }

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
        {
          /* In nonparadoxical subregs we can't handle negative offsets.  */
          if (final_offset < 0)
            return NULL_RTX;
          /* Bail out in case resulting subreg would be incorrect.  */
          if (final_offset % GET_MODE_SIZE (outermode)
              || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
            return NULL_RTX;
        }
      else
        {
          int offset = 0;
          int difference = (GET_MODE_SIZE (innermostmode)
                            - GET_MODE_SIZE (outermode));

          /* In paradoxical subreg, see if we are still looking on lower part.
             If so, our SUBREG_BYTE will be 0.  */
          if (WORDS_BIG_ENDIAN)
            offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
          if (BYTES_BIG_ENDIAN)
            offset += difference % UNITS_PER_WORD;
          if (offset == final_offset)
            final_offset = 0;
          else
            return NULL_RTX;
        }

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
                              final_offset);
      if (newx)
        return newx;
      if (validate_subreg (outermode, innermostmode,
                           SUBREG_REG (op), final_offset))
        {
          newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
          if (SUBREG_PROMOTED_VAR_P (op)
              && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
              && GET_MODE_CLASS (outermode) == MODE_INT
              && IN_RANGE (GET_MODE_SIZE (outermode),
                           GET_MODE_SIZE (innermode),
                           GET_MODE_SIZE (innermostmode))
              && subreg_lowpart_p (newx))
            {
              SUBREG_PROMOTED_VAR_P (newx) = 1;
              SUBREG_PROMOTED_UNSIGNED_SET
                (newx, SUBREG_PROMOTED_UNSIGNED_P (op));
            }
          return newx;
        }
      return NULL_RTX;
    }
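  /* E.g. (subreg:QI (subreg:HI (reg:SI r) 0) 0) collapses to
     (subreg:QI (reg:SI r) 0), recursing on the innermost register
     with the combined offset.  */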
  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
        {
          rtx x;
          int final_offset = byte;

          /* Adjust offset for paradoxical subregs.  */
          if (byte == 0
              && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
            {
              int difference = (GET_MODE_SIZE (innermode)
                                - GET_MODE_SIZE (outermode));
              if (WORDS_BIG_ENDIAN)
                final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
              if (BYTES_BIG_ENDIAN)
                final_offset += difference % UNITS_PER_WORD;
            }

          x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

          /* Propagate original regno.  We don't have any way to specify
             the offset inside original regno, so do so only for lowpart.
             The information is used only by alias analysis that cannot
             grok partial registers anyway.  */

          if (subreg_lowpart_offset (outermode, innermode) == byte)
            ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
          return x;
        }
    }

  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
         have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
          || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
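  /* For instance, (subreg:SI (mem:DI X) 4) becomes a narrower
     (mem:SI ...) whose address is offset by 4 bytes.  */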
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
        {
          part = XEXP (op, 0);
          final_offset = byte;
        }
      else
        {
          part = XEXP (op, 1);
          final_offset = byte - part_size;
        }

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
        return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
        return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
        return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
         there are three possibilities.  If the outermode is the same
         as the origmode, we can omit both the extension and the subreg.
         If the outermode is not larger than the origmode, we can apply
         the truncation without the extension.  Finally, if the outermode
         is larger than the origmode, but both are integer modes, we
         can just extend to the appropriate mode.  */
      if (bitpos == 0)
        {
          enum machine_mode origmode = GET_MODE (XEXP (op, 0));
          if (outermode == origmode)
            return XEXP (op, 0);
          if (GET_MODE_PRECISION (outermode) <= GET_MODE_PRECISION (origmode))
            return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
                                        subreg_lowpart_offset (outermode,
                                                               origmode));
          if (SCALAR_INT_MODE_P (outermode))
            return simplify_gen_unary (GET_CODE (op), outermode,
                                       XEXP (op, 0), origmode);
        }

      /* A SUBREG resulting from a zero extension may fold to zero if
         it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
          && bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
        return CONST0_RTX (outermode);
    }
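  /* E.g. the lowpart (subreg:QI (zero_extend:SI (x:QI)) 0) reduces to
     x itself, while a subreg selecting only bits above the extension's
     source folds to (const_int 0).  */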
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0) into
     (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      /* Ensure that INNERMODE is at least twice as wide as the OUTERMODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && (2 * GET_MODE_PRECISION (outermode)) <= GET_MODE_PRECISION (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0) into
     (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0) into
     (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));
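  /* All three patterns turn a shift performed in the wide mode into the
     same shift performed directly in the narrow mode; e.g.
     (subreg:HI (lshiftrt:SI (zero_extend:SI (x:HI)) (const_int 5)) 0)
     becomes (lshiftrt:HI (x:HI) (const_int 5)).  */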
  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) >= BITS_PER_WORD
      && GET_MODE_PRECISION (innermode) >= (2 * GET_MODE_PRECISION (outermode))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (GET_MODE_PRECISION (outermode) - 1)) == 0
      && INTVAL (XEXP (op, 1)) >= 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }
  /* If we have a lowpart SUBREG of a right shift of MEM, make a new MEM
     and try replacing the SUBREG and shift with it.  Don't do this if
     the MEM has a mode-dependent address or if we would be widening it.  */

  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (innermode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (GET_MODE (op))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (outermode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && byte == subreg_lowpart_offset (outermode, innermode)
      && (GET_MODE_SIZE (outermode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), outermode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
                     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

        1. fold_rtx in cse.c.  This code uses various CSE specific
           information to aid in RTL simplification.

        2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
           it uses combine specific information to aid in RTL
           simplification.

        3. The routines in this file.


   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

        1. Pore over fold_rtx & simplify_rtx and move any simplifications
           which are not pass dependent state into these routines.

        2. As code is moved by #1, change fold_rtx & simplify_rtx to
           use this routine whenever possible.

        3. Allow for pass dependent state to be provided to these
           routines and add simplifications based on the pass dependent
           state.  Remove code from cse.c & combine.c that becomes
           redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added in 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
                                       XEXP (x, 0), GET_MODE (XEXP (x, 0)));
    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
        return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
                                         XEXP (x, 0), XEXP (x, 1),
                                         XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
                                            ((GET_MODE (XEXP (x, 0))
                                              != VOIDmode)
                                             ? GET_MODE (XEXP (x, 0))
                                             : GET_MODE (XEXP (x, 1))),
                                            XEXP (x, 0),
                                            XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
        return simplify_subreg (mode, SUBREG_REG (x),
                                GET_MODE (SUBREG_REG (x)),
                                SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
        {
          /* Convert (lo_sum (high FOO) FOO) to FOO.  */
          if (GET_CODE (XEXP (x, 0)) == HIGH
              && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
            return XEXP (x, 1);
        }
      break;

    default:
      break;
    }
  return NULL;
}