/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987, 1988, 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "flags.h"
#include "real.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "output.h"
#include "ggc.h"
#include "target.h"
/* Simplification and canonicalization of RTL.  */

/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
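/* For example, HWI_SIGN_EXTEND (16) is 0, while HWI_SIGN_EXTEND (-16) is
   -1 (all ones): the result is the high word of the (low, high) pair that
   represents the sign-extended value of LOW.  */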
static rtx neg_const_int (enum machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static bool simplify_plus_minus_op_data_cmp (rtx, rtx);
static rtx simplify_plus_minus (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (enum machine_mode, rtx, enum machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, enum machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, enum machine_mode,
                                            enum machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, enum machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, enum machine_mode,
                                        rtx, rtx, rtx, rtx);
/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (enum machine_mode mode, const_rtx i)
{
  return gen_int_mode (- INTVAL (i), mode);
}
/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (enum machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_BITSIZE (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
  else if (width <= 2 * HOST_BITS_PER_WIDE_INT
           && GET_CODE (x) == CONST_DOUBLE
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
  else
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
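/* For example, in 32-bit SImode this returns true exactly for the constant
   0x80000000, the value whose only set bit is the most significant bit of
   the mode.  */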
/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
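/* Illustrative use: simplify_gen_binary (PLUS, SImode, x, const0_rtx)
   folds to X itself (for integer modes), rather than building the
   expression (plus:SI x (const_int 0)).  */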
/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  enum machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && GET_CODE (c) == CONST_DOUBLE)
        {
          REAL_VALUE_TYPE d;

          REAL_VALUE_FROM_CONST_DOUBLE (d, c);
          return CONST_DOUBLE_FROM_REAL_VALUE (d, GET_MODE (x));
        }
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if (offset != 0 || cmode != GET_MODE (x))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}
/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  if (MEM_P (x)
      && MEM_EXPR (x)
      && (!MEM_OFFSET (x)
          || GET_CODE (MEM_OFFSET (x)) == CONST_INT))
    {
      tree decl = MEM_EXPR (x);
      enum machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp = 0, volatilep = 0;

            decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
                                        &mode, &unsignedp, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !host_integerp (toffset, 0)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += TREE_INT_CST_LOW (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          if (MEM_OFFSET (x))
            offset += INTVAL (MEM_OFFSET (x));

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}
/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, enum machine_mode mode, rtx op,
                    enum machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}
/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, enum machine_mode mode,
                      enum machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}
/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, enum machine_mode mode,
                         enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  enum rtx_code code = GET_CODE (x);
  enum machine_mode mode = GET_MODE (x);
  enum machine_mode op_mode;
  rtx op0, op1, op2;

  /* If X is OLD_RTX, return NEW_RTX.  Otherwise, if this is an expression, try
     to build a new expression substituting recursively.  If we can't do
     anything, return our input.  */

  if (rtx_equal_p (x, old_rtx))
    return copy_rtx (new_rtx);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (op1, old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_rtx (op0, old_rtx, new_rtx);
      op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);
      op2 = simplify_replace_rtx (XEXP (x, 2), old_rtx, new_rtx);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      /* The only case we try to handle is a SUBREG.  */
      if (code == SUBREG)
        {
          op0 = simplify_replace_rtx (SUBREG_REG (x), old_rtx, new_rtx);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_rtx (XEXP (x, 0), old_rtx, new_rtx);
          op1 = simplify_replace_rtx (XEXP (x, 1), old_rtx, new_rtx);

          /* (lo_sum (high x) x) -> x  */
          if (GET_CODE (op0) == HIGH && rtx_equal_p (XEXP (op0, 0), op1))
            return op1;

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }
  return x;
}
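/* Illustrative use: replacing (reg:SI 100) with (const_int 4) in
   (plus:SI (reg:SI 100) (const_int 3)) substitutes recursively and then
   folds the rebuilt expression to (const_int 7).  */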
/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, enum machine_mode mode,
                          rtx op, enum machine_mode op_mode)
{
  rtx trueop, tem;

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, enum machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);
      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));
      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return plus_constant (XEXP (op, 0), -1);
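      /* Both rules rest on the two's complement identity
         ~(x - 1) == -x; e.g. for x == 5, ~(5 - 1) == ~4 == -5.  */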
      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }
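      /* E.g. (not (ashift 1 x)) has exactly one zero bit, at position X,
         which is precisely what (rotate ~1 x) computes.  */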
      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */

      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          enum machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          return rtl_hooks.gen_lowpart_no_emit (mode, x);
        }
      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */

      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          enum machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            {
              rtx tem = in2;
              in2 = in1; in1 = tem;
            }

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return plus_constant (XEXP (op, 0), 1);
      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_INT_P (XEXP (op, 1))
              || GET_CODE (XEXP (op, 1)) == CONST_DOUBLE)
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }
      /* (neg (mult A B)) becomes (mult (neg A) B).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MULT, mode, temp, XEXP (op, 1));
        }
      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }
      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));
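      /* E.g. in SImode with C == 31: both (neg (ashiftrt x 31)) and
         (lshiftrt x 31) yield 1 when X is negative and 0 otherwise.  */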
      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (XEXP (op, 0), -1);
      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          enum machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_BITSIZE (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_BITSIZE (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;

    case TRUNCATE:
      /* We can't handle truncation to a partial integer mode here
         because we don't know the real bitsize of the partial
         integer mode.  */
      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        break;

      /* (truncate:SI ({sign,zero}_extend:DI foo:SI)) == foo:SI.  */
      if ((GET_CODE (op) == SIGN_EXTEND
           || GET_CODE (op) == ZERO_EXTEND)
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);
      /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
         (OP:SI foo:SI) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
              || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);
      /* (truncate:A (subreg:B (truncate:C X) 0)) is
         (truncate:A X).  */
      if (GET_CODE (op) == SUBREG
          && GET_CODE (SUBREG_REG (op)) == TRUNCATE
          && subreg_lowpart_p (op))
        return simplify_gen_unary (TRUNCATE, mode, XEXP (SUBREG_REG (op), 0),
                                   GET_MODE (XEXP (SUBREG_REG (op), 0)));
      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  Note that this is also
         valid if TRULY_NOOP_TRUNCATION is false for the corresponding
         modes we just have to apply a different definition for
         truncation.  But don't do this for an (LSHIFTRT (MULT ...))
         since this will cause problems with the umulXi3_highpart
         patterns.  */
      if ((TRULY_NOOP_TRUNCATION (GET_MODE_BITSIZE (mode),
                                  GET_MODE_BITSIZE (GET_MODE (op)))
           ? (num_sign_bit_copies (op, GET_MODE (op))
              > (unsigned int) (GET_MODE_BITSIZE (GET_MODE (op))
                                - GET_MODE_BITSIZE (mode)))
           : truncated_to_mode (mode, op))
          && ! (GET_CODE (op) == LSHIFTRT
                && GET_CODE (XEXP (op, 0)) == MULT))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && COMPARISON_P (op)
          && ((HOST_WIDE_INT) STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);
      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);
      /* (float_truncate (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT
          && (flag_unsafe_math_optimizations
              || (SCALAR_FLOAT_MODE_P (GET_MODE (op))
                  && ((unsigned) significand_size (GET_MODE (op))
                      >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                          - num_sign_bit_copies (XEXP (op, 0),
                                                 GET_MODE (XEXP (op, 0))))))))
        return simplify_gen_unary (FLOAT, mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);
      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x)

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          || (GET_CODE (op) == FLOAT
              && SCALAR_FLOAT_MODE_P (GET_MODE (op))
              && ((unsigned) significand_size (GET_MODE (op))
                  >= (GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0)))
                      - num_sign_bit_copies (XEXP (op, 0),
                                             GET_MODE (XEXP (op, 0)))))))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || ((GET_MODE_BITSIZE (GET_MODE (op))
               <= HOST_BITS_PER_WIDE_INT)
              && ((nonzero_bits (op, GET_MODE (op))
                   & ((HOST_WIDE_INT) 1
                      << (GET_MODE_BITSIZE (GET_MODE (op)) - 1)))
                  == 0)))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_BITSIZE (mode))
        return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
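      /* E.g. (ffs (zero_extend:DI x:SI)) folds to (ffs x): extension
         never changes the position of the lowest set bit.  */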
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && ! SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op) > 0
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        return rtl_hooks.gen_lowpart_no_emit (mode, op);
#if defined(POINTERS_EXTEND_UNSIGNED) && !defined(HAVE_ptr_extend)
      if (POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode)))
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}
/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op, enum machine_mode op_mode)
{
  unsigned int width = GET_MODE_BITSIZE (mode);
  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (CONST_INT_P (op) || GET_CODE (op) == CONST_DOUBLE
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              enum machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_SIZE (GET_MODE_INNER (inmode));
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }
  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      enum machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }
  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && GET_MODE (op) == VOIDmode
      && (GET_CODE (op) == CONST_DOUBLE || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      REAL_VALUE_FROM_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && GET_MODE (op) == VOIDmode
           && (GET_CODE (op) == CONST_DOUBLE
               || CONST_INT_P (op)))
    {
      HOST_WIDE_INT hv, lv;
      REAL_VALUE_TYPE d;

      if (CONST_INT_P (op))
        lv = INTVAL (op), hv = HWI_SIGN_EXTEND (lv);
      else
        lv = CONST_DOUBLE_LOW (op), hv = CONST_DOUBLE_HIGH (op);

      if (op_mode == VOIDmode)
        {
          /* We don't know how to interpret negative-looking numbers in
             this case, so don't try to fold those.  */
          if (hv < 0)
            return 0;
        }
      else if (GET_MODE_BITSIZE (op_mode) >= HOST_BITS_PER_WIDE_INT * 2)
        ;
      else
        hv = 0, lv &= GET_MODE_MASK (op_mode);

      REAL_VALUE_FROM_UNSIGNED_INT (d, lv, hv, mode);
      d = real_value_truncate (mode, d);
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  if (CONST_INT_P (op)
      && width <= HOST_BITS_PER_WIDE_INT && width > 0)
    {
      HOST_WIDE_INT arg0 = INTVAL (op);
      HOST_WIDE_INT val;

      switch (code)
        {
        case NOT:
          val = ~ arg0;
          break;

        case NEG:
          val = - arg0;
          break;

        case ABS:
          val = (arg0 >= 0 ? arg0 : - arg0);
          break;

        case FFS:
          /* Don't use ffs here.  Instead, get low order bit and then its
             number.  If arg0 is zero, this will return 0, as desired.  */
          arg0 &= GET_MODE_MASK (mode);
          val = exact_log2 (arg0 & (- arg0)) + 1;
          break;

        case CLZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0 && CLZ_DEFINED_VALUE_AT_ZERO (mode, val))
            ;
          else
            val = GET_MODE_BITSIZE (mode) - floor_log2 (arg0) - 1;
          break;

        case CTZ:
          arg0 &= GET_MODE_MASK (mode);
          if (arg0 == 0)
            {
              /* Even if the value at zero is undefined, we have to come
                 up with some replacement.  Seems good enough.  */
              if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, val))
                val = GET_MODE_BITSIZE (mode);
            }
          else
            val = exact_log2 (arg0 & -arg0);
          break;

        case POPCOUNT:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          break;

        case PARITY:
          arg0 &= GET_MODE_MASK (mode);
          val = 0;
          while (arg0)
            val++, arg0 &= arg0 - 1;
          val &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            val = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;
                byte = (arg0 >> s) & 0xff;
                val |= byte << d;
              }
          }
          break;

        case TRUNCATE:
          val = arg0;
          break;

        case ZERO_EXTEND:
          /* When zero-extending a CONST_INT, we need to know its
             original mode.  */
          gcc_assert (op_mode != VOIDmode);
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            val = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
          else
            return 0;
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode)
            op_mode = mode;
          if (GET_MODE_BITSIZE (op_mode) == HOST_BITS_PER_WIDE_INT)
            {
              /* If we were really extending the mode,
                 we would have to distinguish between zero-extension
                 and sign-extension.  */
              gcc_assert (width == GET_MODE_BITSIZE (op_mode));
              val = arg0;
            }
          else if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT)
            {
              val
                = arg0 & ~((HOST_WIDE_INT) (-1) << GET_MODE_BITSIZE (op_mode));
              if (val
                  & ((HOST_WIDE_INT) 1 << (GET_MODE_BITSIZE (op_mode) - 1)))
                val -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);
            }
          else
            return 0;
          break;

        case SQRT:
        case FLOAT_EXTEND:
        case FLOAT_TRUNCATE:
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }
  /* We can do some operations on integer CONST_DOUBLEs.  Also allow
     for a DImode operation on a CONST_INT.  */
  else if (GET_MODE (op) == VOIDmode
           && width <= HOST_BITS_PER_WIDE_INT * 2
           && (GET_CODE (op) == CONST_DOUBLE
               || CONST_INT_P (op)))
    {
      unsigned HOST_WIDE_INT l1, lv;
      HOST_WIDE_INT h1, hv;

      if (GET_CODE (op) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op), h1 = CONST_DOUBLE_HIGH (op);
      else
        l1 = INTVAL (op), h1 = HWI_SIGN_EXTEND (l1);

      switch (code)
        {
        case NOT:
          lv = ~ l1;
          hv = ~ h1;
          break;

        case NEG:
          neg_double (l1, h1, &lv, &hv);
          break;

        case ABS:
          if (h1 < 0)
            neg_double (l1, h1, &lv, &hv);
          else
            lv = l1, hv = h1;
          break;

        case FFS:
          hv = 0;
          if (l1 == 0)
            {
              if (h1 == 0)
                lv = 0;
              else
                lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1) + 1;
            }
          else
            lv = exact_log2 (l1 & -l1) + 1;
          break;

        case CLZ:
          hv = 0;
          if (h1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (h1) - 1
              - HOST_BITS_PER_WIDE_INT;
          else if (l1 != 0)
            lv = GET_MODE_BITSIZE (mode) - floor_log2 (l1) - 1;
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case CTZ:
          hv = 0;
          if (l1 != 0)
            lv = exact_log2 (l1 & -l1);
          else if (h1 != 0)
            lv = HOST_BITS_PER_WIDE_INT + exact_log2 (h1 & -h1);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, lv))
            lv = GET_MODE_BITSIZE (mode);
          break;

        case POPCOUNT:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          break;

        case PARITY:
          hv = 0;
          lv = 0;
          while (l1)
            lv++, l1 &= l1 - 1;
          while (h1)
            lv++, h1 &= h1 - 1;
          lv &= 1;
          break;

        case BSWAP:
          {
            unsigned int s;

            hv = 0;
            lv = 0;
            for (s = 0; s < width; s += 8)
              {
                unsigned int d = width - s - 8;
                unsigned HOST_WIDE_INT byte;

                if (s < HOST_BITS_PER_WIDE_INT)
                  byte = (l1 >> s) & 0xff;
                else
                  byte = (h1 >> (s - HOST_BITS_PER_WIDE_INT)) & 0xff;

                if (d < HOST_BITS_PER_WIDE_INT)
                  lv |= byte << d;
                else
                  hv |= byte << (d - HOST_BITS_PER_WIDE_INT);
              }
          }
          break;

        case TRUNCATE:
          /* This is just a change-of-mode, so do nothing.  */
          lv = l1, hv = h1;
          break;

        case ZERO_EXTEND:
          gcc_assert (op_mode != VOIDmode);

          if (GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;

          hv = 0;
          lv = l1 & GET_MODE_MASK (op_mode);
          break;

        case SIGN_EXTEND:
          if (op_mode == VOIDmode
              || GET_MODE_BITSIZE (op_mode) > HOST_BITS_PER_WIDE_INT)
            return 0;
          else
            {
              lv = l1 & GET_MODE_MASK (op_mode);
              if (GET_MODE_BITSIZE (op_mode) < HOST_BITS_PER_WIDE_INT
                  && (lv & ((HOST_WIDE_INT) 1
                            << (GET_MODE_BITSIZE (op_mode) - 1))) != 0)
                lv -= (HOST_WIDE_INT) 1 << GET_MODE_BITSIZE (op_mode);

              hv = HWI_SIGN_EXTEND (lv);
            }
          break;

        case SQRT:
          return 0;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (mode))
    {
      REAL_VALUE_TYPE d, t;
      REAL_VALUE_FROM_CONST_DOUBLE (d, op);

      switch (code)
        {
        case SQRT:
          if (HONOR_SNANS (mode) && real_isnan (&d))
            return 0;
          real_sqrt (&t, mode, &d);
          d = t;
          break;
        case ABS:
          d = REAL_VALUE_ABS (d);
          break;
        case NEG:
          d = REAL_VALUE_NEGATE (d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode.  */
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
    }
  else if (GET_CODE (op) == CONST_DOUBLE
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width <= 2 * HOST_BITS_PER_WIDE_INT && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      HOST_WIDE_INT xh, xl, th, tl;
      REAL_VALUE_TYPE x, t;
      REAL_VALUE_FROM_CONST_DOUBLE (x, op);
      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT - 1)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << (width - 1)) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          /* Test against the signed lower bound.  */
          if (width > HOST_BITS_PER_WIDE_INT)
            {
              th = (HOST_WIDE_INT) -1 << (width - HOST_BITS_PER_WIDE_INT - 1);
              tl = 0;
            }
          else
            {
              th = -1;
              tl = (HOST_WIDE_INT) -1 << (width - 1);
            }
          real_from_integer (&t, VOIDmode, tl, th, 0);
          if (REAL_VALUES_LESS (x, t))
            {
              xh = th;
              xl = tl;
              break;
            }
          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (x) || REAL_VALUE_NEGATIVE (x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          if (width == 2 * HOST_BITS_PER_WIDE_INT)
            {
              th = -1;
              tl = -1;
            }
          else if (width >= HOST_BITS_PER_WIDE_INT)
            {
              th = ((unsigned HOST_WIDE_INT) 1
                    << (width - HOST_BITS_PER_WIDE_INT)) - 1;
              tl = -1;
            }
          else
            {
              th = 0;
              tl = ((unsigned HOST_WIDE_INT) 1 << width) - 1;
            }
          real_from_integer (&t, VOIDmode, tl, th, 1);
          if (REAL_VALUES_LESS (t, x))
            {
              xh = th;
              xl = tl;
              break;
            }

          REAL_VALUE_TO_INT (&xl, &xh, x);
          break;

        default:
          gcc_unreachable ();
        }
      return immed_double_const (xl, xh, mode);
    }

  return 0;
}
/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, enum machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      tem = op0;
      op0 = op1;
      op1 = tem;
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
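/* E.g. for PLUS this canonicalizes (plus a (plus b c)) towards
   (plus (plus a b) c) and lets constant operands meet and combine, so
   ((x + 1) + 2) can fold to (x + 3).  */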
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */
rtx
simplify_binary_operation (enum rtx_code code, enum machine_mode mode,
                           rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    tem = op0, op0 = op1, op1 = tem;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  return simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
}
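/* Illustrative use: simplify_binary_operation (PLUS, SImode, GEN_INT (2),
   GEN_INT (3)) yields (const_int 5); the function returns 0 whenever
   nothing about the operands permits a simplification.  */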
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, enum machine_mode mode,
                             rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
         when x is NaN, infinite, or finite and nonzero.  They aren't
         when x is -0 and the rounding mode is not towards -infinity,
         since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
        return op0;
      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
         transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
        return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
          && GET_CODE (op0) == NOT
          && trueop1 == const1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
      /* Handle both-operands-constant cases.  We can only add
         CONST_INTs to constants since the sum of relocatable symbols
         can't be handled by most assemblers.  Don't add CONST_INT
         to CONST_INT since overflow won't be computed properly if wider
         than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
           || GET_CODE (op0) == SYMBOL_REF
           || GET_CODE (op0) == LABEL_REF)
          && CONST_INT_P (op1))
        return plus_constant (op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
                || GET_CODE (op1) == SYMBOL_REF
                || GET_CODE (op1) == LABEL_REF)
               && CONST_INT_P (op0))
        return plus_constant (op1, INTVAL (op0));
      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, coeff1h = 0;
          unsigned HOST_WIDE_INT coeff0l = 1, coeff1l = 1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              coeff1l = -1;
              coeff1h = -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              coeff1l = INTVAL (XEXP (rhs, 1));
              coeff1h = INTVAL (XEXP (rhs, 1)) < 0 ? -1 : 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff1l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1));
              coeff1h = 0;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_PLUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;
              bool speed = optimize_function_for_speed_p (cfun);

              add_double (coeff0l, coeff0h, coeff1l, coeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
                ? tem : 0;
            }
        }
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if ((CONST_INT_P (op1)
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == XOR
          && (CONST_INT_P (XEXP (op0, 1))
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));
      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op0) == MULT
          && GET_CODE (XEXP (op0, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op0, 0), 0);
          in2 = XEXP (op0, 1);
          return simplify_gen_binary (MINUS, mode, op1,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2));
        }
      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
         C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
         is 1.  */
      if (COMPARISON_P (op0)
          && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
              || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
          && (reversed = reversed_comparison (op0, mode)))
        return
          simplify_gen_unary (NEG, mode, reversed, mode);
      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;

      /* Reassociate floating point addition only when the user
         specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
          && flag_associative_math)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;

    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
           || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
          && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
        {
          rtx xop00 = XEXP (op0, 0);
          rtx xop10 = XEXP (op1, 0);

#ifdef HAVE_cc0
          if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
#else
          if (REG_P (xop00) && REG_P (xop10)
              && GET_MODE (xop00) == GET_MODE (xop10)
              && REGNO (xop00) == REGNO (xop10)
              && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
              && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
#endif
            return xop00;
        }
      break;

    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
         but since it is zero except in very strange circumstances, we
         will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
        return CONST0_RTX (mode);
      /* Change subtraction from zero into negation.  (0 - x) is the
         same as -x when x is NaN, infinite, or finite and nonzero.
         But if the mode has signed zeros, and does not round towards
         -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
        return simplify_gen_unary (NEG, mode, op1, mode);
      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
        return simplify_gen_unary (NOT, mode, op1, mode);
      /* Subtracting 0 has no effect unless the mode has signed zeros
         and supports rounding towards -infinity.  In such a case,
         0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
            && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
          && trueop1 == CONST0_RTX (mode))
        return op0;
      /* See if this is something like X * C - X or vice versa or
         if the multiplication is written as a shift.  If so, we can
         distribute and make a new multiply, shift, or maybe just
         have X (if C is 2 in the example above).  But don't make
         something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
        {
          HOST_WIDE_INT coeff0h = 0, negcoeff1h = -1;
          unsigned HOST_WIDE_INT coeff0l = 1, negcoeff1l = -1;
          rtx lhs = op0, rhs = op1;

          if (GET_CODE (lhs) == NEG)
            {
              coeff0l = -1;
              coeff0h = -1;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == MULT
                   && CONST_INT_P (XEXP (lhs, 1)))
            {
              coeff0l = INTVAL (XEXP (lhs, 1));
              coeff0h = INTVAL (XEXP (lhs, 1)) < 0 ? -1 : 0;
              lhs = XEXP (lhs, 0);
            }
          else if (GET_CODE (lhs) == ASHIFT
                   && CONST_INT_P (XEXP (lhs, 1))
                   && INTVAL (XEXP (lhs, 1)) >= 0
                   && INTVAL (XEXP (lhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              coeff0l = ((HOST_WIDE_INT) 1) << INTVAL (XEXP (lhs, 1));
              coeff0h = 0;
              lhs = XEXP (lhs, 0);
            }

          if (GET_CODE (rhs) == NEG)
            {
              negcoeff1l = 1;
              negcoeff1h = 0;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == MULT
                   && CONST_INT_P (XEXP (rhs, 1)))
            {
              negcoeff1l = -INTVAL (XEXP (rhs, 1));
              negcoeff1h = INTVAL (XEXP (rhs, 1)) <= 0 ? 0 : -1;
              rhs = XEXP (rhs, 0);
            }
          else if (GET_CODE (rhs) == ASHIFT
                   && CONST_INT_P (XEXP (rhs, 1))
                   && INTVAL (XEXP (rhs, 1)) >= 0
                   && INTVAL (XEXP (rhs, 1)) < HOST_BITS_PER_WIDE_INT)
            {
              negcoeff1l = -(((HOST_WIDE_INT) 1) << INTVAL (XEXP (rhs, 1)));
              negcoeff1h = -1;
              rhs = XEXP (rhs, 0);
            }

          if (rtx_equal_p (lhs, rhs))
            {
              rtx orig = gen_rtx_MINUS (mode, op0, op1);
              rtx coeff;
              unsigned HOST_WIDE_INT l;
              HOST_WIDE_INT h;
              bool speed = optimize_function_for_speed_p (cfun);

              add_double (coeff0l, coeff0h, negcoeff1l, negcoeff1h, &l, &h);
              coeff = immed_double_const (l, h, mode);

              tem = simplify_gen_binary (MULT, mode, lhs, coeff);
              return rtx_cost (tem, SET, speed) <= rtx_cost (orig, SET, speed)
                ? tem : 0;
            }
        }
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
        return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
          && (CONST_INT_P (op1)
              || GET_CODE (op1) == CONST_DOUBLE))
        {
          tem = simplify_unary_operation (NEG, mode, op1, mode);
          if (tem)
            return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
        }
1977 if (CONST_INT_P (op1
) && GET_MODE (op0
) != VOIDmode
)
1978 return simplify_gen_binary (PLUS
, mode
,
1980 neg_const_int (mode
, op1
));
      /* (x - (x & y)) -> (x & ~y) */
      if (GET_CODE (op1) == AND)
        {
          if (rtx_equal_p (op0, XEXP (op1, 0)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
                                        GET_MODE (XEXP (op1, 1)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
          if (rtx_equal_p (op0, XEXP (op1, 1)))
            {
              tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
                                        GET_MODE (XEXP (op1, 0)));
              return simplify_gen_binary (AND, mode, op0, tem);
            }
        }
      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
         by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
          && trueop0 == const1_rtx
          && COMPARISON_P (op1)
          && (reversed = reversed_comparison (op1, mode)))
        return reversed;
      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (XEXP (op1, 0)) == NEG)
        {
          rtx in1, in2;

          in1 = XEXP (XEXP (op1, 0), 0);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (PLUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      op0);
        }
      /* Canonicalize (minus (neg A) (mult B C)) to
         (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
          && GET_CODE (op1) == MULT
          && GET_CODE (op0) == NEG)
        {
          rtx in1, in2;

          in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
          in2 = XEXP (op1, 1);
          return simplify_gen_binary (MINUS, mode,
                                      simplify_gen_binary (MULT, mode,
                                                           in1, in2),
                                      XEXP (op0, 0));
        }
      /* If one of the operands is a PLUS or a MINUS, see if we can
         simplify this by the associative law.  This will, for example,
         canonicalize (minus A (plus B C)) to (minus (minus A B) C).
         Don't use the associative law for floating point.
         The inaccuracy makes it nonassociative,
         and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
          && (plus_minus_operand_p (op0)
              || plus_minus_operand_p (op1))
          && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
        return tem;
      break;

    case MULT:
      if (trueop1 == constm1_rtx)
        return simplify_gen_unary (NEG, mode, op0, mode);
      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
         x is NaN, since x * 0 is then also NaN.  Nor is it valid
         when the mode has signed zeros, since multiplying a negative
         number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
          && !HONOR_SIGNED_ZEROS (mode)
          && trueop1 == CONST0_RTX (mode)
          && ! side_effects_p (op0))
        return op1;
      /* In IEEE floating point, x*1 is not equivalent to x for
         signalling NaNs.  */
      if (!HONOR_SNANS (mode)
          && trueop1 == CONST1_RTX (mode))
        return op0;
      /* Convert multiply by constant power of two into shift unless
         we are still generating RTL.  This test is a kludge.  */
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (INTVAL (trueop1))) >= 0
          /* If the mode is larger than the host word size, and the
             uppermost bit is set, then this isn't a power of two due
             to implicit sign extension.  */
          && (width <= HOST_BITS_PER_WIDE_INT
              || val != HOST_BITS_PER_WIDE_INT - 1))
        return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
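      /* E.g. (mult:SI x (const_int 8)) becomes
         (ashift:SI x (const_int 3)).  */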
      /* Likewise for multipliers wider than a word.  */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && (GET_MODE (trueop1) == VOIDmode
              || GET_MODE_CLASS (GET_MODE (trueop1)) == MODE_INT)
          && GET_MODE (op0) == mode
          && CONST_DOUBLE_LOW (trueop1) == 0
          && (val = exact_log2 (CONST_DOUBLE_HIGH (trueop1))) >= 0)
        return simplify_gen_binary (ASHIFT, mode, op0,
                                    GEN_INT (val + HOST_BITS_PER_WIDE_INT));
      /* x*2 is x+x and x*(-1) is -x */
      if (GET_CODE (trueop1) == CONST_DOUBLE
          && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
          && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
          && GET_MODE (op0) == mode)
        {
          REAL_VALUE_TYPE d;
          REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

          if (REAL_VALUES_EQUAL (d, dconst2))
            return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

          if (!HONOR_SNANS (mode)
              && REAL_VALUES_EQUAL (d, dconstm1))
            return simplify_gen_unary (NEG, mode, op0, mode);
        }
      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
          && GET_CODE (op0) == NEG
          && GET_CODE (op1) == NEG
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
          && GET_CODE (op0) == ABS
          && GET_CODE (op1) == ABS
          && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
          && !side_effects_p (XEXP (op0, 0)))
        return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
      /* Reassociate multiplication, but for floating point MULTs
         only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
          || flag_unsafe_math_optimizations)
        {
          tem = simplify_associative_operation (code, mode, op0, op1);
          if (tem)
            return tem;
        }
      break;

    case IOR:
== const0_rtx
)
2140 if (CONST_INT_P (trueop1
)
2141 && ((INTVAL (trueop1
) & GET_MODE_MASK (mode
))
2142 == GET_MODE_MASK (mode
)))
2144 if (rtx_equal_p (trueop0
, trueop1
) && ! side_effects_p (op0
))
2146 /* A | (~A) -> -1 */
2147 if (((GET_CODE (op0
) == NOT
&& rtx_equal_p (XEXP (op0
, 0), op1
))
2148 || (GET_CODE (op1
) == NOT
&& rtx_equal_p (XEXP (op1
, 0), op0
)))
2149 && ! side_effects_p (op0
)
2150 && SCALAR_INT_MODE_P (mode
))
      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (op0, mode) & ~INTVAL (op1)) == 0)
        return op1;
      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
          && CONST_INT_P (trueop1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          HOST_WIDE_INT mask = GET_MODE_MASK (mode);
          HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT c2 = INTVAL (trueop1);

          /* If (C1&C2) == C1, then (X&C1)|C2 becomes X.  */
          if ((c1 & c2) == c1
              && !side_effects_p (XEXP (op0, 0)))
            return trueop1;

          /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
          if (((c1 | c2) & mask) == mask)
            return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

          /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
          if (((c1 & ~c2) & mask) != (c1 & mask))
            {
              tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
                                         gen_int_mode (c1 & ~c2, mode));
              return simplify_gen_binary (IOR, mode, tem, op1);
            }
        }
2186 /* Convert (A & B) | A to A. */
2187 if (GET_CODE (op0
) == AND
2188 && (rtx_equal_p (XEXP (op0
, 0), op1
)
2189 || rtx_equal_p (XEXP (op0
, 1), op1
))
2190 && ! side_effects_p (XEXP (op0
, 0))
2191 && ! side_effects_p (XEXP (op0
, 1)))
2194 /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2195 mode size to (rotate A CX). */
2197 if (GET_CODE (op1
) == ASHIFT
2198 || GET_CODE (op1
) == SUBREG
)
2209 if (GET_CODE (opleft
) == ASHIFT
&& GET_CODE (opright
) == LSHIFTRT
2210 && rtx_equal_p (XEXP (opleft
, 0), XEXP (opright
, 0))
2211 && CONST_INT_P (XEXP (opleft
, 1))
2212 && CONST_INT_P (XEXP (opright
, 1))
2213 && (INTVAL (XEXP (opleft
, 1)) + INTVAL (XEXP (opright
, 1))
2214 == GET_MODE_BITSIZE (mode
)))
2215 return gen_rtx_ROTATE (mode
, XEXP (opright
, 0), XEXP (opleft
, 1));
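      /* For instance, in SImode, (ior (ashift x (const_int 8))
         (lshiftrt x (const_int 24))) satisfies 8 + 24 == 32 and so
         becomes (rotate x (const_int 8)).  */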
      /* Same, but for ashift that has been "simplified" to a wider mode
         by simplify_shift_const.  */
      if (GET_CODE (opleft) == SUBREG
          && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
          && GET_CODE (opright) == LSHIFTRT
          && GET_CODE (XEXP (opright, 0)) == SUBREG
          && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
          && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
          && (GET_MODE_SIZE (GET_MODE (opleft))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
          && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
                          SUBREG_REG (XEXP (opright, 0)))
          && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
          && CONST_INT_P (XEXP (opright, 1))
          && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
              + INTVAL (XEXP (opright, 1))
              == GET_MODE_BITSIZE (mode)))
        return gen_rtx_ROTATE (mode, XEXP (opright, 0),
                               XEXP (SUBREG_REG (opleft), 1));
      /* If we have (ior (and (X C1) C2)), simplify this by making
         C1 as small as possible if C1 actually changes.  */
      if (CONST_INT_P (op1)
          && (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
              || INTVAL (op1) > 0)
          && GET_CODE (op0) == AND
          && CONST_INT_P (XEXP (op0, 1))
          && CONST_INT_P (op1)
          && (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) != 0)
        return simplify_gen_binary (IOR, mode,
                                    simplify_gen_binary
                                          (AND, mode, XEXP (op0, 0),
                                           GEN_INT (INTVAL (XEXP (op0, 1))
                                                    & ~INTVAL (op1))),
                                    op1);

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
         a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
         the PLUS does not affect any of the bits in OP1: then we can do
         the IOR as a PLUS and we can associate.  This is valid if OP1
         can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
          && GET_CODE (XEXP (op0, 0)) == PLUS
          && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
          && CONST_INT_P (XEXP (op0, 1))
          && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
        {
          int count = INTVAL (XEXP (op0, 1));
          HOST_WIDE_INT mask = INTVAL (trueop1) << count;

          if (mask >> count == INTVAL (trueop1)
              && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
            return simplify_gen_binary (ASHIFTRT, mode,
                                        plus_constant (XEXP (op0, 0), mask),
                                        XEXP (op0, 1));
        }

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case XOR:
      if (trueop1 == const0_rtx)
        return op0;
      if (CONST_INT_P (trueop1)
          && ((INTVAL (trueop1) & GET_MODE_MASK (mode))
              == GET_MODE_MASK (mode)))
        return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if ((CONST_INT_P (op1)
           || GET_CODE (op1) == CONST_DOUBLE)
          && mode_signbit_p (mode, op1))
        return simplify_gen_binary (PLUS, mode, op0, op1);
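      /* Flipping the sign bit with XOR and adding the sign bit are the
         same operation: e.g. in SImode, the carry out of bit 31 produced
         by (plus x (const_int -2147483648)) falls off the top of the
         mode, so only bit 31 is inverted, exactly as XOR would do;
         PLUS is the canonical form.  */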
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if ((CONST_INT_P (op1)
           || GET_CODE (op1) == CONST_DOUBLE)
          && GET_CODE (op0) == PLUS
          && (CONST_INT_P (XEXP (op0, 1))
              || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE)
          && mode_signbit_p (mode, XEXP (op0, 1)))
        return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, mode, op1,
                                                         XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
         convert them into an IOR.  This helps to detect rotation encoded
         using those methods and possibly other simplifications.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (nonzero_bits (op0, mode)
              & nonzero_bits (op1, mode)) == 0)
        return (simplify_gen_binary (IOR, mode, op0, op1));
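      /* When nonzero_bits proves the operands share no bits, each result
         bit comes from exactly one operand, so XOR and IOR compute the
         same value; IOR is the form the rotate detection above looks
         for.  */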
      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
         Also convert (XOR (NOT x) y) to (NOT (XOR x y)), and similarly
         for (XOR x (NOT y)).  */
      {
        int num_negated = 0;

        if (GET_CODE (op0) == NOT)
          num_negated++, op0 = XEXP (op0, 0);
        if (GET_CODE (op1) == NOT)
          num_negated++, op1 = XEXP (op1, 0);

        if (num_negated == 2)
          return simplify_gen_binary (XOR, mode, op0, op1);
        else if (num_negated == 1)
          return simplify_gen_unary (NOT, mode,
                                     simplify_gen_binary (XOR, mode,
                                                          op0, op1),
                                     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
         correspond to a machine insn or result in further simplifications
         if B is a constant.  */
      if (GET_CODE (op0) == AND
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      else if (GET_CODE (op0) == AND
               && rtx_equal_p (XEXP (op0, 0), op1)
               && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
         comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
         is (lt foo (const_int 0)), so we can perform the above
         simplification if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
          && trueop1 == const1_rtx
          && GET_CODE (op0) == LSHIFTRT
          && CONST_INT_P (XEXP (op0, 1))
          && INTVAL (XEXP (op0, 1)) == GET_MODE_BITSIZE (mode) - 1)
        return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
         when STORE_FLAG_VALUE is the sign bit.  */
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && ((STORE_FLAG_VALUE & GET_MODE_MASK (mode))
              == (unsigned HOST_WIDE_INT) 1
                 << (GET_MODE_BITSIZE (mode) - 1))
          && trueop1 == const_true_rtx
          && COMPARISON_P (op0)
          && (reversed = reversed_comparison (op0, mode)))
        return reversed;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return trueop1;
      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
        {
          HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
          HOST_WIDE_INT nzop1;
          if (CONST_INT_P (trueop1))
            {
              HOST_WIDE_INT val1 = INTVAL (trueop1);
              /* If we are turning off bits already known off in OP0, we need
                 not do an AND.  */
              if ((nzop0 & ~val1) == 0)
                return op0;
            }
          nzop1 = nonzero_bits (trueop1, mode);
          /* If we are clearing all the nonzero bits, the result is zero.  */
          if ((nzop1 & nzop0) == 0
              && !side_effects_p (op0) && !side_effects_p (op1))
            return CONST0_RTX (mode);
        }
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
           || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
          && ! side_effects_p (op0)
          && GET_MODE_CLASS (mode) != MODE_CC)
        return CONST0_RTX (mode);

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
         there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
           || GET_CODE (op0) == ZERO_EXTEND)
          && CONST_INT_P (trueop1)
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
              & INTVAL (trueop1)) == 0)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
                                     gen_int_mode (INTVAL (trueop1),
                                                   imode));
          return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
        }

      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
         we might be able to further simplify the AND with X and potentially
         remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
        {
          rtx x = XEXP (op0, 0);
          enum machine_mode xmode = GET_MODE (x);
          tem = simplify_gen_binary (AND, xmode, x,
                                     gen_int_mode (INTVAL (trueop1), xmode));
          return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
        }

      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
          && CONST_INT_P (trueop1)
          && CONST_INT_P (XEXP (op0, 1)))
        {
          HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
          return simplify_gen_binary (IOR, mode,
                                      simplify_gen_binary (AND, mode,
                                                           XEXP (op0, 0),
                                                           op1),
                                      gen_int_mode (tmp, mode));
        }

      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
         insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 1), mode),
                                    op1);

      if (GET_CODE (op0) == XOR
          && rtx_equal_p (XEXP (op0, 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode,
                                    simplify_gen_unary (NOT, mode,
                                                        XEXP (op0, 0), mode),
                                    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
          && GET_CODE (XEXP (op0, 0)) == XOR
          && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
          && ! side_effects_p (op1))
        return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
          && (rtx_equal_p (XEXP (op0, 0), op1)
              || rtx_equal_p (XEXP (op0, 1), op1))
          && ! side_effects_p (XEXP (op0, 0))
          && ! side_effects_p (XEXP (op0, 1)))
        return op1;
      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
         ((A & N) + B) & M -> (A + B) & M
         Similarly if (N & M) == 0,
         ((A | N) + B) & M -> (A + B) & M
         and for - instead of + and/or ^ instead of |.
         Also, if (N & M) == 0, then
         (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
          && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
          && ~INTVAL (trueop1)
          && (INTVAL (trueop1) & (INTVAL (trueop1) + 1)) == 0
          && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
        {
          rtx pmop[2];
          int which;

          pmop[0] = XEXP (op0, 0);
          pmop[1] = XEXP (op0, 1);

          if (CONST_INT_P (pmop[1])
              && (INTVAL (pmop[1]) & INTVAL (trueop1)) == 0)
            return simplify_gen_binary (AND, mode, pmop[0], op1);

          for (which = 0; which < 2; which++)
            {
              tem = pmop[which];
              switch (GET_CODE (tem))
                {
                case AND:
                  if (CONST_INT_P (XEXP (tem, 1))
                      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1))
                         == INTVAL (trueop1))
                    pmop[which] = XEXP (tem, 0);
                  break;
                case IOR:
                case XOR:
                  if (CONST_INT_P (XEXP (tem, 1))
                      && (INTVAL (XEXP (tem, 1)) & INTVAL (trueop1)) == 0)
                    pmop[which] = XEXP (tem, 0);
                  break;
                default:
                  break;
                }
            }

          if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
            {
              tem = simplify_gen_binary (GET_CODE (op0), mode,
                                         pmop[0], pmop[1]);
              return simplify_gen_binary (code, mode, tem, op1);
            }
        }
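      /* Illustration of the AND arm: with M == 0xff and N == 0x1ff we
         have (N & M) == M, and carries in an addition only propagate
         upward, so the low eight bits of ((A & N) + B) agree with those
         of (A + B); the outer mask M discards everything else, making
         the inner AND redundant.  */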
      /* (and X (ior (not X) Y)) -> (and X Y) */
      if (GET_CODE (op1) == IOR
          && GET_CODE (XEXP (op1, 0)) == NOT
          && op0 == XEXP (XEXP (op1, 0), 0))
        return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
          && GET_CODE (XEXP (op0, 0)) == NOT
          && op1 == XEXP (XEXP (op0, 0), 0))
        return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
        return rtl_hooks.gen_lowpart_no_emit (mode, op0);
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
          && (val = exact_log2 (INTVAL (trueop1))) > 0)
        return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;
    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          /* Maybe change 0.0 / x to 0.0.  This transformation isn't
             safe for modes with NaNs, since 0.0 / 0.0 will then be
             NaN rather than 0.0.  Nor is it safe for modes with signed
             zeros, since dividing 0 by a negative number gives -0.0.  */
          if (trueop0 == CONST0_RTX (mode)
              && !HONOR_NANS (mode)
              && !HONOR_SIGNED_ZEROS (mode)
              && ! side_effects_p (op1))
            return op0;
          /* x/1.0 is x.  */
          if (trueop1 == CONST1_RTX (mode)
              && !HONOR_SNANS (mode))
            return op0;

          if (GET_CODE (trueop1) == CONST_DOUBLE
              && trueop1 != CONST0_RTX (mode))
            {
              REAL_VALUE_TYPE d;
              REAL_VALUE_FROM_CONST_DOUBLE (d, trueop1);

              /* x/-1.0 is -x.  */
              if (REAL_VALUES_EQUAL (d, dconstm1)
                  && !HONOR_SNANS (mode))
                return simplify_gen_unary (NEG, mode, op0, mode);

              /* Change FP division by a constant into multiplication.
                 Only do this with -freciprocal-math.  */
              if (flag_reciprocal_math
                  && !REAL_VALUES_EQUAL (d, dconst0))
                {
                  REAL_ARITHMETIC (d, RDIV_EXPR, dconst1, d);
                  tem = CONST_DOUBLE_FROM_REAL_VALUE (d, mode);
                  return simplify_gen_binary (MULT, mode, op0, tem);
                }
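              /* For example, x/4.0 becomes x*0.25, with the reciprocal
                 computed once at compile time by REAL_ARITHMETIC above.
                 When 1/d is not exactly representable the product can
                 differ from the division by a rounding step, which is
                 why this is gated on -freciprocal-math.  */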
            }
        }
      else
        {
          /* 0/x is 0 (or x&0 if x has side-effects).  */
          if (trueop0 == CONST0_RTX (mode))
            {
              if (side_effects_p (op1))
                return simplify_gen_binary (AND, mode, op1, trueop0);
              return trueop0;
            }
          /* x/1 is x.  */
          if (trueop1 == CONST1_RTX (mode))
            return rtl_hooks.gen_lowpart_no_emit (mode, op0);
          /* x/-1 is -x.  */
          if (trueop1 == constm1_rtx)
            {
              rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
              return simplify_gen_unary (NEG, mode, x, mode);
            }
        }
      break;

    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
          && exact_log2 (INTVAL (trueop1)) > 0)
        return simplify_gen_binary (AND, mode, op0,
                                    GEN_INT (INTVAL (op1) - 1));
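      /* E.g. (umod x (const_int 8)) becomes (and x (const_int 7)):
         for a power of two 2**k the unsigned remainder is just the low
         k bits.  This holds only for unsigned modulus, which is why
         the MOD case below does not perform it.  */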
      break;

    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
        {
          if (side_effects_p (op1))
            return simplify_gen_binary (AND, mode, op1, trueop0);
          return trueop0;
        }
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
        {
          if (side_effects_p (op0))
            return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
          return CONST0_RTX (mode);
        }
      break;

    case ROTATERT:
    case ROTATE:
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
          && (unsigned HOST_WIDE_INT) INTVAL (trueop0)
             == GET_MODE_MASK (mode)
          && ! side_effects_p (op1))
        return op0;

    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
        {
          val = INTVAL (op1) & (GET_MODE_BITSIZE (mode) - 1);
          if (val != INTVAL (op1))
            return simplify_gen_binary (code, mode, op0, GEN_INT (val));
        }
      break;
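      /* On a SHIFT_COUNT_TRUNCATED target with a 32-bit mode, where the
         hardware uses only the low five bits of the count, a count of
         33 is reduced to 33 & 31 == 1 here so later passes see the
         canonical form.  */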
    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      goto canonicalize_shift;

    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
        return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
        return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
          && CONST_INT_P (trueop1)
          && STORE_FLAG_VALUE == 1
          && INTVAL (trueop1) < (HOST_WIDE_INT) width)
        {
          enum machine_mode imode = GET_MODE (XEXP (op0, 0));
          unsigned HOST_WIDE_INT zero_val = 0;

          if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
              && zero_val == GET_MODE_BITSIZE (imode)
              && INTVAL (trueop1) == exact_log2 (zero_val))
            return simplify_gen_relational (EQ, mode, imode,
                                            XEXP (op0, 0), const0_rtx);
        }
      goto canonicalize_shift;

    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
          && CONST_INT_P (trueop1)
          && INTVAL (trueop1) == (HOST_WIDE_INT) 1 << (width - 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
          && CONST_INT_P (trueop1)
          && ((unsigned HOST_WIDE_INT) INTVAL (trueop1)
              == (unsigned HOST_WIDE_INT) GET_MODE_MASK (mode) >> 1)
          && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
        return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
        return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
        return tem;
      break;

    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
    case SS_MULT:
    case US_MULT:
    case SS_DIV:
    case US_DIV:
      /* ??? There are simplifications that can be done.  */
      return 0;
    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);
          gcc_assert (XVECLEN (trueop1, 0) == 1);
          gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));

          if (GET_CODE (trueop0) == CONST_VECTOR)
            return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
                                                      (trueop1, 0, 0)));

          /* Extract a scalar element from a nested VEC_SELECT expression
             (with optional nested VEC_CONCAT expression).  Some targets
             (i386) extract a scalar element from a vector using a chain
             of nested VEC_SELECT expressions.  When the input operand is
             a memory operand, this operation can be simplified to a
             simple scalar load from an offsetted memory address.  */
          if (GET_CODE (trueop0) == VEC_SELECT)
            {
              rtx op0 = XEXP (trueop0, 0);
              rtx op1 = XEXP (trueop0, 1);

              enum machine_mode opmode = GET_MODE (op0);
              int elt_size = GET_MODE_SIZE (GET_MODE_INNER (opmode));
              int n_elts = GET_MODE_SIZE (opmode) / elt_size;

              int i = INTVAL (XVECEXP (trueop1, 0, 0));
              int elem;

              rtvec vec;
              rtx tmp_op, tmp;

              gcc_assert (GET_CODE (op1) == PARALLEL);
              gcc_assert (i < n_elts);

              /* Select the element pointed to by the nested selector.  */
              elem = INTVAL (XVECEXP (op1, 0, i));

              /* Handle the case when the nested VEC_SELECT wraps a
                 VEC_CONCAT.  */
              if (GET_CODE (op0) == VEC_CONCAT)
                {
                  rtx op00 = XEXP (op0, 0);
                  rtx op01 = XEXP (op0, 1);

                  enum machine_mode mode00, mode01;
                  int n_elts00, n_elts01;

                  mode00 = GET_MODE (op00);
                  mode01 = GET_MODE (op01);

                  /* Find out the number of elements of each operand.  */
                  if (VECTOR_MODE_P (mode00))
                    {
                      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode00));
                      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
                    }
                  else
                    n_elts00 = 1;

                  if (VECTOR_MODE_P (mode01))
                    {
                      elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode01));
                      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
                    }
                  else
                    n_elts01 = 1;

                  gcc_assert (n_elts == n_elts00 + n_elts01);

                  /* Select the correct operand of VEC_CONCAT
                     and adjust the selector.  */
                  if (elem < n_elts01)
                    tmp_op = op00;
                  else
                    {
                      tmp_op = op01;
                      elem -= n_elts00;
                    }
                }
              else
                tmp_op = op0;

              vec = rtvec_alloc (1);
              RTVEC_ELT (vec, 0) = GEN_INT (elem);

              tmp = gen_rtx_fmt_ee (code, mode,
                                    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
              return tmp;
            }
        }
      else
        {
          gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (GET_MODE (trueop0)));
          gcc_assert (GET_CODE (trueop1) == PARALLEL);

          if (GET_CODE (trueop0) == CONST_VECTOR)
            {
              int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
              unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
              rtvec v = rtvec_alloc (n_elts);
              unsigned int i;

              gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
              for (i = 0; i < n_elts; i++)
                {
                  rtx x = XVECEXP (trueop1, 0, i);

                  gcc_assert (CONST_INT_P (x));
                  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
                                                       INTVAL (x));
                }

              return gen_rtx_CONST_VECTOR (mode, v);
            }
        }

      if (XVECLEN (trueop1, 0) == 1
          && CONST_INT_P (XVECEXP (trueop1, 0, 0))
          && GET_CODE (trueop0) == VEC_CONCAT)
        {
          rtx vec = trueop0;
          int offset = INTVAL (XVECEXP (trueop1, 0, 0))
                       * GET_MODE_SIZE (mode);

          /* Try to find the element in the VEC_CONCAT.  */
          while (GET_MODE (vec) != mode
                 && GET_CODE (vec) == VEC_CONCAT)
            {
              HOST_WIDE_INT vec_size
                = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
              if (offset < vec_size)
                vec = XEXP (vec, 0);
              else
                {
                  offset -= vec_size;
                  vec = XEXP (vec, 1);
                }
              vec = avoid_constant_pool_reference (vec);
            }

          if (GET_MODE (vec) == mode)
            return vec;
        }

      return 0;
    case VEC_CONCAT:
      {
        enum machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
                                      ? GET_MODE (trueop0)
                                      : GET_MODE_INNER (mode));
        enum machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
                                      ? GET_MODE (trueop1)
                                      : GET_MODE_INNER (mode));

        gcc_assert (VECTOR_MODE_P (mode));
        gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
                    == GET_MODE_SIZE (mode));

        if (VECTOR_MODE_P (op0_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op0_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op0_mode);

        if (VECTOR_MODE_P (op1_mode))
          gcc_assert (GET_MODE_INNER (mode)
                      == GET_MODE_INNER (op1_mode));
        else
          gcc_assert (GET_MODE_INNER (mode) == op1_mode);

        if ((GET_CODE (trueop0) == CONST_VECTOR
             || CONST_INT_P (trueop0)
             || GET_CODE (trueop0) == CONST_DOUBLE)
            && (GET_CODE (trueop1) == CONST_VECTOR
                || CONST_INT_P (trueop1)
                || GET_CODE (trueop1) == CONST_DOUBLE))
          {
            int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
            unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
            rtvec v = rtvec_alloc (n_elts);
            unsigned int i;
            unsigned in_n_elts = 1;

            if (VECTOR_MODE_P (op0_mode))
              in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
            for (i = 0; i < n_elts; i++)
              {
                if (i < in_n_elts)
                  {
                    if (!VECTOR_MODE_P (op0_mode))
                      RTVEC_ELT (v, i) = trueop0;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
                  }
                else
                  {
                    if (!VECTOR_MODE_P (op1_mode))
                      RTVEC_ELT (v, i) = trueop1;
                    else
                      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
                                                           i - in_n_elts);
                  }
              }

            return gen_rtx_CONST_VECTOR (mode, v);
          }
      }
      return 0;

    default:
      gcc_unreachable ();
    }

  return 0;
}
/* Fold a binary operation CODE on the constant operands OP0 and OP1
   in MODE; return 0 if the operation cannot be folded.  */

rtx
simplify_const_binary_operation (enum rtx_code code, enum machine_mode mode,
                                 rtx op0, rtx op1)
{
  HOST_WIDE_INT arg0, arg1, arg0s, arg1s;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_BITSIZE (mode);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      enum machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      enum machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
                                             CONST_VECTOR_ELT (op0, i),
                                             CONST_VECTOR_ELT (op1, i));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_INT_P (op0)
          || GET_CODE (op0) == CONST_DOUBLE
          || GET_CODE (op0) == CONST_FIXED)
      && (CONST_INT_P (op1)
          || GET_CODE (op1) == CONST_DOUBLE
          || GET_CODE (op1) == CONST_FIXED))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
        {
          gcc_assert (GET_CODE (op0) != CONST_VECTOR);
          gcc_assert (GET_CODE (op1) != CONST_VECTOR);

          RTVEC_ELT (v, 0) = op0;
          RTVEC_ELT (v, 1) = op1;
        }
      else
        {
          unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
          unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
          unsigned i;

          gcc_assert (GET_CODE (op0) == CONST_VECTOR);
          gcc_assert (GET_CODE (op1) == CONST_VECTOR);
          gcc_assert (op0_n_elts + op1_n_elts == n_elts);

          for (i = 0; i < op0_n_elts; ++i)
            RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
          for (i = 0; i < op1_n_elts; ++i)
            RTVEC_ELT (v, op0_n_elts + i) = XVECEXP (op1, 0, i);
        }

      return gen_rtx_CONST_VECTOR (mode, v);
    }

  if (SCALAR_FLOAT_MODE_P (mode)
      && GET_CODE (op0) == CONST_DOUBLE
      && GET_CODE (op1) == CONST_DOUBLE
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
          || code == IOR
          || code == XOR)
        {
          long tmp0[4];
          long tmp1[4];
          REAL_VALUE_TYPE r;
          int i;

          real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
                          GET_MODE (op0));
          real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
                          GET_MODE (op1));
          for (i = 0; i < 4; i++)
            {
              switch (code)
                {
                case AND:
                  tmp0[i] &= tmp1[i];
                  break;
                case IOR:
                  tmp0[i] |= tmp1[i];
                  break;
                case XOR:
                  tmp0[i] ^= tmp1[i];
                  break;
                default:
                  gcc_unreachable ();
                }
            }
          real_from_target (&r, tmp0, mode);
          return CONST_DOUBLE_FROM_REAL_VALUE (r, mode);
        }
      else
        {
          REAL_VALUE_TYPE f0, f1, value, result;
          bool inexact;

          REAL_VALUE_FROM_CONST_DOUBLE (f0, op0);
          REAL_VALUE_FROM_CONST_DOUBLE (f1, op1);
          real_convert (&f0, mode, &f0);
          real_convert (&f1, mode, &f1);

          if (HONOR_SNANS (mode)
              && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
            return 0;

          if (code == DIV
              && REAL_VALUES_EQUAL (f1, dconst0)
              && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
            return 0;

          if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
            {
              int s0 = REAL_VALUE_NEGATIVE (f0);
              int s1 = REAL_VALUE_NEGATIVE (f1);

              switch (code)
                {
                case PLUS:
                  /* Inf + -Inf = NaN plus exception.  */
                  if (s0 != s1)
                    return 0;
                  break;
                case MINUS:
                  /* Inf - Inf = NaN plus exception.  */
                  if (s0 == s1)
                    return 0;
                  break;
                case DIV:
                  /* Inf / Inf = NaN plus exception.  */
                  return 0;
                default:
                  break;
                }
            }

          if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
              && flag_trapping_math
              && ((REAL_VALUE_ISINF (f0) && REAL_VALUES_EQUAL (f1, dconst0))
                  || (REAL_VALUE_ISINF (f1)
                      && REAL_VALUES_EQUAL (f0, dconst0))))
            /* Inf * 0 = NaN plus exception.  */
            return 0;

          inexact = real_arithmetic (&value, rtx_to_tree_code (code),
                                     &f0, &f1);
          real_convert (&result, mode, &value);

          /* Don't constant fold this floating point operation if
             the result has overflowed and flag_trapping_math.  */
          if (flag_trapping_math
              && MODE_HAS_INFINITIES (mode)
              && REAL_VALUE_ISINF (result)
              && !REAL_VALUE_ISINF (f0)
              && !REAL_VALUE_ISINF (f1))
            /* Overflow plus exception.  */
            return 0;

          /* Don't constant fold this floating point operation if the
             result may depend upon the run-time rounding mode and
             flag_rounding_math is set, or if GCC's software emulation
             is unable to accurately represent the result.  */
          if ((flag_rounding_math
               || (MODE_COMPOSITE_P (mode)
                   && !flag_unsafe_math_optimizations))
              && (inexact || !real_identical (&result, &value)))
            return 0;

          return CONST_DOUBLE_FROM_REAL_VALUE (result, mode);
        }
    }
  /* We can fold some multi-word operations.  */
  if (GET_MODE_CLASS (mode) == MODE_INT
      && width == HOST_BITS_PER_WIDE_INT * 2
      && (GET_CODE (op0) == CONST_DOUBLE || CONST_INT_P (op0))
      && (GET_CODE (op1) == CONST_DOUBLE || CONST_INT_P (op1)))
    {
      unsigned HOST_WIDE_INT l1, l2, lv, lt;
      HOST_WIDE_INT h1, h2, hv, ht;

      if (GET_CODE (op0) == CONST_DOUBLE)
        l1 = CONST_DOUBLE_LOW (op0), h1 = CONST_DOUBLE_HIGH (op0);
      else
        l1 = INTVAL (op0), h1 = HWI_SIGN_EXTEND (l1);

      if (GET_CODE (op1) == CONST_DOUBLE)
        l2 = CONST_DOUBLE_LOW (op1), h2 = CONST_DOUBLE_HIGH (op1);
      else
        l2 = INTVAL (op1), h2 = HWI_SIGN_EXTEND (l2);

      switch (code)
        {
        case MINUS:
          /* A - B == A + (-B).  */
          neg_double (l2, h2, &lv, &hv);
          l2 = lv, h2 = hv;

          /* Fall through....  */

        case PLUS:
          add_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case MULT:
          mul_double (l1, h1, l2, h2, &lv, &hv);
          break;

        case DIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case MOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 0, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case UDIV:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lv, &hv, &lt, &ht))
            return 0;
          break;

        case UMOD:
          if (div_and_round_double (TRUNC_DIV_EXPR, 1, l1, h1, l2, h2,
                                    &lt, &ht, &lv, &hv))
            return 0;
          break;

        case AND:
          lv = l1 & l2, hv = h1 & h2;
          break;

        case IOR:
          lv = l1 | l2, hv = h1 | h2;
          break;

        case XOR:
          lv = l1 ^ l2, hv = h1 ^ h2;
          break;

        case SMIN:
          if (h1 < h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case SMAX:
          if (h1 > h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMIN:
          if ((unsigned HOST_WIDE_INT) h1 < (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      < (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case UMAX:
          if ((unsigned HOST_WIDE_INT) h1 > (unsigned HOST_WIDE_INT) h2
              || (h1 == h2
                  && ((unsigned HOST_WIDE_INT) l1
                      > (unsigned HOST_WIDE_INT) l2)))
            lv = l1, hv = h1;
          else
            lv = l2, hv = h2;
          break;

        case LSHIFTRT:   case ASHIFTRT:
        case ASHIFT:
        case ROTATE:     case ROTATERT:
          if (SHIFT_COUNT_TRUNCATED)
            l2 &= (GET_MODE_BITSIZE (mode) - 1), h2 = 0;

          if (h2 != 0 || l2 >= GET_MODE_BITSIZE (mode))
            return 0;

          if (code == LSHIFTRT || code == ASHIFTRT)
            rshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv,
                           code == ASHIFTRT);
          else if (code == ASHIFT)
            lshift_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv, 1);
          else if (code == ROTATE)
            lrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          else /* code == ROTATERT */
            rrotate_double (l1, h1, l2, GET_MODE_BITSIZE (mode), &lv, &hv);
          break;

        default:
          return 0;
        }

      return immed_double_const (lv, hv, mode);
    }
  if (CONST_INT_P (op0) && CONST_INT_P (op1)
      && width <= HOST_BITS_PER_WIDE_INT && width != 0)
    {
      /* Get the integer argument values in two forms:
         zero-extended in ARG0, ARG1 and sign-extended in ARG0S, ARG1S.  */

      arg0 = INTVAL (op0);
      arg1 = INTVAL (op1);

      if (width < HOST_BITS_PER_WIDE_INT)
        {
          arg0 &= ((HOST_WIDE_INT) 1 << width) - 1;
          arg1 &= ((HOST_WIDE_INT) 1 << width) - 1;

          arg0s = arg0;
          if (arg0s & ((HOST_WIDE_INT) 1 << (width - 1)))
            arg0s |= ((HOST_WIDE_INT) (-1) << width);

          arg1s = arg1;
          if (arg1s & ((HOST_WIDE_INT) 1 << (width - 1)))
            arg1s |= ((HOST_WIDE_INT) (-1) << width);
        }
      else
        {
          arg0s = arg0;
          arg1s = arg1;
        }
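      /* E.g. for width == 8 the value 0xff gives arg0 == 255
         zero-extended, while arg0s == -1: bit 7 is set, so ORing in
         (-1 << 8) fills the upper host-word bits with ones.  */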
      /* Compute the value of the arithmetic.  */

      switch (code)
        {
        case PLUS:
          val = arg0s + arg1s;
          break;

        case MINUS:
          val = arg0s - arg1s;
          break;

        case MULT:
          val = arg0s * arg1s;
          break;

        case DIV:
          if (arg1s == 0
              || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = arg0s / arg1s;
          break;

        case MOD:
          if (arg1s == 0
              || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = arg0s % arg1s;
          break;

        case UDIV:
          if (arg1 == 0
              || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = (unsigned HOST_WIDE_INT) arg0 / arg1;
          break;

        case UMOD:
          if (arg1 == 0
              || (arg0s == (HOST_WIDE_INT) 1 << (HOST_BITS_PER_WIDE_INT - 1)
                  && arg1s == -1))
            return 0;
          val = (unsigned HOST_WIDE_INT) arg0 % arg1;
          break;

        case AND:
          val = arg0 & arg1;
          break;

        case IOR:
          val = arg0 | arg1;
          break;

        case XOR:
          val = arg0 ^ arg1;
          break;

        case LSHIFTRT:
        case ASHIFT:
        case ASHIFTRT:
          /* Truncate the shift if SHIFT_COUNT_TRUNCATED, otherwise make sure
             the value is in range.  We can't return any old value for
             out-of-range arguments because either the middle-end (via
             shift_truncation_mask) or the back-end might be relying on
             target-specific knowledge.  Nor can we rely on
             shift_truncation_mask, since the shift might not be part of an
             ashlM3, lshrM3 or ashrM3 instruction.  */
          if (SHIFT_COUNT_TRUNCATED)
            arg1 = (unsigned HOST_WIDE_INT) arg1 % width;
          else if (arg1 < 0 || arg1 >= GET_MODE_BITSIZE (mode))
            return 0;

          val = (code == ASHIFT
                 ? ((unsigned HOST_WIDE_INT) arg0) << arg1
                 : ((unsigned HOST_WIDE_INT) arg0) >> arg1);

          /* Sign-extend the result for arithmetic right shifts.  */
          if (code == ASHIFTRT && arg0s < 0 && arg1 > 0)
            val |= ((HOST_WIDE_INT) -1) << (width - arg1);
          break;

        case ROTATERT:
          if (arg1 < 0)
            return 0;

          arg1 %= width;
          val = ((((unsigned HOST_WIDE_INT) arg0) << (width - arg1))
                 | (((unsigned HOST_WIDE_INT) arg0) >> arg1));
          break;

        case ROTATE:
          if (arg1 < 0)
            return 0;

          arg1 %= width;
          val = ((((unsigned HOST_WIDE_INT) arg0) << arg1)
                 | (((unsigned HOST_WIDE_INT) arg0) >> (width - arg1)));
          break;

        case COMPARE:
          /* Do nothing here.  */
          return 0;

        case SMIN:
          val = arg0s <= arg1s ? arg0s : arg1s;
          break;

        case UMIN:
          val = ((unsigned HOST_WIDE_INT) arg0
                 <= (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
          break;

        case SMAX:
          val = arg0s > arg1s ? arg0s : arg1s;
          break;

        case UMAX:
          val = ((unsigned HOST_WIDE_INT) arg0
                 > (unsigned HOST_WIDE_INT) arg1 ? arg0 : arg1);
          break;

        case SS_PLUS:
        case US_PLUS:
        case SS_MINUS:
        case US_MINUS:
        case SS_MULT:
        case US_MULT:
        case SS_DIV:
        case US_DIV:
        case SS_ASHIFT:
        case US_ASHIFT:
          /* ??? There are simplifications that can be done.  */
          return 0;

        default:
          gcc_unreachable ();
        }

      return gen_int_mode (val, mode);
    }

  return NULL_RTX;
}
/* Simplify a PLUS or MINUS, at least one of whose operands may be another
   PLUS or MINUS.

   Rather than test for specific cases, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.  */

struct simplify_plus_minus_op_data
{
  rtx op;
  short neg;
};

static bool
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
            - commutative_operand_precedence (x));
  if (result)
    return result > 0;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);
  else
    return false;
}

static rtx
simplify_plus_minus (enum rtx_code code, enum machine_mode mode, rtx op0,
                     rtx op1)
{
  struct simplify_plus_minus_op_data ops[8];
  rtx result, tem;
  int n_ops = 2, input_ops = 2;
  int changed, n_constants = 0, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);
= 0; i
< n_ops
; i
++)
3584 rtx this_op
= ops
[i
].op
;
3585 int this_neg
= ops
[i
].neg
;
3586 enum rtx_code this_code
= GET_CODE (this_op
);
3595 ops
[n_ops
].op
= XEXP (this_op
, 1);
3596 ops
[n_ops
].neg
= (this_code
== MINUS
) ^ this_neg
;
3599 ops
[i
].op
= XEXP (this_op
, 0);
3602 canonicalized
|= this_neg
;
3606 ops
[i
].op
= XEXP (this_op
, 0);
3607 ops
[i
].neg
= ! this_neg
;
3614 && GET_CODE (XEXP (this_op
, 0)) == PLUS
3615 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 0))
3616 && CONSTANT_P (XEXP (XEXP (this_op
, 0), 1)))
3618 ops
[i
].op
= XEXP (XEXP (this_op
, 0), 0);
3619 ops
[n_ops
].op
= XEXP (XEXP (this_op
, 0), 1);
3620 ops
[n_ops
].neg
= this_neg
;
3628 /* ~a -> (-a - 1) */
3631 ops
[n_ops
].op
= constm1_rtx
;
3632 ops
[n_ops
++].neg
= this_neg
;
3633 ops
[i
].op
= XEXP (this_op
, 0);
3634 ops
[i
].neg
= !this_neg
;
3644 ops
[i
].op
= neg_const_int (mode
, this_op
);
  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
         the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
        {
          lhs = gen_rtx_NEG (mode, ops[0].op);
          rhs = ops[1].op;
        }
      else if (ops[0].neg)
        {
          lhs = ops[1].op;
          rhs = ops[0].op;
        }
      else
        {
          lhs = ops[0].op;
          rhs = ops[1].op;
        }

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }

  /* Now simplify each pair of operands until nothing changes.  */
  do
    {
      /* Insertion sort is good enough for an eight-element array.  */
      for (i = 1; i < n_ops; i++)
        {
          struct simplify_plus_minus_op_data save;
          j = i - 1;
          if (!simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op))
            continue;

          canonicalized = 1;
          save = ops[i];
          do
            ops[j + 1] = ops[j];
          while (j-- && simplify_plus_minus_op_data_cmp (ops[j].op, save.op));
          ops[j + 1] = save;
        }

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
        for (j = i - 1; j >= 0; j--)
          {
            rtx lhs = ops[j].op, rhs = ops[i].op;
            int lneg = ops[j].neg, rneg = ops[i].neg;

            if (lhs != 0 && rhs != 0)
              {
                enum rtx_code ncode = PLUS;

                if (lneg != rneg)
                  {
                    ncode = MINUS;
                    if (lneg)
                      tem = lhs, lhs = rhs, rhs = tem;
                  }
                else if (swap_commutative_operands_p (lhs, rhs))
                  tem = lhs, lhs = rhs, rhs = tem;

                if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
                    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
                  {
                    rtx tem_lhs, tem_rhs;

                    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
                    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
                    tem = simplify_binary_operation (ncode, mode, tem_lhs,
                                                     tem_rhs);

                    if (tem && !CONSTANT_P (tem))
                      tem = gen_rtx_CONST (GET_MODE (tem), tem);
                  }
                else
                  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

                /* Reject "simplifications" that just wrap the two
                   arguments in a CONST.  Failure to do so can result
                   in infinite recursion with simplify_binary_operation
                   when it calls us to simplify CONST operations.  */
                if (tem
                    && ! (GET_CODE (tem) == CONST
                          && GET_CODE (XEXP (tem, 0)) == ncode
                          && XEXP (XEXP (tem, 0), 0) == lhs
                          && XEXP (XEXP (tem, 0), 1) == rhs))
                  {
                    lneg &= rneg;
                    if (GET_CODE (tem) == NEG)
                      tem = XEXP (tem, 0), lneg = !lneg;
                    if (CONST_INT_P (tem) && lneg)
                      tem = neg_const_int (mode, tem), lneg = 0;

                    ops[i].op = tem;
                    ops[i].neg = lneg;
                    ops[j].op = NULL_RTX;
                    changed = 1;
                    canonicalized = 1;
                  }
              }
          }

      /* If nothing changed, fail.  */
      if (!canonicalized)
        return NULL_RTX;
      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
        if (ops[j].op)
          {
            ops[i] = ops[j];
            i++;
          }
      n_ops = i;
    }
  while (changed);

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
        value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (ops[n_ops - 2].op, INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 0;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
                             mode, result, ops[i].op);

  return result;
}
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
         || GET_CODE (x) == MINUS
         || (GET_CODE (x) == CONST
             && GET_CODE (XEXP (x, 0)) == PLUS
             && CONSTANT_P (XEXP (XEXP (x, 0), 0))
             && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, the operands
   must not both be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */

rtx
simplify_relational_operation (enum rtx_code code, enum machine_mode mode,
                               enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          if (tem == const0_rtx)
            return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
          {
            REAL_VALUE_TYPE val;
            val = FLOAT_STORE_FLAG_VALUE (mode);
            return CONST_DOUBLE_FROM_REAL_VALUE (val, mode);
          }
#else
          return NULL_RTX;
#endif
        }
      if (VECTOR_MODE_P (mode))
        {
          if (tem == const0_rtx)
            return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
          {
            int i, units;
            rtvec v;

            rtx val = VECTOR_STORE_FLAG_VALUE (mode);
            if (val == NULL_RTX)
              return NULL_RTX;
            if (val == const1_rtx)
              return CONST1_RTX (mode);

            units = GET_MODE_NUNITS (mode);
            v = rtvec_alloc (units);
            for (i = 0; i < units; i++)
              RTVEC_ELT (v, i) = val;
            return gen_rtx_raw_CONST_VECTOR (mode, v);
          }
#else
          return NULL_RTX;
#endif
        }

      return tem;
    }

  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    tem = op0, op0 = op1, op1 = tem, code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC
      || CC0_P (op0))
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
                                          trueop0, trueop1);
}
/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE is the mode in which
   the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, enum machine_mode mode,
                                 enum machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);

  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
         from it.  */
      if (code == NE)
        {
          if (GET_MODE (op0) == mode)
            return simplify_rtx (op0);
          else
            return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
      else if (code == EQ)
        {
          enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
          if (new_code != UNKNOWN)
            return simplify_gen_relational (new_code, mode, VOIDmode,
                                            XEXP (op0, 0), XEXP (op0, 1));
        }
    }

  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
          || rtx_equal_p (op1, XEXP (op0, 1))))
    {
      rtx new_cmp
        = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
                                      cmp_mode, XEXP (op0, 0), new_cmp);
    }

  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
                                    XEXP (op0, 0));
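  /* The LTU form is the classic unsigned overflow test: for nonzero C,
     a + C computed in N bits is less than C exactly when the addition
     wrapped, i.e. when a >= 2**N - C, which is the (GEU a -C) that the
     rewrite above produces.  */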
  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
        return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
        return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
        {
        case GE:
          /* Canonicalize (GE x 1) as (GT x 0).  */
          return simplify_gen_relational (GT, mode, cmp_mode,
                                          op0, const0_rtx);
        case GEU:
          /* Canonicalize (GEU x 1) as (NE x 0).  */
          return simplify_gen_relational (NE, mode, cmp_mode,
                                          op0, const0_rtx);
        case LT:
          /* Canonicalize (LT x 1) as (LE x 0).  */
          return simplify_gen_relational (LE, mode, cmp_mode,
                                          op0, const0_rtx);
        case LTU:
          /* Canonicalize (LTU x 1) as (EQ x 0).  */
          return simplify_gen_relational (EQ, mode, cmp_mode,
                                          op0, const0_rtx);
        default:
          break;
        }
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
        return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
        return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }

  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)).  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);

      c = simplify_gen_binary (op0code == PLUS ? MINUS : PLUS,
                               cmp_mode, op1, c);
      return simplify_gen_relational (code, mode, cmp_mode, x, c);
    }

  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0)) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
           ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
           : lowpart_subreg (mode, op0, cmp_mode);

  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 1), const0_rtx);

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode,
                                    XEXP (op0, 0), const0_rtx);

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && (CONST_INT_P (op1)
          || GET_CODE (op1) == CONST_DOUBLE)
      && (CONST_INT_P (XEXP (op0, 1))
          || GET_CODE (XEXP (op0, 1)) == CONST_DOUBLE))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
                                    simplify_gen_binary (XOR, cmp_mode,
                                                         XEXP (op0, 1),
                                                         op1));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
        /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
        return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
                                        XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
        /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
        return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
                                        XEXP (op0, 0), const0_rtx);

      default:
        break;
      }

  return NULL_RTX;
}
enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};

/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
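/* For example, once the constant folding below establishes
   CMP_LT | CMP_LTU for a pair of operands, a request for GE or GEU
   yields const0_rtx here without re-examining the operands.  */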
/* Check if the given comparison (done in the given MODE) is actually a
   tautology or a contradiction.
   If no simplification is possible, this function returns zero.
   Otherwise, it returns either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
                                     enum machine_mode mode,
                                     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
              || (GET_MODE (op0) == VOIDmode
                  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
        mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
        mode = GET_MODE (op1);
      else
        return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      tem = op0, op0 = op1, op1 = tem;
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
            && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
                                                mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;
  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (GET_MODE (trueop0))
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
           && ! HONOR_SNANS (GET_MODE (trueop0))))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (GET_CODE (trueop0) == CONST_DOUBLE
      && GET_CODE (trueop1) == CONST_DOUBLE
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      REAL_VALUE_TYPE d0, d1;

      REAL_VALUE_FROM_CONST_DOUBLE (d0, trueop0);
      REAL_VALUE_FROM_CONST_DOUBLE (d1, trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (d0) || REAL_VALUE_ISNAN (d1))
        switch (code)
          {
          case UNEQ:
          case UNLT:
          case UNGT:
          case UNLE:
          case UNGE:
          case NE:
          case UNORDERED:
            return const_true_rtx;
          case EQ:
          case LT:
          case GT:
          case LE:
          case GE:
          case LTGT:
          case ORDERED:
            return const0_rtx;
          default:
            return 0;
          }

      return comparison_result (code,
                                (REAL_VALUES_EQUAL (d0, d1) ? CMP_EQ :
                                 REAL_VALUES_LESS (d0, d1) ? CMP_LT
                                                           : CMP_GT));
    }
  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && (GET_CODE (trueop0) == CONST_DOUBLE
          || CONST_INT_P (trueop0))
      && (GET_CODE (trueop1) == CONST_DOUBLE
          || CONST_INT_P (trueop1)))
    {
      int width = GET_MODE_BITSIZE (mode);
      HOST_WIDE_INT l0s, h0s, l1s, h1s;
      unsigned HOST_WIDE_INT l0u, h0u, l1u, h1u;

      /* Get the two words comprising each integer constant.  */
      if (GET_CODE (trueop0) == CONST_DOUBLE)
        {
          l0u = l0s = CONST_DOUBLE_LOW (trueop0);
          h0u = h0s = CONST_DOUBLE_HIGH (trueop0);
        }
      else
        {
          l0u = l0s = INTVAL (trueop0);
          h0u = h0s = HWI_SIGN_EXTEND (l0s);
        }

      if (GET_CODE (trueop1) == CONST_DOUBLE)
        {
          l1u = l1s = CONST_DOUBLE_LOW (trueop1);
          h1u = h1s = CONST_DOUBLE_HIGH (trueop1);
        }
      else
        {
          l1u = l1s = INTVAL (trueop1);
          h1u = h1s = HWI_SIGN_EXTEND (l1s);
        }

      /* If WIDTH is nonzero and smaller than HOST_BITS_PER_WIDE_INT,
         we have to sign or zero-extend the values.  */
      if (width != 0 && width < HOST_BITS_PER_WIDE_INT)
        {
          l0u &= ((HOST_WIDE_INT) 1 << width) - 1;
          l1u &= ((HOST_WIDE_INT) 1 << width) - 1;

          if (l0s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l0s |= ((HOST_WIDE_INT) (-1) << width);

          if (l1s & ((HOST_WIDE_INT) 1 << (width - 1)))
            l1s |= ((HOST_WIDE_INT) (-1) << width);
        }
      if (width != 0 && width <= HOST_BITS_PER_WIDE_INT)
        h0u = h1u = 0, h0s = HWI_SIGN_EXTEND (l0s),
        h1s = HWI_SIGN_EXTEND (l1s);

      if (h0u == h1u && l0u == l1u)
        return comparison_result (code, CMP_EQ);
      else
        {
          int cr;
          cr = (h0s < h1s || (h0s == h1s && l0u < l1u)) ? CMP_LT : CMP_GT;
          cr |= (h0u < h1u || (h0u == h1u && l0u < l1u)) ? CMP_LTU : CMP_GTU;
          return comparison_result (code, cr);
        }
    }
  /* Optimize comparisons with upper and lower bounds.  */
  if (SCALAR_INT_MODE_P (mode)
      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (trueop1))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
          || code == LEU
          || code == GTU
          || code == LTU)
        sign = 0;
      else
        sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
        {
          mmin = 0;
          mmax = nonzero;
        }
      else
        {
          rtx mmin_rtx, mmax_rtx;
          get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

          mmin = INTVAL (mmin_rtx);
          mmax = INTVAL (mmax_rtx);
          if (sign)
            {
              unsigned int sign_copies
                = num_sign_bit_copies (trueop0, mode);

              mmin >>= (sign_copies - 1);
              mmax >>= (sign_copies - 1);
            }
        }

      switch (code)
        {
        /* x >= y is always true for y <= mmin, always false for y > mmax.  */
        case GEU:
          if ((unsigned HOST_WIDE_INT) val
              <= (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val
              > (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          break;
        case GE:
          if (val <= mmin)
            return const_true_rtx;
          if (val > mmax)
            return const0_rtx;
          break;

        /* x <= y is always true for y >= mmax, always false for y < mmin.  */
        case LEU:
          if ((unsigned HOST_WIDE_INT) val
              >= (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          if ((unsigned HOST_WIDE_INT) val
              < (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          break;
        case LE:
          if (val >= mmax)
            return const_true_rtx;
          if (val < mmin)
            return const0_rtx;
          break;

        case EQ:
          /* x == y is always false for y out of range.  */
          if (val < mmin || val > mmax)
            return const0_rtx;
          break;

        /* x > y is always false for y >= mmax, always true for y < mmin.  */
        case GTU:
          if ((unsigned HOST_WIDE_INT) val
              >= (unsigned HOST_WIDE_INT) mmax)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val
              < (unsigned HOST_WIDE_INT) mmin)
            return const_true_rtx;
          break;
        case GT:
          if (val >= mmax)
            return const0_rtx;
          if (val < mmin)
            return const_true_rtx;
          break;

        /* x < y is always false for y <= mmin, always true for y > mmax.  */
        case LTU:
          if ((unsigned HOST_WIDE_INT) val
              <= (unsigned HOST_WIDE_INT) mmin)
            return const0_rtx;
          if ((unsigned HOST_WIDE_INT) val
              > (unsigned HOST_WIDE_INT) mmax)
            return const_true_rtx;
          break;
        case LT:
          if (val <= mmin)
            return const0_rtx;
          if (val > mmax)
            return const_true_rtx;
          break;

        case NE:
          /* x != y is always true for y out of range.  */
          if (val < mmin || val > mmax)
            return const_true_rtx;
          break;

        default:
          break;
        }
    }
4476 /* Optimize integer comparisons with zero. */
4477 if (trueop1
== const0_rtx
)
4479 /* Some addresses are known to be nonzero. We don't know
4480 their sign, but equality comparisons are known. */
4481 if (nonzero_address_p (trueop0
))
4483 if (code
== EQ
|| code
== LEU
)
4485 if (code
== NE
|| code
== GTU
)
4486 return const_true_rtx
;
4489 /* See if the first operand is an IOR with a constant. If so, we
4490 may be able to determine the result of this comparison. */
4491 if (GET_CODE (op0
) == IOR
)
4493 rtx inner_const
= avoid_constant_pool_reference (XEXP (op0
, 1));
4494 if (CONST_INT_P (inner_const
) && inner_const
!= const0_rtx
)
4496 int sign_bitnum
= GET_MODE_BITSIZE (mode
) - 1;
4497 int has_sign
= (HOST_BITS_PER_WIDE_INT
>= sign_bitnum
4498 && (INTVAL (inner_const
)
4499 & ((HOST_WIDE_INT
) 1 << sign_bitnum
)));
4508 return const_true_rtx
;
4512 return const_true_rtx
;
  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!HONOR_SNANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) < 0 is false"));
	      return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!HONOR_NANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) >= 0 is true"));
	      return const_true_rtx;
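	      /* The flag tests matter for the integer case because
		 abs (INT_MIN) can wrap back to a negative value under
		 -fwrapv, so abs (x) >= 0 is only "always true" when
		 signed overflow is assumed not to occur.  */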
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;

	default:
	  break;
	}
    }

  return 0;
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, enum machine_mode mode,
			    enum machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_BITSIZE (mode);

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;
  switch (code)
    {
    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant.  */
	  HOST_WIDE_INT val = INTVAL (op0);

	  if (BITS_BIG_ENDIAN)
	    val >>= (GET_MODE_BITSIZE (op0_mode)
		     - INTVAL (op2) - INTVAL (op1));
	  else
	    val >>= INTVAL (op2);

	  if (HOST_BITS_PER_WIDE_INT != INTVAL (op1))
	    {
	      /* First zero-extend.  */
	      val &= ((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((HOST_WIDE_INT) 1 << (INTVAL (op1) - 1))))
		val |= ~ (((HOST_WIDE_INT) 1 << INTVAL (op1)) - 1);
	    }

	  /* Clear the bits that don't belong in our mode,
	     unless they and our sign bit are all one.
	     So we get either a reasonable negative value or a reasonable
	     unsigned value for this mode.  */
	  if (width < HOST_BITS_PER_WIDE_INT
	      && ((val & ((HOST_WIDE_INT) (-1) << (width - 1)))
		  != ((HOST_WIDE_INT) (-1) << (width - 1))))
	    val &= ((HOST_WIDE_INT) 1 << width) - 1;

	  return gen_int_mode (val, mode);
	}
      break;
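      /* For example, with !BITS_BIG_ENDIAN,
	 (zero_extract:SI (const_int 0xab) (const_int 4) (const_int 0))
	 shifts by bit position 0 and masks to 4 bits, giving
	 (const_int 0xb).  */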
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;
      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;
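      /* The HONOR_SIGNED_ZEROS test above matters: with a = -0.0 and
	 b = +0.0, a != b is false, so "a != b ? a : b" yields +0.0
	 even though plain "a" would be -0.0.  */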
      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  enum machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
					? GET_MODE (XEXP (op0, 1))
					: GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
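	      /* E.g. when STORE_FLAG_VALUE is 1,
		 (if_then_else (lt a b) (const_int 1) (const_int 0))
		 collapses to (lt a b) itself, and with the arms swapped
		 it becomes the reversed comparison (ge a b).  */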
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      else
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      op2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (op2))
	{
	  int elt_size = GET_MODE_SIZE (GET_MODE_INNER (mode));
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  int mask = (1 << n_elts) - 1;

	  if (!(INTVAL (op2) & mask))
	    return op1;
	  if ((INTVAL (op2) & mask) == mask)
	    return op0;

	  op0 = avoid_constant_pool_reference (op0);
	  op1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (op0) == CONST_VECTOR
	      && GET_CODE (op1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = (INTVAL (op2) & (1 << i)
				    ? CONST_VECTOR_ELT (op0, i)
				    : CONST_VECTOR_ELT (op1, i));
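	      /* For instance, (vec_merge:V4SI [a0 a1 a2 a3] [b0 b1 b2 b3]
		 (const_int 5)) takes elements 0 and 2 from OP0 and the
		 rest from OP1, giving the constant vector
		 [a0 b1 a2 b3].  */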
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
/* Evaluate a SUBREG of a CONST_INT or CONST_DOUBLE or CONST_FIXED
   or CONST_VECTOR,
   returning another CONST_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (enum machine_mode outermode, rtx op,
		       enum machine_mode innermode, unsigned int byte)
{
  /* We support up to 512-bit values (for V8DFmode).  */
  enum {
    max_bitsize = 512,
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[max_bitsize / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  enum machine_mode outer_submode;
  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_BITSIZE (GET_MODE_INNER (innermode));
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_DOUBLE:
	  if (GET_MODE (el) == VOIDmode)
	    {
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_WIDE_INT * 2 && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}
	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  else
	    {
	      long tmp[max_bitsize / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < 2 * HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
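  /* At this point VALUE holds the unpacked constant as a little-endian
     array of 8-bit chunks: e.g. the SImode constant 0x12345678 becomes
     { 0x78, 0x56, 0x34, 0x12 }.  */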
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);
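  /* E.g. on a big-endian target, the QImode subreg at BYTE 3 of an
     SImode constant is its least significant byte; the renumbering
     above maps 3 to 0, so VALUE is then indexed from the LSB.  */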
  /* Re-pack the value.  */

  if (VECTOR_MODE_P (outermode))
    {
      num_elem = GET_MODE_NUNITS (outermode);
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
      outer_submode = GET_MODE_INNER (outermode);
    }
  else
    {
      num_elem = 1;
      elems = &result_s;
      outer_submode = outermode;
    }

  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    unsigned HOST_WIDE_INT hi = 0, lo = 0;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      lo |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      hi |= ((HOST_WIDE_INT)(*vp++ & value_mask)
		     << (i - HOST_BITS_PER_WIDE_INT));

	    /* immed_double_const doesn't call trunc_int_for_mode.  I don't
	       know why.  */
	    if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	      elems[elem] = gen_int_mode (lo, outer_submode);
	    else if (elem_bitsize <= 2 * HOST_BITS_PER_WIDE_INT)
	      elems[elem] = immed_double_const (lo, hi, outer_submode);
	    else
	      return NULL_RTX;
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[max_bitsize / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = CONST_DOUBLE_FROM_REAL_VALUE (r, outer_submode);
	  }
	  break;

	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */

rtx
simplify_subreg (enum machine_mode outermode, rtx op,
		 enum machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  gcc_assert ((byte % GET_MODE_SIZE (outermode)) == 0);
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  if (outermode == innermode && !byte)
    return op;

  if (CONST_INT_P (op)
      || GET_CODE (op) == CONST_DOUBLE
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);
  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back to the starting mode of OP.  */
  if (GET_CODE (op) == SUBREG)
    {
      enum machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  The irritating exception is paradoxical subregs,
	 where we define SUBREG_BYTE to be 0.  On big endian machines,
	 this value should be negative.  For a moment, undo this
	 exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));

	  /* In a paradoxical subreg, see if we are still looking at the
	     lower part.  If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_UNSIGNED_P (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_UNSIGNED_SET
		(newx, SUBREG_PROMOTED_UNSIGNED_P (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }
  /* Merge implicit and explicit truncations.  */

  if (GET_CODE (op) == TRUNCATE
      && GET_MODE_SIZE (outermode) < GET_MODE_SIZE (innermode)
      && subreg_lowpart_offset (outermode, innermode) == byte)
    return simplify_gen_unary (TRUNCATE, outermode, XEXP (op, 0),
			       GET_MODE (XEXP (op, 0)));
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate the original regno.  We don't have any way to
	     specify an offset inside the original regno, so do so only
	     for the lowpart.  The information is used only by alias
	     analysis, which cannot grok partial registers anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0))
      /* Allow splitting of volatile memory references in case we don't
	 have an instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
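  /* E.g. in the CONCAT case just above, (subreg:SF (concat:SC re im) 4)
     lies entirely within the imaginary half: PART becomes IM and
     FINAL_OFFSET becomes 0.  */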
  /* Optimize SUBREG truncations of zero and sign extended values.  */
  if ((GET_CODE (op) == ZERO_EXTEND
       || GET_CODE (op) == SIGN_EXTEND)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);

      /* If we're requesting the lowpart of a zero or sign extension,
	 there are three possibilities.  If the outermode is the same
	 as the origmode, we can omit both the extension and the subreg.
	 If the outermode is not larger than the origmode, we can apply
	 the truncation without the extension.  Finally, if the outermode
	 is larger than the origmode, but both are integer modes, we
	 can just extend to the appropriate mode.  */
      if (bitpos == 0)
	{
	  enum machine_mode origmode = GET_MODE (XEXP (op, 0));
	  if (outermode == origmode)
	    return XEXP (op, 0);
	  if (GET_MODE_BITSIZE (outermode) <= GET_MODE_BITSIZE (origmode))
	    return simplify_gen_subreg (outermode, XEXP (op, 0), origmode,
					subreg_lowpart_offset (outermode,
							       origmode));
	  if (SCALAR_INT_MODE_P (outermode))
	    return simplify_gen_unary (GET_CODE (op), outermode,
				       XEXP (op, 0), origmode);
	}

      /* A SUBREG resulting from a zero extension may fold to zero if
	 it extracts higher bits than the ZERO_EXTEND's source bits.  */
      if (GET_CODE (op) == ZERO_EXTEND
	  && bitpos >= GET_MODE_BITSIZE (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }
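  /* E.g. the lowpart HImode subreg of (zero_extend:SI (reg:HI x)) is
     simply (reg:HI x), and the lowpart QImode subreg of the same
     extension reduces to a lowpart QImode subreg of X itself.  */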
  /* Simplify (subreg:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C), 0)
     into (ashiftrt:QI (x:QI) C), where C is a suitable small constant
     and the outer subreg is effectively a truncation to the original
     mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      /* Ensure that OUTERMODE is at least twice as wide as the INNERMODE
	 to avoid the possibility that an outer LSHIFTRT shifts by more
	 than the sign extension's sign_bit_copies and introduces zeros
	 into the high bits of the result.  */
      && (2 * GET_MODE_BITSIZE (outermode)) <= GET_MODE_BITSIZE (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C), 0)
     into (lshiftrt:QI (x:QI) C), where C is a suitable small constant
     and the outer subreg is effectively a truncation to the original
     mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (LSHIFTRT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (subreg:QI (ashift:SI (zero_extend:SI (x:QI)) C), 0)
     into (ashift:QI (x:QI) C), where C is a suitable small constant
     and the outer subreg is effectively a truncation to the original
     mode.  */
  if (GET_CODE (op) == ASHIFT
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) < GET_MODE_BITSIZE (innermode)
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
	  || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (outermode)
      && subreg_lsb_1 (outermode, innermode, byte) == 0)
    return simplify_gen_binary (ASHIFT, outermode,
				XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (outermode)
      && GET_MODE_BITSIZE (outermode) >= BITS_PER_WORD
      && GET_MODE_BITSIZE (innermode) >= (2 * GET_MODE_BITSIZE (outermode))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (GET_MODE_BITSIZE (outermode) - 1)) == 0
      && INTVAL (XEXP (op, 1)) >= 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
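      /* E.g. on a little-endian target with 32-bit words,
	 (subreg:SI (lshiftrt:DI (reg:DI x) (const_int 32)) 0) becomes
	 (subreg:SI (reg:DI x) 4), i.e. the high word of X.  */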
      return simplify_gen_subreg (outermode, XEXP (op, 0), innermode,
				  (WORDS_BIG_ENDIAN
				   ? byte - shifted_bytes
				   : byte + shifted_bytes));
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (enum machine_mode outermode, rtx op,
		     enum machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.


   Long term we want to have only one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification it needs to be added in 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
rtx
simplify_rtx (const_rtx x)
{
  const enum rtx_code code = GET_CODE (x);
  const enum machine_mode mode = GET_MODE (x);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      return simplify_unary_operation (code, mode,
				       XEXP (x, 0), GET_MODE (XEXP (x, 0)));

    case RTX_COMM_ARITH:
      if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));

      /* Fall through....  */

    case RTX_BIN_ARITH:
      return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
					 XEXP (x, 0), XEXP (x, 1),
					 XEXP (x, 2));

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      return simplify_relational_operation (code, mode,
					    ((GET_MODE (XEXP (x, 0))
					      != VOIDmode)
					     ? GET_MODE (XEXP (x, 0))
					     : GET_MODE (XEXP (x, 1))),
					    XEXP (x, 0),
					    XEXP (x, 1));

    case RTX_EXTRA:
      if (code == SUBREG)
	return simplify_subreg (mode, SUBREG_REG (x),
				GET_MODE (SUBREG_REG (x)),
				SUBREG_BYTE (x));
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  /* Convert (lo_sum (high FOO) FOO) to FOO.  */
	  if (GET_CODE (XEXP (x, 0)) == HIGH
	      && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))