/* RTL simplification functions for GNU compiler.
   Copyright (C) 1987-2015 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "diagnostic-core.h"
#include "fold-const.h"

/* Simplification and canonicalization of RTL.  */
/* Much code operates on (low, high) pairs; the low value is an
   unsigned wide int, the high value a signed wide int.  We
   occasionally need to sign extend from low to high as if low were a
   signed wide int.  */
#define HWI_SIGN_EXTEND(low) \
  ((((HOST_WIDE_INT) low) < 0) ? ((HOST_WIDE_INT) -1) : ((HOST_WIDE_INT) 0))
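
/* For instance, a low word whose sign bit is set makes HWI_SIGN_EXTEND
   yield (HOST_WIDE_INT) -1 (all ones) for the high word, so the
   (low, high) pair reads as the same negative value; a low word with a
   clear sign bit yields a high word of 0.  */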

static rtx neg_const_int (machine_mode, const_rtx);
static bool plus_minus_operand_p (const_rtx);
static rtx simplify_plus_minus (enum rtx_code, machine_mode, rtx, rtx);
static rtx simplify_immed_subreg (machine_mode, rtx, machine_mode,
                                  unsigned int);
static rtx simplify_associative_operation (enum rtx_code, machine_mode,
                                           rtx, rtx);
static rtx simplify_relational_operation_1 (enum rtx_code, machine_mode,
                                            machine_mode, rtx, rtx);
static rtx simplify_unary_operation_1 (enum rtx_code, machine_mode, rtx);
static rtx simplify_binary_operation_1 (enum rtx_code, machine_mode,
                                        rtx, rtx, rtx, rtx);

/* Negate a CONST_INT rtx, truncating (because a conversion from a
   maximally negative number can overflow).  */

static rtx
neg_const_int (machine_mode mode, const_rtx i)
{
  return gen_int_mode (-(unsigned HOST_WIDE_INT) INTVAL (i), mode);
}
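
/* The unsigned cast above is deliberate: for the most negative CONST_INT
   a signed negation would overflow, while the unsigned negation wraps and
   gen_int_mode then canonicalizes the result for MODE.  E.g. negating
   -128 in QImode yields -128 again.  */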

/* Test whether expression, X, is an immediate constant that represents
   the most significant bit of machine mode MODE.  */

bool
mode_signbit_p (machine_mode mode, const_rtx x)
{
  unsigned HOST_WIDE_INT val;
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0)
    return false;

  if (width <= HOST_BITS_PER_WIDE_INT
      && CONST_INT_P (x))
    val = INTVAL (x);
#if TARGET_SUPPORTS_WIDE_INT
  else if (CONST_WIDE_INT_P (x))
    {
      unsigned int i;
      unsigned int elts = CONST_WIDE_INT_NUNITS (x);
      if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
        return false;
      for (i = 0; i < elts - 1; i++)
        if (CONST_WIDE_INT_ELT (x, i) != 0)
          return false;
      val = CONST_WIDE_INT_ELT (x, elts - 1);
      width %= HOST_BITS_PER_WIDE_INT;
      if (width == 0)
        width = HOST_BITS_PER_WIDE_INT;
    }
#else
  else if (width <= HOST_BITS_PER_DOUBLE_INT
           && CONST_DOUBLE_AS_INT_P (x)
           && CONST_DOUBLE_LOW (x) == 0)
    {
      val = CONST_DOUBLE_HIGH (x);
      width -= HOST_BITS_PER_WIDE_INT;
    }
#endif
  else
    /* X is not an integer constant.  */
    return false;

  if (width < HOST_BITS_PER_WIDE_INT)
    val &= ((unsigned HOST_WIDE_INT) 1 << width) - 1;
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}
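
/* For example, for SImode the only value accepted by mode_signbit_p is
   0x80000000, the immediate with just bit 31 -- the most significant bit
   of the 32-bit mode -- set.  */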

/* Test whether VAL is equal to the most significant bit of mode MODE
   (after masking with the mode mask of MODE).  Returns false if the
   precision of MODE is too large to handle.  */

bool
val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= GET_MODE_MASK (mode);
  return val == ((unsigned HOST_WIDE_INT) 1 << (width - 1));
}

/* Test whether the most significant bit of mode MODE is set in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val != 0;
}

/* Test whether the most significant bit of mode MODE is clear in VAL.
   Returns false if the precision of MODE is too large to handle.  */

bool
val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
{
  unsigned int width;

  if (GET_MODE_CLASS (mode) != MODE_INT)
    return false;

  width = GET_MODE_PRECISION (mode);
  if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
    return false;

  val &= (unsigned HOST_WIDE_INT) 1 << (width - 1);
  return val == 0;
}
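
/* To illustrate the three helpers above for QImode (8 bits):
   val_signbit_p (QImode, 0x80) is true, since 0x80 is exactly the sign
   bit; val_signbit_known_set_p (QImode, 0xf0) is true, since bit 7 is
   set; and val_signbit_known_clear_p (QImode, 0x7f) is true, since
   bit 7 is clear.  */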

/* Make a binary operation by properly ordering the operands and
   seeing if the expression folds.  */

rtx
simplify_gen_binary (enum rtx_code code, machine_mode mode, rtx op0,
                     rtx op1)
{
  rtx tem;

  /* If this simplifies, do it.  */
  tem = simplify_binary_operation (code, mode, op0, op1);
  if (tem)
    return tem;

  /* Put complex operands first and constants second if commutative.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
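
/* As a usage sketch: simplify_gen_binary (PLUS, SImode, x, const0_rtx)
   folds to X, while simplify_gen_binary (PLUS, SImode, const1_rtx, x)
   is first reordered so the constant ends up second before any new rtx
   is allocated.  */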

/* If X is a MEM referencing the constant pool, return the real value.
   Otherwise return X.  */
rtx
avoid_constant_pool_reference (rtx x)
{
  rtx c, tmp, addr;
  machine_mode cmode;
  HOST_WIDE_INT offset = 0;

  switch (GET_CODE (x))
    {
    case MEM:
      break;

    case FLOAT_EXTEND:
      /* Handle float extensions of constant pool references.  */
      tmp = XEXP (x, 0);
      c = avoid_constant_pool_reference (tmp);
      if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
        return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
                                             GET_MODE (x));
      return x;

    default:
      return x;
    }

  if (GET_MODE (x) == BLKmode)
    return x;

  addr = XEXP (x, 0);

  /* Call target hook to avoid the effects of -fpic etc....  */
  addr = targetm.delegitimize_address (addr);

  /* Split the address into a base and integer offset.  */
  if (GET_CODE (addr) == CONST
      && GET_CODE (XEXP (addr, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (addr, 0), 1)))
    {
      offset = INTVAL (XEXP (XEXP (addr, 0), 1));
      addr = XEXP (XEXP (addr, 0), 0);
    }

  if (GET_CODE (addr) == LO_SUM)
    addr = XEXP (addr, 1);

  /* If this is a constant pool reference, we can turn it into its
     constant and hope that simplifications happen.  */
  if (GET_CODE (addr) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (addr))
    {
      c = get_pool_constant (addr);
      cmode = get_pool_mode (addr);

      /* If we're accessing the constant in a different mode than it was
         originally stored, attempt to fix that up via subreg simplifications.
         If that fails we have no choice but to return the original memory.  */
      if ((offset != 0 || cmode != GET_MODE (x))
          && offset >= 0 && offset < GET_MODE_SIZE (cmode))
        {
          rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
          if (tem && CONSTANT_P (tem))
            return tem;
        }
      else
        return c;
    }

  return x;
}

/* Simplify a MEM based on its attributes.  This is the default
   delegitimize_address target hook, and it's recommended that every
   overrider call it.  */

rtx
delegitimize_mem_from_attrs (rtx x)
{
  /* MEMs without MEM_OFFSETs may have been offset, so we can't just
     use their base addresses as equivalent.  */
  if (MEM_P (x)
      && MEM_EXPR (x)
      && MEM_OFFSET_KNOWN_P (x))
    {
      tree decl = MEM_EXPR (x);
      machine_mode mode = GET_MODE (x);
      HOST_WIDE_INT offset = 0;

      switch (TREE_CODE (decl))
        {
        default:
          decl = NULL;
          break;

        case VAR_DECL:
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
        case COMPONENT_REF:
        case BIT_FIELD_REF:
        case REALPART_EXPR:
        case IMAGPART_EXPR:
        case VIEW_CONVERT_EXPR:
          {
            HOST_WIDE_INT bitsize, bitpos;
            tree toffset;
            int unsignedp, volatilep = 0;

            decl = get_inner_reference (decl, &bitsize, &bitpos, &toffset,
                                        &mode, &unsignedp, &volatilep, false);
            if (bitsize != GET_MODE_BITSIZE (mode)
                || (bitpos % BITS_PER_UNIT)
                || (toffset && !tree_fits_shwi_p (toffset)))
              decl = NULL;
            else
              {
                offset += bitpos / BITS_PER_UNIT;
                if (toffset)
                  offset += tree_to_shwi (toffset);
              }
            break;
          }
        }

      if (decl
          && mode == GET_MODE (x)
          && TREE_CODE (decl) == VAR_DECL
          && (TREE_STATIC (decl)
              || DECL_THREAD_LOCAL_P (decl))
          && DECL_RTL_SET_P (decl)
          && MEM_P (DECL_RTL (decl)))
        {
          rtx newx;

          offset += MEM_OFFSET (x);

          newx = DECL_RTL (decl);

          if (MEM_P (newx))
            {
              rtx n = XEXP (newx, 0), o = XEXP (x, 0);

              /* Avoid creating a new MEM needlessly if we already had
                 the same address.  We do if there's no OFFSET and the
                 old address X is identical to NEWX, or if X is of the
                 form (plus NEWX OFFSET), or the NEWX is of the form
                 (plus Y (const_int Z)) and X is that with the offset
                 added: (plus Y (const_int Z+OFFSET)).  */
              if (!((offset == 0
                     || (GET_CODE (o) == PLUS
                         && GET_CODE (XEXP (o, 1)) == CONST_INT
                         && (offset == INTVAL (XEXP (o, 1))
                             || (GET_CODE (n) == PLUS
                                 && GET_CODE (XEXP (n, 1)) == CONST_INT
                                 && (INTVAL (XEXP (n, 1)) + offset
                                     == INTVAL (XEXP (o, 1)))
                                 && (n = XEXP (n, 0))))
                         && (o = XEXP (o, 0))))
                    && rtx_equal_p (o, n)))
                x = adjust_address_nv (newx, mode, offset);
            }
          else if (GET_MODE (x) == GET_MODE (newx)
                   && offset == 0)
            x = newx;
        }
    }

  return x;
}

/* Make a unary operation by first seeing if it folds and otherwise making
   the specified operation.  */

rtx
simplify_gen_unary (enum rtx_code code, machine_mode mode, rtx op,
                    machine_mode op_mode)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
    return tem;

  return gen_rtx_fmt_e (code, mode, op);
}

/* Likewise for ternary operations.  */

rtx
simplify_gen_ternary (enum rtx_code code, machine_mode mode,
                      machine_mode op0_mode, rtx op0, rtx op1, rtx op2)
{
  rtx tem;

  /* If this simplifies, use it.  */
  if (0 != (tem = simplify_ternary_operation (code, mode, op0_mode,
                                              op0, op1, op2)))
    return tem;

  return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
}

/* Likewise, for relational operations.
   CMP_MODE specifies mode comparison is done in.  */

rtx
simplify_gen_relational (enum rtx_code code, machine_mode mode,
                         machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem;

  if (0 != (tem = simplify_relational_operation (code, mode, cmp_mode,
                                                 op0, op1)))
    return tem;

  return gen_rtx_fmt_ee (code, mode, op0, op1);
}
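
/* All of the simplify_gen_* wrappers above follow the same contract:
   first try the corresponding simplify_*_operation, and only fall back
   to allocating a fresh rtx with gen_rtx_fmt_* when no simplification
   applies.  Callers can therefore use them unconditionally in place of
   the raw gen_rtx_* constructors.  */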

/* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
   and simplify the result.  If FN is non-NULL, call this callback on each
   X, if it returns non-NULL, replace X with its return value and simplify the
   result.  */

rtx
simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
                         rtx (*fn) (rtx, const_rtx, void *), void *data)
{
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);
  machine_mode op_mode;
  const char *fmt;
  rtx op0, op1, op2, newx, op;
  rtvec vec, newvec;
  int i, j;

  if (__builtin_expect (fn != NULL, 0))
    {
      newx = fn (x, old_rtx, data);
      if (newx)
        return newx;
    }
  else if (rtx_equal_p (x, old_rtx))
    return copy_rtx ((rtx) data);

  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      if (op0 == XEXP (x, 0))
        return x;
      return simplify_gen_unary (code, mode, op0, op_mode);

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_binary (code, mode, op0, op1);

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      op0 = XEXP (x, 0);
      op1 = XEXP (x, 1);
      op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
        return x;
      return simplify_gen_relational (code, mode, op_mode, op0, op1);

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      op0 = XEXP (x, 0);
      op_mode = GET_MODE (op0);
      op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
      op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
      op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
      if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
        return x;
      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op0);
      return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);

    case RTX_EXTRA:
      if (code == SUBREG)
        {
          op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
          if (op0 == SUBREG_REG (x))
            return x;
          op0 = simplify_gen_subreg (GET_MODE (x), op0,
                                     GET_MODE (SUBREG_REG (x)),
                                     SUBREG_BYTE (x));
          return op0 ? op0 : x;
        }
      break;

    case RTX_OBJ:
      if (code == MEM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          if (op0 == XEXP (x, 0))
            return x;
          return replace_equiv_address_nv (x, op0);
        }
      else if (code == LO_SUM)
        {
          op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
          op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);

          /* (lo_sum (high x) y) -> y where x and y have the same base.  */
          if (GET_CODE (op0) == HIGH)
            {
              rtx base0, base1, offset0, offset1;
              split_const (XEXP (op0, 0), &base0, &offset0);
              split_const (op1, &base1, &offset1);
              if (rtx_equal_p (base0, base1))
                return op1;
            }

          if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
            return x;
          return gen_rtx_LO_SUM (mode, op0, op1);
        }
      break;

    default:
      break;
    }

  newx = x;
  fmt = GET_RTX_FORMAT (code);
  for (i = 0; fmt[i]; i++)
    switch (fmt[i])
      {
      case 'E':
        vec = XVEC (x, i);
        newvec = XVEC (newx, i);
        for (j = 0; j < GET_NUM_ELEM (vec); j++)
          {
            op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
                                          old_rtx, fn, data);
            if (op != RTVEC_ELT (vec, j))
              {
                if (newvec == vec)
                  {
                    newvec = shallow_copy_rtvec (vec);
                    if (x == newx)
                      newx = shallow_copy_rtx (x);
                    XVEC (newx, i) = newvec;
                  }
                RTVEC_ELT (newvec, j) = op;
              }
          }
        break;

      case 'e':
        if (XEXP (x, i))
          {
            op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
            if (op != XEXP (x, i))
              {
                if (x == newx)
                  newx = shallow_copy_rtx (x);
                XEXP (newx, i) = op;
              }
          }
        break;
      }
  return newx;
}

/* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
   resulting RTX.  Return a new RTX which is as simplified as possible.  */

rtx
simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
{
  return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
}
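
/* For example, if X is (plus:SI (reg:SI 1) (const_int 4)), then
   simplify_replace_rtx (x, (reg:SI 1), (const_int 8)) substitutes the
   register and folds the resulting (plus:SI (const_int 8) (const_int 4))
   down to (const_int 12).  */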

/* Try to simplify a MODE truncation of OP, which has OP_MODE.
   Only handle cases where the truncated value is inherently an rvalue.

   RTL provides two ways of truncating a value:

   1. a lowpart subreg.  This form is only a truncation when both
      the outer and inner modes (here MODE and OP_MODE respectively)
      are scalar integers, and only then when the subreg is used as
      an rvalue.

      It is only valid to form such truncating subregs if the
      truncation requires no action by the target.  The onus for
      proving this is on the creator of the subreg -- e.g. the
      caller to simplify_subreg or simplify_gen_subreg -- and typically
      involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.

   2. a TRUNCATE.  This form handles both scalar and compound integers.

   The first form is preferred where valid.  However, the TRUNCATE
   handling in simplify_unary_operation turns the second form into the
   first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
   so it is generally safe to form rvalue truncations using:

      simplify_gen_unary (TRUNCATE, ...)

   and leave simplify_unary_operation to work out which representation
   should be used.

   Because of the proof requirements on (1), simplify_truncation must
   also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
   regardless of whether the outer truncation came from a SUBREG or a
   TRUNCATE.  For example, if the caller has proven that an SImode
   truncation of:

      (and:DI X Y)

   is a no-op and can be represented as a subreg, it does not follow
   that SImode truncations of X and Y are also no-ops.  On a target
   like 64-bit MIPS that requires SImode values to be stored in
   sign-extended form, an SImode truncation of:

      (and:DI (reg:DI X) (const_int 63))

   is trivially a no-op because only the lower 6 bits can be set.
   However, X is still an arbitrary 64-bit number and so we cannot
   assume that truncating it too is a no-op.  */

static rtx
simplify_truncation (machine_mode mode, rtx op,
                     machine_mode op_mode)
{
  unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
  unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
  gcc_assert (precision <= op_precision);

  /* Optimize truncations of zero and sign extended values.  */
  if (GET_CODE (op) == ZERO_EXTEND
      || GET_CODE (op) == SIGN_EXTEND)
    {
      /* There are three possibilities.  If MODE is the same as the
         origmode, we can omit both the extension and the subreg.
         If MODE is not larger than the origmode, we can apply the
         truncation without the extension.  Finally, if the outermode
         is larger than the origmode, we can just extend to the appropriate
         mode.  */
      machine_mode origmode = GET_MODE (XEXP (op, 0));
      if (mode == origmode)
        return XEXP (op, 0);
      else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
        return simplify_gen_unary (TRUNCATE, mode,
                                   XEXP (op, 0), origmode);
      else
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0), origmode);
    }

  /* If the machine can perform operations in the truncated mode, distribute
     the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
     (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))).  */
  if (1
      && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
      && (GET_CODE (op) == PLUS
          || GET_CODE (op) == MINUS
          || GET_CODE (op) == MULT))
    {
      rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
      if (op0)
        {
          rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
          if (op1)
            return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
        }
    }

  /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
     to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      /* Ensure that OP_MODE is at least twice as wide as MODE
         to avoid the possibility that an outer LSHIFTRT shifts by more
         than the sign extension's sign_bit_copies and introduces zeros
         into the high bits of the result.  */
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
     to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && CONST_INT_P (XEXP (op, 1))
      && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (LSHIFTRT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
     to (ashift:QI (x:QI) C), where C is a suitable small constant and
     the outer subreg is effectively a truncation to the original mode.  */
  if (GET_CODE (op) == ASHIFT
      && CONST_INT_P (XEXP (op, 1))
      && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
          || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
      && UINTVAL (XEXP (op, 1)) < precision)
    return simplify_gen_binary (ASHIFT, mode,
                                XEXP (XEXP (op, 0), 0), XEXP (op, 1));

  /* Recognize a word extraction from a multi-word subreg.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && precision >= BITS_PER_WORD
      && 2 * precision <= op_precision
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
      && UINTVAL (XEXP (op, 1)) < op_precision)
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
                                  (WORDS_BIG_ENDIAN
                                   ? byte - shifted_bytes
                                   : byte + shifted_bytes));
    }

  /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
     and try replacing the TRUNCATE and shift with it.  Don't do this
     if the MEM has a mode-dependent address.  */
  if ((GET_CODE (op) == LSHIFTRT
       || GET_CODE (op) == ASHIFTRT)
      && SCALAR_INT_MODE_P (op_mode)
      && MEM_P (XEXP (op, 0))
      && CONST_INT_P (XEXP (op, 1))
      && (INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (mode)) == 0
      && INTVAL (XEXP (op, 1)) > 0
      && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (op_mode)
      && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
                                     MEM_ADDR_SPACE (XEXP (op, 0)))
      && ! MEM_VOLATILE_P (XEXP (op, 0))
      && (GET_MODE_SIZE (mode) >= UNITS_PER_WORD
          || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
    {
      int byte = subreg_lowpart_offset (mode, op_mode);
      int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
      return adjust_address_nv (XEXP (op, 0), mode,
                                (WORDS_BIG_ENDIAN
                                 ? byte - shifted_bytes
                                 : byte + shifted_bytes));
    }

  /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
     (OP:SI foo:SI) if OP is NEG or ABS.  */
  if ((GET_CODE (op) == ABS
       || GET_CODE (op) == NEG)
      && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
          || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
      && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
    return simplify_gen_unary (GET_CODE (op), mode,
                               XEXP (XEXP (op, 0), 0), mode);

  /* (truncate:A (subreg:B (truncate:C X) 0)) is
     (truncate:A X).  */
  if (GET_CODE (op) == SUBREG
      && SCALAR_INT_MODE_P (mode)
      && SCALAR_INT_MODE_P (op_mode)
      && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op)))
      && GET_CODE (SUBREG_REG (op)) == TRUNCATE
      && subreg_lowpart_p (op))
    {
      rtx inner = XEXP (SUBREG_REG (op), 0);
      if (GET_MODE_PRECISION (mode)
          <= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
        return simplify_gen_unary (TRUNCATE, mode, inner, GET_MODE (inner));
      else
        /* If subreg above is paradoxical and C is narrower
           than A, return (subreg:A (truncate:C X) 0).  */
        return simplify_gen_subreg (mode, SUBREG_REG (op),
                                    GET_MODE (SUBREG_REG (op)), 0);
    }

  /* (truncate:A (truncate:B X)) is (truncate:A X).  */
  if (GET_CODE (op) == TRUNCATE)
    return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
                               GET_MODE (XEXP (op, 0)));

  return NULL_RTX;
}
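
/* As a concrete instance of the rules above, simplify_truncation turns
   (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) (const_int 2)))
   into (lshiftrt:QI (x:QI) (const_int 2)): the zero_extend guarantees
   that all bits shifted into x's QImode range came from x itself.  */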

/* Try to simplify a unary operation CODE whose output mode is to be
   MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if no simplification can be made.  */
rtx
simplify_unary_operation (enum rtx_code code, machine_mode mode,
                          rtx op, machine_mode op_mode)
{
  rtx trueop, tem;

  trueop = avoid_constant_pool_reference (op);

  tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
  if (tem)
    return tem;

  return simplify_unary_operation_1 (code, mode, op);
}
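
/* E.g. simplify_unary_operation (NEG, SImode, const1_rtx, SImode) folds
   to (const_int -1) via the constant folding in
   simplify_const_unary_operation; when OP is not constant, the
   non-constant patterns in simplify_unary_operation_1 are tried.  */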

/* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
   to be exact.  */

static bool
exact_int_to_float_conversion_p (const_rtx op)
{
  int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
  machine_mode op0_mode = GET_MODE (XEXP (op, 0));
  /* Constants shouldn't reach here.  */
  gcc_assert (op0_mode != VOIDmode);
  int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
  int in_bits = in_prec;
  if (HWI_COMPUTABLE_MODE_P (op0_mode))
    {
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
      if (GET_CODE (op) == FLOAT)
        in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
      else if (GET_CODE (op) == UNSIGNED_FLOAT)
        in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
      else
        gcc_unreachable ();
      in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
    }
  return in_bits <= out_bits;
}
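
/* For instance, a FLOAT of a HImode value into SFmode is always exact:
   in_bits is at most 15 there, and SFmode's significand holds 24 bits.
   A FLOAT of a full SImode value, by contrast, is exact only when
   nonzero_bits and num_sign_bit_copies prove that few enough
   significant bits are involved.  */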

/* Perform some simplifications we can do even if the operands
   aren't constant.  */
static rtx
simplify_unary_operation_1 (enum rtx_code code, machine_mode mode, rtx op)
{
  enum rtx_code reversed;
  rtx temp;

  switch (code)
    {
    case NOT:
      /* (not (not X)) == X.  */
      if (GET_CODE (op) == NOT)
        return XEXP (op, 0);

      /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
         comparison is all ones.  */
      if (COMPARISON_P (op)
          && (mode == BImode || STORE_FLAG_VALUE == -1)
          && ((reversed = reversed_comparison_code (op, NULL_RTX)) != UNKNOWN))
        return simplify_gen_relational (reversed, mode, VOIDmode,
                                        XEXP (op, 0), XEXP (op, 1));

      /* (not (plus X -1)) can become (neg X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == constm1_rtx)
        return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);

      /* Similarly, (not (neg X)) is (plus X -1).  */
      if (GET_CODE (op) == NEG)
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONSTM1_RTX (mode));

      /* (not (xor X C)) for C constant is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == XOR
          && CONST_INT_P (XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (plus X C)) for signbit C is (xor X D) with D = ~C.  */
      if (GET_CODE (op) == PLUS
          && CONST_INT_P (XEXP (op, 1))
          && mode_signbit_p (mode, XEXP (op, 1))
          && (temp = simplify_unary_operation (NOT, mode,
                                               XEXP (op, 1), mode)) != 0)
        return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);

      /* (not (ashift 1 X)) is (rotate ~1 X).  We used to do this for
         operands other than 1, but that is not valid.  We could do a
         similar simplification for (not (lshiftrt C X)) where C is
         just the sign bit, but this doesn't seem common enough to
         bother with.  */
      if (GET_CODE (op) == ASHIFT
          && XEXP (op, 0) == const1_rtx)
        {
          temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
          return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
        }

      /* (not (ashiftrt foo C)) where C is the number of bits in FOO
         minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
         so we can perform the above simplification.  */
      if (STORE_FLAG_VALUE == -1
          && GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_relational (GE, mode, VOIDmode,
                                        XEXP (op, 0), const0_rtx);

      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && (GET_MODE_SIZE (GET_MODE (op))
              < GET_MODE_SIZE (GET_MODE (SUBREG_REG (op))))
          && GET_CODE (SUBREG_REG (op)) == ASHIFT
          && XEXP (SUBREG_REG (op), 0) == const1_rtx)
        {
          machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
          rtx x;

          x = gen_rtx_ROTATE (inner_mode,
                              simplify_gen_unary (NOT, inner_mode, const1_rtx,
                                                  inner_mode),
                              XEXP (SUBREG_REG (op), 1));
          temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
          if (temp)
            return temp;
        }

      /* Apply De Morgan's laws to reduce number of patterns for machines
         with negating logical insns (and-not, nand, etc.).  If result has
         only one NOT, put it first, since that is how the patterns are
         coded.  */
      if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
        {
          rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
          machine_mode op_mode;

          op_mode = GET_MODE (in1);
          in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);

          op_mode = GET_MODE (in2);
          if (op_mode == VOIDmode)
            op_mode = mode;
          in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);

          if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
            std::swap (in1, in2);

          return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
                                 mode, in1, in2);
        }

      /* (not (bswap x)) -> (bswap (not x)).  */
      if (GET_CODE (op) == BSWAP)
        {
          rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
          return simplify_gen_unary (BSWAP, mode, x, mode);
        }
      break;

    case NEG:
      /* (neg (neg X)) == X.  */
      if (GET_CODE (op) == NEG)
        return XEXP (op, 0);

      /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
         If comparison is not reversible use
         x ? y : (neg y).  */
      if (GET_CODE (op) == IF_THEN_ELSE)
        {
          rtx cond = XEXP (op, 0);
          rtx true_rtx = XEXP (op, 1);
          rtx false_rtx = XEXP (op, 2);

          if ((GET_CODE (true_rtx) == NEG
               && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
               || (GET_CODE (false_rtx) == NEG
                   && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
            {
              if (reversed_comparison_code (cond, NULL_RTX) != UNKNOWN)
                temp = reversed_comparison (cond, mode);
              else
                {
                  temp = cond;
                  std::swap (true_rtx, false_rtx);
                }
              return simplify_gen_ternary (IF_THEN_ELSE, mode,
                                           mode, temp, true_rtx, false_rtx);
            }
        }

      /* (neg (plus X 1)) can become (not X).  */
      if (GET_CODE (op) == PLUS
          && XEXP (op, 1) == const1_rtx)
        return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);

      /* Similarly, (neg (not X)) is (plus X 1).  */
      if (GET_CODE (op) == NOT)
        return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
                                    CONST1_RTX (mode));

      /* (neg (minus X Y)) can become (minus Y X).  This transformation
         isn't safe for modes with signed zeros, since if X and Y are
         both +0, (minus Y X) is the same as (minus X Y).  If the
         rounding mode is towards +infinity (or -infinity) then the two
         expressions will be rounded differently.  */
      if (GET_CODE (op) == MINUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));

      if (GET_CODE (op) == PLUS
          && !HONOR_SIGNED_ZEROS (mode)
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          /* (neg (plus A C)) is simplified to (minus -C A).  */
          if (CONST_SCALAR_INT_P (XEXP (op, 1))
              || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
            {
              temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
              if (temp)
                return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
            }

          /* (neg (plus A B)) is canonicalized to (minus (neg A) B).  */
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
          return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
        }

      /* (neg (mult A B)) becomes (mult A (neg B)).
         This works even for floating-point values.  */
      if (GET_CODE (op) == MULT
          && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
        {
          temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
          return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
        }

      /* NEG commutes with ASHIFT since it is multiplication.  Only do
         this if we can then eliminate the NEG (e.g., if the operand
         is a constant).  */
      if (GET_CODE (op) == ASHIFT)
        {
          temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
          if (temp)
            return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
        }

      /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == ASHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (LSHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
         C is equal to the width of MODE minus 1.  */
      if (GET_CODE (op) == LSHIFTRT
          && CONST_INT_P (XEXP (op, 1))
          && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (mode) - 1)
        return simplify_gen_binary (ASHIFTRT, mode,
                                    XEXP (op, 0), XEXP (op, 1));

      /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1.  */
      if (GET_CODE (op) == XOR
          && XEXP (op, 1) == const1_rtx
          && nonzero_bits (XEXP (op, 0), mode) == 1)
        return plus_constant (mode, XEXP (op, 0), -1);

      /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1.  */
      /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1.  */
      if (GET_CODE (op) == LT
          && XEXP (op, 1) == const0_rtx
          && SCALAR_INT_MODE_P (GET_MODE (XEXP (op, 0))))
        {
          machine_mode inner = GET_MODE (XEXP (op, 0));
          int isize = GET_MODE_PRECISION (inner);
          if (STORE_FLAG_VALUE == 1)
            {
              temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (SIGN_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
          else if (STORE_FLAG_VALUE == -1)
            {
              temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
                                          GEN_INT (isize - 1));
              if (mode == inner)
                return temp;
              if (GET_MODE_PRECISION (mode) > isize)
                return simplify_gen_unary (ZERO_EXTEND, mode, temp, inner);
              return simplify_gen_unary (TRUNCATE, mode, temp, inner);
            }
        }
      break;

    case TRUNCATE:
      /* Don't optimize (lshiftrt (mult ...)) as it would interfere
         with the umulXi3_highpart patterns.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == MULT)
        break;

      if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
        {
          if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
            {
              temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
              if (temp)
                return temp;
            }
          /* We can't handle truncation to a partial integer mode here
             because we don't know the real bitsize of the partial
             integer mode.  */
          break;
        }

      if (GET_MODE (op) != VOIDmode)
        {
          temp = simplify_truncation (mode, op, GET_MODE (op));
          if (temp)
            return temp;
        }

      /* If we know that the value is already truncated, we can
         replace the TRUNCATE with a SUBREG.  */
      if (GET_MODE_NUNITS (mode) == 1
          && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
              || truncated_to_mode (mode, op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a comparison can be replaced with a subreg if
         STORE_FLAG_VALUE permits.  This is like the previous test,
         but it works even if the comparison is done in a mode larger
         than HOST_BITS_PER_WIDE_INT.  */
      if (HWI_COMPUTABLE_MODE_P (mode)
          && COMPARISON_P (op)
          && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0)
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* A truncate of a memory is just loading the low part of the memory
         if we are not changing the meaning of the address.  */
      if (GET_CODE (op) == MEM
          && !VECTOR_MODE_P (mode)
          && !MEM_VOLATILE_P (op)
          && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      break;

    case FLOAT_TRUNCATE:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          && GET_MODE (XEXP (op, 0)) == mode)
        return XEXP (op, 0);

      /* (float_truncate:SF (float_truncate:DF foo:XF))
         = (float_truncate:SF foo:XF).
         This may eliminate double rounding, so it is unsafe.

         (float_truncate:SF (float_extend:XF foo:DF))
         = (float_truncate:SF foo:DF).

         (float_truncate:DF (float_extend:XF foo:SF))
         = (float_extend:DF foo:SF).  */
      if ((GET_CODE (op) == FLOAT_TRUNCATE
           && flag_unsafe_math_optimizations)
          || GET_CODE (op) == FLOAT_EXTEND)
        return simplify_gen_unary (GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))
                                   > GET_MODE_SIZE (mode)
                                   ? FLOAT_TRUNCATE : FLOAT_EXTEND,
                                   mode,
                                   XEXP (op, 0), mode);

      /* (float_truncate (float x)) is (float x).  */
      if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
          && (flag_unsafe_math_optimizations
              || exact_int_to_float_conversion_p (op)))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
         (OP:SF foo:SF) if OP is NEG or ABS.  */
      if ((GET_CODE (op) == ABS
           || GET_CODE (op) == NEG)
          && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
          && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (XEXP (op, 0), 0), mode);

      /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
         is (float_truncate:SF x).  */
      if (GET_CODE (op) == SUBREG
          && subreg_lowpart_p (op)
          && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
        return SUBREG_REG (op);
      break;

    case FLOAT_EXTEND:
      if (DECIMAL_FLOAT_MODE_P (mode))
        break;

      /* (float_extend (float_extend x)) is (float_extend x).

         (float_extend (float x)) is (float x) assuming that double
         rounding can't happen.  */
      if (GET_CODE (op) == FLOAT_EXTEND
          || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
              && exact_int_to_float_conversion_p (op)))
        return simplify_gen_unary (GET_CODE (op), mode,
                                   XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      break;

    case ABS:
      /* (abs (neg <foo>)) -> (abs <foo>) */
      if (GET_CODE (op) == NEG)
        return simplify_gen_unary (ABS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
         do nothing.  */
      if (GET_MODE (op) == VOIDmode)
        break;

      /* If operand is something known to be positive, ignore the ABS.  */
      if (GET_CODE (op) == FFS || GET_CODE (op) == ABS
          || val_signbit_known_clear_p (GET_MODE (op),
                                        nonzero_bits (op, GET_MODE (op))))
        return op;

      /* If operand is known to be only -1 or 0, convert ABS to NEG.  */
      if (num_sign_bit_copies (op, mode) == GET_MODE_PRECISION (mode))
        return gen_rtx_NEG (mode, op);

      break;

    case FFS:
      /* (ffs (*_extend <X>)) = (ffs <X>) */
      if (GET_CODE (op) == SIGN_EXTEND
          || GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (FFS, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case POPCOUNT:
      switch (GET_CODE (op))
        {
        case BSWAP:
        case ZERO_EXTEND:
          /* (popcount (zero_extend <X>)) = (popcount <X>) */
          return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect popcount.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case PARITY:
      switch (GET_CODE (op))
        {
        case NOT:
        case BSWAP:
        case ZERO_EXTEND:
        case SIGN_EXTEND:
          return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));

        case ROTATE:
        case ROTATERT:
          /* Rotations don't affect parity.  */
          if (!side_effects_p (XEXP (op, 1)))
            return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
                                       GET_MODE (XEXP (op, 0)));
          break;

        default:
          break;
        }
      break;

    case BSWAP:
      /* (bswap (bswap x)) -> x.  */
      if (GET_CODE (op) == BSWAP)
        return XEXP (op, 0);
      break;

    case FLOAT:
      /* (float (sign_extend <X>)) = (float <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND)
        return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));
      break;

    case SIGN_EXTEND:
      /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
         becomes just the MINUS if its mode is MODE.  This allows
         folding switch statements on machines using casesi (such as
         the VAX).  */
      if (GET_CODE (op) == TRUNCATE
          && GET_MODE (XEXP (op, 0)) == mode
          && GET_CODE (XEXP (op, 0)) == MINUS
          && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
          && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
        return XEXP (op, 0);

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == SIGN_EXTEND
               || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == SIGN_EXTEND
                  || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == ASHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == SIGN_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == ASHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == SIGN_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
            }
        }

      /* Check for a sign extension of a subreg of a promoted
         variable, where the promotion is sign-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_SIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
         (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
        {
          gcc_assert (GET_MODE_PRECISION (mode)
                      > GET_MODE_PRECISION (GET_MODE (op)));
          return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
                                     GET_MODE (XEXP (op, 0)));
        }

      /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (sign_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_BITSIZE (N) - I bits.
         (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is similarly (zero_extend:M (subreg:O <X>)).  */
      if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_BITSIZE (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          machine_mode tmode
            = mode_for_size (GET_MODE_BITSIZE (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          gcc_assert (GET_MODE_BITSIZE (mode)
                      > GET_MODE_BITSIZE (GET_MODE (op)));
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
                                           ? SIGN_EXTEND : ZERO_EXTEND,
                                           mode, inner, tmode);
            }
        }

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && ! POINTERS_EXTEND_UNSIGNED
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode))
          && !targetm.have_ptr_extend ())
        return convert_memory_address (Pmode, op);
#endif
      break;

    case ZERO_EXTEND:
      /* Check for a zero extension of a subreg of a promoted
         variable, where the promotion is zero-extended, and the
         target mode is the same as the variable's promotion.  */
      if (GET_CODE (op) == SUBREG
          && SUBREG_PROMOTED_VAR_P (op)
          && SUBREG_PROMOTED_UNSIGNED_P (op)
          && GET_MODE_SIZE (mode) <= GET_MODE_SIZE (GET_MODE (XEXP (op, 0))))
        {
          temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
          if (temp)
            return temp;
        }

      /* Extending a widening multiplication should be canonicalized to
         a wider widening multiplication.  */
      if (GET_CODE (op) == MULT)
        {
          rtx lhs = XEXP (op, 0);
          rtx rhs = XEXP (op, 1);
          enum rtx_code lcode = GET_CODE (lhs);
          enum rtx_code rcode = GET_CODE (rhs);

          /* Widening multiplies usually extend both operands, but sometimes
             they use a shift to extract a portion of a register.  */
          if ((lcode == ZERO_EXTEND
               || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
              && (rcode == ZERO_EXTEND
                  || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
            {
              machine_mode lmode = GET_MODE (lhs);
              machine_mode rmode = GET_MODE (rhs);
              int bits;

              if (lcode == LSHIFTRT)
                /* Number of bits not shifted off the end.  */
                bits = GET_MODE_PRECISION (lmode) - INTVAL (XEXP (lhs, 1));
              else /* lcode == ZERO_EXTEND */
                /* Size of inner mode.  */
                bits = GET_MODE_PRECISION (GET_MODE (XEXP (lhs, 0)));

              if (rcode == LSHIFTRT)
                bits += GET_MODE_PRECISION (rmode) - INTVAL (XEXP (rhs, 1));
              else /* rcode == ZERO_EXTEND */
                bits += GET_MODE_PRECISION (GET_MODE (XEXP (rhs, 0)));

              /* We can only widen multiplies if the result is mathematically
                 equivalent.  I.e. if overflow was impossible.  */
              if (bits <= GET_MODE_PRECISION (GET_MODE (op)))
                return simplify_gen_binary
                         (MULT, mode,
                          simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
                          simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
            }
        }

      /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>).  */
      if (GET_CODE (op) == ZERO_EXTEND)
        return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
                                   GET_MODE (XEXP (op, 0)));

      /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
         is (zero_extend:M (subreg:O <X>)) if there is mode with
         GET_MODE_PRECISION (N) - I bits.  */
      if (GET_CODE (op) == LSHIFTRT
          && GET_CODE (XEXP (op, 0)) == ASHIFT
          && CONST_INT_P (XEXP (op, 1))
          && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
          && GET_MODE_PRECISION (GET_MODE (op)) > INTVAL (XEXP (op, 1)))
        {
          machine_mode tmode
            = mode_for_size (GET_MODE_PRECISION (GET_MODE (op))
                             - INTVAL (XEXP (op, 1)), MODE_INT, 1);
          if (tmode != BLKmode)
            {
              rtx inner =
                rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
              if (inner)
                return simplify_gen_unary (ZERO_EXTEND, mode, inner, tmode);
            }
        }

      /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
         (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
         of mode N.  E.g.
         (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
         (and:SI (reg:SI) (const_int 63)).  */
      if (GET_CODE (op) == SUBREG
          && GET_MODE_PRECISION (GET_MODE (op))
             < GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
          && GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
             <= HOST_BITS_PER_WIDE_INT
          && GET_MODE_PRECISION (mode)
             >= GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op)))
          && subreg_lowpart_p (op)
          && (nonzero_bits (SUBREG_REG (op), GET_MODE (SUBREG_REG (op)))
              & ~GET_MODE_MASK (GET_MODE (op))) == 0)
        {
          if (GET_MODE_PRECISION (mode)
              == GET_MODE_PRECISION (GET_MODE (SUBREG_REG (op))))
            return SUBREG_REG (op);
          return simplify_gen_unary (ZERO_EXTEND, mode, SUBREG_REG (op),
                                     GET_MODE (SUBREG_REG (op)));
        }

#if defined(POINTERS_EXTEND_UNSIGNED)
      /* As we do not know which address space the pointer is referring to,
         we can do this only if the target does not support different pointer
         or address modes depending on the address space.  */
      if (target_default_pointer_address_modes_p ()
          && POINTERS_EXTEND_UNSIGNED > 0
          && mode == Pmode && GET_MODE (op) == ptr_mode
          && (CONSTANT_P (op)
              || (GET_CODE (op) == SUBREG
                  && REG_P (SUBREG_REG (op))
                  && REG_POINTER (SUBREG_REG (op))
                  && GET_MODE (SUBREG_REG (op)) == Pmode))
          && !targetm.have_ptr_extend ())
        return convert_memory_address (Pmode, op);
#endif
      break;

    default:
      break;
    }

  return 0;
}

/* Try to compute the value of a unary operation CODE whose output mode is to
   be MODE with input operand OP whose mode was originally OP_MODE.
   Return zero if the value cannot be computed.  */
rtx
simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
                                rtx op, machine_mode op_mode)
{
  unsigned int width = GET_MODE_PRECISION (mode);

  if (code == VEC_DUPLICATE)
    {
      gcc_assert (VECTOR_MODE_P (mode));
      if (GET_MODE (op) != VOIDmode)
        {
          if (!VECTOR_MODE_P (GET_MODE (op)))
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
          else
            gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
                                                 (GET_MODE (op)));
        }
      if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op)
          || GET_CODE (op) == CONST_VECTOR)
        {
          int elt_size = GET_MODE_UNIT_SIZE (mode);
          unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
          rtvec v = rtvec_alloc (n_elts);
          unsigned int i;

          if (GET_CODE (op) != CONST_VECTOR)
            for (i = 0; i < n_elts; i++)
              RTVEC_ELT (v, i) = op;
          else
            {
              machine_mode inmode = GET_MODE (op);
              int in_elt_size = GET_MODE_UNIT_SIZE (inmode);
              unsigned in_n_elts = (GET_MODE_SIZE (inmode) / in_elt_size);

              gcc_assert (in_n_elts < n_elts);
              gcc_assert ((n_elts % in_n_elts) == 0);
              for (i = 0; i < n_elts; i++)
                RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op, i % in_n_elts);
            }
          return gen_rtx_CONST_VECTOR (mode, v);
        }
    }

  if (VECTOR_MODE_P (mode) && GET_CODE (op) == CONST_VECTOR)
    {
      int elt_size = GET_MODE_UNIT_SIZE (mode);
      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
      machine_mode opmode = GET_MODE (op);
      int op_elt_size = GET_MODE_UNIT_SIZE (opmode);
      unsigned op_n_elts = (GET_MODE_SIZE (opmode) / op_elt_size);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
        {
          rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
                                            CONST_VECTOR_ELT (op, i),
                                            GET_MODE_INNER (opmode));
          if (!x)
            return 0;
          RTVEC_ELT (v, i) = x;
        }
      return gen_rtx_CONST_VECTOR (mode, v);
    }

  /* The order of these tests is critical so that, for example, we don't
     check the wrong mode (input vs. output) for a conversion operation,
     such as FIX.  At some point, this should be simplified.  */

  if (code == FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
        {
          /* A CONST_INT has VOIDmode as its mode.  We assume that all
             the bits of the constant are significant, though this is a
             dangerous assumption, as CONST_INTs are often created and
             used with garbage in the bits outside of the precision of
             the implied mode of the const_int.  */
          op_mode = MAX_MODE_INT;
        }

      real_from_integer (&d, mode, std::make_pair (op, op_mode), SIGNED);
      d = real_value_truncate (mode, d);
      return const_double_from_real_value (d, mode);
    }
  else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
    {
      REAL_VALUE_TYPE d;

      if (op_mode == VOIDmode)
        {
          /* As above, assume all the bits of the constant are
             significant when the implied mode is unknown.  */
          op_mode = MAX_MODE_INT;
        }

      real_from_integer (&d, mode, std::make_pair (op, op_mode), UNSIGNED);
      d = real_value_truncate (mode, d);
      return const_double_from_real_value (d, mode);
    }

  if (CONST_SCALAR_INT_P (op) && width > 0)
    {
      wide_int result;
      machine_mode imode = op_mode == VOIDmode ? mode : op_mode;
      rtx_mode_t op0 = std::make_pair (op, imode);
      int int_value;

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
         that cannot be represented in a CONST_DOUBLE, but a lot of
         upstream callers expect that this function never fails to
         simplify something, so if you added this to the test above,
         the code would die later anyway.  If this assert happens, you
         just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif

      switch (code)
        {
        case NOT:
          result = wi::bit_not (op0);
          break;

        case NEG:
          result = wi::neg (op0);
          break;

        case ABS:
          result = wi::abs (op0);
          break;

        case FFS:
          result = wi::shwi (wi::ffs (op0), mode);
          break;

        case CLZ:
          if (wi::ne_p (op0, 0))
            int_value = wi::clz (op0);
          else if (! CLZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
            int_value = GET_MODE_PRECISION (mode);
          result = wi::shwi (int_value, mode);
          break;

        case CLRSB:
          result = wi::shwi (wi::clrsb (op0), mode);
          break;

        case CTZ:
          if (wi::ne_p (op0, 0))
            int_value = wi::ctz (op0);
          else if (! CTZ_DEFINED_VALUE_AT_ZERO (mode, int_value))
            int_value = GET_MODE_PRECISION (mode);
          result = wi::shwi (int_value, mode);
          break;

        case POPCOUNT:
          result = wi::shwi (wi::popcount (op0), mode);
          break;

        case PARITY:
          result = wi::shwi (wi::parity (op0), mode);
          break;

        case BSWAP:
          result = wide_int (op0).bswap ();
          break;

        case TRUNCATE:
        case ZERO_EXTEND:
          result = wide_int::from (op0, width, UNSIGNED);
          break;

        case SIGN_EXTEND:
          result = wide_int::from (op0, width, SIGNED);
          break;

        case SQRT:
        default:
          return 0;
        }

      return immed_wide_int_const (result, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
           && SCALAR_FLOAT_MODE_P (mode)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
    {
      REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
      switch (code)
        {
        case SQRT:
          return 0;
        case ABS:
          d = real_value_abs (&d);
          break;
        case NEG:
          d = real_value_negate (&d);
          break;
        case FLOAT_TRUNCATE:
          d = real_value_truncate (mode, d);
          break;
        case FLOAT_EXTEND:
          /* All this does is change the mode, unless changing
             mode class.  */
          if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
            real_convert (&d, mode, &d);
          break;
        case FIX:
          real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
          break;
        case NOT:
          {
            long tmp[4];
            int i;

            real_to_target (tmp, &d, GET_MODE (op));
            for (i = 0; i < 4; i++)
              tmp[i] = ~tmp[i];
            real_from_target (&d, tmp, mode);
            break;
          }
        default:
          gcc_unreachable ();
        }
      return const_double_from_real_value (d, mode);
    }
  else if (CONST_DOUBLE_AS_FLOAT_P (op)
           && SCALAR_FLOAT_MODE_P (GET_MODE (op))
           && GET_MODE_CLASS (mode) == MODE_INT
           && width > 0)
    {
      /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
         operators are intentionally left unspecified (to ease implementation
         by target backends), for consistency, this routine implements the
         same semantics for constant folding as used by the middle-end.  */

      /* This was formerly used only for non-IEEE float.
         eggert@twinsun.com says it is safe for IEEE also.  */
      REAL_VALUE_TYPE t;
      const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
      wide_int wmax, wmin;
      /* This is part of the abi to real_to_integer, but we check
         things before making this call.  */
      bool fail;

      switch (code)
        {
        case FIX:
          if (REAL_VALUE_ISNAN (*x))
            return const0_rtx;

          /* Test against the signed upper bound.  */
          wmax = wi::max_value (width, SIGNED);
          real_from_integer (&t, VOIDmode, wmax, SIGNED);
          if (real_less (&t, x))
            return immed_wide_int_const (wmax, mode);

          /* Test against the signed lower bound.  */
          wmin = wi::min_value (width, SIGNED);
          real_from_integer (&t, VOIDmode, wmin, SIGNED);
          if (real_less (x, &t))
            return immed_wide_int_const (wmin, mode);

          return immed_wide_int_const (real_to_integer (x, &fail, width),
                                       mode);

        case UNSIGNED_FIX:
          if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
            return const0_rtx;

          /* Test against the unsigned upper bound.  */
          wmax = wi::max_value (width, UNSIGNED);
          real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
          if (real_less (&t, x))
            return immed_wide_int_const (wmax, mode);

          return immed_wide_int_const (real_to_integer (x, &fail, width),
                                       mode);

        default:
          gcc_unreachable ();
        }
    }

  return 0;
}

/* Subroutine of simplify_binary_operation to simplify a binary operation
   CODE that can commute with byte swapping, with result mode MODE and
   operating on OP0 and OP1.  CODE is currently one of AND, IOR or XOR.
   Return zero if no simplification or canonicalization is possible.  */

static rtx
simplify_byte_swapping_operation (enum rtx_code code, machine_mode mode,
                                  rtx op0, rtx op1)
{
  rtx tem;

  /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped.  */
  if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
                                 simplify_gen_unary (BSWAP, mode, op1, mode));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  /* (op (bswap x) (bswap y)) -> (bswap (op x y)).  */
  if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
    {
      tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
      return simplify_gen_unary (BSWAP, mode, tem, mode);
    }

  return NULL_RTX;
}
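
/* For example, (and:SI (bswap:SI x) (const_int 0xff)) becomes
   (bswap:SI (and:SI x (const_int 0xff000000))): the constant is
   byte-swapped once at compile time, and the remaining BSWAP can then
   cancel against a neighbouring BSWAP or a byte-reversing memory op.  */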

/* Subroutine of simplify_binary_operation to simplify a commutative,
   associative binary operation CODE with result mode MODE, operating
   on OP0 and OP1.  CODE is currently one of PLUS, MULT, AND, IOR, XOR,
   SMIN, SMAX, UMIN or UMAX.  Return zero if no simplification or
   canonicalization is possible.  */

static rtx
simplify_associative_operation (enum rtx_code code, machine_mode mode,
                                rtx op0, rtx op1)
{
  rtx tem;

  /* Linearize the operator to the left.  */
  if (GET_CODE (op1) == code)
    {
      /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)".  */
      if (GET_CODE (op0) == code)
        {
          tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
          return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
        }

      /* "a op (b op c)" becomes "(b op c) op a".  */
      if (! swap_commutative_operands_p (op1, op0))
        return simplify_gen_binary (code, mode, op1, op0);

      std::swap (op0, op1);
    }

  if (GET_CODE (op0) == code)
    {
      /* Canonicalize "(x op c) op y" as "(x op y) op c".  */
      if (swap_commutative_operands_p (XEXP (op0, 1), op1))
        {
          tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
          return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
        }

      /* Attempt to simplify "(a op b) op c" as "a op (b op c)".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);

      /* Attempt to simplify "(a op b) op c" as "(a op c) op b".  */
      tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
      if (tem != 0)
        return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
    }

  return 0;
}
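
/* For example, (plus:SI (plus:SI x (const_int 4)) (const_int 5)) hits
   the "(a op b) op c" -> "a op (b op c)" attempt above: the two
   constants fold, giving (plus:SI x (const_int 9)).  */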
/* Simplify a binary operation CODE with result mode MODE, operating on OP0
   and OP1.  Return 0 if no simplification is possible.

   Don't use this for relational operations such as EQ or LT.
   Use simplify_relational_operation instead.  */

rtx
simplify_binary_operation (enum rtx_code code, machine_mode mode,
			   rtx op0, rtx op1)
{
  rtx trueop0, trueop1;
  rtx tem;

  /* Relational operations don't work here.  We must know the mode
     of the operands in order to do the comparison correctly.
     Assuming a full word can give incorrect results.
     Consider comparing 128 with -128 in QImode.  */
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
  gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);

  /* Make sure the constant is second.  */
  if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
      && swap_commutative_operands_p (op0, op1))
    std::swap (op0, op1);

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
  if (tem)
    return tem;
  tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);

  if (tem)
    return tem;

  /* If the above steps did not result in a simplification and op0 or op1
     were constant pool references, use the referenced constants directly.  */
  if (trueop0 != op0 || trueop1 != op1)
    return simplify_gen_binary (code, mode, trueop0, trueop1);

  return NULL_RTX;
}
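
/* Illustrative sketch, not part of the original file: what a caller
   sees from simplify_binary_operation.  The pseudo-register number is
   hypothetical.  */

static void ATTRIBUTE_UNUSED
example_simplify_binary (void)
{
  /* Two constants fold completely: 2 + 3 becomes (const_int 5).  */
  rtx sum = simplify_binary_operation (PLUS, SImode,
				       GEN_INT (2), GEN_INT (3));
  gcc_assert (sum != NULL_RTX && CONST_INT_P (sum) && INTVAL (sum) == 5);

  /* Adding zero in an integer mode simply returns the other operand.  */
  rtx reg = gen_rtx_REG (SImode, 100);
  gcc_assert (simplify_binary_operation (PLUS, SImode, reg, const0_rtx)
	      == reg);
}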
/* Subroutine of simplify_binary_operation.  Simplify a binary operation
   CODE with result mode MODE, operating on OP0 and OP1.  If OP0 and/or
   OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
   actual constants.  */

static rtx
simplify_binary_operation_1 (enum rtx_code code, machine_mode mode,
			     rtx op0, rtx op1, rtx trueop0, rtx trueop1)
{
  rtx tem, reversed, opleft, opright;
  HOST_WIDE_INT val;
  unsigned int width = GET_MODE_PRECISION (mode);

  /* Even if we can't compute a constant result,
     there are some cases worth simplifying.  */

  switch (code)
    {
    case PLUS:
      /* Maybe simplify x + 0 to x.  The two expressions are equivalent
	 when x is NaN, infinite, or finite and nonzero.  They aren't
	 when x is -0 and the rounding mode is not towards -infinity,
	 since (-0) + 0 is then 0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop1 == CONST0_RTX (mode))
	return op0;

      /* ((-a) + b) -> (b - a) and similarly for (a + (-b)).  These
	 transformations are safe even for IEEE.  */
      if (GET_CODE (op0) == NEG)
	return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
      else if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));

      /* (~a) + 1 -> -a */
      if (INTEGRAL_MODE_P (mode)
	  && GET_CODE (op0) == NOT
	  && trueop1 == const1_rtx)
	return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);

      /* Handle both-operands-constant cases.  We can only add
	 CONST_INTs to constants since the sum of relocatable symbols
	 can't be handled by most assemblers.  Don't add CONST_INT
	 to CONST_INT since overflow won't be computed properly if wider
	 than HOST_BITS_PER_WIDE_INT.  */

      if ((GET_CODE (op0) == CONST
	   || GET_CODE (op0) == SYMBOL_REF
	   || GET_CODE (op0) == LABEL_REF)
	  && CONST_INT_P (op1))
	return plus_constant (mode, op0, INTVAL (op1));
      else if ((GET_CODE (op1) == CONST
		|| GET_CODE (op1) == SYMBOL_REF
		|| GET_CODE (op1) == LABEL_REF)
	       && CONST_INT_P (op0))
	return plus_constant (mode, op1, INTVAL (op0));

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  rtx lhs = op0, rhs = op1;

	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
	  wide_int coeff1 = wi::one (GET_MODE_PRECISION (mode));

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = std::make_pair (XEXP (lhs, 1), mode);
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
					    GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      coeff1 = wi::minus_one (GET_MODE_PRECISION (mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      coeff1 = std::make_pair (XEXP (rhs, 1), mode);
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
					    GET_MODE_PRECISION (mode));
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_PLUS (mode, op0, op1);
	      rtx coeff;
	      bool speed = optimize_function_for_speed_p (cfun);

	      coeff = immed_wide_int_const (coeff0 + coeff1, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return (set_src_cost (tem, mode, speed)
		      <= set_src_cost (orig, mode, speed) ? tem : 0);
	    }
	}
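
      /* For example, (plus (mult x (const_int 3)) x) matches the code
	 above with coeff0 == 3 and coeff1 == 1, producing
	 (mult x (const_int 4)); the result is kept only when
	 set_src_cost says it is no more expensive than the original.  */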
      /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == XOR
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op0) == MULT
	  && GET_CODE (XEXP (op0, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op0, 0), 0);
	  in2 = XEXP (op0, 1);
	  return simplify_gen_binary (MINUS, mode, op1,
				      simplify_gen_binary (MULT, mode,
							   in1, in2));
	}

      /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
	 C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
	 is 1.  */
      if (COMPARISON_P (op0)
	  && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
	      || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
	  && (reversed = reversed_comparison (op0, mode)))
	return
	  simplify_gen_unary (NEG, mode, reversed, mode);

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;

      /* Reassociate floating point addition only when the user
	 specifies associative math operations.  */
      if (FLOAT_MODE_P (mode)
	  && flag_associative_math)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case COMPARE:
      /* Convert (compare (gt (flags) 0) (lt (flags) 0)) to (flags).  */
      if (((GET_CODE (op0) == GT && GET_CODE (op1) == LT)
	   || (GET_CODE (op0) == GTU && GET_CODE (op1) == LTU))
	  && XEXP (op0, 1) == const0_rtx && XEXP (op1, 1) == const0_rtx)
	{
	  rtx xop00 = XEXP (op0, 0);
	  rtx xop10 = XEXP (op1, 0);

	  if (GET_CODE (xop00) == CC0 && GET_CODE (xop10) == CC0)
	    return xop00;

	  if (REG_P (xop00) && REG_P (xop10)
	      && GET_MODE (xop00) == GET_MODE (xop10)
	      && REGNO (xop00) == REGNO (xop10)
	      && GET_MODE_CLASS (GET_MODE (xop00)) == MODE_CC
	      && GET_MODE_CLASS (GET_MODE (xop10)) == MODE_CC)
	    return xop00;
	}
      break;
    case MINUS:
      /* We can't assume x-x is 0 even with non-IEEE floating point,
	 but since it is zero except in very strange circumstances, we
	 will treat it as zero with -ffinite-math-only.  */
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
	return CONST0_RTX (mode);

      /* Change subtraction from zero into negation.  (0 - x) is the
	 same as -x when x is NaN, infinite, or finite and nonzero.
	 But if the mode has signed zeros, and does not round towards
	 -infinity, then 0 - 0 is 0, not -0.  */
      if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
	return simplify_gen_unary (NEG, mode, op1, mode);

      /* (-1 - a) is ~a.  */
      if (trueop0 == constm1_rtx)
	return simplify_gen_unary (NOT, mode, op1, mode);

      /* Subtracting 0 has no effect unless the mode has signed zeros
	 and supports rounding towards -infinity.  In such a case,
	 0 - 0 is -0.  */
      if (!(HONOR_SIGNED_ZEROS (mode)
	    && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
	  && trueop1 == CONST0_RTX (mode))
	return op0;

      /* See if this is something like X * C - X or vice versa or
	 if the multiplication is written as a shift.  If so, we can
	 distribute and make a new multiply, shift, or maybe just
	 have X (if C is 2 in the example above).  But don't make
	 something more expensive than we had before.  */

      if (SCALAR_INT_MODE_P (mode))
	{
	  rtx lhs = op0, rhs = op1;

	  wide_int coeff0 = wi::one (GET_MODE_PRECISION (mode));
	  wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (mode));

	  if (GET_CODE (lhs) == NEG)
	    {
	      coeff0 = wi::minus_one (GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == MULT
		   && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
	    {
	      coeff0 = std::make_pair (XEXP (lhs, 1), mode);
	      lhs = XEXP (lhs, 0);
	    }
	  else if (GET_CODE (lhs) == ASHIFT
		   && CONST_INT_P (XEXP (lhs, 1))
		   && INTVAL (XEXP (lhs, 1)) >= 0
		   && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
					    GET_MODE_PRECISION (mode));
	      lhs = XEXP (lhs, 0);
	    }

	  if (GET_CODE (rhs) == NEG)
	    {
	      negcoeff1 = wi::one (GET_MODE_PRECISION (mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == MULT
		   && CONST_INT_P (XEXP (rhs, 1)))
	    {
	      negcoeff1 = wi::neg (std::make_pair (XEXP (rhs, 1), mode));
	      rhs = XEXP (rhs, 0);
	    }
	  else if (GET_CODE (rhs) == ASHIFT
		   && CONST_INT_P (XEXP (rhs, 1))
		   && INTVAL (XEXP (rhs, 1)) >= 0
		   && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (mode))
	    {
	      negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
					       GET_MODE_PRECISION (mode));
	      negcoeff1 = -negcoeff1;
	      rhs = XEXP (rhs, 0);
	    }

	  if (rtx_equal_p (lhs, rhs))
	    {
	      rtx orig = gen_rtx_MINUS (mode, op0, op1);
	      rtx coeff;
	      bool speed = optimize_function_for_speed_p (cfun);

	      coeff = immed_wide_int_const (coeff0 + negcoeff1, mode);

	      tem = simplify_gen_binary (MULT, mode, lhs, coeff);
	      return (set_src_cost (tem, mode, speed)
		      <= set_src_cost (orig, mode, speed) ? tem : 0);
	    }
	}
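
      /* For example, (minus (ashift x (const_int 2)) x) matches the
	 code above with coeff0 == 4 and negcoeff1 == -1, producing
	 (mult x (const_int 3)) when that is no more expensive than the
	 original expression.  */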
      /* (a - (-b)) -> (a + b).  True even for IEEE.  */
      if (GET_CODE (op1) == NEG)
	return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));

      /* (-x - c) may be simplified as (-c - x).  */
      if (GET_CODE (op0) == NEG
	  && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
	}

      /* Don't let a relocatable value get a negative coeff.  */
      if (CONST_INT_P (op1) && GET_MODE (op0) != VOIDmode)
	return simplify_gen_binary (PLUS, mode,
				    op0,
				    neg_const_int (mode, op1));

      /* (x - (x & y)) -> (x & ~y) */
      if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
	{
	  if (rtx_equal_p (op0, XEXP (op1, 0)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
					GET_MODE (XEXP (op1, 1)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	  if (rtx_equal_p (op0, XEXP (op1, 1)))
	    {
	      tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
					GET_MODE (XEXP (op1, 0)));
	      return simplify_gen_binary (AND, mode, op0, tem);
	    }
	}

      /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
	 by reversing the comparison code if valid.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop0 == const1_rtx
	  && COMPARISON_P (op1)
	  && (reversed = reversed_comparison (op1, mode)))
	return reversed;

      /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (XEXP (op1, 0)) == NEG)
	{
	  rtx in1, in2;

	  in1 = XEXP (XEXP (op1, 0), 0);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (PLUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      op0);
	}

      /* Canonicalize (minus (neg A) (mult B C)) to
	 (minus (mult (neg B) C) A).  */
      if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
	  && GET_CODE (op1) == MULT
	  && GET_CODE (op0) == NEG)
	{
	  rtx in1, in2;

	  in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
	  in2 = XEXP (op1, 1);
	  return simplify_gen_binary (MINUS, mode,
				      simplify_gen_binary (MULT, mode,
							   in1, in2),
				      XEXP (op0, 0));
	}

      /* If one of the operands is a PLUS or a MINUS, see if we can
	 simplify this by the associative law.  This will, for example,
	 canonicalize (minus A (plus B C)) to (minus (minus A B) C).
	 Don't use the associative law for floating point.
	 The inaccuracy makes it nonassociative,
	 and subtle programs can break if operations are associated.  */

      if (INTEGRAL_MODE_P (mode)
	  && (plus_minus_operand_p (op0)
	      || plus_minus_operand_p (op1))
	  && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
	return tem;
      break;
    case MULT:
      if (trueop1 == constm1_rtx)
	return simplify_gen_unary (NEG, mode, op0, mode);

      if (GET_CODE (op0) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
	  /* If op1 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could through simplify_associative_operation move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op1) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op1, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
	}
      if (GET_CODE (op1) == NEG)
	{
	  rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
	  /* If op0 is a MULT as well and simplify_unary_operation
	     just moved the NEG to the second operand, simplify_gen_binary
	     below could through simplify_associative_operation move
	     the NEG around again and recurse endlessly.  */
	  if (temp
	      && GET_CODE (op0) == MULT
	      && GET_CODE (temp) == MULT
	      && XEXP (op0, 0) == XEXP (temp, 0)
	      && GET_CODE (XEXP (temp, 1)) == NEG
	      && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
	    temp = NULL_RTX;
	  if (temp)
	    return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
	}

      /* Maybe simplify x * 0 to 0.  The reduction is not valid if
	 x is NaN, since x * 0 is then also NaN.  Nor is it valid
	 when the mode has signed zeros, since multiplying a negative
	 number by 0 will give -0, not 0.  */
      if (!HONOR_NANS (mode)
	  && !HONOR_SIGNED_ZEROS (mode)
	  && trueop1 == CONST0_RTX (mode)
	  && ! side_effects_p (op0))
	return op1;

      /* In IEEE floating point, x*1 is not equivalent to x for
	 signalling NaNs.  */
      if (!HONOR_SNANS (mode)
	  && trueop1 == CONST1_RTX (mode))
	return op0;

      /* Convert multiply by constant power of two into shift.  */
      if (CONST_SCALAR_INT_P (trueop1))
	{
	  val = wi::exact_log2 (std::make_pair (trueop1, mode));
	  if (val >= 0)
	    return simplify_gen_binary (ASHIFT, mode, op0, GEN_INT (val));
	}
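
      /* For example, (mult x (const_int 8)) becomes
	 (ashift x (const_int 3)) here; wi::exact_log2 returns -1 for
	 constants that are not a power of two, leaving the multiply
	 alone.  */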
      /* x*2 is x+x and x*(-1) is -x */
      if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	  && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
	  && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
	  && GET_MODE (op0) == mode)
	{
	  const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

	  if (real_equal (d1, &dconst2))
	    return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));

	  if (!HONOR_SNANS (mode)
	      && real_equal (d1, &dconstm1))
	    return simplify_gen_unary (NEG, mode, op0, mode);
	}

      /* Optimize -x * -x as x * x.  */
      if (FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == NEG
	  && GET_CODE (op1) == NEG
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Likewise, optimize abs(x) * abs(x) as x * x.  */
      if (SCALAR_FLOAT_MODE_P (mode)
	  && GET_CODE (op0) == ABS
	  && GET_CODE (op1) == ABS
	  && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
	  && !side_effects_p (XEXP (op0, 0)))
	return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));

      /* Reassociate multiplication, but for floating point MULTs
	 only when the user specifies unsafe math optimizations.  */
      if (! FLOAT_MODE_P (mode)
	  || flag_unsafe_math_optimizations)
	{
	  tem = simplify_associative_operation (code, mode, op0, op1);
	  if (tem)
	    return tem;
	}
      break;
    case IOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode)
	  && trueop1 == CONSTM1_RTX (mode)
	  && !side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      /* A | (~A) -> -1 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && SCALAR_INT_MODE_P (mode))
	return constm1_rtx;

      /* (ior A C) is C if all bits of A that might be nonzero are on in C.  */
      if (CONST_INT_P (op1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
	  && !side_effects_p (op0))
	return op1;

      /* Canonicalize (X & C1) | C2.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT mask = GET_MODE_MASK (mode);
	  HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT c2 = INTVAL (trueop1);

	  /* If (C1&C2) == C1, then (X&C1)|C2 becomes X.  */
	  if ((c1 & c2) == c1
	      && !side_effects_p (XEXP (op0, 0)))
	    return trueop1;

	  /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2.  */
	  if (((c1|c2) & mask) == mask)
	    return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);

	  /* Minimize the number of bits set in C1, i.e. C1 := C1 & ~C2.  */
	  if (((c1 & ~c2) & mask) != (c1 & mask))
	    {
	      tem = simplify_gen_binary (AND, mode, XEXP (op0, 0),
					 gen_int_mode (c1 & ~c2, mode));
	      return simplify_gen_binary (IOR, mode, tem, op1);
	    }
	}
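
      /* For example, with C1 == 0x0f and C2 == 0x05,
	 (ior (and x (const_int 0x0f)) (const_int 0x05)) becomes
	 (ior (and x (const_int 0x0a)) (const_int 0x05)): bits that C2
	 already guarantees are removed from C1.  */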
      /* Convert (A & B) | A to A.  */
      if (GET_CODE (op0) == AND
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
	 mode size to (rotate A CX).  */

      if (GET_CODE (op1) == ASHIFT
	  || GET_CODE (op1) == SUBREG)
	{
	  opleft = op1;
	  opright = op0;
	}
      else
	{
	  opright = op1;
	  opleft = op0;
	}

      if (GET_CODE (opleft) == ASHIFT && GET_CODE (opright) == LSHIFTRT
	  && rtx_equal_p (XEXP (opleft, 0), XEXP (opright, 0))
	  && CONST_INT_P (XEXP (opleft, 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (opleft, 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0), XEXP (opleft, 1));
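
      /* For example, in SImode,
	 (ior (ashift x (const_int 24)) (lshiftrt x (const_int 8)))
	 has shift counts summing to the precision (24 + 8 == 32) and so
	 becomes (rotate x (const_int 24)).  */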
      /* Same, but for ashift that has been "simplified" to a wider mode
	 by simplify_shift_const.  */

      if (GET_CODE (opleft) == SUBREG
	  && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
	  && GET_CODE (opright) == LSHIFTRT
	  && GET_CODE (XEXP (opright, 0)) == SUBREG
	  && GET_MODE (opleft) == GET_MODE (XEXP (opright, 0))
	  && SUBREG_BYTE (opleft) == SUBREG_BYTE (XEXP (opright, 0))
	  && (GET_MODE_SIZE (GET_MODE (opleft))
	      < GET_MODE_SIZE (GET_MODE (SUBREG_REG (opleft))))
	  && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
			  SUBREG_REG (XEXP (opright, 0)))
	  && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
	  && CONST_INT_P (XEXP (opright, 1))
	  && (INTVAL (XEXP (SUBREG_REG (opleft), 1)) + INTVAL (XEXP (opright, 1))
	      == GET_MODE_PRECISION (mode)))
	return gen_rtx_ROTATE (mode, XEXP (opright, 0),
			       XEXP (SUBREG_REG (opleft), 1));

      /* If we have (ior (and (X C1) C2)), simplify this by making
	 C1 as small as possible if C1 actually changes.  */
      if (CONST_INT_P (op1)
	  && (HWI_COMPUTABLE_MODE_P (mode)
	      || INTVAL (op1) > 0)
	  && GET_CODE (op0) == AND
	  && CONST_INT_P (XEXP (op0, 1))
	  && CONST_INT_P (op1)
	  && (UINTVAL (XEXP (op0, 1)) & UINTVAL (op1)) != 0)
	{
	  rtx tmp = simplify_gen_binary (AND, mode, XEXP (op0, 0),
					 gen_int_mode (UINTVAL (XEXP (op0, 1))
						       & ~UINTVAL (op1),
						       mode));
	  return simplify_gen_binary (IOR, mode, tmp, op1);
	}

      /* If OP0 is (ashiftrt (plus ...) C), it might actually be
	 a (sign_extend (plus ...)).  Then check if OP1 is a CONST_INT and
	 the PLUS does not affect any of the bits in OP1: then we can do
	 the IOR as a PLUS and we can associate.  This is valid if OP1
	 can be safely shifted left C bits.  */
      if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
	  && GET_CODE (XEXP (op0, 0)) == PLUS
	  && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
	{
	  int count = INTVAL (XEXP (op0, 1));
	  HOST_WIDE_INT mask = INTVAL (trueop1) << count;

	  if (mask >> count == INTVAL (trueop1)
	      && trunc_int_for_mode (mask, mode) == mask
	      && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
	    return simplify_gen_binary (ASHIFTRT, mode,
					plus_constant (mode, XEXP (op0, 0),
						       mask),
					XEXP (op0, 1));
	}

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case XOR:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return simplify_gen_unary (NOT, mode, op0, mode);
      if (rtx_equal_p (trueop0, trueop1)
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Canonicalize XOR of the most significant bit to PLUS.  */
      if (CONST_SCALAR_INT_P (op1)
	  && mode_signbit_p (mode, op1))
	return simplify_gen_binary (PLUS, mode, op0, op1);
      /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit.  */
      if (CONST_SCALAR_INT_P (op1)
	  && GET_CODE (op0) == PLUS
	  && CONST_SCALAR_INT_P (XEXP (op0, 1))
	  && mode_signbit_p (mode, XEXP (op0, 1)))
	return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, mode, op1,
							 XEXP (op0, 1)));

      /* If we are XORing two things that have no bits in common,
	 convert them into an IOR.  This helps to detect rotation encoded
	 using those methods and possibly other simplifications.  */

      if (HWI_COMPUTABLE_MODE_P (mode)
	  && (nonzero_bits (op0, mode)
	      & nonzero_bits (op1, mode)) == 0)
	return (simplify_gen_binary (IOR, mode, op0, op1));
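
      /* For example, when nonzero_bits proves the operands disjoint,
	 (xor (and x (const_int 0xff00)) (and y (const_int 0xff)))
	 becomes the equivalent (ior ...), which the rotate detection
	 under IOR above can then recognize.  */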
      /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
	 Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
	 (NOT y).  */
      {
	int num_negated = 0;

	if (GET_CODE (op0) == NOT)
	  num_negated++, op0 = XEXP (op0, 0);
	if (GET_CODE (op1) == NOT)
	  num_negated++, op1 = XEXP (op1, 0);

	if (num_negated == 2)
	  return simplify_gen_binary (XOR, mode, op0, op1);
	else if (num_negated == 1)
	  return simplify_gen_unary (NOT, mode,
				     simplify_gen_binary (XOR, mode, op0, op1),
				     mode);
      }

      /* Convert (xor (and A B) B) to (and (not A) B).  The latter may
	 correspond to a machine insn or result in further simplifications
	 if B is a constant.  */

      if (GET_CODE (op0) == AND
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      else if (GET_CODE (op0) == AND
	       && rtx_equal_p (XEXP (op0, 0), op1)
	       && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      /* Given (xor (ior (xor A B) C) D), where B, C and D are
	 constants, simplify to (xor (ior A C) (B&~C)^D), canceling
	 out bits inverted twice and not set by C.  Similarly, given
	 (xor (and (xor A B) C) D), simplify without inverting C in
	 the xor operand: (xor (and A C) (B&C)^D).
	 */
      else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
	       && GET_CODE (XEXP (op0, 0)) == XOR
	       && CONST_INT_P (op1)
	       && CONST_INT_P (XEXP (op0, 1))
	       && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
	{
	  enum rtx_code op = GET_CODE (op0);
	  rtx a = XEXP (XEXP (op0, 0), 0);
	  rtx b = XEXP (XEXP (op0, 0), 1);
	  rtx c = XEXP (op0, 1);
	  rtx d = op1;
	  HOST_WIDE_INT bval = INTVAL (b);
	  HOST_WIDE_INT cval = INTVAL (c);
	  HOST_WIDE_INT dval = INTVAL (d);
	  HOST_WIDE_INT xcval;

	  if (op == IOR)
	    xcval = ~cval;
	  else
	    xcval = cval;

	  return simplify_gen_binary (XOR, mode,
				      simplify_gen_binary (op, mode, a, c),
				      gen_int_mode ((bval & xcval) ^ dval,
						    mode));
	}

      /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
	 we can transform like this:
	    (A&B)^C == ~(A&B)&C | ~C&(A&B)
		    == (~A|~B)&C | ~C&(A&B)    * DeMorgan's Law
		    == ~A&C | ~B&C | A&(~C&B)  * Distribute and re-order
	 Attempt a few simplifications when B and C are both constants.  */
      if (GET_CODE (op0) == AND
	  && CONST_INT_P (op1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  rtx a = XEXP (op0, 0);
	  rtx b = XEXP (op0, 1);
	  rtx c = op1;
	  HOST_WIDE_INT bval = INTVAL (b);
	  HOST_WIDE_INT cval = INTVAL (c);

	  /* Instead of computing ~A&C, we compute its negated value,
	     ~(A|~C).  If it yields -1, ~A&C is zero, so we can
	     optimize for sure.  If it does not simplify, we still try
	     to compute ~A&C below, but since that always allocates
	     RTL, we don't try that before committing to returning a
	     simplified expression.  */
	  rtx n_na_c = simplify_binary_operation (IOR, mode, a,
						  GEN_INT (~cval));

	  if ((~cval & bval) == 0)
	    {
	      rtx na_c = NULL_RTX;
	      if (n_na_c)
		na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
	      else
		{
		  /* If ~A does not simplify, don't bother: we don't
		     want to simplify 2 operations into 3, and if na_c
		     were to simplify with na, n_na_c would have
		     simplified as well.  */
		  rtx na = simplify_unary_operation (NOT, mode, a, mode);
		  if (na)
		    na_c = simplify_gen_binary (AND, mode, na, c);
		}

	      /* Try to simplify ~A&C | ~B&C.  */
	      if (na_c != NULL_RTX)
		return simplify_gen_binary (IOR, mode, na_c,
					    gen_int_mode (~bval & cval, mode));
	    }
	  else
	    {
	      /* If ~A&C is zero, simplify A&(~C&B) | ~B&C.  */
	      if (n_na_c == CONSTM1_RTX (mode))
		{
		  rtx a_nc_b = simplify_gen_binary (AND, mode, a,
						    gen_int_mode (~cval & bval,
								  mode));
		  return simplify_gen_binary (IOR, mode, a_nc_b,
					      gen_int_mode (~bval & cval,
							    mode));
		}
	    }
	}

      /* (xor (comparison foo bar) (const_int 1)) can become the reversed
	 comparison if STORE_FLAG_VALUE is 1.  */
      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
	 is (lt foo (const_int 0)), so we can perform the above
	 simplification if STORE_FLAG_VALUE is 1.  */

      if (STORE_FLAG_VALUE == 1
	  && trueop1 == const1_rtx
	  && GET_CODE (op0) == LSHIFTRT
	  && CONST_INT_P (XEXP (op0, 1))
	  && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (mode) - 1)
	return gen_rtx_GE (mode, XEXP (op0, 0), const0_rtx);

      /* (xor (comparison foo bar) (const_int sign-bit))
	 when STORE_FLAG_VALUE is the sign bit.  */
      if (val_signbit_p (mode, STORE_FLAG_VALUE)
	  && trueop1 == const_true_rtx
	  && COMPARISON_P (op0)
	  && (reversed = reversed_comparison (op0, mode)))
	return reversed;

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case AND:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return trueop1;
      if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
	return op0;
      if (HWI_COMPUTABLE_MODE_P (mode))
	{
	  HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, mode);
	  HOST_WIDE_INT nzop1;
	  if (CONST_INT_P (trueop1))
	    {
	      HOST_WIDE_INT val1 = INTVAL (trueop1);
	      /* If we are turning off bits already known off in OP0, we need
		 not do an AND.  */
	      if ((nzop0 & ~val1) == 0)
		return op0;
	    }
	  nzop1 = nonzero_bits (trueop1, mode);
	  /* If we are clearing all the nonzero bits, the result is zero.  */
	  if ((nzop1 & nzop0) == 0
	      && !side_effects_p (op0) && !side_effects_p (op1))
	    return CONST0_RTX (mode);
	}
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return op0;
      /* A & (~A) -> 0 */
      if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
	   || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
	  && ! side_effects_p (op0)
	  && GET_MODE_CLASS (mode) != MODE_CC)
	return CONST0_RTX (mode);

      /* Transform (and (extend X) C) into (zero_extend (and X C)) if
	 there are no nonzero bits of C outside of X's mode.  */
      if ((GET_CODE (op0) == SIGN_EXTEND
	   || GET_CODE (op0) == ZERO_EXTEND)
	  && CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && (~GET_MODE_MASK (GET_MODE (XEXP (op0, 0)))
	      & UINTVAL (trueop1)) == 0)
	{
	  machine_mode imode = GET_MODE (XEXP (op0, 0));
	  tem = simplify_gen_binary (AND, imode, XEXP (op0, 0),
				     gen_int_mode (INTVAL (trueop1),
						   imode));
	  return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
	}

      /* Transform (and (truncate X) C) into (truncate (and X C)).  This way
	 we might be able to further simplify the AND with X and potentially
	 remove the truncation altogether.  */
      if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
	{
	  rtx x = XEXP (op0, 0);
	  machine_mode xmode = GET_MODE (x);
	  tem = simplify_gen_binary (AND, xmode, x,
				     gen_int_mode (INTVAL (trueop1), xmode));
	  return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
	}

      /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2).  */
      if (GET_CODE (op0) == IOR
	  && CONST_INT_P (trueop1)
	  && CONST_INT_P (XEXP (op0, 1)))
	{
	  HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
	  return simplify_gen_binary (IOR, mode,
				      simplify_gen_binary (AND, mode,
							   XEXP (op0, 0), op1),
				      gen_int_mode (tmp, mode));
	}

      /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
	 insn (and may simplify more).  */
      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 1), mode),
				    op1);

      if (GET_CODE (op0) == XOR
	  && rtx_equal_p (XEXP (op0, 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode,
				    simplify_gen_unary (NOT, mode,
							XEXP (op0, 0), mode),
				    op1);

      /* Similarly for (~(A ^ B)) & A.  */
      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);

      if (GET_CODE (op0) == NOT
	  && GET_CODE (XEXP (op0, 0)) == XOR
	  && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
	  && ! side_effects_p (op1))
	return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);

      /* Convert (A | B) & A to A.  */
      if (GET_CODE (op0) == IOR
	  && (rtx_equal_p (XEXP (op0, 0), op1)
	      || rtx_equal_p (XEXP (op0, 1), op1))
	  && ! side_effects_p (XEXP (op0, 0))
	  && ! side_effects_p (XEXP (op0, 1)))
	return op1;

      /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
	 ((A & N) + B) & M -> (A + B) & M
	 Similarly if (N & M) == 0,
	 ((A | N) + B) & M -> (A + B) & M
	 and for - instead of + and/or ^ instead of |.
	 Also, if (N & M) == 0, then
	 (A +- N) & M -> A & M.  */
      if (CONST_INT_P (trueop1)
	  && HWI_COMPUTABLE_MODE_P (mode)
	  && ~UINTVAL (trueop1)
	  && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
	  && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
	{
	  rtx pmop[2];
	  int which;

	  pmop[0] = XEXP (op0, 0);
	  pmop[1] = XEXP (op0, 1);

	  if (CONST_INT_P (pmop[1])
	      && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
	    return simplify_gen_binary (AND, mode, pmop[0], op1);

	  for (which = 0; which < 2; which++)
	    {
	      tem = pmop[which];
	      switch (GET_CODE (tem))
		{
		case AND:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
		      == UINTVAL (trueop1))
		    pmop[which] = XEXP (tem, 0);
		  break;
		case IOR:
		case XOR:
		  if (CONST_INT_P (XEXP (tem, 1))
		      && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
		    pmop[which] = XEXP (tem, 0);
		  break;
		default:
		  break;
		}
	    }

	  if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
	    {
	      tem = simplify_gen_binary (GET_CODE (op0), mode,
					 pmop[0], pmop[1]);
	      return simplify_gen_binary (code, mode, tem, op1);
	    }
	}

      /* (and X (ior (not X) Y) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 0)) == NOT
	  && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));

      /* (and (ior (not X) Y) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));

      /* (and X (ior Y (not X)) -> (and X Y) */
      if (GET_CODE (op1) == IOR
	  && GET_CODE (XEXP (op1, 1)) == NOT
	  && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
	return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));

      /* (and (ior Y (not X)) X) -> (and X Y) */
      if (GET_CODE (op0) == IOR
	  && GET_CODE (XEXP (op0, 1)) == NOT
	  && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
	return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));

      tem = simplify_byte_swapping_operation (code, mode, op0, op1);
      if (tem)
	return tem;

      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case UDIV:
      /* 0/x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x/1 is x.  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	  if (tem)
	    return tem;
	}
      /* Convert divide by power of two into shift.  */
      if (CONST_INT_P (trueop1)
	  && (val = exact_log2 (UINTVAL (trueop1))) > 0)
	return simplify_gen_binary (LSHIFTRT, mode, op0, GEN_INT (val));
      break;
    case DIV:
      /* Handle floating point and integers separately.  */
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  /* Maybe change 0.0 / x to 0.0.  This transformation isn't
	     safe for modes with NaNs, since 0.0 / 0.0 will then be
	     NaN rather than 0.0.  Nor is it safe for modes with signed
	     zeros, since dividing 0 by a negative number gives -0.0  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !HONOR_NANS (mode)
	      && !HONOR_SIGNED_ZEROS (mode)
	      && ! side_effects_p (op1))
	    return op0;
	  /* x/1.0 is x.  */
	  if (trueop1 == CONST1_RTX (mode)
	      && !HONOR_SNANS (mode))
	    return op0;

	  if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
	      && trueop1 != CONST0_RTX (mode))
	    {
	      const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

	      /* x/-1.0 is -x.  */
	      if (real_equal (d1, &dconstm1)
		  && !HONOR_SNANS (mode))
		return simplify_gen_unary (NEG, mode, op0, mode);

	      /* Change FP division by a constant into multiplication.
		 Only do this with -freciprocal-math.  */
	      if (flag_reciprocal_math
		  && !real_equal (d1, &dconst0))
		{
		  REAL_VALUE_TYPE d;
		  real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
		  tem = const_double_from_real_value (d, mode);
		  return simplify_gen_binary (MULT, mode, op0, tem);
		}
	    }
	}
      else if (SCALAR_INT_MODE_P (mode))
	{
	  /* 0/x is 0 (or x&0 if x has side-effects).  */
	  if (trueop0 == CONST0_RTX (mode)
	      && !cfun->can_throw_non_call_exceptions)
	    {
	      if (side_effects_p (op1))
		return simplify_gen_binary (AND, mode, op1, trueop0);
	      return trueop0;
	    }
	  /* x/1 is x.  */
	  if (trueop1 == CONST1_RTX (mode))
	    {
	      tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      if (tem)
		return tem;
	    }
	  /* x/-1 is -x.  */
	  if (trueop1 == constm1_rtx)
	    {
	      rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
	      if (x)
		return simplify_gen_unary (NEG, mode, x, mode);
	    }
	}
      break;
    case UMOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode))
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      /* Implement modulus by power of two as AND.  */
      if (CONST_INT_P (trueop1)
	  && exact_log2 (UINTVAL (trueop1)) > 0)
	return simplify_gen_binary (AND, mode, op0,
				    gen_int_mode (INTVAL (op1) - 1, mode));
      break;
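
      /* For example, (umod x (const_int 16)) becomes
	 (and x (const_int 15)): an unsigned modulus by 1 << 4 keeps
	 exactly the low four bits.  */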
    case MOD:
      /* 0%x is 0 (or x&0 if x has side-effects).  */
      if (trueop0 == CONST0_RTX (mode))
	{
	  if (side_effects_p (op1))
	    return simplify_gen_binary (AND, mode, op1, trueop0);
	  return trueop0;
	}
      /* x%1 and x%-1 is 0 (or x&0 if x has side-effects).  */
      if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
	{
	  if (side_effects_p (op0))
	    return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
	  return CONST0_RTX (mode);
	}
      break;
    case ROTATERT:
    case ROTATE:
      /* Canonicalize rotates by constant amount.  If op1 is bitsize / 2,
	 prefer left rotation, if op1 is from bitsize / 2 + 1 to
	 bitsize - 1, use other direction of rotate with 1 .. bitsize / 2 - 1
	 amount instead.  */
#if defined(HAVE_rotate) && defined(HAVE_rotatert)
      if (CONST_INT_P (trueop1)
	  && IN_RANGE (INTVAL (trueop1),
		       GET_MODE_PRECISION (mode) / 2 + (code == ROTATE),
		       GET_MODE_PRECISION (mode) - 1))
	return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
				    mode, op0,
				    GEN_INT (GET_MODE_PRECISION (mode)
					     - INTVAL (trueop1)));
#endif
      /* FALLTHRU */
    case ASHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Rotating ~0 always results in ~0.  */
      if (CONST_INT_P (trueop0) && width <= HOST_BITS_PER_WIDE_INT
	  && UINTVAL (trueop0) == GET_MODE_MASK (mode)
	  && ! side_effects_p (op1))
	return op0;
      /* Given:
	 scalar modes M1, M2
	 scalar constants c1, c2
	 size (M2) > size (M1)
	 c1 == size (M2) - size (M1)
	 optimize:
	 (ashiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
				 <low_part>)
		      (const_int <c2>))
	 to:
	 (subreg:M1 (ashiftrt:M2 (reg:M2) (const_int <c1 + c2>))
		    <low_part>).  */
      if (code == ASHIFTRT
	  && !VECTOR_MODE_P (mode)
	  && SUBREG_P (op0)
	  && CONST_INT_P (op1)
	  && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
	  && !VECTOR_MODE_P (GET_MODE (SUBREG_REG (op0)))
	  && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
	  && (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
	      > GET_MODE_BITSIZE (mode))
	  && (INTVAL (XEXP (SUBREG_REG (op0), 1))
	      == (GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op0)))
		  - GET_MODE_BITSIZE (mode)))
	  && subreg_lowpart_p (op0))
	{
	  rtx tmp = GEN_INT (INTVAL (XEXP (SUBREG_REG (op0), 1))
			     + INTVAL (op1));
	  machine_mode inner_mode = GET_MODE (SUBREG_REG (op0));
	  tmp = simplify_gen_binary (ASHIFTRT,
				     GET_MODE (SUBREG_REG (op0)),
				     XEXP (SUBREG_REG (op0), 0),
				     tmp);
	  return lowpart_subreg (mode, tmp, inner_mode);
	}
    canonicalize_shift:
      if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
	{
	  val = INTVAL (op1) & (GET_MODE_PRECISION (mode) - 1);
	  if (val != INTVAL (op1))
	    return simplify_gen_binary (code, mode, op0, GEN_INT (val));
	}
      break;

    case ASHIFT:
    case SS_ASHIFT:
    case US_ASHIFT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      goto canonicalize_shift;
    case LSHIFTRT:
      if (trueop1 == CONST0_RTX (mode))
	return op0;
      if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
	return op0;
      /* Optimize (lshiftrt (clz X) C) as (eq X 0).  */
      if (GET_CODE (op0) == CLZ
	  && CONST_INT_P (trueop1)
	  && STORE_FLAG_VALUE == 1
	  && INTVAL (trueop1) < (HOST_WIDE_INT) width)
	{
	  machine_mode imode = GET_MODE (XEXP (op0, 0));
	  unsigned HOST_WIDE_INT zero_val = 0;

	  if (CLZ_DEFINED_VALUE_AT_ZERO (imode, zero_val)
	      && zero_val == GET_MODE_PRECISION (imode)
	      && INTVAL (trueop1) == exact_log2 (zero_val))
	    return simplify_gen_relational (EQ, mode, imode,
					    XEXP (op0, 0), const0_rtx);
	}
      goto canonicalize_shift;
    case SMIN:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && mode_signbit_p (mode, trueop1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case SMAX:
      if (width <= HOST_BITS_PER_WIDE_INT
	  && CONST_INT_P (trueop1)
	  && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
	  && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMIN:
      if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;

    case UMAX:
      if (trueop1 == constm1_rtx && ! side_effects_p (op0))
	return op1;
      if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
	return op0;
      tem = simplify_associative_operation (code, mode, op0, op1);
      if (tem)
	return tem;
      break;
    case SS_PLUS:
    case US_PLUS:
    case SS_MINUS:
    case US_MINUS:
    case SS_MULT:
    case US_MULT:
    case SS_DIV:
    case US_DIV:
      /* ??? There are simplifications that can be done.  */
      return 0;
    case VEC_SELECT:
      if (!VECTOR_MODE_P (mode))
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);
	  gcc_assert (XVECLEN (trueop1, 0) == 1);
	  gcc_assert (CONST_INT_P (XVECEXP (trueop1, 0, 0)));

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
						      (trueop1, 0, 0)));

	  /* Extract a scalar element from a nested VEC_SELECT expression
	     (with optional nested VEC_CONCAT expression).  Some targets
	     (i386) extract scalar element from a vector using chain of
	     nested VEC_SELECT expressions.  When input operand is a memory
	     operand, this operation can be simplified to a simple scalar
	     load from an offseted memory address.  */
	  if (GET_CODE (trueop0) == VEC_SELECT)
	    {
	      rtx op0 = XEXP (trueop0, 0);
	      rtx op1 = XEXP (trueop0, 1);

	      machine_mode opmode = GET_MODE (op0);
	      int elt_size = GET_MODE_UNIT_SIZE (opmode);
	      int n_elts = GET_MODE_SIZE (opmode) / elt_size;

	      int i = INTVAL (XVECEXP (trueop1, 0, 0));
	      int elem;

	      rtvec vec;
	      rtx tmp_op, tmp;

	      gcc_assert (GET_CODE (op1) == PARALLEL);
	      gcc_assert (i < n_elts);

	      /* Select element, pointed by nested selector.  */
	      elem = INTVAL (XVECEXP (op1, 0, i));

	      /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT.  */
	      if (GET_CODE (op0) == VEC_CONCAT)
		{
		  rtx op00 = XEXP (op0, 0);
		  rtx op01 = XEXP (op0, 1);

		  machine_mode mode00, mode01;
		  int n_elts00, n_elts01;

		  mode00 = GET_MODE (op00);
		  mode01 = GET_MODE (op01);

		  /* Find out number of elements of each operand.  */
		  if (VECTOR_MODE_P (mode00))
		    {
		      elt_size = GET_MODE_UNIT_SIZE (mode00);
		      n_elts00 = GET_MODE_SIZE (mode00) / elt_size;
		    }
		  else
		    n_elts00 = 1;

		  if (VECTOR_MODE_P (mode01))
		    {
		      elt_size = GET_MODE_UNIT_SIZE (mode01);
		      n_elts01 = GET_MODE_SIZE (mode01) / elt_size;
		    }
		  else
		    n_elts01 = 1;

		  gcc_assert (n_elts == n_elts00 + n_elts01);

		  /* Select correct operand of VEC_CONCAT
		     and adjust selector.  */
		  if (elem < n_elts01)
		    tmp_op = op00;
		  else
		    {
		      tmp_op = op01;
		      elem -= n_elts00;
		    }
		}
	      else
		tmp_op = op0;

	      vec = rtvec_alloc (1);
	      RTVEC_ELT (vec, 0) = GEN_INT (elem);

	      tmp = gen_rtx_fmt_ee (code, mode,
				    tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
	      return tmp;
	    }

	  if (GET_CODE (trueop0) == VEC_DUPLICATE
	      && GET_MODE (XEXP (trueop0, 0)) == mode)
	    return XEXP (trueop0, 0);
	}
      else
	{
	  gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (GET_MODE (trueop0)));
	  gcc_assert (GET_CODE (trueop1) == PARALLEL);

	  if (GET_CODE (trueop0) == CONST_VECTOR)
	    {
	      int elt_size = GET_MODE_UNIT_SIZE (mode);
	      unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      gcc_assert (XVECLEN (trueop1, 0) == (int) n_elts);
	      for (i = 0; i < n_elts; i++)
		{
		  rtx x = XVECEXP (trueop1, 0, i);

		  gcc_assert (CONST_INT_P (x));
		  RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
						       INTVAL (x));
		}

	      return gen_rtx_CONST_VECTOR (mode, v);
	    }

	  /* Recognize the identity.  */
	  if (GET_MODE (trueop0) == mode)
	    {
	      bool maybe_ident = true;
	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
		{
		  rtx j = XVECEXP (trueop1, 0, i);
		  if (!CONST_INT_P (j) || INTVAL (j) != i)
		    {
		      maybe_ident = false;
		      break;
		    }
		}
	      if (maybe_ident)
		return trueop0;
	    }

	  /* If we build {a,b} then permute it, build the result directly.  */
	  if (XVECLEN (trueop1, 0) == 2
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
	      && GET_CODE (trueop0) == VEC_CONCAT
	      && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
	      && GET_MODE (XEXP (trueop0, 0)) == mode
	      && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
	      && GET_MODE (XEXP (trueop0, 1)) == mode)
	    {
	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
	      rtx subop0, subop1;

	      gcc_assert (i0 < 4 && i1 < 4);
	      subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
	      subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);

	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
	    }

	  if (XVECLEN (trueop1, 0) == 2
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	      && CONST_INT_P (XVECEXP (trueop1, 0, 1))
	      && GET_CODE (trueop0) == VEC_CONCAT
	      && GET_MODE (trueop0) == mode)
	    {
	      unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	      unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
	      rtx subop0, subop1;

	      gcc_assert (i0 < 2 && i1 < 2);
	      subop0 = XEXP (trueop0, i0);
	      subop1 = XEXP (trueop0, i1);

	      return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
	    }

	  /* If we select one half of a vec_concat, return that.  */
	  if (GET_CODE (trueop0) == VEC_CONCAT
	      && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
	    {
	      rtx subop0 = XEXP (trueop0, 0);
	      rtx subop1 = XEXP (trueop0, 1);
	      machine_mode mode0 = GET_MODE (subop0);
	      machine_mode mode1 = GET_MODE (subop1);
	      int li = GET_MODE_UNIT_SIZE (mode0);
	      int l0 = GET_MODE_SIZE (mode0) / li;
	      int l1 = GET_MODE_SIZE (mode1) / li;
	      int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
	      if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
		{
		  bool success = true;
		  for (int i = 1; i < l0; ++i)
		    {
		      rtx j = XVECEXP (trueop1, 0, i);
		      if (!CONST_INT_P (j) || INTVAL (j) != i)
			{
			  success = false;
			  break;
			}
		    }
		  if (success)
		    return subop0;
		}
	      if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
		{
		  bool success = true;
		  for (int i = 1; i < l1; ++i)
		    {
		      rtx j = XVECEXP (trueop1, 0, i);
		      if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
			{
			  success = false;
			  break;
			}
		    }
		  if (success)
		    return subop1;
		}
	    }
	}
      if (XVECLEN (trueop1, 0) == 1
	  && CONST_INT_P (XVECEXP (trueop1, 0, 0))
	  && GET_CODE (trueop0) == VEC_CONCAT)
	{
	  rtx vec = trueop0;
	  int offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);

	  /* Try to find the element in the VEC_CONCAT.  */
	  while (GET_MODE (vec) != mode
		 && GET_CODE (vec) == VEC_CONCAT)
	    {
	      HOST_WIDE_INT vec_size;

	      if (CONST_INT_P (XEXP (vec, 0)))
		{
		  /* vec_concat of two const_ints doesn't make sense with
		     respect to modes.  */
		  if (CONST_INT_P (XEXP (vec, 1)))
		    return 0;

		  vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
			     - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
		}
	      else
		vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));

	      if (offset < vec_size)
		vec = XEXP (vec, 0);
	      else
		{
		  offset -= vec_size;
		  vec = XEXP (vec, 1);
		}
	      vec = avoid_constant_pool_reference (vec);
	    }

	  if (GET_MODE (vec) == mode)
	    return vec;
	}
      /* If we select elements in a vec_merge that all come from the same
	 operand, select from that operand directly.  */
      if (GET_CODE (op0) == VEC_MERGE)
	{
	  rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
	  if (CONST_INT_P (trueop02))
	    {
	      unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
	      bool all_operand0 = true;
	      bool all_operand1 = true;
	      for (int i = 0; i < XVECLEN (trueop1, 0); i++)
		{
		  rtx j = XVECEXP (trueop1, 0, i);
		  if (sel & (1 << UINTVAL (j)))
		    all_operand1 = false;
		  else
		    all_operand0 = false;
		}
	      if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
		return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0),
					    op1);
	      if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
		return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1),
					    op1);
	    }
	}
      /* If we have two nested selects that are inverses of each
	 other, replace them with the source operand.  */
      if (GET_CODE (trueop0) == VEC_SELECT
	  && GET_MODE (XEXP (trueop0, 0)) == mode)
	{
	  rtx op0_subop1 = XEXP (trueop0, 1);
	  gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
	  gcc_assert (XVECLEN (trueop1, 0) == GET_MODE_NUNITS (mode));

	  /* Apply the outer ordering vector to the inner one.  (The inner
	     ordering vector is expressly permitted to be of a different
	     length than the outer one.)  If the result is { 0, 1, ..., n-1 }
	     then the two VEC_SELECTs cancel.  */
	  for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
	    {
	      rtx x = XVECEXP (trueop1, 0, i);
	      if (!CONST_INT_P (x))
		return 0;
	      rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
	      if (!CONST_INT_P (y) || i != INTVAL (y))
		return 0;
	    }
	  return XEXP (trueop0, 0);
	}

      return 0;
    case VEC_CONCAT:
      {
	machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
				 ? GET_MODE (trueop0)
				 : GET_MODE_INNER (mode));
	machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
				 ? GET_MODE (trueop1)
				 : GET_MODE_INNER (mode));

	gcc_assert (VECTOR_MODE_P (mode));
	gcc_assert (GET_MODE_SIZE (op0_mode) + GET_MODE_SIZE (op1_mode)
		    == GET_MODE_SIZE (mode));

	if (VECTOR_MODE_P (op0_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op0_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op0_mode);

	if (VECTOR_MODE_P (op1_mode))
	  gcc_assert (GET_MODE_INNER (mode)
		      == GET_MODE_INNER (op1_mode));
	else
	  gcc_assert (GET_MODE_INNER (mode) == op1_mode);

	if ((GET_CODE (trueop0) == CONST_VECTOR
	     || CONST_SCALAR_INT_P (trueop0)
	     || CONST_DOUBLE_AS_FLOAT_P (trueop0))
	    && (GET_CODE (trueop1) == CONST_VECTOR
		|| CONST_SCALAR_INT_P (trueop1)
		|| CONST_DOUBLE_AS_FLOAT_P (trueop1)))
	  {
	    int elt_size = GET_MODE_UNIT_SIZE (mode);
	    unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	    rtvec v = rtvec_alloc (n_elts);
	    unsigned int i;
	    unsigned in_n_elts = 1;

	    if (VECTOR_MODE_P (op0_mode))
	      in_n_elts = (GET_MODE_SIZE (op0_mode) / elt_size);
	    for (i = 0; i < n_elts; i++)
	      {
		if (i < in_n_elts)
		  {
		    if (!VECTOR_MODE_P (op0_mode))
		      RTVEC_ELT (v, i) = trueop0;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
		  }
		else
		  {
		    if (!VECTOR_MODE_P (op1_mode))
		      RTVEC_ELT (v, i) = trueop1;
		    else
		      RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
							   i - in_n_elts);
		  }
	      }

	    return gen_rtx_CONST_VECTOR (mode, v);
	  }

	/* Try to merge two VEC_SELECTs from the same vector into a single one.
	   Restrict the transformation to avoid generating a VEC_SELECT with a
	   mode unrelated to its operand.  */
	if (GET_CODE (trueop0) == VEC_SELECT
	    && GET_CODE (trueop1) == VEC_SELECT
	    && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
	    && GET_MODE (XEXP (trueop0, 0)) == mode)
	  {
	    rtx par0 = XEXP (trueop0, 1);
	    rtx par1 = XEXP (trueop1, 1);
	    int len0 = XVECLEN (par0, 0);
	    int len1 = XVECLEN (par1, 0);
	    rtvec vec = rtvec_alloc (len0 + len1);
	    for (int i = 0; i < len0; i++)
	      RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
	    for (int i = 0; i < len1; i++)
	      RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
	    return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
					gen_rtx_PARALLEL (VOIDmode, vec));
	  }
      }
      return 0;

    default:
      break;
    }

  return 0;
}
rtx
simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
				 rtx op0, rtx op1)
{
  unsigned int width = GET_MODE_PRECISION (mode);

  if (VECTOR_MODE_P (mode)
      && code != VEC_CONCAT
      && GET_CODE (op0) == CONST_VECTOR
      && GET_CODE (op1) == CONST_VECTOR)
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      machine_mode op0mode = GET_MODE (op0);
      unsigned op0_n_elts = GET_MODE_NUNITS (op0mode);
      machine_mode op1mode = GET_MODE (op1);
      unsigned op1_n_elts = GET_MODE_NUNITS (op1mode);
      rtvec v = rtvec_alloc (n_elts);
      unsigned int i;

      gcc_assert (op0_n_elts == n_elts);
      gcc_assert (op1_n_elts == n_elts);
      for (i = 0; i < n_elts; i++)
	{
	  rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
					     CONST_VECTOR_ELT (op0, i),
					     CONST_VECTOR_ELT (op1, i));
	  if (!x)
	    return 0;
	  RTVEC_ELT (v, i) = x;
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (VECTOR_MODE_P (mode)
      && code == VEC_CONCAT
      && (CONST_SCALAR_INT_P (op0)
	  || GET_CODE (op0) == CONST_FIXED
	  || CONST_DOUBLE_AS_FLOAT_P (op0))
      && (CONST_SCALAR_INT_P (op1)
	  || CONST_DOUBLE_AS_FLOAT_P (op1)
	  || GET_CODE (op1) == CONST_FIXED))
    {
      unsigned n_elts = GET_MODE_NUNITS (mode);
      rtvec v = rtvec_alloc (n_elts);

      gcc_assert (n_elts >= 2);
      if (n_elts == 2)
	{
	  gcc_assert (GET_CODE (op0) != CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) != CONST_VECTOR);

	  RTVEC_ELT (v, 0) = op0;
	  RTVEC_ELT (v, 1) = op1;
	}
      else
	{
	  unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0));
	  unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1));
	  unsigned i;

	  gcc_assert (GET_CODE (op0) == CONST_VECTOR);
	  gcc_assert (GET_CODE (op1) == CONST_VECTOR);
	  gcc_assert (op0_n_elts + op1_n_elts == n_elts);

	  for (i = 0; i < op0_n_elts; ++i)
	    RTVEC_ELT (v, i) = XVECEXP (op0, 0, i);
	  for (i = 0; i < op1_n_elts; ++i)
	    RTVEC_ELT (v, op0_n_elts + i) = XVECEXP (op1, 0, i);
	}

      return gen_rtx_CONST_VECTOR (mode, v);
    }
  if (SCALAR_FLOAT_MODE_P (mode)
      && CONST_DOUBLE_AS_FLOAT_P (op0)
      && CONST_DOUBLE_AS_FLOAT_P (op1)
      && mode == GET_MODE (op0) && mode == GET_MODE (op1))
    {
      if (code == AND
	  || code == IOR
	  || code == XOR)
	{
	  long tmp0[4];
	  long tmp1[4];
	  REAL_VALUE_TYPE r;
	  int i;

	  real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
			  GET_MODE (op0));
	  real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
			  GET_MODE (op1));
	  for (i = 0; i < 4; i++)
	    {
	      switch (code)
		{
		case AND:
		  tmp0[i] &= tmp1[i];
		  break;
		case IOR:
		  tmp0[i] |= tmp1[i];
		  break;
		case XOR:
		  tmp0[i] ^= tmp1[i];
		  break;
		default:
		  gcc_unreachable ();
		}
	    }
	  real_from_target (&r, tmp0, mode);
	  return const_double_from_real_value (r, mode);
	}
      else
	{
	  REAL_VALUE_TYPE f0, f1, value, result;
	  bool inexact;

	  real_convert (&f0, mode, CONST_DOUBLE_REAL_VALUE (op0));
	  real_convert (&f1, mode, CONST_DOUBLE_REAL_VALUE (op1));

	  if (HONOR_SNANS (mode)
	      && (REAL_VALUE_ISNAN (f0) || REAL_VALUE_ISNAN (f1)))
	    return 0;

	  if (code == DIV
	      && real_equal (&f1, &dconst0)
	      && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
	    return 0;

	  if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
	    {
	      int s0 = REAL_VALUE_NEGATIVE (f0);
	      int s1 = REAL_VALUE_NEGATIVE (f1);

	      switch (code)
		{
		case PLUS:
		  /* Inf + -Inf = NaN plus exception.  */
		  if (s0 != s1)
		    return 0;
		  break;
		case MINUS:
		  /* Inf - Inf = NaN plus exception.  */
		  if (s0 == s1)
		    return 0;
		  break;
		case DIV:
		  /* Inf / Inf = NaN plus exception.  */
		  return 0;
		default:
		  break;
		}
	    }

	  if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
	      && flag_trapping_math
	      && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
		  || (REAL_VALUE_ISINF (f1)
		      && real_equal (&f0, &dconst0))))
	    /* Inf * 0 = NaN plus exception.  */
	    return 0;

	  inexact = real_arithmetic (&value, rtx_to_tree_code (code),
				     &f0, &f1);
	  real_convert (&result, mode, &value);

	  /* Don't constant fold this floating point operation if
	     the result has overflowed and flag_trapping_math.  */

	  if (flag_trapping_math
	      && MODE_HAS_INFINITIES (mode)
	      && REAL_VALUE_ISINF (result)
	      && !REAL_VALUE_ISINF (f0)
	      && !REAL_VALUE_ISINF (f1))
	    /* Overflow plus exception.  */
	    return 0;

	  /* Don't constant fold this floating point operation if the
	     result may dependent upon the run-time rounding mode and
	     flag_rounding_math is set, or if GCC's software emulation
	     is unable to accurately represent the result.  */

	  if ((flag_rounding_math
	       || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
	      && (inexact || !real_identical (&result, &value)))
	    return NULL_RTX;

	  return const_double_from_real_value (result, mode);
	}
    }
  /* We can fold some multi-word operations.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT
       || GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
      && CONST_SCALAR_INT_P (op0)
      && CONST_SCALAR_INT_P (op1))
    {
      wide_int result;
      bool overflow;
      rtx_mode_t pop0 = std::make_pair (op0, mode);
      rtx_mode_t pop1 = std::make_pair (op1, mode);

#if TARGET_SUPPORTS_WIDE_INT == 0
      /* This assert keeps the simplification from producing a result
	 that cannot be represented in a CONST_DOUBLE.  A lot of
	 upstream callers expect that this function never fails to
	 simplify something, so if this check were added to the test
	 above, the code would die later anyway.  If this assert
	 fires, you just need to make the port support wide int.  */
      gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
#endif
      switch (code)
	{
	case MINUS:
	  result = wi::sub (pop0, pop1);
	  break;

	case PLUS:
	  result = wi::add (pop0, pop1);
	  break;

	case MULT:
	  result = wi::mul (pop0, pop1);
	  break;

	case DIV:
	  result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case MOD:
	  result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case UDIV:
	  result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case UMOD:
	  result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
	  if (overflow)
	    return NULL_RTX;
	  break;

	case AND:
	  result = wi::bit_and (pop0, pop1);
	  break;

	case IOR:
	  result = wi::bit_or (pop0, pop1);
	  break;

	case XOR:
	  result = wi::bit_xor (pop0, pop1);
	  break;

	case SMIN:
	  result = wi::smin (pop0, pop1);
	  break;

	case SMAX:
	  result = wi::smax (pop0, pop1);
	  break;

	case UMIN:
	  result = wi::umin (pop0, pop1);
	  break;

	case UMAX:
	  result = wi::umax (pop0, pop1);
	  break;

	case LSHIFTRT:
	case ASHIFTRT:
	case ASHIFT:
	  {
	    wide_int wop1 = pop1;
	    if (SHIFT_COUNT_TRUNCATED)
	      wop1 = wi::umod_trunc (wop1, width);
	    else if (wi::geu_p (wop1, width))
	      return NULL_RTX;

	    switch (code)
	      {
	      case LSHIFTRT:
		result = wi::lrshift (pop0, wop1);
		break;

	      case ASHIFTRT:
		result = wi::arshift (pop0, wop1);
		break;

	      case ASHIFT:
		result = wi::lshift (pop0, wop1);
		break;

	      default:
		gcc_unreachable ();
	      }
	    break;
	  }
	case ROTATE:
	case ROTATERT:
	  {
	    if (wi::neg_p (pop1))
	      return NULL_RTX;

	    switch (code)
	      {
	      case ROTATE:
		result = wi::lrotate (pop0, pop1);
		break;

	      case ROTATERT:
		result = wi::rrotate (pop0, pop1);
		break;

	      default:
		gcc_unreachable ();
	      }
	    break;
	  }
	default:
	  return NULL_RTX;
	}
      return immed_wide_int_const (result, mode);
    }

  return NULL_RTX;
}
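/* Illustrative usage sketch (added; not from the GCC sources):
   CONST_INT operands take the wide-int path above, so a constant
   multiply folds outright.  */
#if 0
  rtx prod = simplify_const_binary_operation (MULT, SImode,
					      GEN_INT (6), GEN_INT (7));
  /* prod is (const_int 42).  */
#endif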
/* Return a positive integer if X should sort after Y.  The value
   returned is 1 if and only if X and Y are both regs.  */

static int
simplify_plus_minus_op_data_cmp (rtx x, rtx y)
{
  int result;

  result = (commutative_operand_precedence (y)
	    - commutative_operand_precedence (x));
  if (result)
    return result + result;

  /* Group together equal REGs to do more simplification.  */
  if (REG_P (x) && REG_P (y))
    return REGNO (x) > REGNO (y);

  return 0;
}
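/* Note (added commentary): the doubled return value above is always
   even, so that path can never return exactly 1; a result of exactly 1
   therefore means "X and Y are equal-precedence registers in the wrong
   order".  The caller uses this to avoid counting a pure register swap
   as a canonicalization.  */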
/* Simplify and canonicalize a PLUS or MINUS, at least one of whose
   operands may be another PLUS or MINUS.

   Rather than test for specific case, we do this by a brute-force method
   and do all possible simplifications until no more changes occur.  Then
   we rebuild the operation.

   May return NULL_RTX when no changes were made.  */

static rtx
simplify_plus_minus (enum rtx_code code, machine_mode mode, rtx op0,
		     rtx op1)
{
  struct simplify_plus_minus_op_data
  {
    rtx op;
    short neg;
  } ops[16];
  rtx result, tem;
  int n_ops = 2;
  int changed, n_constants, canonicalized = 0;
  int i, j;

  memset (ops, 0, sizeof ops);

  /* Set up the two operands and then expand them until nothing has been
     changed.  If we run out of room in our array, give up; this should
     almost never happen.  */

  ops[0].op = op0;
  ops[0].neg = 0;
  ops[1].op = op1;
  ops[1].neg = (code == MINUS);

  do
    {
      changed = 0;
      n_constants = 0;

      for (i = 0; i < n_ops; i++)
	{
	  rtx this_op = ops[i].op;
	  int this_neg = ops[i].neg;
	  enum rtx_code this_code = GET_CODE (this_op);

	  switch (this_code)
	    {
	    case PLUS:
	    case MINUS:
	      if (n_ops == ARRAY_SIZE (ops))
		return NULL_RTX;

	      ops[n_ops].op = XEXP (this_op, 1);
	      ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
	      n_ops++;

	      ops[i].op = XEXP (this_op, 0);
	      changed = 1;
	      /* If this operand was negated then we will potentially
		 canonicalize the expression.  Similarly if we don't
		 place the operands adjacent we're re-ordering the
		 expression and thus might be performing a
		 canonicalization.  Ignore register re-ordering.
		 ??? It might be better to shuffle the ops array here,
		 but then (plus (plus (A, B), plus (C, D))) wouldn't
		 be seen as non-canonical.  */
	      if (this_neg
		  || (i != n_ops - 2
		      && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
		canonicalized = 1;
	      break;

	    case NEG:
	      ops[i].op = XEXP (this_op, 0);
	      ops[i].neg = ! this_neg;
	      changed = 1;
	      canonicalized = 1;
	      break;

	    case CONST:
	      if (n_ops != ARRAY_SIZE (ops)
		  && GET_CODE (XEXP (this_op, 0)) == PLUS
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
		  && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
		{
		  ops[i].op = XEXP (XEXP (this_op, 0), 0);
		  ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
		  ops[n_ops].neg = this_neg;
		  n_ops++;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case NOT:
	      /* ~a -> (-a - 1) */
	      if (n_ops != ARRAY_SIZE (ops))
		{
		  ops[n_ops].op = CONSTM1_RTX (mode);
		  ops[n_ops++].neg = this_neg;
		  ops[i].op = XEXP (this_op, 0);
		  ops[i].neg = !this_neg;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    case CONST_INT:
	      n_constants++;
	      if (this_neg)
		{
		  ops[i].op = neg_const_int (mode, this_op);
		  ops[i].neg = 0;
		  changed = 1;
		  canonicalized = 1;
		}
	      break;

	    default:
	      break;
	    }
	}
    }
  while (changed);

  if (n_constants > 1)
    canonicalized = 1;

  gcc_assert (n_ops >= 2);

  /* If we only have two operands, we can avoid the loops.  */
  if (n_ops == 2)
    {
      enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
      rtx lhs, rhs;

      /* Get the two operands.  Be careful with the order, especially for
	 the cases where code == MINUS.  */
      if (ops[0].neg && ops[1].neg)
	{
	  lhs = gen_rtx_NEG (mode, ops[0].op);
	  rhs = ops[1].op;
	}
      else if (ops[0].neg)
	{
	  lhs = ops[1].op;
	  rhs = ops[0].op;
	}
      else
	{
	  lhs = ops[0].op;
	  rhs = ops[1].op;
	}

      return simplify_const_binary_operation (code, mode, lhs, rhs);
    }
  /* Now simplify each pair of operands until nothing changes.  */
  while (1)
    {
      /* Insertion sort is good enough for a small array.  */
      for (i = 1; i < n_ops; i++)
	{
	  struct simplify_plus_minus_op_data save;
	  int cmp;

	  j = i - 1;
	  cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
	  if (cmp <= 0)
	    continue;
	  /* Just swapping registers doesn't count as canonicalization.  */
	  if (cmp != 1)
	    canonicalized = 1;

	  save = ops[i];
	  do
	    ops[j + 1] = ops[j];
	  while (j--
		 && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
	  ops[j + 1] = save;
	}

      changed = 0;
      for (i = n_ops - 1; i > 0; i--)
	for (j = i - 1; j >= 0; j--)
	  {
	    rtx lhs = ops[j].op, rhs = ops[i].op;
	    int lneg = ops[j].neg, rneg = ops[i].neg;

	    if (lhs != 0 && rhs != 0)
	      {
		enum rtx_code ncode = PLUS;

		if (lneg != rneg)
		  {
		    ncode = MINUS;
		    if (lneg)
		      std::swap (lhs, rhs);
		  }
		else if (swap_commutative_operands_p (lhs, rhs))
		  std::swap (lhs, rhs);

		if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
		    && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
		  {
		    rtx tem_lhs, tem_rhs;

		    tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
		    tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
		    tem = simplify_binary_operation (ncode, mode, tem_lhs,
						     tem_rhs);

		    if (tem && !CONSTANT_P (tem))
		      tem = gen_rtx_CONST (GET_MODE (tem), tem);
		  }
		else
		  tem = simplify_binary_operation (ncode, mode, lhs, rhs);

		if (tem)
		  {
		    /* Reject "simplifications" that just wrap the two
		       arguments in a CONST.  Failure to do so can result
		       in infinite recursion with simplify_binary_operation
		       when it calls us to simplify CONST operations.
		       Also, if we find such a simplification, don't try
		       any more combinations with this rhs:  We must have
		       something like symbol+offset, ie. one of the
		       trivial CONST expressions we handle later.  */
		    if (GET_CODE (tem) == CONST
			&& GET_CODE (XEXP (tem, 0)) == ncode
			&& XEXP (XEXP (tem, 0), 0) == lhs
			&& XEXP (XEXP (tem, 0), 1) == rhs)
		      break;

		    lneg &= rneg;
		    if (GET_CODE (tem) == NEG)
		      tem = XEXP (tem, 0), lneg = !lneg;
		    if (CONST_INT_P (tem) && lneg)
		      tem = neg_const_int (mode, tem), lneg = 0;

		    ops[i].op = tem;
		    ops[i].neg = lneg;
		    ops[j].op = NULL_RTX;
		    changed = 1;
		    canonicalized = 1;
		  }
	      }
	  }

      if (!changed)
	break;

      /* Pack all the operands to the lower-numbered entries.  */
      for (i = 0, j = 0; j < n_ops; j++)
	if (ops[j].op)
	  {
	    ops[i] = ops[j];
	    i++;
	  }
      n_ops = i;
    }

  /* If nothing changed, fail.  */
  if (!canonicalized)
    return NULL_RTX;

  /* Create (minus -C X) instead of (neg (const (plus X C))).  */
  if (n_ops == 2
      && CONST_INT_P (ops[1].op)
      && CONSTANT_P (ops[0].op)
      && ops[0].neg)
    return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);

  /* We suppressed creation of trivial CONST expressions in the
     combination loop to avoid recursion.  Create one manually now.
     The combination loop should have ensured that there is exactly
     one CONST_INT, and the sort will have ensured that it is last
     in the array and that any other constant will be next-to-last.  */

  if (n_ops > 1
      && CONST_INT_P (ops[n_ops - 1].op)
      && CONSTANT_P (ops[n_ops - 2].op))
    {
      rtx value = ops[n_ops - 1].op;
      if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
	value = neg_const_int (mode, value);
      ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
					 INTVAL (value));
      n_ops--;
    }

  /* Put a non-negated operand first, if possible.  */

  for (i = 0; i < n_ops && ops[i].neg; i++)
    continue;
  if (i == n_ops)
    ops[0].op = gen_rtx_NEG (mode, ops[0].op);
  else if (i != 0)
    {
      tem = ops[0].op;
      ops[0] = ops[i];
      ops[i].op = tem;
      ops[i].neg = 1;
    }

  /* Now make the result by performing the requested operations.  */
  result = ops[0].op;
  for (i = 1; i < n_ops; i++)
    result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
			     mode, result, ops[i].op);

  return result;
}
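/* Illustrative usage sketch (added; not from the GCC sources; the
   register numbers are hypothetical): the expansion loop records
   {r, a, -a} for (minus (plus r a) a), and the combination loop
   cancels a against -a, leaving just r.  */
#if 0
  rtx r = gen_rtx_REG (SImode, 100);
  rtx a = gen_rtx_REG (SImode, 101);
  rtx sum = simplify_gen_binary (PLUS, SImode, r, a);
  rtx res = simplify_gen_binary (MINUS, SImode, sum, a);
  /* res is expected to be r itself.  */
#endif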
/* Check whether an operand is suitable for calling simplify_plus_minus.  */
static bool
plus_minus_operand_p (const_rtx x)
{
  return GET_CODE (x) == PLUS
	 || GET_CODE (x) == MINUS
	 || (GET_CODE (x) == CONST
	     && GET_CODE (XEXP (x, 0)) == PLUS
	     && CONSTANT_P (XEXP (XEXP (x, 0), 0))
	     && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
}
/* Like simplify_binary_operation except used for relational operators.
   MODE is the mode of the result.  If MODE is VOIDmode, both operands must
   not also be VOIDmode.

   CMP_MODE specifies the mode in which the comparison is done, so it is
   the mode of the operands.  If CMP_MODE is VOIDmode, it is taken from
   the operands or, if both are VOIDmode, the operands are compared in
   "infinite precision".  */
rtx
simplify_relational_operation (enum rtx_code code, machine_mode mode,
			       machine_mode cmp_mode, rtx op0, rtx op1)
{
  rtx tem, trueop0, trueop1;

  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op0);
  if (cmp_mode == VOIDmode)
    cmp_mode = GET_MODE (op1);

  tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
  if (tem)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef FLOAT_STORE_FLAG_VALUE
	  {
	    REAL_VALUE_TYPE val;
	    val = FLOAT_STORE_FLAG_VALUE (mode);
	    return const_double_from_real_value (val, mode);
	  }
#else
	  return NULL_RTX;
#endif
	}
      if (VECTOR_MODE_P (mode))
	{
	  if (tem == const0_rtx)
	    return CONST0_RTX (mode);
#ifdef VECTOR_STORE_FLAG_VALUE
	  {
	    int i, units;
	    rtvec v;

	    rtx val = VECTOR_STORE_FLAG_VALUE (mode);
	    if (val == NULL_RTX)
	      return NULL_RTX;
	    if (val == const1_rtx)
	      return CONST1_RTX (mode);

	    units = GET_MODE_NUNITS (mode);
	    v = rtvec_alloc (units);
	    for (i = 0; i < units; i++)
	      RTVEC_ELT (v, i) = val;
	    return gen_rtx_raw_CONST_VECTOR (mode, v);
	  }
#else
	  return NULL_RTX;
#endif
	}

      return tem;
    }
  /* For the following tests, ensure const0_rtx is op1.  */
  if (swap_commutative_operands_p (op0, op1)
      || (op0 == const0_rtx && op1 != const0_rtx))
    std::swap (op0, op1), code = swap_condition (code);

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    return simplify_gen_relational (code, mode, VOIDmode,
				    XEXP (op0, 0), XEXP (op0, 1));

  if (GET_MODE_CLASS (cmp_mode) == MODE_CC)
    return NULL_RTX;

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);
  return simplify_relational_operation_1 (code, mode, cmp_mode,
					  trueop0, trueop1);
}

/* This part of simplify_relational_operation is only used when CMP_MODE
   is not in class MODE_CC (i.e. it is a real comparison).

   MODE is the mode of the result, while CMP_MODE specifies the mode
   in which the comparison is done, so it is the mode of the operands.  */

static rtx
simplify_relational_operation_1 (enum rtx_code code, machine_mode mode,
				 machine_mode cmp_mode, rtx op0, rtx op1)
{
  enum rtx_code op0code = GET_CODE (op0);
  if (op1 == const0_rtx && COMPARISON_P (op0))
    {
      /* If op0 is a comparison, extract the comparison arguments
	 from it.  */
      if (code == NE)
	{
	  if (GET_MODE (op0) == mode)
	    return simplify_rtx (op0);
	  else
	    return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
      else if (code == EQ)
	{
	  enum rtx_code new_code = reversed_comparison_code (op0, NULL_RTX);
	  if (new_code != UNKNOWN)
	    return simplify_gen_relational (new_code, mode, VOIDmode,
					    XEXP (op0, 0), XEXP (op0, 1));
	}
    }

  /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
     (GEU/LTU a -C).  Likewise for (LTU/GEU (PLUS a C) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && CONST_INT_P (XEXP (op0, 1))
      && (rtx_equal_p (op1, XEXP (op0, 0))
	  || rtx_equal_p (op1, XEXP (op0, 1)))
      /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0).  */
      && XEXP (op0, 1) != const0_rtx)
    {
      rtx new_cmp
	= simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
      return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
				      cmp_mode, XEXP (op0, 0), new_cmp);
    }
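/* Illustrative usage sketch (added; not from the GCC sources; the
   register number is hypothetical): the fold above rewrites the
   unsigned overflow check "r + 4 < 4" as "r >= -4".  */
#if 0
  rtx r = gen_rtx_REG (SImode, 100);
  rtx sum = gen_rtx_PLUS (SImode, r, GEN_INT (4));
  rtx cmp = simplify_gen_relational (LTU, SImode, SImode, sum, GEN_INT (4));
  /* cmp is expected to be (geu:SI r (const_int -4)).  */
#endif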
  /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a).  */
  if ((code == LTU || code == GEU)
      && GET_CODE (op0) == PLUS
      && rtx_equal_p (op1, XEXP (op0, 1))
      /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b).  */
      && !rtx_equal_p (op1, XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, op0,
				    copy_rtx (XEXP (op0, 0)));

  if (op1 == const0_rtx)
    {
      /* Canonicalize (GTU x 0) as (NE x 0).  */
      if (code == GTU)
	return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
      /* Canonicalize (LEU x 0) as (EQ x 0).  */
      if (code == LEU)
	return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
    }
  else if (op1 == const1_rtx)
    {
      switch (code)
	{
	case GE:
	  /* Canonicalize (GE x 1) as (GT x 0).  */
	  return simplify_gen_relational (GT, mode, cmp_mode,
					  op0, const0_rtx);
	case GEU:
	  /* Canonicalize (GEU x 1) as (NE x 0).  */
	  return simplify_gen_relational (NE, mode, cmp_mode,
					  op0, const0_rtx);
	case LT:
	  /* Canonicalize (LT x 1) as (LE x 0).  */
	  return simplify_gen_relational (LE, mode, cmp_mode,
					  op0, const0_rtx);
	case LTU:
	  /* Canonicalize (LTU x 1) as (EQ x 0).  */
	  return simplify_gen_relational (EQ, mode, cmp_mode,
					  op0, const0_rtx);
	default:
	  break;
	}
    }
  else if (op1 == constm1_rtx)
    {
      /* Canonicalize (LE x -1) as (LT x 0).  */
      if (code == LE)
	return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
      /* Canonicalize (GT x -1) as (GE x 0).  */
      if (code == GT)
	return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
    }
  /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1))  */
  if ((code == EQ || code == NE)
      && (op0code == PLUS || op0code == MINUS)
      && CONSTANT_P (op1)
      && CONSTANT_P (XEXP (op0, 1))
      && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
    {
      rtx x = XEXP (op0, 0);
      rtx c = XEXP (op0, 1);
      enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
      rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);

      /* Detect an infinite recursive condition, where we oscillate at this
	 simplification case between:
	    A + B == C  <--->  C - B == A,
	 where A, B, and C are all constants with non-simplifiable expressions,
	 usually SYMBOL_REFs.  */
      if (GET_CODE (tem) == invcode
	  && CONSTANT_P (x)
	  && rtx_equal_p (c, XEXP (tem, 1)))
	return NULL_RTX;

      return simplify_gen_relational (code, mode, cmp_mode, x, tem);
    }
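/* Illustrative usage sketch (added; not from the GCC sources; the
   register number is hypothetical): moving the constant across the
   PLUS lets it fold, so (eq (plus r 3) 10) becomes (eq r 7).  */
#if 0
  rtx r = gen_rtx_REG (SImode, 100);
  rtx sum = gen_rtx_PLUS (SImode, r, GEN_INT (3));
  rtx cmp = simplify_gen_relational (EQ, SImode, SImode, sum, GEN_INT (10));
  /* cmp is expected to be (eq:SI r (const_int 7)).  */
#endif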
  /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
     the same as (zero_extract:SI FOO (const_int 1) BAR).  */
  if (code == NE
      && op1 == const0_rtx
      && GET_MODE_CLASS (mode) == MODE_INT
      && cmp_mode != VOIDmode
      /* ??? Work-around BImode bugs in the ia64 backend.  */
      && mode != BImode
      && cmp_mode != BImode
      && nonzero_bits (op0, cmp_mode) == 1
      && STORE_FLAG_VALUE == 1)
    return GET_MODE_SIZE (mode) > GET_MODE_SIZE (cmp_mode)
	   ? simplify_gen_unary (ZERO_EXTEND, mode, op0, cmp_mode)
	   : lowpart_subreg (mode, op0, cmp_mode);
  /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && op1 == const0_rtx
      && op0code == XOR)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op0, 1));

  /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (XEXP (op0, 0)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
				    CONST0_RTX (mode));

  /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    CONST0_RTX (mode));

  /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)).  */
  if ((code == EQ || code == NE)
      && op0code == XOR
      && CONST_SCALAR_INT_P (op1)
      && CONST_SCALAR_INT_P (XEXP (op0, 1)))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_binary (XOR, cmp_mode,
							 XEXP (op0, 1), op1));
  /* (eq/ne (and x y) x) simplifies to (eq/ne (and (not y) x) 0), which
     can be implemented with a BICS instruction on some targets, or
     constant-folded if y is a constant.  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 0), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
				      CONST0_RTX (cmp_mode));
    }

  /* Likewise for (eq/ne (and x y) y).  */
  if ((code == EQ || code == NE)
      && op0code == AND
      && rtx_equal_p (XEXP (op0, 1), op1)
      && !side_effects_p (op1)
      && op1 != CONST0_RTX (cmp_mode))
    {
      rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0), cmp_mode);
      rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));

      return simplify_gen_relational (code, mode, cmp_mode, lhs,
				      CONST0_RTX (cmp_mode));
    }
  /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped.  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && CONST_SCALAR_INT_P (op1))
    return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
				    simplify_gen_unary (BSWAP, cmp_mode,
							op1, cmp_mode));

  /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y).  */
  if ((code == EQ || code == NE)
      && GET_CODE (op0) == BSWAP
      && GET_CODE (op1) == BSWAP)
    return simplify_gen_relational (code, mode, cmp_mode,
				    XEXP (op0, 0), XEXP (op1, 0));

  if (op0code == POPCOUNT && op1 == const0_rtx)
    switch (code)
      {
      case EQ:
      case LE:
      case LEU:
	/* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)).  */
	return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      case NE:
      case GT:
      case GTU:
	/* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)).  */
	return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
					XEXP (op0, 0), const0_rtx);

      default:
	break;
      }

  return NULL_RTX;
}
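/* Illustrative usage sketch (added; not from the GCC sources; the
   register number is hypothetical): the BSWAP fold moves the byte
   swap onto the constant, where it evaluates at compile time.  */
#if 0
  rtx r = gen_rtx_REG (SImode, 100);
  rtx cmp = simplify_gen_relational (EQ, SImode, SImode,
				     gen_rtx_BSWAP (SImode, r),
				     GEN_INT (0x12345678));
  /* cmp is expected to be (eq:SI r (const_int 0x78563412)).  */
#endif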
enum
{
  CMP_EQ = 1,
  CMP_LT = 2,
  CMP_GT = 4,
  CMP_LTU = 8,
  CMP_GTU = 16
};

/* Convert the known results for EQ, LT, GT, LTU, GTU contained in
   KNOWN_RESULTS to a CONST_INT, based on the requested comparison CODE.
   For KNOWN_RESULTS to make sense it should be either CMP_EQ, or the
   logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
   For floating-point comparisons, assume that the operands were ordered.  */

static rtx
comparison_result (enum rtx_code code, int known_results)
{
  switch (code)
    {
    case EQ:
    case UNEQ:
      return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
    case NE:
    case LTGT:
      return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;

    case LT:
    case UNLT:
      return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
    case GE:
    case UNGE:
      return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;

    case GT:
    case UNGT:
      return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
    case LE:
    case UNLE:
      return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;

    case LTU:
      return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
    case GEU:
      return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;

    case GTU:
      return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
    case LEU:
      return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;

    case ORDERED:
      return const_true_rtx;
    case UNORDERED:
      return const0_rtx;
    default:
      gcc_unreachable ();
    }
}
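/* Note (added commentary): a caller that knows 5 < 7 passes
   CMP_LT | CMP_LTU; comparison_result then maps LT, LTU, LE, LEU and
   NE to const_true_rtx and GT, GTU, GE, GEU and EQ to const0_rtx.  */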
/* Check if the given comparison (done in the given MODE) is actually
   a tautology or a contradiction.  If the mode is VOIDmode, the
   comparison is done in "infinite precision".  If no simplification
   is possible, this function returns zero.  Otherwise, it returns
   either const_true_rtx or const0_rtx.  */

rtx
simplify_const_relational_operation (enum rtx_code code,
				     machine_mode mode,
				     rtx op0, rtx op1)
{
  rtx tem;
  rtx trueop0;
  rtx trueop1;

  gcc_assert (mode != VOIDmode
	      || (GET_MODE (op0) == VOIDmode
		  && GET_MODE (op1) == VOIDmode));

  /* If op0 is a compare, extract the comparison arguments from it.  */
  if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
    {
      op1 = XEXP (op0, 1);
      op0 = XEXP (op0, 0);

      if (GET_MODE (op0) != VOIDmode)
	mode = GET_MODE (op0);
      else if (GET_MODE (op1) != VOIDmode)
	mode = GET_MODE (op1);
      else
	return 0;
    }

  /* We can't simplify MODE_CC values since we don't know what the
     actual comparison is.  */
  if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC || CC0_P (op0))
    return 0;

  /* Make sure the constant is second.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  trueop0 = avoid_constant_pool_reference (op0);
  trueop1 = avoid_constant_pool_reference (op1);

  /* For integer comparisons of A and B maybe we can simplify A - B and can
     then simplify a comparison of that with zero.  If A and B are both either
     a register or a CONST_INT, this can't help; testing for these cases will
     prevent infinite recursion here and speed things up.

     We can only do this for EQ and NE comparisons as otherwise we may
     lose or introduce overflow which we cannot disregard as undefined as
     we do not know the signedness of the operation on either the left or
     the right hand side of the comparison.  */

  if (INTEGRAL_MODE_P (mode) && trueop1 != const0_rtx
      && (code == EQ || code == NE)
      && ! ((REG_P (op0) || CONST_INT_P (trueop0))
	    && (REG_P (op1) || CONST_INT_P (trueop1)))
      && 0 != (tem = simplify_binary_operation (MINUS, mode, op0, op1))
      /* We cannot do this if tem is a nonzero address.  */
      && ! nonzero_address_p (tem))
    return simplify_const_relational_operation (signed_condition (code),
						mode, tem, const0_rtx);

  if (! HONOR_NANS (mode) && code == ORDERED)
    return const_true_rtx;

  if (! HONOR_NANS (mode) && code == UNORDERED)
    return const0_rtx;
  /* For modes without NaNs, if the two operands are equal, we know the
     result except if they have side-effects.  Even with NaNs we know
     the result of unordered comparisons and, if signaling NaNs are
     irrelevant, also the result of LT/GT/LTGT.  */
  if ((! HONOR_NANS (trueop0)
       || code == UNEQ || code == UNLE || code == UNGE
       || ((code == LT || code == GT || code == LTGT)
	   && ! HONOR_SNANS (trueop0)))
      && rtx_equal_p (trueop0, trueop1)
      && ! side_effects_p (trueop0))
    return comparison_result (code, CMP_EQ);

  /* If the operands are floating-point constants, see if we can fold
     the result.  */
  if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
      && CONST_DOUBLE_AS_FLOAT_P (trueop1)
      && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
    {
      const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
      const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);

      /* Comparisons are unordered iff at least one of the values is NaN.  */
      if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
	switch (code)
	  {
	  case UNEQ:
	  case UNLT:
	  case UNGT:
	  case UNLE:
	  case UNGE:
	  case NE:
	  case UNORDERED:
	    return const_true_rtx;
	  case EQ:
	  case LT:
	  case GT:
	  case LE:
	  case GE:
	  case LTGT:
	  case ORDERED:
	    return const0_rtx;
	  default:
	    return 0;
	  }

      return comparison_result (code,
				(real_equal (d0, d1) ? CMP_EQ :
				 real_less (d0, d1) ? CMP_LT : CMP_GT));
    }
  /* Otherwise, see if the operands are both integers.  */
  if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
      && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
    {
      /* It would be nice if we really had a mode here.  However, the
	 largest int representable on the target is as good as
	 infinite.  */
      machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
      rtx_mode_t ptrueop0 = std::make_pair (trueop0, cmode);
      rtx_mode_t ptrueop1 = std::make_pair (trueop1, cmode);

      if (wi::eq_p (ptrueop0, ptrueop1))
	return comparison_result (code, CMP_EQ);
      else
	{
	  int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
	  cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
	  return comparison_result (code, cr);
	}
    }
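/* Illustrative usage sketch (added; not from the GCC sources):
   constant operands reach the wide-int comparison above.  */
#if 0
  rtx t = simplify_const_relational_operation (LT, SImode,
					       GEN_INT (5), GEN_INT (7));
  /* t is const_true_rtx; with LTU and operands -1 and 1 the result is
     const0_rtx, because -1 compares as all-ones when unsigned.  */
#endif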
  /* Optimize comparisons with upper and lower bounds.  */
  if (HWI_COMPUTABLE_MODE_P (mode)
      && CONST_INT_P (trueop1)
      && !side_effects_p (trueop0))
    {
      int sign;
      unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, mode);
      HOST_WIDE_INT val = INTVAL (trueop1);
      HOST_WIDE_INT mmin, mmax;

      if (code == GEU
	  || code == LEU
	  || code == GTU
	  || code == LTU)
	sign = 0;
      else
	sign = 1;

      /* Get a reduced range if the sign bit is zero.  */
      if (nonzero <= (GET_MODE_MASK (mode) >> 1))
	{
	  mmin = 0;
	  mmax = nonzero;
	}
      else
	{
	  rtx mmin_rtx, mmax_rtx;
	  get_mode_bounds (mode, sign, mode, &mmin_rtx, &mmax_rtx);

	  mmin = INTVAL (mmin_rtx);
	  mmax = INTVAL (mmax_rtx);
	  if (mmin < 0)
	    {
	      unsigned int sign_copies = num_sign_bit_copies (trueop0, mode);

	      mmin >>= (sign_copies - 1);
	      mmax >>= (sign_copies - 1);
	    }
	}

      switch (code)
	{
	/* x >= y is always true for y <= mmin, always false for y > mmax.  */
	case GEU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  break;
	case GE:
	  if (val <= mmin)
	    return const_true_rtx;
	  if (val > mmax)
	    return const0_rtx;
	  break;

	/* x <= y is always true for y >= mmax, always false for y < mmin.  */
	case LEU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  break;
	case LE:
	  if (val >= mmax)
	    return const_true_rtx;
	  if (val < mmin)
	    return const0_rtx;
	  break;

	case EQ:
	  /* x == y is always false for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const0_rtx;
	  break;

	/* x > y is always false for y >= mmax, always true for y < mmin.  */
	case GTU:
	  if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
	    return const_true_rtx;
	  break;
	case GT:
	  if (val >= mmax)
	    return const0_rtx;
	  if (val < mmin)
	    return const_true_rtx;
	  break;

	/* x < y is always false for y <= mmin, always true for y > mmax.  */
	case LTU:
	  if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
	    return const0_rtx;
	  if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
	    return const_true_rtx;
	  break;
	case LT:
	  if (val <= mmin)
	    return const0_rtx;
	  if (val > mmax)
	    return const_true_rtx;
	  break;

	case NE:
	  /* x != y is always true for y out of range.  */
	  if (val < mmin || val > mmax)
	    return const_true_rtx;
	  break;

	default:
	  break;
	}
    }
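/* Illustrative usage sketch (added; not from the GCC sources; the
   register number is hypothetical): QImode values never exceed 255,
   so the bounds check above folds the comparison outright.  */
#if 0
  rtx r = gen_rtx_REG (QImode, 100);
  rtx t = simplify_const_relational_operation (LEU, QImode, r,
					       GEN_INT (255));
  /* t is const_true_rtx; GTU against 255 would give const0_rtx.  */
#endif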
  /* Optimize integer comparisons with zero.  */
  if (trueop1 == const0_rtx && !side_effects_p (trueop0))
    {
      /* Some addresses are known to be nonzero.  We don't know
	 their sign, but equality comparisons are known.  */
      if (nonzero_address_p (trueop0))
	{
	  if (code == EQ || code == LEU)
	    return const0_rtx;
	  if (code == NE || code == GTU)
	    return const_true_rtx;
	}

      /* See if the first operand is an IOR with a constant.  If so, we
	 may be able to determine the result of this comparison.  */
      if (GET_CODE (op0) == IOR)
	{
	  rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
	  if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
	    {
	      int sign_bitnum = GET_MODE_PRECISION (mode) - 1;
	      int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
			      && (UINTVAL (inner_const)
				  & ((unsigned HOST_WIDE_INT) 1
				     << sign_bitnum)));

	      switch (code)
		{
		case EQ:
		case LEU:
		  return const0_rtx;
		case NE:
		case GTU:
		  return const_true_rtx;
		case LT:
		case LE:
		  if (has_sign)
		    return const_true_rtx;
		  break;
		case GE:
		case GT:
		  if (has_sign)
		    return const0_rtx;
		  break;
		default:
		  break;
		}
	    }
	}
    }
  /* Optimize comparison of ABS with zero.  */
  if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
      && (GET_CODE (trueop0) == ABS
	  || (GET_CODE (trueop0) == FLOAT_EXTEND
	      && GET_CODE (XEXP (trueop0, 0)) == ABS)))
    {
      switch (code)
	{
	case LT:
	  /* Optimize abs(x) < 0.0.  */
	  if (!HONOR_SNANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) < 0 is false"));
	      return const0_rtx;
	    }
	  break;

	case GE:
	  /* Optimize abs(x) >= 0.0.  */
	  if (!HONOR_NANS (mode)
	      && (!INTEGRAL_MODE_P (mode)
		  || (!flag_wrapv && !flag_trapv && flag_strict_overflow)))
	    {
	      if (INTEGRAL_MODE_P (mode)
		  && (issue_strict_overflow_warning
		      (WARN_STRICT_OVERFLOW_CONDITIONAL)))
		warning (OPT_Wstrict_overflow,
			 ("assuming signed overflow does not occur when "
			  "assuming abs (x) >= 0 is true"));
	      return const_true_rtx;
	    }
	  break;

	case UNGE:
	  /* Optimize ! (abs(x) < 0.0).  */
	  return const_true_rtx;

	default:
	  break;
	}
    }

  return 0;
}
/* Simplify CODE, an operation with result mode MODE and three operands,
   OP0, OP1, and OP2.  OP0_MODE was the mode of OP0 before it became
   a constant.  Return 0 if no simplification is possible.  */

rtx
simplify_ternary_operation (enum rtx_code code, machine_mode mode,
			    machine_mode op0_mode, rtx op0, rtx op1,
			    rtx op2)
{
  unsigned int width = GET_MODE_PRECISION (mode);
  bool any_change = false;
  rtx tem, trueop2;

  /* VOIDmode means "infinite" precision.  */
  if (width == 0)
    width = HOST_BITS_PER_WIDE_INT;

  switch (code)
    {
    case FMA:
      /* Simplify negations around the multiplication.  */
      /* -a * -b + c  =>  a * b + c.  */
      if (GET_CODE (op0) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op1, mode);
	  if (tem)
	    op1 = tem, op0 = XEXP (op0, 0), any_change = true;
	}
      else if (GET_CODE (op1) == NEG)
	{
	  tem = simplify_unary_operation (NEG, mode, op0, mode);
	  if (tem)
	    op0 = tem, op1 = XEXP (op1, 0), any_change = true;
	}

      /* Canonicalize the two multiplication operands.  */
      /* a * -b + c  =>  -b * a + c.  */
      if (swap_commutative_operands_p (op0, op1))
	std::swap (op0, op1), any_change = true;

      if (any_change)
	return gen_rtx_FMA (mode, op0, op1, op2);
      break;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      if (CONST_INT_P (op0)
	  && CONST_INT_P (op1)
	  && CONST_INT_P (op2)
	  && ((unsigned) INTVAL (op1) + (unsigned) INTVAL (op2) <= width)
	  && width <= (unsigned) HOST_BITS_PER_WIDE_INT)
	{
	  /* Extracting a bit-field from a constant */
	  unsigned HOST_WIDE_INT val = UINTVAL (op0);
	  HOST_WIDE_INT op1val = INTVAL (op1);
	  HOST_WIDE_INT op2val = INTVAL (op2);
	  if (BITS_BIG_ENDIAN)
	    val >>= GET_MODE_PRECISION (op0_mode) - op2val - op1val;
	  else
	    val >>= op2val;

	  if (HOST_BITS_PER_WIDE_INT != op1val)
	    {
	      /* First zero-extend.  */
	      val &= ((unsigned HOST_WIDE_INT) 1 << op1val) - 1;
	      /* If desired, propagate sign bit.  */
	      if (code == SIGN_EXTRACT
		  && (val & ((unsigned HOST_WIDE_INT) 1 << (op1val - 1)))
		     != 0)
		val |= ~ (((unsigned HOST_WIDE_INT) 1 << op1val) - 1);
	    }

	  return gen_int_mode (val, mode);
	}
      break;
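/* Illustrative worked example (added; not from the GCC sources):
   extracting the 4-bit field at bit 4 of 0xab (assuming
   !BITS_BIG_ENDIAN) first shifts to get 0xa; ZERO_EXTRACT returns 10,
   while SIGN_EXTRACT sees the field's top bit set and returns -6.  */
#if 0
  rtx z = simplify_ternary_operation (ZERO_EXTRACT, SImode, SImode,
				      GEN_INT (0xab), GEN_INT (4),
				      GEN_INT (4));
  rtx s = simplify_ternary_operation (SIGN_EXTRACT, SImode, SImode,
				      GEN_INT (0xab), GEN_INT (4),
				      GEN_INT (4));
  /* z is (const_int 10) and s is (const_int -6).  */
#endif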
    case IF_THEN_ELSE:
      if (CONST_INT_P (op0))
	return op0 != const0_rtx ? op1 : op2;
      /* Convert c ? a : a into "a".  */
      if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
	return op1;

      /* Convert a != b ? a : b into "a".  */
      if (GET_CODE (op0) == NE
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op1;

      /* Convert a == b ? a : b into "b".  */
      if (GET_CODE (op0) == EQ
	  && ! side_effects_p (op0)
	  && ! HONOR_NANS (mode)
	  && ! HONOR_SIGNED_ZEROS (mode)
	  && ((rtx_equal_p (XEXP (op0, 0), op1)
	       && rtx_equal_p (XEXP (op0, 1), op2))
	      || (rtx_equal_p (XEXP (op0, 0), op2)
		  && rtx_equal_p (XEXP (op0, 1), op1))))
	return op2;
      /* Convert (!c) != {0,...,0} ? a : b into
	 c != {0,...,0} ? b : a for vector modes.  */
      if (VECTOR_MODE_P (GET_MODE (op1))
	  && GET_CODE (op0) == NE
	  && GET_CODE (XEXP (op0, 0)) == NOT
	  && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
	{
	  rtx cv = XEXP (op0, 1);
	  int nunits = CONST_VECTOR_NUNITS (cv);
	  bool ok = true;
	  for (int i = 0; i < nunits; ++i)
	    if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
	      {
		ok = false;
		break;
	      }
	  if (ok)
	    {
	      rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
					XEXP (XEXP (op0, 0), 0),
					XEXP (op0, 1));
	      rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
	      return retval;
	    }
	}
      if (COMPARISON_P (op0) && ! side_effects_p (op0))
	{
	  machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
				   ? GET_MODE (XEXP (op0, 1))
				   : GET_MODE (XEXP (op0, 0)));
	  rtx temp;

	  /* Look for happy constants in op1 and op2.  */
	  if (CONST_INT_P (op1) && CONST_INT_P (op2))
	    {
	      HOST_WIDE_INT t = INTVAL (op1);
	      HOST_WIDE_INT f = INTVAL (op2);

	      if (t == STORE_FLAG_VALUE && f == 0)
		code = GET_CODE (op0);
	      else if (t == 0 && f == STORE_FLAG_VALUE)
		{
		  enum rtx_code tmp;
		  tmp = reversed_comparison_code (op0, NULL_RTX);
		  if (tmp == UNKNOWN)
		    break;
		  code = tmp;
		}
	      else
		break;

	      return simplify_gen_relational (code, mode, cmp_mode,
					      XEXP (op0, 0), XEXP (op0, 1));
	    }

	  if (cmp_mode == VOIDmode)
	    cmp_mode = op0_mode;
	  temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
						cmp_mode, XEXP (op0, 0),
						XEXP (op0, 1));

	  /* See if any simplifications were possible.  */
	  if (temp)
	    {
	      if (CONST_INT_P (temp))
		return temp == const0_rtx ? op2 : op1;
	      else if (temp)
		return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
	    }
	}
      break;
    case VEC_MERGE:
      gcc_assert (GET_MODE (op0) == mode);
      gcc_assert (GET_MODE (op1) == mode);
      gcc_assert (VECTOR_MODE_P (mode));
      trueop2 = avoid_constant_pool_reference (op2);
      if (CONST_INT_P (trueop2))
	{
	  int elt_size = GET_MODE_UNIT_SIZE (mode);
	  unsigned n_elts = (GET_MODE_SIZE (mode) / elt_size);
	  unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
	  unsigned HOST_WIDE_INT mask;
	  if (n_elts == HOST_BITS_PER_WIDE_INT)
	    mask = -1;
	  else
	    mask = ((unsigned HOST_WIDE_INT) 1 << n_elts) - 1;

	  if (!(sel & mask) && !side_effects_p (op0))
	    return op1;
	  if ((sel & mask) == mask && !side_effects_p (op1))
	    return op0;

	  rtx trueop0 = avoid_constant_pool_reference (op0);
	  rtx trueop1 = avoid_constant_pool_reference (op1);
	  if (GET_CODE (trueop0) == CONST_VECTOR
	      && GET_CODE (trueop1) == CONST_VECTOR)
	    {
	      rtvec v = rtvec_alloc (n_elts);
	      unsigned int i;

	      for (i = 0; i < n_elts; i++)
		RTVEC_ELT (v, i) = ((sel & ((unsigned HOST_WIDE_INT) 1 << i))
				    ? CONST_VECTOR_ELT (trueop0, i)
				    : CONST_VECTOR_ELT (trueop1, i));
	      return gen_rtx_CONST_VECTOR (mode, v);
	    }
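	  /* Note (added commentary): bit I of SEL selects element I
	     from op0 when set and from op1 when clear, so a four
	     element merge with SEL == 0b0101 takes elements 0 and 2
	     from op0 and elements 1 and 3 from op1.  */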
	  /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
	     if no element from a appears in the result.  */
	  if (GET_CODE (op0) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op0, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
		  if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 1), op1, op2);
		  if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 XEXP (op0, 0), op1, op2);
		}
	    }
	  if (GET_CODE (op1) == VEC_MERGE)
	    {
	      tem = avoid_constant_pool_reference (XEXP (op1, 2));
	      if (CONST_INT_P (tem))
		{
		  unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
		  if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 1), op2);
		  if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
		    return simplify_gen_ternary (code, mode, mode,
						 op0, XEXP (op1, 0), op2);
		}
	    }

	  /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i)))
	     a 1 << i) with a.  */
	  if (GET_CODE (op0) == VEC_DUPLICATE
	      && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
	      && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
	      && mode_nunits[GET_MODE (XEXP (op0, 0))] == 1)
	    {
	      tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
	      if (CONST_INT_P (tem) && CONST_INT_P (op2))
		{
		  if (XEXP (XEXP (op0, 0), 0) == op1
		      && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
		    return op1;
		}
	    }
	}

      if (rtx_equal_p (op0, op1)
	  && !side_effects_p (op2) && !side_effects_p (op1))
	return op0;

      break;

    default:
      gcc_unreachable ();
    }

  return 0;
}
/* Evaluate a SUBREG of a CONST_INT or CONST_WIDE_INT or CONST_DOUBLE
   or CONST_FIXED or CONST_VECTOR, returning another CONST_INT or
   CONST_WIDE_INT or CONST_DOUBLE or CONST_FIXED or CONST_VECTOR.

   Works by unpacking OP into a collection of 8-bit values
   represented as a little-endian array of 'unsigned char', selecting by BYTE,
   and then repacking them again for OUTERMODE.  */

static rtx
simplify_immed_subreg (machine_mode outermode, rtx op,
		       machine_mode innermode, unsigned int byte)
{
  enum {
    value_bit = 8,
    value_mask = (1 << value_bit) - 1
  };
  unsigned char value[MAX_BITSIZE_MODE_ANY_MODE / value_bit];
  int value_start;
  int i;
  int elem;

  int num_elem;
  rtx *elems;
  int elem_bitsize;
  rtx result_s;
  rtvec result_v = NULL;
  enum mode_class outer_class;
  machine_mode outer_submode;
  int max_bitsize;

  /* Some ports misuse CCmode.  */
  if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (op))
    return op;

  /* We have no way to represent a complex constant at the rtl level.  */
  if (COMPLEX_MODE_P (outermode))
    return NULL_RTX;

  /* We support any size mode.  */
  max_bitsize = MAX (GET_MODE_BITSIZE (outermode),
		     GET_MODE_BITSIZE (innermode));

  /* Unpack the value.  */

  if (GET_CODE (op) == CONST_VECTOR)
    {
      num_elem = CONST_VECTOR_NUNITS (op);
      elems = &CONST_VECTOR_ELT (op, 0);
      elem_bitsize = GET_MODE_UNIT_BITSIZE (innermode);
    }
  else
    {
      num_elem = 1;
      elems = &op;
      elem_bitsize = max_bitsize;
    }
  /* If this asserts, it is too complicated; reducing value_bit may help.  */
  gcc_assert (BITS_PER_UNIT % value_bit == 0);
  /* I don't know how to handle endianness of sub-units.  */
  gcc_assert (elem_bitsize % BITS_PER_UNIT == 0);
  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char * vp;
      rtx el = elems[elem];

      /* Vectors are kept in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + (bytele * BITS_PER_UNIT) / value_bit;
      }

      switch (GET_CODE (el))
	{
	case CONST_INT:
	  for (i = 0;
	       i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
	       i += value_bit)
	    *vp++ = INTVAL (el) >> i;
	  /* CONST_INTs are always logically sign-extended.  */
	  for (; i < elem_bitsize; i += value_bit)
	    *vp++ = INTVAL (el) < 0 ? -1 : 0;
	  break;

	case CONST_WIDE_INT:
	  {
	    rtx_mode_t val = std::make_pair (el, innermode);
	    unsigned char extend = wi::sign_mask (val);

	    for (i = 0; i < elem_bitsize; i += value_bit)
	      *vp++ = wi::extract_uhwi (val, i, value_bit);
	    for (; i < elem_bitsize; i += value_bit)
	      *vp++ = extend;
	  }
	  break;

	case CONST_DOUBLE:
	  if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (el) == VOIDmode)
	    {
	      unsigned char extend = 0;
	      /* If this triggers, someone should have generated a
		 CONST_INT instead.  */
	      gcc_assert (elem_bitsize > HOST_BITS_PER_WIDE_INT);

	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_DOUBLE_LOW (el) >> i;
	      while (i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize)
		{
		  *vp++
		    = CONST_DOUBLE_HIGH (el) >> (i - HOST_BITS_PER_WIDE_INT);
		  i += value_bit;
		}

	      if (CONST_DOUBLE_HIGH (el) >> (HOST_BITS_PER_WIDE_INT - 1))
		extend = -1;
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = extend;
	    }
	  else
	    {
	      /* This is big enough for anything on the platform.  */
	      long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];
	      int bitsize = GET_MODE_BITSIZE (GET_MODE (el));

	      gcc_assert (SCALAR_FLOAT_MODE_P (GET_MODE (el)));
	      gcc_assert (bitsize <= elem_bitsize);
	      gcc_assert (bitsize % value_bit == 0);

	      real_to_target (tmp, CONST_DOUBLE_REAL_VALUE (el),
			      GET_MODE (el));

	      /* real_to_target produces its result in words affected by
		 FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
		 and use WORDS_BIG_ENDIAN instead; see the documentation
		 of SUBREG in rtl.texi.  */
	      for (i = 0; i < bitsize; i += value_bit)
		{
		  int ibase;
		  if (WORDS_BIG_ENDIAN)
		    ibase = bitsize - 1 - i;
		  else
		    ibase = i;
		  *vp++ = tmp[ibase / 32] >> i % 32;
		}

	      /* It shouldn't matter what's done here, so fill it with
		 zero.  */
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	case CONST_FIXED:
	  if (elem_bitsize <= HOST_BITS_PER_WIDE_INT)
	    {
	      for (i = 0; i < elem_bitsize; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	    }
	  else
	    {
	      for (i = 0; i < HOST_BITS_PER_WIDE_INT; i += value_bit)
		*vp++ = CONST_FIXED_VALUE_LOW (el) >> i;
	      for (; i < HOST_BITS_PER_DOUBLE_INT && i < elem_bitsize;
		   i += value_bit)
		*vp++ = CONST_FIXED_VALUE_HIGH (el)
			>> (i - HOST_BITS_PER_WIDE_INT);
	      for (; i < elem_bitsize; i += value_bit)
		*vp++ = 0;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  /* Now, pick the right byte to start with.  */
  /* Renumber BYTE so that the least-significant byte is byte 0.  A special
     case is paradoxical SUBREGs, which shouldn't be adjusted since they
     will already have offset 0.  */
  if (GET_MODE_SIZE (innermode) >= GET_MODE_SIZE (outermode))
    {
      unsigned ibyte = (GET_MODE_SIZE (innermode) - GET_MODE_SIZE (outermode)
			- byte);
      unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
      unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
      byte = (subword_byte % UNITS_PER_WORD
	      + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
    }

  /* BYTE should still be inside OP.  (Note that BYTE is unsigned,
     so if it's become negative it will instead be very large.)  */
  gcc_assert (byte < GET_MODE_SIZE (innermode));

  /* Convert from bytes to chunks of size value_bit.  */
  value_start = byte * (BITS_PER_UNIT / value_bit);

  /* Re-pack the value.  */
  num_elem = GET_MODE_NUNITS (outermode);

  if (VECTOR_MODE_P (outermode))
    {
      result_v = rtvec_alloc (num_elem);
      elems = &RTVEC_ELT (result_v, 0);
    }
  else
    elems = &result_s;

  outer_submode = GET_MODE_INNER (outermode);
  outer_class = GET_MODE_CLASS (outer_submode);
  elem_bitsize = GET_MODE_BITSIZE (outer_submode);

  gcc_assert (elem_bitsize % value_bit == 0);
  gcc_assert (elem_bitsize + value_start * value_bit <= max_bitsize);

  for (elem = 0; elem < num_elem; elem++)
    {
      unsigned char *vp;

      /* Vectors are stored in target memory order.  (This is probably
	 a mistake.)  */
      {
	unsigned byte = (elem * elem_bitsize) / BITS_PER_UNIT;
	unsigned ibyte = (((num_elem - 1 - elem) * elem_bitsize)
			  / BITS_PER_UNIT);
	unsigned word_byte = WORDS_BIG_ENDIAN ? ibyte : byte;
	unsigned subword_byte = BYTES_BIG_ENDIAN ? ibyte : byte;
	unsigned bytele = (subword_byte % UNITS_PER_WORD
			   + (word_byte / UNITS_PER_WORD) * UNITS_PER_WORD);
	vp = value + value_start + (bytele * BITS_PER_UNIT) / value_bit;
      }
      switch (outer_class)
	{
	case MODE_INT:
	case MODE_PARTIAL_INT:
	  {
	    int u;
	    int base = 0;
	    int units
	      = (GET_MODE_BITSIZE (outer_submode) + HOST_BITS_PER_WIDE_INT - 1)
		/ HOST_BITS_PER_WIDE_INT;
	    HOST_WIDE_INT tmp[MAX_BITSIZE_MODE_ANY_INT / HOST_BITS_PER_WIDE_INT];
	    wide_int r;

	    if (GET_MODE_PRECISION (outer_submode) > MAX_BITSIZE_MODE_ANY_INT)
	      return NULL_RTX;
	    for (u = 0; u < units; u++)
	      {
		unsigned HOST_WIDE_INT buf = 0;
		for (i = 0;
		     i < HOST_BITS_PER_WIDE_INT && base + i < elem_bitsize;
		     i += value_bit)
		  buf |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;

		tmp[u] = buf;
		base += HOST_BITS_PER_WIDE_INT;
	      }
	    r = wide_int::from_array (tmp, units,
				      GET_MODE_PRECISION (outer_submode));
#if TARGET_SUPPORTS_WIDE_INT == 0
	    /* Make sure r will fit into CONST_INT or CONST_DOUBLE.  */
	    if (wi::min_precision (r, SIGNED) > HOST_BITS_PER_DOUBLE_INT)
	      return NULL_RTX;
#endif
	    elems[elem] = immed_wide_int_const (r, outer_submode);
	  }
	  break;

	case MODE_FLOAT:
	case MODE_DECIMAL_FLOAT:
	  {
	    REAL_VALUE_TYPE r;
	    long tmp[MAX_BITSIZE_MODE_ANY_MODE / 32];

	    /* real_from_target wants its input in words affected by
	       FLOAT_WORDS_BIG_ENDIAN.  However, we ignore this,
	       and use WORDS_BIG_ENDIAN instead; see the documentation
	       of SUBREG in rtl.texi.  */
	    for (i = 0; i < max_bitsize / 32; i++)
	      tmp[i] = 0;
	    for (i = 0; i < elem_bitsize; i += value_bit)
	      {
		int ibase;
		if (WORDS_BIG_ENDIAN)
		  ibase = elem_bitsize - 1 - i;
		else
		  ibase = i;
		tmp[ibase / 32] |= (*vp++ & value_mask) << i % 32;
	      }

	    real_from_target (&r, tmp, outer_submode);
	    elems[elem] = const_double_from_real_value (r, outer_submode);
	  }
	  break;

	case MODE_FRACT:
	case MODE_UFRACT:
	case MODE_ACCUM:
	case MODE_UACCUM:
	  {
	    FIXED_VALUE_TYPE f;
	    f.data.low = 0;
	    f.data.high = 0;
	    f.mode = outer_submode;

	    for (i = 0;
		 i < HOST_BITS_PER_WIDE_INT && i < elem_bitsize;
		 i += value_bit)
	      f.data.low |= (unsigned HOST_WIDE_INT)(*vp++ & value_mask) << i;
	    for (; i < elem_bitsize; i += value_bit)
	      f.data.high |= ((unsigned HOST_WIDE_INT)(*vp++ & value_mask)
			      << (i - HOST_BITS_PER_WIDE_INT));

	    elems[elem] = CONST_FIXED_FROM_FIXED_VALUE (f, outer_submode);
	  }
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  if (VECTOR_MODE_P (outermode))
    return gen_rtx_CONST_VECTOR (outermode, result_v);
  else
    return result_s;
}
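/* Illustrative usage sketch (added; not from the GCC sources):
   reading the low half of a constant through a subreg goes through
   the unpack and repack loops above.  On a little-endian target byte
   offset 0 selects the least significant half.  */
#if 0
  rtx lo = simplify_subreg (HImode, GEN_INT (0x12345678), SImode, 0);
  /* lo is (const_int 0x5678) on little-endian targets.  */
#endif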
/* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
   Return 0 if no simplifications are possible.  */
rtx
simplify_subreg (machine_mode outermode, rtx op,
		 machine_mode innermode, unsigned int byte)
{
  /* Little bit of sanity checking.  */
  gcc_assert (innermode != VOIDmode);
  gcc_assert (outermode != VOIDmode);
  gcc_assert (innermode != BLKmode);
  gcc_assert (outermode != BLKmode);

  gcc_assert (GET_MODE (op) == innermode
	      || GET_MODE (op) == VOIDmode);

  if ((byte % GET_MODE_SIZE (outermode)) != 0)
    return NULL_RTX;

  if (byte >= GET_MODE_SIZE (innermode))
    return NULL_RTX;

  if (outermode == innermode && !byte)
    return op;

  if (CONST_SCALAR_INT_P (op)
      || CONST_DOUBLE_AS_FLOAT_P (op)
      || GET_CODE (op) == CONST_FIXED
      || GET_CODE (op) == CONST_VECTOR)
    return simplify_immed_subreg (outermode, op, innermode, byte);

  /* Changing mode twice with SUBREG => just change it once,
     or not at all if changing back op starting mode.  */
  if (GET_CODE (op) == SUBREG)
    {
      machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
      int final_offset = byte + SUBREG_BYTE (op);
      rtx newx;

      if (outermode == innermostmode
	  && byte == 0 && SUBREG_BYTE (op) == 0)
	return SUBREG_REG (op);

      /* The SUBREG_BYTE represents offset, as if the value were stored
	 in memory.  Irritating exception is paradoxical subreg, where
	 we define SUBREG_BYTE to be 0.  On big endian machines, this
	 value should be negative.  For a moment, undo this exception.  */
      if (byte == 0 && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	{
	  int difference = (GET_MODE_SIZE (innermode)
			    - GET_MODE_SIZE (outermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}

      if (SUBREG_BYTE (op) == 0
	  && GET_MODE_SIZE (innermostmode) < GET_MODE_SIZE (innermode))
	{
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (innermode));
	  if (WORDS_BIG_ENDIAN)
	    final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    final_offset += difference % UNITS_PER_WORD;
	}
      /* See whether resulting subreg will be paradoxical.  */
      if (GET_MODE_SIZE (innermostmode) > GET_MODE_SIZE (outermode))
	{
	  /* In nonparadoxical subregs we can't handle negative offsets.  */
	  if (final_offset < 0)
	    return NULL_RTX;
	  /* Bail out in case resulting subreg would be incorrect.  */
	  if (final_offset % GET_MODE_SIZE (outermode)
	      || (unsigned) final_offset >= GET_MODE_SIZE (innermostmode))
	    return NULL_RTX;
	}
      else
	{
	  int offset = 0;
	  int difference = (GET_MODE_SIZE (innermostmode)
			    - GET_MODE_SIZE (outermode));
	  /* In paradoxical subreg, see if we are still looking on lower part.
	     If so, our SUBREG_BYTE will be 0.  */
	  if (WORDS_BIG_ENDIAN)
	    offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	  if (BYTES_BIG_ENDIAN)
	    offset += difference % UNITS_PER_WORD;
	  if (offset == final_offset)
	    final_offset = 0;
	  else
	    return NULL_RTX;
	}

      /* Recurse for further possible simplifications.  */
      newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
			      final_offset);
      if (newx)
	return newx;
      if (validate_subreg (outermode, innermostmode,
			   SUBREG_REG (op), final_offset))
	{
	  newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
	  if (SUBREG_PROMOTED_VAR_P (op)
	      && SUBREG_PROMOTED_SIGN (op) >= 0
	      && GET_MODE_CLASS (outermode) == MODE_INT
	      && IN_RANGE (GET_MODE_SIZE (outermode),
			   GET_MODE_SIZE (innermode),
			   GET_MODE_SIZE (innermostmode))
	      && subreg_lowpart_p (newx))
	    {
	      SUBREG_PROMOTED_VAR_P (newx) = 1;
	      SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
	    }
	  return newx;
	}
      return NULL_RTX;
    }
  /* SUBREG of a hard register => just change the register number
     and/or mode.  If the hard register is not valid in that mode,
     suppress this simplification.  If the hard register is the stack,
     frame, or argument pointer, leave this as a SUBREG.  */

  if (REG_P (op) && HARD_REGISTER_P (op))
    {
      unsigned int regno, final_regno;

      regno = REGNO (op);
      final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
      if (HARD_REGISTER_NUM_P (final_regno))
	{
	  rtx x;
	  int final_offset = byte;

	  /* Adjust offset for paradoxical subregs.  */
	  if (byte == 0
	      && GET_MODE_SIZE (innermode) < GET_MODE_SIZE (outermode))
	    {
	      int difference = (GET_MODE_SIZE (innermode)
				- GET_MODE_SIZE (outermode));
	      if (WORDS_BIG_ENDIAN)
		final_offset += (difference / UNITS_PER_WORD) * UNITS_PER_WORD;
	      if (BYTES_BIG_ENDIAN)
		final_offset += difference % UNITS_PER_WORD;
	    }

	  x = gen_rtx_REG_offset (op, outermode, final_regno, final_offset);

	  /* Propagate original regno.  We don't have any way to specify
	     the offset inside original regno, so do so only for lowpart.
	     The information is used only by alias analysis that can not
	     grok partial register anyway.  */

	  if (subreg_lowpart_offset (outermode, innermode) == byte)
	    ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
	  return x;
	}
    }
  /* If we have a SUBREG of a register that we are replacing and we are
     replacing it with a MEM, make a new MEM and try replacing the
     SUBREG with it.  Don't do this if the MEM has a mode-dependent address
     or if we would be widening it.  */

  if (MEM_P (op)
      && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
      /* Allow splitting of volatile memory references in case we don't
	 have instruction to move the whole thing.  */
      && (! MEM_VOLATILE_P (op)
	  || ! have_insn_for (SET, innermode))
      && GET_MODE_SIZE (outermode) <= GET_MODE_SIZE (GET_MODE (op)))
    return adjust_address_nv (op, outermode, byte);
  /* Handle complex values represented as CONCAT
     of real and imaginary part.  */
  if (GET_CODE (op) == CONCAT)
    {
      unsigned int part_size, final_offset;
      rtx part, res;

      part_size = GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)));
      if (byte < part_size)
	{
	  part = XEXP (op, 0);
	  final_offset = byte;
	}
      else
	{
	  part = XEXP (op, 1);
	  final_offset = byte - part_size;
	}

      if (final_offset + GET_MODE_SIZE (outermode) > part_size)
	return NULL_RTX;

      res = simplify_subreg (outermode, part, GET_MODE (part), final_offset);
      if (res)
	return res;
      if (validate_subreg (outermode, GET_MODE (part), part, final_offset))
	return gen_rtx_SUBREG (outermode, part, final_offset);
      return NULL_RTX;
    }
  /* A SUBREG resulting from a zero extension may fold to zero if
     it extracts higher bits than the ZERO_EXTEND's source bits.  */
  if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
    {
      unsigned int bitpos = subreg_lsb_1 (outermode, innermode, byte);
      if (bitpos >= GET_MODE_PRECISION (GET_MODE (XEXP (op, 0))))
	return CONST0_RTX (outermode);
    }

  if (SCALAR_INT_MODE_P (outermode)
      && SCALAR_INT_MODE_P (innermode)
      && GET_MODE_PRECISION (outermode) < GET_MODE_PRECISION (innermode)
      && byte == subreg_lowpart_offset (outermode, innermode))
    {
      rtx tem = simplify_truncation (outermode, op, innermode);
      if (tem)
	return tem;
    }

  return NULL_RTX;
}
/* Make a SUBREG operation or equivalent if it folds.  */

rtx
simplify_gen_subreg (machine_mode outermode, rtx op,
		     machine_mode innermode, unsigned int byte)
{
  rtx newx;

  newx = simplify_subreg (outermode, op, innermode, byte);
  if (newx)
    return newx;

  if (GET_CODE (op) == SUBREG
      || GET_CODE (op) == CONCAT
      || GET_MODE (op) == VOIDmode)
    return NULL_RTX;

  if (validate_subreg (outermode, innermode, op, byte))
    return gen_rtx_SUBREG (outermode, op, byte);

  return NULL_RTX;
}
/* Generates a subreg to get the least significant part of EXPR (in mode
   INNER_MODE) to OUTER_MODE.  */

rtx
lowpart_subreg (machine_mode outer_mode, rtx expr,
		machine_mode inner_mode)
{
  return simplify_gen_subreg (outer_mode, expr, inner_mode,
			      subreg_lowpart_offset (outer_mode, inner_mode));
}
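/* Illustrative usage sketch (added; not from the GCC sources; the
   register number is hypothetical): taking the SImode low part of a
   DImode register.  */
#if 0
  rtx r = gen_rtx_REG (DImode, 100);
  rtx lo = lowpart_subreg (SImode, r, DImode);
  /* lo is (subreg:SI (reg:DI 100) 0) on little-endian targets,
     offset 4 on big-endian ones.  */
#endif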
/* Simplify X, an rtx expression.

   Return the simplified expression or NULL if no simplifications
   were possible.

   This is the preferred entry point into the simplification routines;
   however, we still allow passes to call the more specific routines.

   Right now GCC has three (yes, three) major bodies of RTL simplification
   code that need to be unified.

	1. fold_rtx in cse.c.  This code uses various CSE specific
	   information to aid in RTL simplification.

	2. simplify_rtx in combine.c.  Similar to fold_rtx, except that
	   it uses combine specific information to aid in RTL
	   simplification.

	3. The routines in this file.

   Long term we want to only have one body of simplification code; to
   get to that state I recommend the following steps:

	1. Pore over fold_rtx & simplify_rtx and move any simplifications
	   which are not pass dependent state into these routines.

	2. As code is moved by #1, change fold_rtx & simplify_rtx to
	   use this routine whenever possible.

	3. Allow for pass dependent state to be provided to these
	   routines and add simplifications based on the pass dependent
	   state.  Remove code from cse.c & combine.c that becomes
	   redundant/dead.

   It will take time, but ultimately the compiler will be easier to
   maintain and improve.  It's totally silly that when we add a
   simplification that it needs to be added to 4 places (3 for RTL
   simplification and 1 for tree simplification).  */
)
6096 const enum rtx_code code
= GET_CODE (x
);
6097 const machine_mode mode
= GET_MODE (x
);
6099 switch (GET_RTX_CLASS (code
))
6102 return simplify_unary_operation (code
, mode
,
6103 XEXP (x
, 0), GET_MODE (XEXP (x
, 0)));
6104 case RTX_COMM_ARITH
:
6105 if (swap_commutative_operands_p (XEXP (x
, 0), XEXP (x
, 1)))
6106 return simplify_gen_binary (code
, mode
, XEXP (x
, 1), XEXP (x
, 0));
6108 /* Fall through.... */
6111 return simplify_binary_operation (code
, mode
, XEXP (x
, 0), XEXP (x
, 1));
6114 case RTX_BITFIELD_OPS
:
6115 return simplify_ternary_operation (code
, mode
, GET_MODE (XEXP (x
, 0)),
6116 XEXP (x
, 0), XEXP (x
, 1),
6120 case RTX_COMM_COMPARE
:
6121 return simplify_relational_operation (code
, mode
,
6122 ((GET_MODE (XEXP (x
, 0))
6124 ? GET_MODE (XEXP (x
, 0))
6125 : GET_MODE (XEXP (x
, 1))),
6131 return simplify_subreg (mode
, SUBREG_REG (x
),
6132 GET_MODE (SUBREG_REG (x
)),
6139 /* Convert (lo_sum (high FOO) FOO) to FOO. */
6140 if (GET_CODE (XEXP (x
, 0)) == HIGH
6141 && rtx_equal_p (XEXP (XEXP (x
, 0), 0), XEXP (x
, 1)))
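/* Illustrative usage sketch (added; not from the GCC sources; the
   register number is hypothetical): simplify_rtx dispatches on the
   rtx class, so a binary PLUS with a zero constant reaches
   simplify_binary_operation and collapses.  */
#if 0
  rtx sum = gen_rtx_PLUS (SImode, gen_rtx_REG (SImode, 100), const0_rtx);
  rtx res = simplify_rtx (sum);
  /* res is (reg:SI 100); NULL would mean no simplification applied.  */
#endif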